repo_name | path | copies | size | content | license
---|---|---|---|---|---
grlee77/scipy | scipy/integrate/_bvp.py | 16 | 41051 | """Boundary value problem solver."""
from warnings import warn
import numpy as np
from numpy.linalg import pinv
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse.linalg import splu
from scipy.optimize import OptimizeResult
EPS = np.finfo(float).eps
def estimate_fun_jac(fun, x, y, p, f0=None):
"""Estimate derivatives of an ODE system rhs with forward differences.
Returns
-------
df_dy : ndarray, shape (n, n, m)
Derivatives with respect to y. An element (i, j, q) corresponds to
d f_i(x_q, y_q) / d (y_q)_j.
df_dp : ndarray with shape (n, k, m) or None
Derivatives with respect to p. An element (i, j, q) corresponds to
d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned.
"""
n, m = y.shape
if f0 is None:
f0 = fun(x, y, p)
dtype = y.dtype
df_dy = np.empty((n, n, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(y))
for i in range(n):
y_new = y.copy()
y_new[i] += h[i]
hi = y_new[i] - y[i]
f_new = fun(x, y_new, p)
df_dy[:, i, :] = (f_new - f0) / hi
k = p.shape[0]
if k == 0:
df_dp = None
else:
df_dp = np.empty((n, k, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(p))
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
f_new = fun(x, y, p_new)
df_dp[:, i, :] = (f_new - f0) / hi
return df_dy, df_dp
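# A minimal sketch (not part of the original module; the _demo_* name is an
# illustrative addition): it checks the forward-difference Jacobian above
# against the analytic one for the linear rhs f(x, y) = [y2, -y1].
def _demo_estimate_fun_jac():
    x = np.linspace(0, 1, 5)
    y = np.vstack((np.sin(x), np.cos(x)))
    p = np.array([])
    def fun(x, y, p):
        return np.vstack((y[1], -y[0]))
    df_dy, df_dp = estimate_fun_jac(fun, x, y, p)
    # Analytic Jacobian is constant: [[0, 1], [-1, 0]] at every node q.
    expected = np.array([[0.0, 1.0], [-1.0, 0.0]])[:, :, None]
    assert np.allclose(df_dy, expected, atol=1e-6)
    assert df_dp is None  # no unknown parameters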
def estimate_bc_jac(bc, ya, yb, p, bc0=None):
"""Estimate derivatives of boundary conditions with forward differences.
Returns
-------
dbc_dya : ndarray, shape (n + k, n)
Derivatives with respect to ya. An element (i, j) corresponds to
d bc_i / d ya_j.
dbc_dyb : ndarray, shape (n + k, n)
Derivatives with respect to yb. An element (i, j) corresponds to
d bc_i / d yb_j.
dbc_dp : ndarray with shape (n + k, k) or None
Derivatives with respect to p. An element (i, j) corresponds to
d bc_i / d p_j. If `p` is empty, None is returned.
"""
n = ya.shape[0]
k = p.shape[0]
if bc0 is None:
bc0 = bc(ya, yb, p)
dtype = ya.dtype
dbc_dya = np.empty((n, n + k), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(ya))
for i in range(n):
ya_new = ya.copy()
ya_new[i] += h[i]
hi = ya_new[i] - ya[i]
bc_new = bc(ya_new, yb, p)
dbc_dya[i] = (bc_new - bc0) / hi
dbc_dya = dbc_dya.T
h = EPS**0.5 * (1 + np.abs(yb))
dbc_dyb = np.empty((n, n + k), dtype=dtype)
for i in range(n):
yb_new = yb.copy()
yb_new[i] += h[i]
hi = yb_new[i] - yb[i]
bc_new = bc(ya, yb_new, p)
dbc_dyb[i] = (bc_new - bc0) / hi
dbc_dyb = dbc_dyb.T
if k == 0:
dbc_dp = None
else:
h = EPS**0.5 * (1 + np.abs(p))
dbc_dp = np.empty((k, n + k), dtype=dtype)
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
bc_new = bc(ya, yb, p_new)
dbc_dp[i] = (bc_new - bc0) / hi
dbc_dp = dbc_dp.T
return dbc_dya, dbc_dyb, dbc_dp
def compute_jac_indices(n, m, k):
"""Compute indices for the collocation system Jacobian construction.
See `construct_global_jac` for the explanation.
"""
i_col = np.repeat(np.arange((m - 1) * n), n)
j_col = (np.tile(np.arange(n), n * (m - 1)) +
np.repeat(np.arange(m - 1) * n, n**2))
i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n)
j_bc = np.tile(np.arange(n), n + k)
i_p_col = np.repeat(np.arange((m - 1) * n), k)
j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n)
i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k)
j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k)
i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc))
j = np.hstack((j_col, j_col + n,
j_bc, j_bc + (m - 1) * n,
j_p_col, j_p_bc))
return i, j
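# A minimal sketch (the _demo_* helper is an illustrative addition, not part
# of the original module): render the sparsity pattern these indices describe
# for n=2, m=4, k=1; it reproduces the block scheme shown in
# `construct_global_jac` below.
def _demo_jac_sparsity(n=2, m=4, k=1):
    i, j = compute_jac_indices(n, m, k)
    pattern = np.zeros((n * m + k, n * m + k), dtype=int)
    pattern[i, j] = 1  # 1 marks a structurally nonzero entry
    return pattern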
def stacked_matmul(a, b):
"""Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]).
Empirical optimization. Use an outer Python loop and BLAS for large
matrices, otherwise use a single einsum call.
"""
if a.shape[1] > 50:
out = np.empty((a.shape[0], a.shape[1], b.shape[2]))
for i in range(a.shape[0]):
out[i] = np.dot(a[i], b[i])
return out
else:
return np.einsum('...ij,...jk->...ik', a, b)
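# A quick illustrative check (not part of the original module): both branches
# above agree with NumPy's batched matmul for stacked 3-D inputs.
def _demo_stacked_matmul():
    rng = np.random.RandomState(0)
    a = rng.rand(3, 4, 5)
    b = rng.rand(3, 5, 2)
    assert np.allclose(stacked_matmul(a, b), np.matmul(a, b))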
def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp,
df_dp_middle, dbc_dya, dbc_dyb, dbc_dp):
"""Construct the Jacobian of the collocation system.
There are n * m + k functions: m - 1 collocation residuals, each
containing n components, followed by n + k boundary condition residuals.
There are n * m + k variables: m vectors of y, each containing n
components, followed by k values of vector p.
For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
the following sparsity structure:
1 1 2 2 0 0 0 0  5
1 1 2 2 0 0 0 0  5
0 0 1 1 2 2 0 0  5
0 0 1 1 2 2 0 0  5
0 0 0 0 1 1 2 2  5
0 0 0 0 1 1 2 2  5

3 3 0 0 0 0 4 4  6
3 3 0 0 0 0 4 4  6
3 3 0 0 0 0 4 4  6
Zeros denote identically zero values, other values denote different kinds
of blocks in the matrix (see below). The blank row separates the collocation
residuals from the boundary condition residuals, and the wider gap before the
last column separates the y values from the p values.
Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives
of collocation residuals with respect to y.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
k : int
Number of the unknown parameters.
i_jac, j_jac : ndarray
Row and column indices returned by `compute_jac_indices`. They
represent different blocks in the Jacobian matrix in the following
order (see the scheme above):
* 1: m - 1 diagonal n x n blocks for the collocation residuals.
* 2: m - 1 off-diagonal n x n blocks for the collocation residuals.
* 3: (n + k) x n block for the dependency of the boundary
conditions on ya.
* 4: (n + k) x n block for the dependency of the boundary
conditions on yb.
* 5: (m - 1) * n x k block for the dependency of the collocation
residuals on p.
* 6: (n + k) x k block for the dependency of the boundary
conditions on p.
df_dy : ndarray, shape (n, n, m)
Jacobian of f with respect to y computed at the mesh nodes.
df_dy_middle : ndarray, shape (n, n, m - 1)
Jacobian of f with respect to y computed at the middle between the
mesh nodes.
df_dp : ndarray with shape (n, k, m) or None
Jacobian of f with respect to p computed at the mesh nodes.
df_dp_middle : ndarray with shape (n, k, m - 1) or None
Jacobian of f with respect to p computed at the middle between the
mesh nodes.
dbc_dya, dbc_dyb : ndarray, shape (n + k, n)
Jacobian of bc with respect to ya and yb.
dbc_dp : ndarray with shape (n + k, k) or None
Jacobian of bc with respect to p.
Returns
-------
J : csc_matrix, shape (n * m + k, n * m + k)
Jacobian of the collocation system in a sparse form.
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
df_dy = np.transpose(df_dy, (2, 0, 1))
df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))
h = h[:, np.newaxis, np.newaxis]
dtype = df_dy.dtype
# Computing diagonal n x n blocks.
dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_0[:] = -np.identity(n)
dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[:-1])
dPhi_dy_0 -= h**2 / 12 * T
# Computing off-diagonal n x n blocks.
dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_1[:] = np.identity(n)
dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[1:])
dPhi_dy_1 += h**2 / 12 * T
values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
dbc_dyb.ravel()))
if k > 0:
df_dp = np.transpose(df_dp, (2, 0, 1))
df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
df_dp_middle += 0.125 * h * T
dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))
J = coo_matrix((values, (i_jac, j_jac)))
return csc_matrix(J)
def collocation_fun(fun, y, p, x, h):
"""Evaluate collocation residuals.
This function lies at the core of the method. The solution is sought
as a cubic C1 continuous spline with derivatives matching the ODE rhs
at given nodes `x`. Collocation conditions are formed from the equality
of the spline derivatives and rhs of the ODE system in the middle points
between nodes.
Such a method is classified as belonging to the Lobatto IIIA family in the ODE literature.
Refer to [1]_ for the formula and some discussion.
Returns
-------
col_res : ndarray, shape (n, m - 1)
Collocation residuals at the middle points of the mesh intervals.
y_middle : ndarray, shape (n, m - 1)
Values of the cubic spline evaluated at the middle points of the mesh
intervals.
f : ndarray, shape (n, m)
RHS of the ODE system evaluated at the mesh nodes.
f_middle : ndarray, shape (n, m - 1)
RHS of the ODE system evaluated at the middle points of the mesh
intervals (and using `y_middle`).
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
f = fun(x, y, p)
y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) -
0.125 * h * (f[:, 1:] - f[:, :-1]))
f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)
col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] +
4 * f_middle)
return col_res, y_middle, f, f_middle
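# A hedged illustration (the _demo_* helper is not part of the original
# module): for the exact solution of y' = y the collocation residuals defined
# above are O(h**5), i.e. tiny on a modest mesh.
def _demo_collocation_residuals():
    x = np.linspace(0, 1, 11)
    h = np.diff(x)
    y = np.exp(x)[None, :]  # exact solution, shape (n, m) with n = 1
    p = np.array([])
    def fun(x, y, p):
        return y  # rhs of y' = y
    col_res, y_middle, f, f_middle = collocation_fun(fun, y, p, x, h)
    assert np.max(np.abs(col_res)) < 1e-6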
def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
"""Create the function and the Jacobian for the collocation system."""
x_middle = x[:-1] + 0.5 * h
i_jac, j_jac = compute_jac_indices(n, m, k)
def col_fun(y, p):
return collocation_fun(fun, y, p, x, h)
def sys_jac(y, p, y_middle, f, f_middle, bc0):
if fun_jac is None:
df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
df_dy_middle, df_dp_middle = estimate_fun_jac(
fun, x_middle, y_middle, p, f_middle)
else:
df_dy, df_dp = fun_jac(x, y, p)
df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p)
if bc_jac is None:
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1],
p, bc0)
else:
dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy,
df_dy_middle, df_dp, df_dp_middle, dbc_dya,
dbc_dyb, dbc_dp)
return col_fun, sys_jac
def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol, bc_tol):
"""Solve the nonlinear collocation system by a Newton method.
This is a simple Newton method with a backtracking line search. As
advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
is used, where J is the Jacobian matrix at the current iteration and r is
the vector of collocation residuals (values of the system lhs).
The method alternates between full Newton iterations and fixed-Jacobian
iterations: after a full (undamped) step is accepted, the factorized Jacobian
is reused on the next iteration; otherwise it is recomputed.
There are other tricks proposed in [1]_, but they are not used as they
don't seem to improve anything significantly, and even break the
convergence on some test problems I tried.
All important parameters of the algorithm are defined inside the function.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
h : ndarray, shape (m-1,)
Mesh intervals.
col_fun : callable
Function computing collocation residuals.
bc : callable
Function computing boundary condition residuals.
jac : callable
Function computing the Jacobian of the whole system (including
collocation and boundary condition residuals). It is supposed to
return csc_matrix.
y : ndarray, shape (n, m)
Initial guess for the function values at the mesh nodes.
p : ndarray, shape (k,)
Initial guess for the unknown parameters.
B : ndarray with shape (n, n) or None
Matrix to force the S y(a) = 0 condition for problems with a
singular term. If None, the singular term is assumed to be absent.
bvp_tol : float
Tolerance to which we want to solve a BVP.
bc_tol : float
Tolerance to which we want to satisfy the boundary conditions.
Returns
-------
y : ndarray, shape (n, m)
Final iterate for the function values at the mesh nodes.
p : ndarray, shape (k,)
Final iterate for the unknown parameters.
singular : bool
True if the LU decomposition failed because the Jacobian turned out
to be singular.
References
----------
.. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations"
"""
# We know that the solution residuals at the middle points of the mesh
# are connected with the collocation residuals by r_middle = 1.5 * col_res / h.
# As our BVP solver tries to decrease relative residuals below a certain
# tolerance, it seems reasonable to terminate Newton iterations by
# comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold,
# which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite
# the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r
# should be computed as follows:
tol_r = 2/3 * h * 5e-2 * bvp_tol
# Maximum allowed number of Jacobian evaluations and factorizations, in
# other words, the maximum number of full Newton iterations. A small value
# is recommended in the literature.
max_njev = 4
# Maximum number of iterations, considering that some of them can be
# performed with the fixed Jacobian. In theory, such iterations are cheap,
# but it's not that simple in Python.
max_iter = 8
# Minimum relative improvement of the criterion function to accept the
# step (Armijo constant).
sigma = 0.2
# Step size decrease factor for backtracking.
tau = 0.5
# Maximum number of backtracking steps, the minimum step is then
# tau ** n_trial.
n_trial = 4
col_res, y_middle, f, f_middle = col_fun(y, p)
bc_res = bc(y[:, 0], y[:, -1], p)
res = np.hstack((col_res.ravel(order='F'), bc_res))
njev = 0
singular = False
recompute_jac = True
for iteration in range(max_iter):
if recompute_jac:
J = jac(y, p, y_middle, f, f_middle, bc_res)
njev += 1
try:
LU = splu(J)
except RuntimeError:
singular = True
break
step = LU.solve(res)
cost = np.dot(step, step)
y_step = step[:m * n].reshape((n, m), order='F')
p_step = step[m * n:]
alpha = 1
for trial in range(n_trial + 1):
y_new = y - alpha * y_step
if B is not None:
y_new[:, 0] = np.dot(B, y_new[:, 0])
p_new = p - alpha * p_step
col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
res = np.hstack((col_res.ravel(order='F'), bc_res))
step_new = LU.solve(res)
cost_new = np.dot(step_new, step_new)
if cost_new < (1 - 2 * alpha * sigma) * cost:
break
if trial < n_trial:
alpha *= tau
y = y_new
p = p_new
if njev == max_njev:
break
if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
np.all(np.abs(bc_res) < bc_tol)):
break
# If the full step was taken, then we are going to continue with
# the same Jacobian. This is the approach of BVP_SOLVER.
if alpha == 1:
step = step_new
cost = cost_new
recompute_jac = False
else:
recompute_jac = True
return y, p, singular
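# A hedged sketch (illustrative addition, not the solver itself): the same
# backtracking criterion on a tiny dense system F(z) = 0, with sigma, tau and
# n_trial copied from above; the fixed-Jacobian reuse logic is omitted here.
def _demo_damped_newton():
    sigma, tau, n_trial = 0.2, 0.5, 4
    def F(z):
        return np.array([z[0] ** 2 + z[1] ** 2 - 4.0, z[0] - z[1]])
    def J(z):
        return np.array([[2.0 * z[0], 2.0 * z[1]], [1.0, -1.0]])
    z = np.array([3.0, 2.0])
    for _ in range(20):
        step = np.linalg.solve(J(z), F(z))
        cost = np.dot(step, step)
        alpha = 1.0
        for trial in range(n_trial + 1):
            z_new = z - alpha * step
            # Affine-invariant criterion: ||J^-1 F(z_new)||^2 must drop enough.
            step_new = np.linalg.solve(J(z), F(z_new))
            if np.dot(step_new, step_new) < (1 - 2 * alpha * sigma) * cost:
                break
            if trial < n_trial:
                alpha *= tau
        z = z_new
        if np.max(np.abs(F(z))) < 1e-12:
            break
    return z  # converges to [sqrt(2), sqrt(2)]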
def print_iteration_header():
print("{:^15}{:^15}{:^15}{:^15}{:^15}".format(
"Iteration", "Max residual", "Max BC residual", "Total nodes",
"Nodes added"))
def print_iteration_progress(iteration, residual, bc_residual, total_nodes,
nodes_added):
print("{:^15}{:^15.2e}{:^15.2e}{:^15}{:^15}".format(
iteration, residual, bc_residual, total_nodes, nodes_added))
class BVPResult(OptimizeResult):
pass
TERMINATION_MESSAGES = {
0: "The algorithm converged to the desired accuracy.",
1: "The maximum number of mesh nodes is exceeded.",
2: "A singular Jacobian encountered when solving the collocation system.",
3: "The solver was unable to satisfy boundary conditions tolerance on iteration 10."
}
def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
"""Estimate rms values of collocation residuals using Lobatto quadrature.
The residuals are defined as the difference between the derivatives of
our solution and the rhs of the ODE system. We use relative residuals, i.e.,
normalized by 1 + np.abs(f). RMS values are computed as the square root of
the normalized integrals of the squared relative residuals over each interval.
Integrals are estimated using 5-point Lobatto quadrature [1]_; we exploit the
fact that residuals at the mesh nodes are identically zero.
In [2]_ they don't normalize integrals by interval lengths, which gives
a higher rate of convergence of the residuals by a factor of h**0.5.
I chose to do such a normalization for ease of interpreting the return
values as RMS estimates.
Returns
-------
rms_res : ndarray, shape (m - 1,)
Estimated rms values of the relative residuals over each interval.
References
----------
.. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
.. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
x_middle = x[:-1] + 0.5 * h
s = 0.5 * h * (3/7)**0.5
x1 = x_middle + s
x2 = x_middle - s
y1 = sol(x1)
y2 = sol(x2)
y1_prime = sol(x1, 1)
y2_prime = sol(x2, 1)
f1 = fun(x1, y1, p)
f2 = fun(x2, y2, p)
r1 = y1_prime - f1
r2 = y2_prime - f2
r_middle /= 1 + np.abs(f_middle)
r1 /= 1 + np.abs(f1)
r2 /= 1 + np.abs(f2)
r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0)
r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0)
r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)
return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5
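# A hedged side note (the _demo_* helper is an illustrative addition): the
# 0.5 * (32/45 * r_middle + 49/90 * (r1 + r2)) factor above is 5-point Lobatto
# quadrature on [-1, 1] with nodes 0, +/-sqrt(3/7), +/-1 and weights 32/45,
# 49/90, 1/10; the endpoint terms drop out because residuals vanish at the
# mesh nodes, and dividing by the interval length gives the leading 0.5.
def _demo_lobatto_rule():
    nodes = np.array([-1.0, -np.sqrt(3.0 / 7.0), 0.0, np.sqrt(3.0 / 7.0), 1.0])
    weights = np.array([0.1, 49.0 / 90.0, 32.0 / 45.0, 49.0 / 90.0, 0.1])
    for deg in range(8):  # the rule is exact up to degree 7
        exact = (1 - (-1.0) ** (deg + 1)) / (deg + 1)  # integral of x**deg over [-1, 1]
        assert np.isclose(np.dot(weights, nodes ** deg), exact)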
def create_spline(y, yp, x, h):
"""Create a cubic spline given values and derivatives.
Formulas for the coefficients are taken from interpolate.CubicSpline.
Returns
-------
sol : PPoly
Constructed spline as a PPoly instance.
"""
from scipy.interpolate import PPoly
n, m = y.shape
c = np.empty((4, n, m - 1), dtype=y.dtype)
slope = (y[:, 1:] - y[:, :-1]) / h
t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h
c[0] = t / h
c[1] = (slope - yp[:, :-1]) / h - t
c[2] = yp[:, :-1]
c[3] = y[:, :-1]
c = np.rollaxis(c, 1)
return PPoly(c, x, extrapolate=True, axis=1)
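# A minimal sketch (illustrative addition): the Hermite spline built here from
# exact values and derivatives of sin(x) reproduces both at the mesh nodes.
def _demo_create_spline():
    x = np.linspace(0, np.pi, 6)
    h = np.diff(x)
    y = np.sin(x)[None, :]   # shape (n, m) with n = 1
    yp = np.cos(x)[None, :]  # derivatives at the nodes
    sol = create_spline(y, yp, x, h)
    assert np.allclose(sol(x), y)
    assert np.allclose(sol(x, 1), yp)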
def modify_mesh(x, insert_1, insert_2):
"""Insert nodes into a mesh.
Node removal logic is not established; its impact on the solver is
presumably negligible. So only insertion is done in this function.
Parameters
----------
x : ndarray, shape (m,)
Mesh nodes.
insert_1 : ndarray
Intervals into each of which 1 new node is inserted in the middle.
insert_2 : ndarray
Intervals into each of which 2 new nodes are inserted, dividing the
interval into 3 equal parts.
Returns
-------
x_new : ndarray
New mesh nodes.
Notes
-----
`insert_1` and `insert_2` should not have common values.
"""
# Because the np.insert implementation apparently varies with the NumPy
# version, we use a simple and reliable approach based on sorting.
return np.sort(np.hstack((
x,
0.5 * (x[insert_1] + x[insert_1 + 1]),
(2 * x[insert_2] + x[insert_2 + 1]) / 3,
(x[insert_2] + 2 * x[insert_2 + 1]) / 3
)))
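# A quick usage illustration (the _demo_* helper is not part of the original
# module): insert one node into the first interval and two nodes into the
# third one.
def _demo_modify_mesh():
    x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
    x_new = modify_mesh(x, np.array([0]), np.array([2]))
    assert np.allclose(x_new, [0.0, 0.5, 1.0, 2.0, 7.0 / 3.0, 8.0 / 3.0, 3.0, 4.0])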
def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
"""Wrap functions for unified usage in the solver."""
if fun_jac is None:
fun_jac_wrapped = None
if bc_jac is None:
bc_jac_wrapped = None
if k == 0:
def fun_p(x, y, _):
return np.asarray(fun(x, y), dtype)
def bc_wrapped(ya, yb, _):
return np.asarray(bc(ya, yb), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, _):
return np.asarray(fun_jac(x, y), dtype), None
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, _):
dbc_dya, dbc_dyb = bc_jac(ya, yb)
return (np.asarray(dbc_dya, dtype),
np.asarray(dbc_dyb, dtype), None)
else:
def fun_p(x, y, p):
return np.asarray(fun(x, y, p), dtype)
def bc_wrapped(x, y, p):
return np.asarray(bc(x, y, p), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, p):
df_dy, df_dp = fun_jac(x, y, p)
return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, p):
dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
np.asarray(dbc_dp, dtype))
if S is None:
fun_wrapped = fun_p
else:
def fun_wrapped(x, y, p):
f = fun_p(x, y, p)
if x[0] == a:
f[:, 0] = np.dot(D, f[:, 0])
f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
else:
f += np.dot(S, y) / (x - a)
return f
if fun_jac is not None:
if S is None:
fun_jac_wrapped = fun_jac_p
else:
Sr = S[:, :, np.newaxis]
def fun_jac_wrapped(x, y, p):
df_dy, df_dp = fun_jac_p(x, y, p)
if x[0] == a:
df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
df_dy[:, :, 1:] += Sr / (x[1:] - a)
else:
df_dy += Sr / (x - a)
return df_dy, df_dp
return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
tol=1e-3, max_nodes=1000, verbose=0, bc_tol=None):
"""Solve a boundary value problem for a system of ODEs.
This function numerically solves a first order system of ODEs subject to
two-point boundary conditions::
dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
bc(y(a), y(b), p) = 0
Here x is a 1-D independent variable, y(x) is an N-D
vector-valued function and p is a k-D vector of unknown
parameters which is to be found along with y(x). For the problem to be
determined, there must be n + k boundary conditions, i.e., bc must be an
(n + k)-D function.
The last singular term on the right-hand side of the system is optional.
It is defined by an n-by-n matrix S, such that the solution must satisfy
S y(a) = 0. This condition will be forced during iterations, so it must not
contradict the boundary conditions. See [2]_ for an explanation of how this
term is handled when solving BVPs numerically.
Problems in a complex domain can be solved as well. In this case, y and p
are considered to be complex, and f and bc are assumed to be complex-valued
functions, but x stays real. Note that f and bc must be complex
differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you
should rewrite your problem for real and imaginary parts separately. To
solve a problem in a complex domain, pass an initial guess for y with a
complex data type (see below).
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(x, y)``,
or ``fun(x, y, p)`` if parameters are present. All arguments are
ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
return value must be an array with shape (n, m) and with the same
layout as ``y``.
bc : callable
Function evaluating residuals of the boundary conditions. The calling
signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
and ``p`` with shape (k,). The return value must be an array with
shape (n + k,).
x : array_like, shape (m,)
Initial mesh. Must be a strictly increasing sequence of real numbers
with ``x[0]=a`` and ``x[-1]=b``.
y : array_like, shape (n, m)
Initial guess for the function values at the mesh nodes, ith column
corresponds to ``x[i]``. For problems in a complex domain pass `y`
with a complex data type (even if the initial guess is purely real).
p : array_like with shape (k,) or None, optional
Initial guess for the unknown parameters. If None (default), it is
assumed that the problem doesn't depend on any parameters.
S : array_like with shape (n, n) or None
Matrix defining the singular term. If None (default), the problem is
solved without the singular term.
fun_jac : callable or None, optional
Function computing derivatives of f with respect to y and p. The
calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
parameters are present. The return must contain 1 or 2 elements in the
following order:
* df_dy : array_like with shape (n, n, m), where an element
(i, j, q) equals to d f_i(x_q, y_q, p) / d (y_q)_j.
* df_dp : array_like with shape (n, k, m), where an element
(i, j, q) equals to d f_i(x_q, y_q, p) / d p_j.
Here q numbers nodes at which x and y are defined, whereas i and j
number vector components. If the problem is solved without unknown
parameters, df_dp should not be returned.
If `fun_jac` is None (default), the derivatives will be estimated
by the forward finite differences.
bc_jac : callable or None, optional
Function computing derivatives of bc with respect to ya, yb, and p.
The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
if parameters are present. The return must contain 2 or 3 elements in
the following order:
* dbc_dya : array_like with shape (n, n), where an element (i, j)
equals to d bc_i(ya, yb, p) / d ya_j.
* dbc_dyb : array_like with shape (n, n), where an element (i, j)
equals to d bc_i(ya, yb, p) / d yb_j.
* dbc_dp : array_like with shape (n, k), where an element (i, j)
equals to d bc_i(ya, yb, p) / d p_j.
If the problem is solved without unknown parameters, dbc_dp should not
be returned.
If `bc_jac` is None (default), the derivatives will be estimated by
the forward finite differences.
tol : float, optional
Desired tolerance of the solution. If we define ``r = y' - f(x, y)``,
where y is the found solution, then the solver tries to achieve on each
mesh interval ``norm(r / (1 + abs(f))) < tol``, where ``norm`` is
estimated in a root mean squared sense (using a numerical quadrature
formula). Default is 1e-3.
max_nodes : int, optional
Maximum allowed number of the mesh nodes. If exceeded, the algorithm
terminates. Default is 1000.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations.
bc_tol : float, optional
Desired absolute tolerance for the boundary condition residuals: `bc`
value should satisfy ``abs(bc) < bc_tol`` component-wise.
Equal to `tol` by default. Up to 10 iterations are allowed to achieve this
tolerance.
Returns
-------
Bunch object with the following fields defined:
sol : PPoly
Found solution for y as `scipy.interpolate.PPoly` instance, a C1
continuous cubic spline.
p : ndarray or None, shape (k,)
Found parameters. None, if the parameters were not present in the
problem.
x : ndarray, shape (m,)
Nodes of the final mesh.
y : ndarray, shape (n, m)
Solution values at the mesh nodes.
yp : ndarray, shape (n, m)
Solution derivatives at the mesh nodes.
rms_residuals : ndarray, shape (m - 1,)
RMS values of the relative residuals over each mesh interval (see the
description of `tol` parameter).
niter : int
Number of completed iterations.
status : int
Reason for algorithm termination:
* 0: The algorithm converged to the desired accuracy.
* 1: The maximum number of mesh nodes is exceeded.
* 2: A singular Jacobian encountered when solving the collocation
system.
message : string
Verbal description of the termination reason.
success : bool
True if the algorithm converged to the desired accuracy (``status=0``).
Notes
-----
This function implements a 4th order collocation algorithm with the
control of residuals similar to [1]_. A collocation system is solved
by a damped Newton method with an affine-invariant criterion function as
described in [3]_.
Note that in [1]_ integral residuals are defined without normalization
by interval lengths. So their definition differs from the one used here by
a factor of h**0.5 (h is an interval length).
.. versionadded:: 0.18.0
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
.. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
Solver".
.. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations".
.. [4] `Cauchy-Riemann equations
<https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
Wikipedia.
Examples
--------
In the first example, we solve Bratu's problem::
y'' + k * exp(y) = 0
y(0) = y(1) = 0
for k = 1.
We rewrite the equation as a first-order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -exp(y1)
>>> def fun(x, y):
... return np.vstack((y[1], -np.exp(y[0])))
Implement evaluation of the boundary condition residuals:
>>> def bc(ya, yb):
... return np.array([ya[0], yb[0]])
Define the initial mesh with 5 nodes:
>>> x = np.linspace(0, 1, 5)
This problem is known to have two solutions. To obtain both of them, we
use two different initial guesses for y. We denote them by subscripts
a and b.
>>> y_a = np.zeros((2, x.size))
>>> y_b = np.zeros((2, x.size))
>>> y_b[0] = 3
Now we are ready to run the solver.
>>> from scipy.integrate import solve_bvp
>>> res_a = solve_bvp(fun, bc, x, y_a)
>>> res_b = solve_bvp(fun, bc, x, y_b)
Let's plot the two found solutions. We take advantage of having the
solution in spline form to produce a smooth plot.
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot_a = res_a.sol(x_plot)[0]
>>> y_plot_b = res_b.sol(x_plot)[0]
>>> import matplotlib.pyplot as plt
>>> plt.plot(x_plot, y_plot_a, label='y_a')
>>> plt.plot(x_plot, y_plot_b, label='y_b')
>>> plt.legend()
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
We see that the two solutions have similar shape, but differ in scale
significantly.
In the second example, we solve a simple Sturm-Liouville problem::
y'' + k**2 * y = 0
y(0) = y(1) = 0
It is known that a non-trivial solution y = A * sin(k * x) is possible for
k = pi * n, where n is an integer. To establish the normalization constant
A = 1 we add a boundary condition::
y'(0) = k
Again, we rewrite our equation as a first-order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -k**2 * y1
>>> def fun(x, y, p):
... k = p[0]
... return np.vstack((y[1], -k**2 * y[0]))
Note that parameters p are passed as a vector (with one element in our
case).
Implement the boundary conditions:
>>> def bc(ya, yb, p):
... k = p[0]
... return np.array([ya[0], yb[0], ya[1] - k])
Set up the initial mesh and guess for y. We aim to find the solution for
k = 2 * pi; to achieve that, we set the values of y to approximately follow
sin(2 * pi * x):
>>> x = np.linspace(0, 1, 5)
>>> y = np.zeros((2, x.size))
>>> y[0, 1] = 1
>>> y[0, 3] = -1
Run the solver with 6 as an initial guess for k.
>>> sol = solve_bvp(fun, bc, x, y, p=[6])
We see that the found k is approximately correct:
>>> sol.p[0]
6.28329460046
And, finally, plot the solution to see the anticipated sinusoid:
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot = sol.sol(x_plot)[0]
>>> plt.plot(x_plot, y_plot)
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
"""
x = np.asarray(x, dtype=float)
if x.ndim != 1:
raise ValueError("`x` must be 1 dimensional.")
h = np.diff(x)
if np.any(h <= 0):
raise ValueError("`x` must be strictly increasing.")
a = x[0]
y = np.asarray(y)
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
if y.ndim != 2:
raise ValueError("`y` must be 2 dimensional.")
if y.shape[1] != x.shape[0]:
raise ValueError("`y` is expected to have {} columns, but actually "
"has {}.".format(x.shape[0], y.shape[1]))
if p is None:
p = np.array([])
else:
p = np.asarray(p, dtype=dtype)
if p.ndim != 1:
raise ValueError("`p` must be 1 dimensional.")
if tol < 100 * EPS:
warn("`tol` is too low, setting to {:.2e}".format(100 * EPS))
tol = 100 * EPS
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
n = y.shape[0]
k = p.shape[0]
if S is not None:
S = np.asarray(S, dtype=dtype)
if S.shape != (n, n):
raise ValueError("`S` is expected to have shape {}, "
"but actually has {}".format((n, n), S.shape))
# Compute I - S^+ S to impose necessary boundary conditions.
B = np.identity(n) - np.dot(pinv(S), S)
y[:, 0] = np.dot(B, y[:, 0])
# Compute (I - S)^+ to correct derivatives at x=a.
D = pinv(np.identity(n) - S)
else:
B = None
D = None
if bc_tol is None:
bc_tol = tol
# Maximum number of iterations
max_iteration = 10
fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions(
fun, bc, fun_jac, bc_jac, k, a, S, D, dtype)
f = fun_wrapped(x, y, p)
if f.shape != y.shape:
raise ValueError("`fun` return is expected to have shape {}, "
"but actually has {}.".format(y.shape, f.shape))
bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
if bc_res.shape != (n + k,):
raise ValueError("`bc` return is expected to have shape {}, "
"but actually has {}.".format((n + k,), bc_res.shape))
status = 0
iteration = 0
if verbose == 2:
print_iteration_header()
while True:
m = x.shape[0]
col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped,
fun_jac_wrapped, bc_jac_wrapped, x, h)
y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys,
y, p, B, tol, bc_tol)
iteration += 1
col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y,
p, x, h)
bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
max_bc_res = np.max(abs(bc_res))
# This relation is not trivial, but can be verified.
r_middle = 1.5 * col_res / h
sol = create_spline(y, f, x, h)
rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p,
r_middle, f_middle)
max_rms_res = np.max(rms_res)
if singular:
status = 2
break
insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol))
insert_2, = np.nonzero(rms_res >= 100 * tol)
nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0]
if m + nodes_added > max_nodes:
status = 1
if verbose == 2:
nodes_added = "({})".format(nodes_added)
print_iteration_progress(iteration, max_rms_res, max_bc_res,
m, nodes_added)
break
if verbose == 2:
print_iteration_progress(iteration, max_rms_res, max_bc_res, m,
nodes_added)
if nodes_added > 0:
x = modify_mesh(x, insert_1, insert_2)
h = np.diff(x)
y = sol(x)
elif max_bc_res <= bc_tol:
status = 0
break
elif iteration >= max_iteration:
status = 3
break
if verbose > 0:
if status == 0:
print("Solved in {} iterations, number of nodes {}. \n"
"Maximum relative residual: {:.2e} \n"
"Maximum boundary residual: {:.2e}"
.format(iteration, x.shape[0], max_rms_res, max_bc_res))
elif status == 1:
print("Number of nodes is exceeded after iteration {}. \n"
"Maximum relative residual: {:.2e} \n"
"Maximum boundary residual: {:.2e}"
.format(iteration, max_rms_res, max_bc_res))
elif status == 2:
print("Singular Jacobian encountered when solving the collocation "
"system on iteration {}. \n"
"Maximum relative residual: {:.2e} \n"
"Maximum boundary residual: {:.2e}"
.format(iteration, max_rms_res, max_bc_res))
elif status == 3:
print("The solver was unable to satisfy boundary conditions "
"tolerance on iteration {}. \n"
"Maximum relative residual: {:.2e} \n"
"Maximum boundary residual: {:.2e}"
.format(iteration, max_rms_res, max_bc_res))
if p.size == 0:
p = None
return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res,
niter=iteration, status=status,
message=TERMINATION_MESSAGES[status], success=status == 0)
| bsd-3-clause |
saullocastro/pyNastran | pyNastran/op2/result_objects/scalar_table_object.py | 1 | 36453 | from __future__ import print_function, unicode_literals
from struct import Struct, pack
from six.moves import zip, range
import numpy as np
from numpy import zeros, float32, searchsorted, unique, where
from numpy import allclose, asarray, vstack, array_equal
from pyNastran.op2.result_objects.op2_objects import ScalarObject
from pyNastran.op2.result_objects.table_object import append_sort1_sort2
from pyNastran.f06.f06_formatting import write_floats_13e, write_float_12e
try:
import pandas as pd
except ImportError:
pass
class ScalarTableArray(ScalarObject): # displacement style table
def __init__(self, data_code, is_sort1, isubcase, dt):
self.nonlinear_factor = None
self.table_name = None
self.approach_code = None
self.analysis_code = None
ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True) # no double inheritance
self.is_sort1()
self.is_sort2()
#self.dt = dt
#self.eType = {}
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
self.ntotal = 0
self._nnodes = 0 # result specific
def __eq__(self, table):
self._eq_header(table)
assert self.is_sort1() == table.is_sort1()
if not np.array_equal(self.node_gridtype, table.node_gridtype):
assert self.node_gridtype.shape == table.node_gridtype.shape, 'shape=%s table.shape=%s' % (self.node_gridtype.shape, table.node_gridtype.shape)
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
for (nid, grid_type), (nid2, grid_type2) in zip(self.node_gridtype, table.node_gridtype):
msg += '(%s, %s) (%s, %s)\n' % (nid, grid_type, nid2, grid_type2)
print(msg)
raise ValueError(msg)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1():
for itime in range(ntimes):
for inid, nid_gridtype, in enumerate(self.node_gridtype):
(nid, grid_type) = nid_gridtype
t1 = self.data[itime, inid, 0]
t2 = table.data[itime, inid, 0]
tx1 = t1[0]
tx2 = t2[0]
if not allclose(t1, t2):
#if not np.array_equal(t1, t2):
msg += '(%s, %s)\n (%s, %s)\n' % (
nid, grid_type, tx1, tx2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2())
if i > 0:
print(msg)
raise ValueError(msg)
return True
def combine(self, result, is_sort1=True):
#print("combine; result=%s" % result)
assert self.is_sort1() != result.is_sort1()
assert self.nonlinear_factor is not None
assert result.nonlinear_factor is not None
# self.ntimes += result.ntimes
self.ntotal += result.data.shape[0]
self.data = append_sort1_sort2(self.data, result.data)
#print(self._times)
#print(result._times)
# self._times = hstack([self._times, result._times])
self.node_gridtype = vstack([self.node_gridtype, result.node_gridtype])
#print('%s' % ''.join(self.get_stats()))
def _get_msgs(self, is_mag_phase):
raise NotImplementedError()
def data_type(self):
raise NotImplementedError()
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
#ngrids = len(self.gridTypes)
if short:
return self._get_stats_short()
msg = []
ntimesi, ntotal = self.data.shape[:2]
ntimes = len(self._times)
nnodes = self.node_gridtype.shape[0]
nmajor = self.ntimes
nminor = self.ntotal
if self.is_sort1():
assert nmajor == ntimes, 'ntimes=%s expected=%s' % (nmajor, ntimes)
assert nminor == ntotal, 'ntotal=%s expected=%s' % (nminor, nnodes)
else:
assert nmajor == nnodes, 'nnodes=%s expected=%s' % (nmajor, nnodes)
assert nminor == ntotal, 'ntotal=%s expected=%s' % (nminor, ntimes)
msg.append(' isubcase = %s\n' % self.isubcase)
if self.nonlinear_factor is not None: # transient
msg.append(' type=%s ntimes=%s nnodes=%s\n'
% (self.__class__.__name__, ntimes, nnodes))
else:
msg.append(' type=%s nnodes=%s\n'
% (self.__class__.__name__, nnodes))
headers = ', '.join(self._get_headers())
#msg.append(' data: [%s] shape=%s dtype=%s\n'
#% (headers, [int(i) for i in self.data.shape], self.data.dtype))
msg.append(' data: [%s] shape=%s dtype=%s\n'
% (headers,
[int(i) for i in self.data.shape], self.data.dtype))
msg.append(' gridTypes\n ')
msg += self.get_data_code()
return msg
@property
def headers(self):
return ['t1']
def _get_headers(self):
return self.headers
def get_headers(self):
return self._get_headers()
def _reset_indices(self):
self.itotal = 0
def build(self):
#print('_nnodes=%s ntimes=%s sort1?=%s ntotal=%s -> _nnodes=%s' % (self._nnodes, self.ntimes, self.is_sort1(),
#self.ntotal, self._nnodes // self.ntimes))
if self.is_built:
#print("resetting...")
#self.itotal = 0
return
self._nnodes //= self.ntimes
self.itime = 0
self.itotal = 0
self.is_built = True
if self.is_sort1():
ntimes = self.ntimes
nnodes = self.ntotal
nx = ntimes
ny = self.ntotal
#print("ntimes=%s nnodes=%s" % (ntimes, nnodes))
if self.is_sort2():
ntotal = self.ntotal
nnodes = self.ntimes
ntimes = self.ntotal
nx = nnodes
ny = ntimes
#print("ntotal=%s nnodes=%s ntimes=%s" % (ntotal, nnodes, ntimes))
self._times = zeros(ntimes, dtype=self._times_dtype)
#self.types = array(self.nelements, dtype='|S1')
self.node_gridtype = zeros((nnodes, 2), dtype='int32')
#[t1]
self.data = zeros((nx, ny, 1), self.data_type())
def build_dataframe(self):
headers = self.get_headers()
node_gridtype = [self.node_gridtype[:, 0], self.gridtype_str]
ugridtype_str = unique(self.gridtype_str)
if self.nonlinear_factor is not None:
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = pd.Panel(self.data, items=column_values, major_axis=node_gridtype, minor_axis=headers).to_frame()
self.data_frame.columns.names = column_names
self.data_frame.index.names = ['NodeID', 'Type', 'Item']
letter_dims = [
('G', 1),
('E', 1),
('S', 1),
('H', 1),
('L', 1),
]
cat_keys = []
for (letter, dim) in letter_dims:
if letter not in ugridtype_str:
continue
eig = self.data_frame.xs(letter, level=1)
cat_keys.append(eig)
self.data_frame = pd.concat(cat_keys)
else:
#self.data_frame = pd.Panel(self.data[0, :, :], major_axis=node_gridtype, minor_axis=headers).to_frame()
#self.data_frame.columns.names = ['Static']
#self.data_frame.index.names = ['NodeID', 'Type', 'Item']
df1 = pd.DataFrame(self.node_gridtype[:, 0])
df1.columns = ['NodeID']
df2 = pd.DataFrame(self.gridtype_str)
df2.columns = ['Type']
df3 = pd.DataFrame(self.data[0])
df3.columns = headers
self.data_frame = df1.join([df2, df3])
#print(self.data_frame)
def finalize(self):
gridtypes = self.node_gridtype[:, 1]
nnodes = len(gridtypes)
self.gridtype_str = np.chararray((nnodes), unicode=True)
ugridtypes = unique(gridtypes)
for ugridtype in ugridtypes:
i = where(gridtypes == ugridtype)
self.gridtype_str[i] = self.recast_gridtype_as_string(ugridtype)
def _write_xlsx(self, sheet, is_mag_phase=False):
from xlwings import Range, Chart
# xlwings 0.3.5 doesn't work, 0.5 does
#from numpy import astype
# print('xlsx_filename = %r' % xlsx_filename)
#f = None
#wb = Workbook() # Creates a connection with a new workbook
#wb.save(xlsx_filename)
#Range('A1').value = 'Foo 1'
#print(Range('A1').value)
#'Foo 1'
# Range('A1').value = xlsx_filename
name = str(self.__class__.__name__)
Range(sheet, 'A1').value = [name]
Range(sheet, 'A2').value = ['Node', 'GridType'] + self.headers
Range(sheet, 'A3').value = self.node_gridtype
if self.is_real():
Range(sheet, 'C3').value = self.data[0, :, :]
else:
pass
#from numpy.core.defchararray import add as sadd
#n, m = self.data[0, :, :].shape
#nm = n * m
#scomplex = array(['=complex('] * nm, dtype='|S10').reshape(n, m)
#scomma = array([','] * nm, dtype='|S40').reshape(n, m)
#sparen = array([')'] * nm, dtype='|S40').reshape(n, m)
#data = sadd(
#sadd(scomplex, self.data.real.astype('|S10')), # complex(5.
#sadd(
#scomma, # ,
#sadd(self.data.imag.astype('|U10'), sparen), # 3j)
#)
#)
#data = sadd(
#scomplex,
#self.data.real.astype('|S10'),
#scomma,
#self.data.imag.astype('|S10'),
#sparen)
#print(self.data.real)
#Range(sheet, 'C3', atleast_2d=True).table.value = self.data.real
#Range(sheet, 'C3').value = self.data.real
#Range('C4').value = self.data[0, :, 0]
#Range('D4').value = self.data[0, :, 1:]
#print(Range('A1').table.value) # or: Range('A1:C2').value
#[['Foo 1', 'Foo 2', 'Foo 3'], [10.0, 20.0, 30.0]]
#print(Sheet(1).name)
#Sheet(isheet).name = 'displacements'
#'Sheet1'
#nrows = self.data.shape[1]
#end_row = '%s' % (4 + nrows)
#t1 = self.data[0, :, 0]
#chart = Chart.add(source_data=Range('C4').value)
#wb.save()
# wb.save()
def add(self, node_id, grid_type, v1):
self.add_sort1(None, node_id, grid_type, v1)
def add_sort1(self, dt, node_id, grid_type, v1):
# itotal - the node number
# itime - the time/frequency step
# the times/freqs
self._times[self.itime] = dt
self.node_gridtype[self.itotal, :] = [node_id, grid_type]
self.data[self.itime, self.itotal, 0] = v1
self.itotal += 1
def add_sort2(self, dt, node_id, grid_type, v1):
self._times[self.itotal] = dt
if 1: # this is needed for SORT1 tables
inode = self.itime
self.node_gridtype[self.itime, :] = [node_id, grid_type]
self.data[self.itime, self.itotal, 0] = v1
# itotal - the node number
# itime - the time/frequency step
else:
self.node_gridtype[self.itime, :] = [node_id, grid_type]
self.data[self.itotal, self.itime, 0] = v1
# itotal - the time/frequency step
# itime - the node number
self.itotal += 1
#self.itime += 1
#def two_dee_string_add(string_lists):
#string0 = string_lists[0]
#n, m = string0.shape
#s = []
#for string_list in string_lists:
#for string in string_list:
#pass
#return sumned
class RealScalarTableArray(ScalarTableArray): # temperature style table
def __init__(self, data_code, is_sort1, isubcase, dt):
ScalarTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
def is_real(self):
return True
def is_complex(self):
return False
def data_type(self):
return 'float32'
def _write_table_3(self, f, fascii, itable=-3, itime=0):
import inspect
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
fascii.write('%s.write_table_3: %s\n' % (self.__class__.__name__, call_frame[1][3]))
f.write(pack('12i', *[4, itable, 4,
4, 1, 4,
4, 0, 4,
4, 146, 4,
]))
approach_code = self.approach_code
table_code = self.table_code
isubcase = self.isubcase
random_code = self.random_code
format_code = 1
num_wide = self.num_wide
acoustic_flag = 0
thermal = 0
title = b'%-128s' % bytes(self.title)
subtitle = b'%-128s' % bytes(self.subtitle)
label = b'%-128s' % bytes(self.label)
ftable3 = b'50i 128s 128s 128s'
oCode = 0
if self.analysis_code == 1:
lsdvmn = self.lsdvmn
else:
raise NotImplementedError(self.analysis_code)
table3 = [
approach_code, table_code, 0, isubcase, lsdvmn,
0, 0, random_code, format_code, num_wide,
oCode, acoustic_flag, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, thermal, thermal, 0,
title, subtitle, label,
]
n = 0
for v in table3:
if isinstance(v, (int, float)):
n += 4
else:
n += len(v)
assert n == 584, n
data = [584] + table3 + [584]
fmt = 'i' + ftable3 + 'i'
#print(fmt)
#f.write(pack(fascii, '%s header 3c' % self.table_name, fmt, data))
fascii.write('%s header 3c = %s\n' % (self.table_name, data))
f.write(pack(fmt, *data))
def write_op2(self, f, fascii, itable, date, is_mag_phase=False, endian='>'):
import inspect
assert self.table_name in ['OUGV1', 'OQMG1', 'OQG1'], self.table_name
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
fascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
#print('data_code =', self.data_code)
if itable == -1:
self._write_table_header(f, fascii, date)
itable = -3
if isinstance(self.nonlinear_factor, float):
op2_format = endian + b'%sif' % (7 * self.ntimes)
raise NotImplementedError()
else:
op2_format = endian + b'2i6f' * self.ntimes
s = Struct(op2_format)
node = self.node_gridtype[:, 0]
gridtype = self.node_gridtype[:, 1]
#format_table4_1 = Struct(b(self._endian + '15i'))
#format_table4_2 = Struct(b(self._endian + '3i'))
# table 4 info
#ntimes = self.data.shape[0]
nnodes = self.data.shape[1]
nnodes_device = self.node_gridtype[:, 0] * 10 + self.device_code
#(2+6) => (node_id, gridtypei, t1i, t2i, t3i, r1i, r2i, r3i)
ntotal = self.ntimes * nnodes * (2 + 6)
#print('shape = %s' % str(self.data.shape))
assert nnodes > 1, nnodes
assert ntotal > 1, ntotal
device_code = self.device_code
fascii.write(' ntimes = %s\n' % self.ntimes)
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
for itime in range(self.ntimes):
self._write_table_3(f, fascii, itable, itime)
# record 4
header = [4, -4, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4*ntotal]
f.write(pack(b'%ii' % len(header), *header))
fascii.write('r4 [4, 0, 4]\n')
fascii.write('r4 [4, %s, 4]\n' % (itable-1))
fascii.write('r4 [4, %i, 4]\n' % (4*ntotal))
t1 = self.data[itime, :, 0]
for node_id, gridtypei, t1i in zip(nnodes_device, gridtype, t1):
data = [node_id, gridtypei, t1i, 0., 0., 0., 0., 0.]
fascii.write(' nid, grid_type, dx, dy, dz, rx, ry, rz = %s\n' % data)
f.write(s.pack(*data))
itable -= 2
header = [4 * ntotal,]
f.write(pack(b'i', *header))
fascii.write('footer = %s' % header)
header = [
4, itable, 4,
4, 1, 4,
4, 0, 4,
]
f.write(pack(b'%ii' % len(header), *header))
return itable
#def spike():
#import xlwings as xw
#wb = xw.Workbook() # Creates a connection with a new workbook
#xw.Range('A1').value = 'Foo 1'
#xw.Range('A1').value
#'Foo 1'
#xw.Range('A1').value = [['Foo 1', 'Foo 2', 'Foo 3'], [10.0, 20.0, 30.0]]
#xw.Range('A1').table.value # or: Range('A1:C2').value
#[['Foo 1', 'Foo 2', 'Foo 3'], [10.0, 20.0, 30.0]]
#xw.Sheet(1).name
#'Sheet1'
#chart = xw.Chart.add(source_data=xw.Range('A1').table)
def _write_f06_block(self, words, header, page_stamp, page_num, f, write_words,
is_mag_phase=False, is_sort1=True):
if write_words:
words += [' \n', ' POINT ID. TYPE T1 T2 T3 R1 R2 R3\n']
#words += self.getTableMarker()
f.write(''.join(header + words))
node = self.node_gridtype[:, 0]
gridtype = self.node_gridtype[:, 1]
t1 = self.data[0, :, 0]
for node_id, gridtypei, t1i in zip(node, gridtype, t1):
sgridtype = self.recast_gridtype_as_string(gridtypei)
vals = [t1i]
vals2 = write_floats_13e(vals)
dx = vals2[0]
f.write('%14i %6s %s\n' % (node_id, sgridtype, dx))
f.write(page_stamp % page_num)
return page_num
def _write_sort1_as_sort2(self, f, page_num, page_stamp, header, words):
nodes = self.node_gridtype[:, 0]
gridtypes = self.node_gridtype[:, 1]
times = self._times
for inode, (node_id, gridtypei) in enumerate(zip(nodes, gridtypes)):
t1 = self.data[:, inode, 0].ravel()
header[1] = ' POINT-ID = %10i\n' % node_id
f.write(''.join(header + words))
for dt, t1i in zip(times, t1):
sgridtype = self.recast_gridtype_as_string(gridtypei)
vals = [t1i]
vals2 = write_floats_13e(vals)
dx = vals2[0]
if sgridtype == 'G':
f.write('%14s %6s %s\n' % (write_float_12e(dt), sgridtype, dx))
elif sgridtype == 'S':
f.write('%14s %6s %s\n' % (node_id, sgridtype, dx))
elif sgridtype == 'H':
f.write('%14s %6s %s\n' % (write_float_12e(dt), sgridtype, dx))
elif sgridtype == 'L':
f.write('%14s %6s %s\n' % (write_float_12e(dt), sgridtype, dx))
else:
raise NotImplementedError(sgridtype)
f.write(page_stamp % page_num)
page_num += 1
return page_num
def _write_sort1_as_sort1(self, f, page_num, page_stamp, header, words):
nodes = self.node_gridtype[:, 0]
gridtypes = self.node_gridtype[:, 1]
#times = self._times
for itime in range(self.ntimes):
dt = self._times[itime]
t1 = self.data[itime, :, 0]
if isinstance(dt, (float, float32)):
header[1] = ' %s = %10.4E\n' % (self.data_code['name'], dt)
else:
header[1] = ' %s = %10i\n' % (self.data_code['name'], dt)
f.write(''.join(header + words))
for node_id, gridtypei, t1i in zip(nodes, gridtypes, t1):
sgridtype = self.recast_gridtype_as_string(gridtypei)
vals = [t1i]
vals2 = write_floats_13e(vals)
dx = vals2[0]
if sgridtype == 'G':
f.write('%14i %6s %s\n' % (node_id, sgridtype, dx))
elif sgridtype == 'S':
f.write('%14i %6s %s\n' % (node_id, sgridtype, dx))
elif sgridtype == 'H':
f.write('%14i %6s %s\n' % (node_id, sgridtype, dx))
elif sgridtype == 'L':
f.write('%14i %6s %s\n' % (node_id, sgridtype, dx))
else:
raise NotImplementedError(sgridtype)
f.write(page_stamp % page_num)
page_num += 1
return page_num
def _write_sort2_as_sort2(self, f, page_num, page_stamp, header, words):
nodes = self.node_gridtype[:, 0]
gridtypes = self.node_gridtype[:, 1]
times = self._times
for inode, (node_id, gridtypei) in enumerate(zip(nodes, gridtypes)):
t1 = self.data[inode, :, 0]
header[1] = ' POINT-ID = %10i\n' % node_id
f.write(''.join(header + words))
for dt, t1i in zip(times, t1):
sgridtype = self.recast_gridtype_as_string(gridtypei)
vals = [t1i]
vals2 = write_floats_13e(vals)
dx = vals2[0]
if sgridtype == 'G':
f.write('%14s %6s %s\n' % (write_float_12e(dt), sgridtype, dx))
elif sgridtype == 'S':
f.write('%14s %6s %s\n' % (node_id, sgridtype, dx))
elif sgridtype == 'H':
f.write('%14s %6s %s\n' % (write_float_12e(dt), sgridtype, dx))
elif sgridtype == 'L':
f.write('%14s %6s %s\n' % (write_float_12e(dt), sgridtype, dx))
else:
raise NotImplementedError(sgridtype)
f.write(page_stamp % page_num)
page_num += 1
return page_num
def _write_f06_transient_block(self, words, header, page_stamp, page_num, f, write_words,
is_mag_phase=False, is_sort1=True):
if write_words:
words += [' \n', ' POINT ID. TYPE T1 T2 T3 R1 R2 R3\n']
#words += self.getTableMarker()
if not len(header) >= 3:
header.append('')
is_sort2 = not is_sort1
if self.is_sort1() or self.nonlinear_factor is None:
if is_sort2 and self.nonlinear_factor is not None:
page_num = self._write_sort1_as_sort2(f, page_num, page_stamp, header, words)
else:
page_num = self._write_sort1_as_sort1(f, page_num, page_stamp, header, words)
else:
page_num = self._write_sort2_as_sort2(f, page_num, page_stamp, header, words)
return page_num - 1
def extract_xyplot(self, node_ids, index):
node_ids = asarray(node_ids, dtype='int32')
i = index - 1
assert index in [1, 2, 3, 4, 5, 6], index
nids = self.node_gridtype[:, 0]
inids = searchsorted(nids, node_ids)
assert all(nids[inids] == node_ids), 'nids=%s expected=%s; all=%s' % (nids[inids], node_ids, nids)
return self.data[:, inids, i]
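# A minimal sketch (the _demo_* helper is an illustrative addition, not part
# of pyNastran): extract_xyplot above relies on node_gridtype being sorted by
# node id, so searchsorted maps the requested ids directly to row indices of
# `data`, and the assert catches ids that are not present.
def _demo_node_lookup():
    nids = np.array([1, 5, 9, 12])  # sorted node ids, as in node_gridtype[:, 0]
    requested = np.array([5, 12])
    inids = np.searchsorted(nids, requested)
    assert np.all(nids[inids] == requested)
    return inids  # rows to pull from data[:, inids, :]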
#class ComplexScalarTableArray(TableArray): # displacement style table
#def __init__(self, data_code, is_sort1, isubcase, dt):
#ScalarTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#raise NotImplementedError()
#def extract_xyplot(self, node_ids, index, index_str):
#index_str = index_str.lower().strip()
#if index_str in ['real', 'r']:
#j = 1
#elif index_str in ['imag', 'i']:
#j = 2
#elif index_str in ['mag', 'magnitude', 'm']:
#j = 3
#elif index_str in ['phase', 'p']:
#j = 4
#else:
#raise ValueError('index_str=%r' % index_str)
#node_ids = asarray(node_ids, dtype='int32')
#i = index - 1
#assert index in [1, 2, 3, 4, 5, 6,
#7, 8, 9, 10, 11, 12], index
#nids = self.node_gridtype[:, 0]
#inids = searchsorted(nids, node_ids)
#assert all(nids[inids] == node_ids), 'nids=%s expected=%s; all=%s' % (nids[inids], node_ids, nids)
#if j == 1:
## real
#return self.data[:, inids, i].real
#elif j == 2:
## imag
#return self.data[:, inids, i].imag
#elif j == 3:
## mag
#return abs(self.data[:, inids, i])
#elif j == 4:
## phase
#return angle(self.data[:, inids, i])
#else:
#raise RuntimeError()
#def is_real(self):
#return False
#def is_complex(self):
#return True
#def data_type(self):
#return 'complex64'
##def _write_f06_block(self, words, header, page_stamp, page_num, f, is_mag_phase):
##self._write_f06_transient_block(words, header, page_stamp, page_num, f, is_mag_phase, is_sort1)
#def _write_f06_transient_block(self, words, header, page_stamp, page_num, f,
#is_mag_phase, is_sort1):
#if is_mag_phase:
#words += [' (MAGNITUDE/PHASE)\n', ]
#else:
#words += [' (REAL/IMAGINARY)\n', ]
#if not len(header) >= 3:
#header.append('')
#if self.is_sort1():
#if is_sort1:
#words += [' \n', ' POINT ID. TYPE T1 T2 T3 R1 R2 R3\n']
#page_num = self.write_sort1_as_sort1(f, page_num, page_stamp, header, words, is_mag_phase)
#else:
#words += [' \n', ' FREQUENCY TYPE T1 T2 T3 R1 R2 R3\n']
#page_num = self.write_sort1_as_sort2(f, page_num, page_stamp, header, words, is_mag_phase)
#else:
#words += [' \n', ' FREQUENCY TYPE T1 T2 T3 R1 R2 R3\n']
#page_num = self.write_sort2_as_sort2(f, page_num, page_stamp, header, words, is_mag_phase)
#return page_num - 1
#def write_sort1_as_sort1(self, f, page_num, page_stamp, header, words, is_mag_phase):
#assert self.ntimes == len(self._times), 'ntimes=%s len(self._times)=%s' % (self.ntimes, self._times)
#for itime, dt in enumerate(self._times):
#node = self.node_gridtype[:, 0]
#gridtype = self.node_gridtype[:, 1]
#t1 = self.data[itime, :, 0]
#t2 = self.data[itime, :, 1]
#t3 = self.data[itime, :, 2]
#r1 = self.data[itime, :, 3]
#r2 = self.data[itime, :, 4]
#r3 = self.data[itime, :, 5]
#header[2] = ' %s = %10.4E\n' % (self.data_code['name'], dt)
#f.write(''.join(header + words))
#for node_id, gridtypei, t1i, t2i, t3i, r1i, r2i, r3i in zip(node, gridtype, t1, t2, t3, r1, r2, r3):
#sgridtype = self.recast_gridtype_as_string(gridtypei)
#vals = [t1i, t2i, t3i, r1i, r2i, r3i]
#vals2 = write_imag_floats_13e(vals, is_mag_phase)
#[dxr, dyr, dzr, rxr, ryr, rzr,
#dxi, dyi, dzi, rxi, ryi, rzi] = vals2
#if sgridtype == 'G':
#f.write('0 %12i %6s %-13s %-13s %-13s %-13s %-13s %-s\n'
#' %12s %6s %-13s %-13s %-13s %-13s %-13s %-s\n' % (
#node_id, sgridtype, dxr, dyr, dzr, rxr, ryr, rzr,
#'', '', dxi, dyi, dzi, rxi, ryi, rzi))
#elif sgridtype == 'S':
#f.write('0 %12i %6s %-13s\n'
#' %12s %6s %-13s\n' % (node_id, sgridtype, dxr, '', '', dxi))
#else:
#raise NotImplementedError(sgridtype)
#f.write(page_stamp % page_num)
#page_num += 1
#return page_num
#def write_sort1_as_sort2(self, f, page_num, page_stamp, header, words, is_mag_phase):
#node = self.node_gridtype[:, 0]
#gridtype = self.node_gridtype[:, 1]
#times = self._times
## print(self.data.shape)
#for inode, (node_id, gridtypei) in enumerate(zip(node, gridtype)):
## TODO: for SORT1 pretending to be SORT2
##t1 = self.data[:, inode, 0].ravel()
#t1 = self.data[:, inode, 0].ravel()
#t2 = self.data[:, inode, 1].ravel()
#t3 = self.data[:, inode, 2].ravel()
#r1 = self.data[:, inode, 3].ravel()
#r2 = self.data[:, inode, 4].ravel()
#r3 = self.data[:, inode, 5].ravel()
#if len(r3) != len(times):
#raise RuntimeError('len(d)=%s len(times)=%s' % (len(r3), len(times)))
#header[2] = ' POINT-ID = %10i\n' % node_id
#f.write(''.join(header + words))
#for dt, t1i, t2i, t3i, r1i, r2i, r3i in zip(times, t1, t2, t3, r1, r2, r3):
#sgridtype = self.recast_gridtype_as_string(gridtypei)
#vals = [t1i, t2i, t3i, r1i, r2i, r3i]
#vals2 = write_imag_floats_13e(vals, is_mag_phase)
#[dxr, dyr, dzr, rxr, ryr, rzr,
#dxi, dyi, dzi, rxi, ryi, rzi] = vals2
#sdt = write_float_12e(dt)
##if not is_all_zeros:
#if sgridtype == 'G':
#f.write('0 %12s %6s %-13s %-13s %-13s %-13s %-13s %-s\n'
#' %13s %6s %-13s %-13s %-13s %-13s %-13s %-s\n' % (
#sdt, sgridtype, dxr, dyr, dzr, rxr, ryr, rzr,
#'', '', dxi, dyi, dzi, rxi, ryi, rzi))
#elif sgridtype == 'S':
#f.write('0 %12s %6s %-13s\n'
#' %12s %6s %-13s\n' % (sdt, sgridtype, dxr, '', '', dxi))
#else:
#msg = 'nid=%s dt=%s type=%s dx=%s dy=%s dz=%s rx=%s ry=%s rz=%s' % (
#node_id, dt, sgridtype, t1i, t2i, t3i, r1i, r2i, r3i)
#raise NotImplementedError(msg)
#f.write(page_stamp % page_num)
#page_num += 1
#return page_num
#def write_sort2_as_sort2(self, f, page_num, page_stamp, header, words, is_mag_phase):
#node = self.node_gridtype[:, 0]
#gridtype = self.node_gridtype[:, 1]
#times = self._times
#for inode, (node_id, gridtypei) in enumerate(zip(node, gridtype)):
## TODO: for SORT1 pretending to be SORT2
##t1 = self.data[:, inode, 0].ravel()
#t1 = self.data[inode, :, 0]
#t2 = self.data[inode, :, 1]
#t3 = self.data[inode, :, 2]
#r1 = self.data[inode, :, 3]
#r2 = self.data[inode, :, 4]
#r3 = self.data[inode, :, 5]
#if len(r3) != len(times):
#raise RuntimeError('len(d)=%s len(times)=%s' % (len(r3), len(times)))
#header[2] = ' POINT-ID = %10i\n' % node_id
#f.write(''.join(header + words))
#for dt, t1i, t2i, t3i, r1i, r2i, r3i in zip(times, t1, t2, t3, r1, r2, r3):
#sgridtype = self.recast_gridtype_as_string(gridtypei)
#vals = [t1i, t2i, t3i, r1i, r2i, r3i]
#vals2 = write_imag_floats_13e(vals, is_mag_phase)
#[dxr, dyr, dzr, rxr, ryr, rzr,
#dxi, dyi, dzi, rxi, ryi, rzi] = vals2
#sdt = write_float_12e(dt)
##if not is_all_zeros:
#if sgridtype == 'G':
#f.write('0 %12s %6s %-13s %-13s %-13s %-13s %-13s %-s\n'
#' %13s %6s %-13s %-13s %-13s %-13s %-13s %-s\n' % (
#sdt, sgridtype, dxr, dyr, dzr, rxr, ryr, rzr,
#'', '', dxi, dyi, dzi, rxi, ryi, rzi))
#elif sgridtype == 'S':
#f.write('0 %12s %6s %-13s\n'
#' %12s %6s %-13s\n' % (sdt, sgridtype, dxr, '', '', dxi))
#else:
#msg = 'nid=%s dt=%s type=%s dx=%s dy=%s dz=%s rx=%s ry=%s rz=%s' % (
#node_id, dt, sgridtype, t1i, t2i, t3i, r1i, r2i, r3i)
#raise NotImplementedError(msg)
#f.write(page_stamp % page_num)
#page_num += 1
#return page_num
#class StaticArrayNode(RealTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#RealTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def node_ids(self):
#return self.node_gridtype[:, 0]
#class StaticArrayElement(RealTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#RealTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def element_ids(self):
#return self.node_gridtype[:, 0]
#class TimeArrayNodeSort1(RealTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#RealTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def times(self):
#return self._times
#@property
#def node_ids(self):
#return self.node_gridtype[:, 0]
#class TimeArrayElementSort1(RealTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#RealTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def times(self):
#return self._times
#@property
#def element_ids(self):
#return self.node_gridtype[:, 0]
#class TimeArrayNodeSort2(RealTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#RealTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def times(self):
#return self._times
#@property
#def node_ids(self):
#return self.node_gridtype[:, 0]
#class TimeArrayElementSort2(RealTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#RealTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def times(self):
#return self._times
#@property
#def element_ids(self):
#return self.node_gridtype[:, 0]
#class FrequencyArrayNodeSort2(ComplexTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#ComplexTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def frequencies(self):
#return self._times
#@property
#def node_ids(self):
#return self.node_gridtype[:, 0]
#class FrequencyArrayElementSort2(ComplexTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#ComplexTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def frequencies(self):
#return self._times
#@property
#def node_ids(self):
#return self.node_gridtype[:, 0]
| lgpl-3.0 |
bthirion/scikit-learn | examples/svm/plot_svm_nonlinear.py | 62 | 1119 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired,
edgecolors='k')
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
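# As an illustrative sanity check (probe points chosen by hand, not part of
# the random data above), the fitted classifier should reproduce the XOR
# pattern on points far from the decision boundary:
#
#   probes = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1]])
#   print(clf.predict(probes))   # expected (roughly): [False False  True  True]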
| bsd-3-clause |
mikelum/pyspeckit | pyspeckit/cubes/mapplot.py | 1 | 16567 | """
MapPlot
-------
Make plots of the cube and interactively connect them to spectrum plotting.
This is the interactive component of the package; nothing in here is
meant for publication-quality plots, only for interactive analysis.
That said, the plotter makes use of `APLpy <https://github.com/aplpy/aplpy>`_,
so it is possible to make publication-quality plots.
:author: Adam Ginsburg
:date: 03/17/2011
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function
import matplotlib
import matplotlib.pyplot
import matplotlib.figure
import numpy as np
import copy
import itertools
from astropy.extern import six
try:
import astropy.wcs as pywcs
import astropy.io.fits as pyfits
pywcsOK = True
except ImportError:
try:
import pyfits
import pywcs
pywcsOK = True
except ImportError:
pywcsOK = False
try:
import aplpy
icanhasaplpy = True
except: # aplpy fails with generic exceptions instead of ImportError
icanhasaplpy = False
from . import cubes
class MapPlotter(object):
"""
Class to plot a spectrum
See `mapplot` for use documentation; this docstring is only for
initialization.
"""
def __init__(self, Cube=None, figure=None, doplot=False, **kwargs):
"""
Create a map figure for future plotting
"""
# figure out where to put the plot
if isinstance(figure,matplotlib.figure.Figure):
self.figure = figure
elif type(figure) is int:
self.figure = matplotlib.pyplot.figure(figure)
else:
self.figure = None
self.axis = None
self.FITSFigure = None
self._click_marks = []
self._circles = []
self._clickX = None
self._clickY = None
self.overplot_colorcycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y'])
self.overplot_linestyle = '-'
self.Cube = Cube
if self.Cube is not None:
self.header = cubes.flatten_header(self.Cube.header, delete=True)
if pywcsOK:
self.wcs = pywcs.WCS(self.header)
if doplot: self.mapplot(**kwargs)
def __call__(self, **kwargs):
""" see mapplot """
return self.mapplot(**kwargs)
def mapplot(self, convention='calabretta', colorbar=True, useaplpy=True,
vmin=None, vmax=None, cmap=None, plotkwargs={}, **kwargs):
"""
Plot up a map based on an input data cube.
The map to be plotted is selected using `makeplane`.
The `estimator` keyword argument is passed to that function.
The plotted map, once shown, is interactive. You can click on it with any
of the three mouse buttons.
Button 1 or keyboard '1':
Plot the selected pixel's spectrum in another window. Mark the
clicked pixel with an 'x'
Button 2 or keyboard 'o':
Overplot a second (or third, fourth, fifth...) spectrum in the
external plot window
Button 3:
Disconnect the interactive viewer
You can also click-and-drag with button 1 to average over a circular
region. This same effect can be achieved by using the 'c' key to
set the /c/enter of a circle and the 'r' key to set its /r/adius (i.e.,
hover over the center and press 'c', then hover some distance away and
press 'r').
Parameters
----------
convention : 'calabretta' or 'griesen'
The default projection to assume for Galactic data when plotting
with aplpy.
colorbar : bool
Whether to show a colorbar
plotkwargs : dict, optional
A dictionary of keyword arguments to pass to aplpy.show_colorscale
or matplotlib.pyplot.imshow
useaplpy : bool
Use aplpy if a FITS header is available
vmin, vmax: float or None
Override values for the vmin/vmax values. Will be automatically
determined if left as None
.. todo:
Allow mapplot in subfigure
"""
if self.figure is None:
self.figure = matplotlib.pyplot.figure()
else:
self._disconnect()
self.figure.clf()
# this is where the map is created; everything below this is just plotting
self.makeplane(**kwargs)
        # have to pop out estimator so that kwargs can be passed to imshow
if 'estimator' in kwargs:
kwargs.pop('estimator')
# Below here is all plotting stuff
if vmin is None: vmin = self.plane[self.plane==self.plane].min()
if vmax is None: vmax = self.plane[self.plane==self.plane].max()
if icanhasaplpy and useaplpy:
self.fitsfile = pyfits.PrimaryHDU(data=self.plane,header=self.header)
self.FITSFigure = aplpy.FITSFigure(self.fitsfile,figure=self.figure,convention=convention)
self.FITSFigure.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)
self.axis = self.FITSFigure._ax1
if colorbar:
try:
self.FITSFigure.add_colorbar()
except Exception as ex:
print("ERROR: Could not create colorbar! Error was %s" % str(ex))
self._origin = 0 # FITS convention
# TODO: set _origin to 1 if using PIXEL units, not real wcs
else:
self.axis = self.figure.add_subplot(111)
if hasattr(self,'colorbar') and self.colorbar is not None:
if self.colorbar.ax in self.axis.figure.axes:
self.axis.figure.delaxes(self.colorbar.ax)
self.axis.imshow(self.plane, vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)
if colorbar:
try:
self.colorbar = matplotlib.pyplot.colorbar(self.axis.images[0])
except Exception as ex:
print("ERROR: Could not create colorbar! Error was %s" % str(ex))
self._origin = 0 # normal convention
self.canvas = self.axis.figure.canvas
self._connect()
def _connect(self):
""" Connect click, click up (release click), and key press to events """
self.clickid = self.canvas.callbacks.connect('button_press_event',self.click)
self.clickupid = self.canvas.callbacks.connect('button_release_event',self.plot_spectrum)
self.keyid = self.canvas.callbacks.connect('key_press_event',self.plot_spectrum)
def _disconnect(self):
""" Disconnect click, click up (release click), and key press from events """
if hasattr(self,'canvas'):
self.canvas.mpl_disconnect(self.clickid)
self.canvas.mpl_disconnect(self.clickupid)
self.canvas.mpl_disconnect(self.keyid)
def makeplane(self, estimator=np.nanmean):
"""
Create a "plane" view of the cube, either by slicing or projecting it
or by showing a slice from the best-fit model parameter cube.
Parameters
----------
estimator : [ function | 'max' | 'int' | FITS filename | integer | slice ]
A non-pythonic, non-duck-typed variable. If it's a function, apply that function
along the cube's spectral axis to obtain an estimate (e.g., mean, min, max, etc.).
'max' will do the same thing as passing np.max
'int' will attempt to integrate the image (which is why I didn't duck-type)
(integrate means sum and multiply by dx)
a .fits filename will be read using pyfits (so you can make your own cover figure)
an integer will get the n'th slice in the parcube if it exists
If it's a slice, slice the input data cube along the Z-axis with this slice
"""
# THIS IS A HACK!!! isinstance(a function, function) must be a thing...
FUNCTION = type(np.max)
# estimator is NOT duck-typed
if type(estimator) is FUNCTION:
self.plane = estimator(self.Cube.cube,axis=0)
elif isinstance(estimator, six.string_types):
if estimator == 'max':
self.plane = self.Cube.cube.max(axis=0)
elif estimator == 'int':
dx = np.abs(self.Cube.xarr[1:] - self.Cube.xarr[:-1])
dx = np.concatenate([dx,[dx[-1]]])
self.plane = (self.Cube.cube * dx[:,np.newaxis,np.newaxis]).sum(axis=0)
elif estimator[-5:] == ".fits":
self.plane = pyfits.getdata(estimator)
elif type(estimator) is slice:
self.plane = self.Cube.cube[estimator,:,:]
elif type(estimator) is int:
if hasattr(self.Cube,'parcube'):
self.plane = self.Cube.parcube[estimator,:,:]
if self.plane is None:
raise ValueError("Invalid estimator %s" % (str(estimator)))
if np.sum(np.isfinite(self.plane)) == 0:
raise ValueError("Map is all NaNs or infs. Check your estimator or your input cube.")
def click(self,event):
"""
Record location of downclick
"""
if event.inaxes:
self._clickX = np.round(event.xdata) - self._origin
self._clickY = np.round(event.ydata) - self._origin
def plot_spectrum(self, event, plot_fit=True):
"""
Connects map cube to Spectrum...
"""
self.event = event
if event.inaxes:
clickX = np.round(event.xdata) - self._origin
clickY = np.round(event.ydata) - self._origin
# grab toolbar info so that we don't do anything if a tool is selected
tb = self.canvas.toolbar
if tb.mode != '':
return
elif event.key is not None:
if event.key == 'c':
self._center = (clickX-1,clickY-1)
self._remove_circle()
self._add_click_mark(clickX,clickY,clear=True)
elif event.key == 'r':
x,y = self._center
self._add_circle(x,y,clickX,clickY)
self.circle(x,y,clickX-1,clickY-1)
elif event.key == 'o':
clickX,clickY = round(clickX),round(clickY)
print("OverPlotting spectrum from point %i,%i" % (clickX-1,clickY-1))
                    color = next(self.overplot_colorcycle)
self._add_click_mark(clickX,clickY,clear=False, color=color)
self.Cube.plot_spectrum(clickX-1,clickY-1,clear=False, color=color, linestyle=self.overplot_linestyle)
elif event.key in ('1','2'):
event.button = int(event.key)
event.key = None
self.plot_spectrum(event)
elif (hasattr(event,'button') and event.button in (1,2)
and not (self._clickX == clickX and self._clickY == clickY)):
if event.button == 1:
self._remove_circle()
clear=True
color = 'k'
linestyle = 'steps-mid'
else:
                    color = next(self.overplot_colorcycle)
linestyle = self.overplot_linestyle
clear=False
rad = ( (self._clickX-clickX)**2 + (self._clickY-clickY)**2 )**0.5
print("Plotting circle from point %i,%i to %i,%i (r=%f)" % (self._clickX-1,self._clickY-1,clickX-1,clickY-1,rad))
self._add_circle(self._clickX,self._clickY,clickX,clickY)
self.circle(self._clickX-1,self._clickY-1,clickX-1,clickY-1,clear=clear,linestyle=linestyle,color=color)
elif hasattr(event,'button') and event.button is not None:
if event.button==1:
clickX,clickY = round(clickX),round(clickY)
print("Plotting spectrum from point %i,%i" % (clickX-1,clickY-1))
self._remove_circle()
self._add_click_mark(clickX,clickY,clear=True)
self.Cube.plot_spectrum(clickX-1,clickY-1,clear=True)
if plot_fit: self.Cube.plot_fit(clickX-1, clickY-1, silent=True)
elif event.button==2:
clickX,clickY = round(clickX),round(clickY)
print("OverPlotting spectrum from point %i,%i" % (clickX-1,clickY-1))
                    color = next(self.overplot_colorcycle)
self._add_click_mark(clickX,clickY,clear=False, color=color)
self.Cube.plot_spectrum(clickX-1,clickY-1,clear=False, color=color, linestyle=self.overplot_linestyle)
elif event.button==3:
print("Disconnecting GAIA-like tool")
self._disconnect()
else:
print("Call failed for some reason: ")
print("event: ",event)
else:
pass
# never really needed... warn("Click outside of axes")
def _add_click_mark(self,x,y,clear=False,color='k'):
"""
Add an X at some position
"""
if clear:
self._clear_click_marks()
if self.FITSFigure is not None:
label = 'xmark%i' % (len(self._click_marks)+1)
x,y = self.FITSFigure.pixel2world(x,y)
self.FITSFigure.show_markers(x,y,marker='x',c=color,layer=label)
self._click_marks.append( label )
else:
self._click_marks.append( self.axis.plot(x,y,'kx') )
self.refresh()
def _clear_click_marks(self):
"""
Remove all marks added by previous clicks
"""
if self.FITSFigure is not None:
for mark in self._click_marks:
if mark in self.FITSFigure._layers:
self.FITSFigure.remove_layer(mark)
else:
for mark in self._click_marks:
self._click_marks.remove(mark)
if mark in self.axis.lines:
self.axis.lines.remove(mark)
self.refresh()
def _add_circle(self,x,y,x2,y2,**kwargs):
"""
"""
if self.FITSFigure is not None:
x,y = self.FITSFigure.pixel2world(x,y)
x2,y2 = self.FITSFigure.pixel2world(x2,y2)
r = (np.linalg.norm(np.array([x,y])-np.array([x2,y2])))
#self.FITSFigure.show_markers(x,y,s=r,marker='o',facecolor='none',edgecolor='black',layer='circle')
layername = "circle%02i" % len(self._circles)
self.FITSFigure.show_circles(x,y,r,edgecolor='black',facecolor='none',layer=layername,**kwargs)
self._circles.append(layername)
else:
r = np.linalg.norm(np.array([x,y])-np.array([x2,y2]))
circle = matplotlib.patches.Circle([x,y],radius=r,**kwargs)
self._circles.append( circle )
self.axis.patches.append(circle)
self.refresh()
def _remove_circle(self):
"""
"""
if self.FITSFigure is not None:
for layername in self._circles:
if layername in self.FITSFigure._layers:
self.FITSFigure.remove_layer(layername)
else:
for circle in self._circles:
if circle in self.axis.patches:
self.axis.patches.remove(circle)
self._circles.remove(circle)
self.refresh()
def refresh(self):
if self.axis is not None:
self.axis.figure.canvas.draw()
def circle(self,x1,y1,x2,y2,**kwargs):
"""
Plot the spectrum of a circular aperture
"""
r = (np.linalg.norm(np.array([x1,y1])-np.array([x2,y2])))
self.Cube.plot_apspec([x1,y1,r],**kwargs)
#self.Cube.data = cubes.extract_aperture( self.Cube.cube, [x1,y1,r] , coordsys=None )
#self.Cube.plotter()
def copy(self, parent=None):
"""
Create a copy of the map plotter with blank (uninitialized) axis & figure
[ parent ]
A spectroscopic axis instance that is the parent of the specfit
instance. This needs to be specified at some point, but defaults
to None to prevent overwriting a previous plot.
"""
newmapplot = copy.copy(self)
newmapplot.Cube = parent
newmapplot.axis = None
newmapplot.figure = None
return newmapplot
| mit |
jmmease/pandas | pandas/io/msgpack/__init__.py | 26 | 1233 | # coding: utf-8
from collections import namedtuple
from pandas.io.msgpack.exceptions import * # noqa
from pandas.io.msgpack._version import version # noqa
class ExtType(namedtuple('ExtType', 'code data')):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
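# Illustrative use (the code value and payload are arbitrary): a custom
# extension type can be wrapped as ExtType(code=42, data=b'payload') and then
# packed like any other object.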
import os # noqa
from pandas.io.msgpack._packer import Packer # noqa
from pandas.io.msgpack._unpacker import unpack, unpackb, Unpacker # noqa
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
def packb(o, **kwargs):
"""
Pack object `o` and return packed bytes
See :class:`Packer` for options.
"""
return Packer(**kwargs).pack(o)
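# Illustrative round trip (a sketch, not executed at import time):
#
#   >>> buf = packb([1, 2, 3])
#   >>> unpackb(buf)
#   [1, 2, 3]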
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
| bsd-3-clause |
evgchz/scikit-learn | sklearn/datasets/tests/test_20news.py | 42 | 2416 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
colour-science/colour | utilities/generate_plots.py | 1 | 34172 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generate Plots
==============
"""
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt # noqa
import numpy as np # noqa
import os # noqa
import colour # noqa
from colour.characterisation import SDS_COLOURCHECKERS # noqa
from colour.colorimetry import ( # noqa
SDS_ILLUMINANTS, SDS_LIGHT_SOURCES, SDS_LEFS_PHOTOPIC, SDS_LEFS_SCOTOPIC,
MSDS_CMFS_STANDARD_OBSERVER, SpectralDistribution, SpectralShape,
sd_blackbody, sd_mesopic_luminous_efficiency_function, sd_to_XYZ)
from colour.io import read_image # noqa
from colour.models import sRGB_to_XYZ, XYZ_to_sRGB, XYZ_to_xy # noqa
from colour.plotting import ( # noqa
colour_style, ColourSwatch, plot_automatic_colour_conversion_graph,
plot_blackbody_colours, plot_blackbody_spectral_radiance,
plot_chromaticity_diagram_CIE1931, plot_chromaticity_diagram_CIE1960UCS,
plot_chromaticity_diagram_CIE1976UCS, plot_constant_hue_loci,
plot_corresponding_chromaticities_prediction,
plot_cvd_simulation_Machado2009,
plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1931,
plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1960UCS,
plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1976UCS, plot_image,
plot_multi_cctfs, plot_multi_cmfs, plot_multi_colour_checkers,
plot_multi_colour_swatches, plot_multi_functions,
plot_multi_illuminant_sds, plot_multi_lightness_functions,
plot_multi_luminance_functions, plot_multi_munsell_value_functions,
plot_multi_sds_colour_quality_scales_bars,
plot_multi_sds_colour_rendering_indexes_bars, plot_multi_sds,
plot_planckian_locus_in_chromaticity_diagram_CIE1931,
plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS,
plot_pointer_gamut,
plot_RGB_chromaticities_in_chromaticity_diagram_CIE1931,
plot_RGB_chromaticities_in_chromaticity_diagram_CIE1960UCS,
plot_RGB_chromaticities_in_chromaticity_diagram_CIE1976UCS,
plot_RGB_colourspaces_gamuts,
plot_RGB_colourspaces_in_chromaticity_diagram_CIE1931,
plot_RGB_colourspaces_in_chromaticity_diagram_CIE1960UCS,
plot_RGB_colourspaces_in_chromaticity_diagram_CIE1976UCS, plot_RGB_scatter,
plot_sds_in_chromaticity_diagram_CIE1931,
plot_sds_in_chromaticity_diagram_CIE1960UCS,
plot_sds_in_chromaticity_diagram_CIE1976UCS, plot_single_cctf,
plot_single_cmfs, plot_single_colour_checker, plot_single_colour_swatch,
plot_single_function, plot_single_illuminant_sd,
plot_single_lightness_function, plot_single_luminance_function,
plot_single_munsell_value_function,
plot_single_sd_colour_quality_scale_bars,
plot_single_sd_colour_rendering_index_bars,
plot_single_sd_colour_rendition_report, plot_single_sd_rayleigh_scattering,
plot_single_sd, plot_the_blue_sky, plot_visible_spectrum, render)
from colour.plotting.diagrams import ( # noqa
plot_spectral_locus, plot_chromaticity_diagram_colours,
plot_chromaticity_diagram, plot_sds_in_chromaticity_diagram)
from colour.plotting.models import ( # noqa
plot_RGB_colourspaces_in_chromaticity_diagram,
plot_RGB_chromaticities_in_chromaticity_diagram,
plot_ellipses_MacAdam1942_in_chromaticity_diagram)
from colour.plotting.quality import plot_colour_quality_bars # noqa
from colour.plotting.temperature import ( # noqa
plot_planckian_locus, plot_planckian_locus_CIE1931,
plot_planckian_locus_CIE1960UCS,
plot_planckian_locus_in_chromaticity_diagram)
from colour.quality import colour_quality_scale # noqa
from colour.utilities import domain_range_scale, filter_warnings # noqa
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = ['generate_documentation_plots']
def generate_documentation_plots(output_directory):
"""
Generates documentation plots.
Parameters
----------
output_directory : unicode
Output directory.
"""
filter_warnings()
colour_style()
np.random.seed(0)
# *************************************************************************
# "README.rst"
# *************************************************************************
filename = os.path.join(output_directory,
'Examples_Colour_Automatic_Conversion_Graph.png')
plot_automatic_colour_conversion_graph(filename)
arguments = {
'tight_layout':
True,
'transparent_background':
True,
'filename':
os.path.join(output_directory,
'Examples_Plotting_Visible_Spectrum.png')
}
plt.close(
plot_visible_spectrum('CIE 1931 2 Degree Standard Observer',
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Examples_Plotting_Illuminant_F1_SD.png')
plt.close(plot_single_illuminant_sd('FL1', **arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Examples_Plotting_Blackbodies.png')
blackbody_sds = [
sd_blackbody(i, SpectralShape(0, 10000, 10))
for i in range(1000, 15000, 1000)
]
plt.close(
plot_multi_sds(
blackbody_sds,
y_label='W / (sr m$^2$) / m',
plot_kwargs={'use_sd_colours': True},
normalise_sds_colours=True,
legend_location='upper right',
bounding_box=(0, 1250, 0, 2.5e15),
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Examples_Plotting_Cone_Fundamentals.png')
plt.close(
plot_single_cmfs(
'Stockman & Sharpe 2 Degree Cone Fundamentals',
y_label='Sensitivity',
bounding_box=(390, 870, 0, 1.1),
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Examples_Plotting_Luminous_Efficiency.png')
plt.close(
plot_multi_sds(
(sd_mesopic_luminous_efficiency_function(0.2),
SDS_LEFS_PHOTOPIC['CIE 1924 Photopic Standard Observer'],
SDS_LEFS_SCOTOPIC['CIE 1951 Scotopic Standard Observer']),
y_label='Luminous Efficiency',
legend_location='upper right',
y_tighten=True,
margins=(0, 0, 0, .1),
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Examples_Plotting_BabelColor_Average.png')
plt.close(
plot_multi_sds(
SDS_COLOURCHECKERS['BabelColor Average'].values(),
plot_kwargs={'use_sd_colours': True},
title=('BabelColor Average - '
'Spectral Distributions'),
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Examples_Plotting_ColorChecker_2005.png')
plt.close(
plot_single_colour_checker(
'ColorChecker 2005', text_kwargs={'visible': False},
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Examples_Plotting_Chromaticities_Prediction.png')
plt.close(
plot_corresponding_chromaticities_prediction(
2, 'Von Kries', 'Bianco 2010', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Examples_Plotting_CCT_CIE_1960_UCS_Chromaticity_Diagram.png')
plt.close(
plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS(
['A', 'B', 'C'], **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Examples_Plotting_Chromaticities_CIE_1931_Chromaticity_Diagram.png')
RGB = np.random.random((32, 32, 3))
plt.close(
plot_RGB_chromaticities_in_chromaticity_diagram_CIE1931(
RGB,
'ITU-R BT.709',
colourspaces=['ACEScg', 'S-Gamut'],
show_pointer_gamut=True,
**arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Examples_Plotting_CRI.png')
plt.close(
plot_single_sd_colour_rendering_index_bars(SDS_ILLUMINANTS['FL2'],
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Examples_Plotting_Colour_Rendition_Report.png')
plt.close(
plot_single_sd_colour_rendition_report(SDS_ILLUMINANTS['FL2'],
**arguments)[0])
# *************************************************************************
# Documentation
# *************************************************************************
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_CVD_Simulation_Machado2009.png')
plt.close(plot_cvd_simulation_Machado2009(RGB, **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Single_Colour_Checker.png')
plt.close(plot_single_colour_checker('ColorChecker 2005', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Multi_Colour_Checkers.png')
plt.close(
plot_multi_colour_checkers(['ColorChecker 1976', 'ColorChecker 2005'],
**arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_Single_SD.png')
data = {
500: 0.0651,
520: 0.0705,
540: 0.0772,
560: 0.0870,
580: 0.1128,
600: 0.1360
}
sd = SpectralDistribution(data, name='Custom')
plt.close(plot_single_sd(sd, **arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_Multi_SDS.png')
data_1 = {
500: 0.004900,
510: 0.009300,
520: 0.063270,
530: 0.165500,
540: 0.290400,
550: 0.433450,
560: 0.594500
}
data_2 = {
500: 0.323000,
510: 0.503000,
520: 0.710000,
530: 0.862000,
540: 0.954000,
550: 0.994950,
560: 0.995000
}
spd1 = SpectralDistribution(data_1, name='Custom 1')
spd2 = SpectralDistribution(data_2, name='Custom 2')
plt.close(plot_multi_sds([spd1, spd2], **arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_Single_CMFS.png')
plt.close(
plot_single_cmfs('CIE 1931 2 Degree Standard Observer',
**arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_Multi_CMFS.png')
cmfs = ('CIE 1931 2 Degree Standard Observer',
'CIE 1964 10 Degree Standard Observer')
plt.close(plot_multi_cmfs(cmfs, **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Single_Illuminant_SD.png')
plt.close(plot_single_illuminant_sd('A', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Multi_Illuminant_SDS.png')
plt.close(plot_multi_illuminant_sds(['A', 'B', 'C'], **arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_Visible_Spectrum.png')
plt.close(plot_visible_spectrum(**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Single_Lightness_Function.png')
plt.close(plot_single_lightness_function('CIE 1976', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Multi_Lightness_Functions.png')
plt.close(
plot_multi_lightness_functions(['CIE 1976', 'Wyszecki 1963'],
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Single_Luminance_Function.png')
plt.close(plot_single_luminance_function('CIE 1976', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Multi_Luminance_Functions.png')
plt.close(
plot_multi_luminance_functions(['CIE 1976', 'Newhall 1943'],
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Blackbody_Spectral_Radiance.png')
plt.close(
plot_blackbody_spectral_radiance(
3500, blackbody='VY Canis Major', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Blackbody_Colours.png')
plt.close(
plot_blackbody_colours(SpectralShape(150, 12500, 50), **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Single_Colour_Swatch.png')
RGB = ColourSwatch(RGB=(0.45620519, 0.03081071, 0.04091952))
plt.close(plot_single_colour_swatch(RGB, **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Multi_Colour_Swatches.png')
RGB_1 = ColourSwatch(RGB=(0.45293517, 0.31732158, 0.26414773))
RGB_2 = ColourSwatch(RGB=(0.77875824, 0.57726450, 0.50453169))
plt.close(plot_multi_colour_swatches([RGB_1, RGB_2], **arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_Single_Function.png')
plt.close(plot_single_function(lambda x: x ** (1 / 2.2), **arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_Multi_Functions.png')
functions = {
'Gamma 2.2': lambda x: x ** (1 / 2.2),
'Gamma 2.4': lambda x: x ** (1 / 2.4),
'Gamma 2.6': lambda x: x ** (1 / 2.6),
}
plt.close(plot_multi_functions(functions, **arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_Image.png')
path = os.path.join(output_directory, 'Logo_Medium_001.png')
plt.close(plot_image(read_image(str(path)), **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_Corresponding_Chromaticities_Prediction.png')
plt.close(
plot_corresponding_chromaticities_prediction(1, 'Von Kries', 'CAT02',
**arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_Spectral_Locus.png')
plt.close(
plot_spectral_locus(spectral_locus_colours='RGB', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Chromaticity_Diagram_Colours.png')
plt.close(plot_chromaticity_diagram_colours(**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Chromaticity_Diagram.png')
plt.close(plot_chromaticity_diagram(**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Chromaticity_Diagram_CIE1931.png')
plt.close(plot_chromaticity_diagram_CIE1931(**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Chromaticity_Diagram_CIE1960UCS.png')
plt.close(plot_chromaticity_diagram_CIE1960UCS(**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Chromaticity_Diagram_CIE1976UCS.png')
plt.close(plot_chromaticity_diagram_CIE1976UCS(**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_SDS_In_Chromaticity_Diagram.png')
A = SDS_ILLUMINANTS['A']
D65 = SDS_ILLUMINANTS['D65']
plt.close(plot_sds_in_chromaticity_diagram([A, D65], **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_SDS_In_Chromaticity_Diagram_CIE1931.png')
plt.close(
plot_sds_in_chromaticity_diagram_CIE1931([A, D65], **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_SDS_In_Chromaticity_Diagram_CIE1960UCS.png')
plt.close(
plot_sds_in_chromaticity_diagram_CIE1960UCS([A, D65], **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_SDS_In_Chromaticity_Diagram_CIE1976UCS.png')
plt.close(
plot_sds_in_chromaticity_diagram_CIE1976UCS([A, D65], **arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_Pointer_Gamut.png')
plt.close(plot_pointer_gamut(**arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_RGB_Colourspaces_In_Chromaticity_Diagram.png')
plt.close(
plot_RGB_colourspaces_in_chromaticity_diagram(
['ITU-R BT.709', 'ACEScg', 'S-Gamut'], **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_RGB_Colourspaces_In_Chromaticity_Diagram_CIE1931.png')
plt.close(
plot_RGB_colourspaces_in_chromaticity_diagram_CIE1931(
['ITU-R BT.709', 'ACEScg', 'S-Gamut'], **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_RGB_Colourspaces_In_'
'Chromaticity_Diagram_CIE1960UCS.png')
plt.close(
plot_RGB_colourspaces_in_chromaticity_diagram_CIE1960UCS(
['ITU-R BT.709', 'ACEScg', 'S-Gamut'], **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_RGB_Colourspaces_In_'
'Chromaticity_Diagram_CIE1976UCS.png')
plt.close(
plot_RGB_colourspaces_in_chromaticity_diagram_CIE1976UCS(
['ITU-R BT.709', 'ACEScg', 'S-Gamut'], **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_RGB_Chromaticities_In_'
'Chromaticity_Diagram.png')
RGB = np.random.random((128, 128, 3))
plt.close(
plot_RGB_chromaticities_in_chromaticity_diagram(
RGB, 'ITU-R BT.709', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_RGB_Chromaticities_In_'
'Chromaticity_Diagram_CIE1931.png')
plt.close(
plot_RGB_chromaticities_in_chromaticity_diagram_CIE1931(
RGB, 'ITU-R BT.709', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_RGB_Chromaticities_In_'
'Chromaticity_Diagram_CIE1960UCS.png')
plt.close(
plot_RGB_chromaticities_in_chromaticity_diagram_CIE1960UCS(
RGB, 'ITU-R BT.709', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_RGB_Chromaticities_In_'
'Chromaticity_Diagram_CIE1976UCS.png')
plt.close(
plot_RGB_chromaticities_in_chromaticity_diagram_CIE1976UCS(
RGB, 'ITU-R BT.709', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_Ellipses_MacAdam1942_In_Chromaticity_Diagram.png')
plt.close(
plot_ellipses_MacAdam1942_in_chromaticity_diagram(**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Ellipses_MacAdam1942_In_'
'Chromaticity_Diagram_CIE1931.png')
plt.close(
plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1931(
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Ellipses_MacAdam1942_In_'
'Chromaticity_Diagram_CIE1960UCS.png')
plt.close(
plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1960UCS(
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Ellipses_MacAdam1942_In_'
'Chromaticity_Diagram_CIE1976UCS.png')
plt.close(
plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1976UCS(
**arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_Single_CCTF.png')
plt.close(plot_single_cctf('ITU-R BT.709', **arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_Multi_CCTFs.png')
plt.close(plot_multi_cctfs(['ITU-R BT.709', 'sRGB'], **arguments)[0])
data = np.array([
[
None,
np.array([0.95010000, 1.00000000, 1.08810000]),
np.array([0.40920000, 0.28120000, 0.30600000]),
np.array([
[0.02495100, 0.01908600, 0.02032900],
[0.10944300, 0.06235900, 0.06788100],
[0.27186500, 0.18418700, 0.19565300],
[0.48898900, 0.40749400, 0.44854600],
]),
None,
],
[
None,
np.array([0.95010000, 1.00000000, 1.08810000]),
np.array([0.30760000, 0.48280000, 0.42770000]),
np.array([
[0.02108000, 0.02989100, 0.02790400],
[0.06194700, 0.11251000, 0.09334400],
[0.15255800, 0.28123300, 0.23234900],
[0.34157700, 0.56681300, 0.47035300],
]),
None,
],
[
None,
np.array([0.95010000, 1.00000000, 1.08810000]),
np.array([0.39530000, 0.28120000, 0.18450000]),
np.array([
[0.02436400, 0.01908600, 0.01468800],
[0.10331200, 0.06235900, 0.02854600],
[0.26311900, 0.18418700, 0.12109700],
[0.43158700, 0.40749400, 0.39008600],
]),
None,
],
[
None,
np.array([0.95010000, 1.00000000, 1.08810000]),
np.array([0.20510000, 0.18420000, 0.57130000]),
np.array([
[0.03039800, 0.02989100, 0.06123300],
[0.08870000, 0.08498400, 0.21843500],
[0.18405800, 0.18418700, 0.40111400],
[0.32550100, 0.34047200, 0.50296900],
[0.53826100, 0.56681300, 0.80010400],
]),
None,
],
[
None,
np.array([0.95010000, 1.00000000, 1.08810000]),
np.array([0.35770000, 0.28120000, 0.11250000]),
np.array([
[0.03678100, 0.02989100, 0.01481100],
[0.17127700, 0.11251000, 0.01229900],
[0.30080900, 0.28123300, 0.21229800],
[0.52976000, 0.40749400, 0.11720000],
]),
None,
],
])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Constant_Hue_Loci.png')
plt.close(plot_constant_hue_loci(data, 'IPT', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Single_Munsell_Value_Function.png')
plt.close(plot_single_munsell_value_function('ASTM D1535', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Multi_Munsell_Value_Functions.png')
plt.close(
plot_multi_munsell_value_functions(['ASTM D1535', 'McCamy 1987'],
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Single_SD_Rayleigh_Scattering.png')
plt.close(plot_single_sd_rayleigh_scattering(**arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_The_Blue_Sky.png')
plt.close(plot_the_blue_sky(**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Colour_Quality_Bars.png')
illuminant = SDS_ILLUMINANTS['FL2']
light_source = SDS_LIGHT_SOURCES['Kinoton 75P']
light_source = light_source.copy().align(SpectralShape(360, 830, 1))
cqs_i = colour_quality_scale(illuminant, additional_data=True)
cqs_l = colour_quality_scale(light_source, additional_data=True)
plt.close(plot_colour_quality_bars([cqs_i, cqs_l], **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_Single_SD_Colour_Rendering_Index_Bars.png')
illuminant = SDS_ILLUMINANTS['FL2']
plt.close(
plot_single_sd_colour_rendering_index_bars(illuminant, **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_Multi_SDS_Colour_Rendering_Indexes_Bars.png')
light_source = SDS_LIGHT_SOURCES['Kinoton 75P']
plt.close(
plot_multi_sds_colour_rendering_indexes_bars(
[illuminant, light_source], **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_Single_SD_Colour_Quality_Scale_Bars.png')
illuminant = SDS_ILLUMINANTS['FL2']
plt.close(
plot_single_sd_colour_quality_scale_bars(illuminant, **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_Multi_SDS_Colour_Quality_Scales_Bars.png')
light_source = SDS_LIGHT_SOURCES['Kinoton 75P']
plt.close(
plot_multi_sds_colour_quality_scales_bars([illuminant, light_source],
**arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_Planckian_Locus.png')
plt.close(plot_planckian_locus(**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Planckian_Locus_CIE1931.png')
plt.close(plot_planckian_locus_CIE1931(**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_Planckian_Locus_CIE1960UCS.png')
plt.close(plot_planckian_locus_CIE1960UCS(**arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_Planckian_Locus_In_Chromaticity_Diagram.png')
plt.close(
plot_planckian_locus_in_chromaticity_diagram(['A', 'B', 'C'],
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_Planckian_Locus_In_Chromaticity_Diagram_CIE1931.png')
plt.close(
plot_planckian_locus_in_chromaticity_diagram_CIE1931(['A', 'B', 'C'],
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_Planckian_Locus_In_Chromaticity_Diagram_CIE1960UCS.png')
plt.close(
plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS(
['A', 'B', 'C'], **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_Single_SD_Colour_Rendition_Report_Full.png')
plt.close(
plot_single_sd_colour_rendition_report(SDS_ILLUMINANTS['FL2'],
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_Single_SD_Colour_Rendition_Report_Intermediate.png')
plt.close(
plot_single_sd_colour_rendition_report(SDS_ILLUMINANTS['FL2'],
'Intermediate', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory,
'Plotting_Plot_Single_SD_Colour_Rendition_Report_Simple.png')
plt.close(
plot_single_sd_colour_rendition_report(SDS_ILLUMINANTS['FL2'],
'Simple', **arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_RGB_Colourspaces_Gamuts.png')
plt.close(
plot_RGB_colourspaces_gamuts(['ITU-R BT.709', 'ACEScg', 'S-Gamut'],
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Plotting_Plot_RGB_Colourspaces_Gamuts.png')
plt.close(
plot_RGB_colourspaces_gamuts(['ITU-R BT.709', 'ACEScg', 'S-Gamut'],
**arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Plotting_Plot_RGB_Scatter.png')
plt.close(plot_RGB_scatter(RGB, 'ITU-R BT.709', **arguments)[0])
filename = os.path.join(
output_directory,
'Plotting_Plot_Colour_Automatic_Conversion_Graph.png')
plot_automatic_colour_conversion_graph(filename)
# *************************************************************************
# "tutorial.rst"
# *************************************************************************
arguments['filename'] = os.path.join(output_directory,
'Tutorial_Visible_Spectrum.png')
plt.close(plot_visible_spectrum(**arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Tutorial_Sample_SD.png')
sample_sd_data = {
380: 0.048,
385: 0.051,
390: 0.055,
395: 0.060,
400: 0.065,
405: 0.068,
410: 0.068,
415: 0.067,
420: 0.064,
425: 0.062,
430: 0.059,
435: 0.057,
440: 0.055,
445: 0.054,
450: 0.053,
455: 0.053,
460: 0.052,
465: 0.052,
470: 0.052,
475: 0.053,
480: 0.054,
485: 0.055,
490: 0.057,
495: 0.059,
500: 0.061,
505: 0.062,
510: 0.065,
515: 0.067,
520: 0.070,
525: 0.072,
530: 0.074,
535: 0.075,
540: 0.076,
545: 0.078,
550: 0.079,
555: 0.082,
560: 0.087,
565: 0.092,
570: 0.100,
575: 0.107,
580: 0.115,
585: 0.122,
590: 0.129,
595: 0.134,
600: 0.138,
605: 0.142,
610: 0.146,
615: 0.150,
620: 0.154,
625: 0.158,
630: 0.163,
635: 0.167,
640: 0.173,
645: 0.180,
650: 0.188,
655: 0.196,
660: 0.204,
665: 0.213,
670: 0.222,
675: 0.231,
680: 0.242,
685: 0.251,
690: 0.261,
695: 0.271,
700: 0.282,
705: 0.294,
710: 0.305,
715: 0.318,
720: 0.334,
725: 0.354,
730: 0.372,
735: 0.392,
740: 0.409,
745: 0.420,
750: 0.436,
755: 0.450,
760: 0.462,
765: 0.465,
770: 0.448,
775: 0.432,
780: 0.421
}
sd = SpectralDistribution(sample_sd_data, name='Sample')
plt.close(plot_single_sd(sd, **arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Tutorial_SD_Interpolation.png')
sd_copy = sd.copy()
sd_copy.interpolate(SpectralShape(400, 770, 1))
plt.close(
plot_multi_sds(
[sd, sd_copy], bounding_box=[730, 780, 0.25, 0.5], **arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Tutorial_Sample_Swatch.png')
sd = SpectralDistribution(sample_sd_data)
cmfs = MSDS_CMFS_STANDARD_OBSERVER['CIE 1931 2 Degree Standard Observer']
illuminant = SDS_ILLUMINANTS['D65']
with domain_range_scale('1'):
XYZ = sd_to_XYZ(sd, cmfs, illuminant)
RGB = XYZ_to_sRGB(XYZ)
plt.close(
plot_single_colour_swatch(
ColourSwatch('Sample', RGB),
text_kwargs={'size': 'x-large'},
**arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Tutorial_Neutral5.png')
patch_name = 'neutral 5 (.70 D)'
patch_sd = SDS_COLOURCHECKERS['ColorChecker N Ohta'][patch_name]
with domain_range_scale('1'):
XYZ = sd_to_XYZ(patch_sd, cmfs, illuminant)
RGB = XYZ_to_sRGB(XYZ)
plt.close(
plot_single_colour_swatch(
ColourSwatch(patch_name.title(), RGB),
text_kwargs={'size': 'x-large'},
**arguments)[0])
arguments['filename'] = os.path.join(output_directory,
'Tutorial_Colour_Checker.png')
plt.close(
plot_single_colour_checker(
colour_checker='ColorChecker 2005',
text_kwargs={'visible': False},
**arguments)[0])
arguments['filename'] = os.path.join(
output_directory, 'Tutorial_CIE_1931_Chromaticity_Diagram.png')
xy = XYZ_to_xy(XYZ)
plot_chromaticity_diagram_CIE1931(standalone=False)
x, y = xy
plt.plot(x, y, 'o-', color='white')
# Annotating the plot.
plt.annotate(
patch_sd.name.title(),
xy=xy,
xytext=(-50, 30),
textcoords='offset points',
arrowprops=dict(arrowstyle='->', connectionstyle='arc3, rad=-0.2'))
plt.close(
render(
standalone=True,
limits=(-0.1, 0.9, -0.1, 0.9),
x_tighten=True,
y_tighten=True,
**arguments)[0])
# *************************************************************************
# "basics.rst"
# *************************************************************************
arguments['filename'] = os.path.join(output_directory,
'Basics_Logo_Small_001_CIE_XYZ.png')
RGB = read_image(os.path.join(output_directory,
'Logo_Small_001.png'))[..., 0:3]
XYZ = sRGB_to_XYZ(RGB)
plt.close(
plot_image(XYZ, text_kwargs={'text': 'sRGB to XYZ'}, **arguments)[0])
if __name__ == '__main__':
generate_documentation_plots(os.path.join('..', 'docs', '_static'))
| bsd-3-clause |
sweverett/Balrog-GalSim | balrog/grid.py | 1 | 12988 | #####################################################################
#
# This file contains code relevant to the construction and use of
# an injection grid for Balrog objects (for now, likely just
# galaxies). Only rectangular and hexagonal grids currently
# supported.
#
#
# Spencer Everett
# UCSC
# 5/2/18
#####################################################################
import math
import numpy as np
import matplotlib.pyplot as plt
# import pudb
# The following base class is useful for accessing allowed parameter values
# without constructing a full config
class BaseGrid(object):
_valid_grid_types = ['RectGrid', 'HexGrid'] #, 'FibonacciGrid']
_valid_mixed_types = ['MixedGrid']
class Grid(BaseGrid):
def __init__(self, grid_spacing, wcs, Npix_x=10000, Npix_y=10000, pixscale=0.2631,
rot_angle=None, pos_offset=None, angle_unit='rad'):
self.grid_spacing = grid_spacing # arcsec
self.im_gs = grid_spacing * (1.0 / pixscale) # pixels
self.Npix_x, self.Npix_y = Npix_x, Npix_y
self.pixscale = pixscale # arcsec
self.wcs = wcs
self.rot_angle = rot_angle # rotation angle, in rad
self.angle_unit = angle_unit
if pos_offset:
self.pos_offset = pos_offset
else:
self.pos_offset = [0., 0.]
# May have to modify grid corners if there is a rotation
if rot_angle:
dx = Npix_x / 2.
dy = Npix_y / 2.
if angle_unit == 'deg':
theta = np.deg2rad(rot_angle)
else:
theta = rot_angle
self.startx = (0.-dx) * np.cos(theta) - (Npix_y-dy) * np.sin(theta) + dx
self.endx = (Npix_x-dx) * np.cos(theta) - (0.-dy) * np.sin(theta) + dx
self.starty = (0.-dx) * np.cos(theta) + (0.-dy) * np.sin(theta) + dx
self.endy = (Npix_x-dx) * np.cos(theta) + (Npix_y-dy) * np.sin(theta) + dx
else:
self.startx, self.endx= 0., Npix_x
self.starty, self.endy= 0., Npix_y
return
def rotate_grid(self, theta, offset=None, angle_unit='rad'):
if angle_unit == 'deg':
theta = np.deg2rad(theta)
elif angle_unit != 'rad':
raise ValueError('`angle_unit` can only be `deg` or `rad`! ' +
'Passed unit of {}'.format(angle_unit))
if not offset: offset = [0., 0.]
c, s = np.cos(theta), np.sin(theta)
R = np.array(((c,-s), (s, c)))
offset_grid = np.array([self.im_ra - offset[0], self.im_dec - offset[1]])
translate = np.empty_like(offset_grid)
translate[0,:] = offset[0]
translate[1,:] = offset[1]
rotated_grid = np.dot(R, offset_grid) + translate
self.im_pos = rotated_grid.T
self.im_ra, self.im_dec = self.im_pos[0,:], self.im_pos[1,:]
return
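    # For reference, rotate_grid applies the standard 2D rotation about the
    # point `offset` = (x0, y0):
    #
    #   [x']   [cos(theta) -sin(theta)] [x - x0]   [x0]
    #   [y'] = [sin(theta)  cos(theta)] [y - y0] + [y0]
    #
    # e.g. theta = pi/2 about the origin sends (1, 0) to (0, 1).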
def cut2buffer(self):
'''
Remove objects outside of tile (and buffer).
We must sample points in the buffer zone in the beginning due to
possible rotations.
'''
b = self.im_gs
in_region = np.where( (self.im_pos[:,0]>b) & (self.im_pos[:,0]<self.Npix_x-b) &
(self.im_pos[:,1]>b) & (self.im_pos[:,1]<self.Npix_y-b) )
self.im_pos = self.im_pos[in_region]
self.im_ra = self.im_pos[:,0]
self.im_dec = self.im_pos[:,1]
# Get all image coordinate pairs
self.pos = self.wcs.wcs_pix2world(self.im_pos, 1)
self.ra = self.pos[:,0]
self.dec = self.pos[:,1]
return
class RectGrid(Grid):
def __init__(self, grid_spacing, wcs, Npix_x=10000, Npix_y=10000, pixscale=0.2631,
rot_angle=None, pos_offset=None, angle_unit='rad'):
super(RectGrid, self).__init__(grid_spacing, wcs, Npix_x=Npix_x, Npix_y=Npix_y,
pixscale=pixscale, rot_angle=rot_angle,
pos_offset=pos_offset, angle_unit=angle_unit)
self._create_grid()
return
def _create_grid(self):
im_gs = self.im_gs
po = self.pos_offset
im_po = po / self.pixscale
self.im_ra = np.arange(self.startx, self.endx, im_gs)
self.im_dec = np.arange(self.starty, self.endy, im_gs)
# Get all image coordinate pairs
self.im_pos = np.array(np.meshgrid(self.im_ra, self.im_dec)).T.reshape(
-1, 2)
self.im_ra = self.im_pos[:,0]
self.im_dec = self.im_pos[:,1]
if self.rot_angle:
self.rotate_grid(self.rot_angle, angle_unit=self.angle_unit,
offset=[(self.Npix_x+im_po[0])/2., (self.Npix_y+im_po[1])/2.])
self.cut2buffer()
return
class HexGrid(Grid):
def __init__(self, grid_spacing, wcs, Npix_x=10000, Npix_y=10000, pixscale=0.2631,
rot_angle=None, pos_offset=None, angle_unit='rad'):
super(HexGrid, self).__init__(grid_spacing, wcs, Npix_x=Npix_x, Npix_y=Npix_y,
pixscale=pixscale, rot_angle=rot_angle,
pos_offset=pos_offset, angle_unit=angle_unit)
self._create_grid()
return
def _create_grid(self):
im_gs = self.im_gs
po = self.pos_offset
im_po = [p / self.pixscale for p in po]
self.im_pos = HexGrid.calc_hex_coords(self.startx, self.starty,
self.endx, self.endy, im_gs)
self.im_ra = self.im_pos[:,0]
self.im_dec = self.im_pos[:,1]
if self.rot_angle:
self.rotate_grid(self.rot_angle, angle_unit=self.angle_unit,
offset=[(self.Npix_x+im_po[0])/2., (self.Npix_y+im_po[1])/2.])
self.cut2buffer()
return
@classmethod
def calc_hex_coords(cls, startx, starty, endx, endy, radius):
        # Geometric factors of the given hexagon
r = radius
p = r * np.tan(np.pi / 6.) # side length / 2
h = 4. * p
dx = 2. * r
dy = 2. * p
row = 1
xs = []
ys = []
while startx < endx:
x = [startx, startx, startx + r, startx + dx, startx + dx, startx + r, startx + r]
xs.append(x)
startx += dx
while starty < endy:
y = [starty + p, starty + 3*p, starty + h, starty + 3*p, starty + p, starty, starty + dy]
ys.append(y)
starty += 2*p
row += 1
polygons = [zip(x, y) for x in xs for y in ys]
hexgrid = cls.polygons2coords(polygons)
# Some hexagonal elements go beyond boundary; cut these out
indx = np.where( (hexgrid[:,0]<endx) & (hexgrid[:,1]<endy) )
return hexgrid[indx]
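    # Geometry note (worked example): with radius = 10 pixels, p = 10 * tan(30 deg)
    # ~= 5.77, so each hexagon side is ~11.55 pixels, rows are spaced dy = 2 * p
    # ~= 11.55 pixels apart, and columns are spaced dx = 2 * radius = 20 pixels apart.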
@classmethod
def polygons2coords(HexGrid, p):
s = np.shape(p)
L = s[0]*s[1]
pp = np.array(p).reshape(L,2)
c = np.vstack({tuple(row) for row in pp})
# Some of the redundant coordinates are offset by ~1e-10 pixels
return np.unique(c.round(decimals=6), axis=0)
def rotate_polygons():
return
class MixedGrid(BaseGrid):
# NOTE: Due to how Balrog processes inputs and position sampling, it can't
# instantiate a MixedGrid with all relevant types simultaneously. Instead,
# we will build the grid after all inj types are declared.
def __init__(self, grid_type, N_inj_types, inj_frac=None):
self.grid_type = grid_type
self.N_inj_types = N_inj_types
if inj_frac is not None:
if isinstance(inj_frac, float):
self.inj_frac = {inj_type : inj_frac}
self.curr_N_types = 1
elif isinstance(inj_frac, dict):
for val in inj_frac.values():
if not isinstance(val, float):
raise TypeError('Each `inj_frac` entry must be a float!')
self.inj_frac = inj_frac
self.curr_N_types = len(inj_frac)
else:
raise TypeError('`inj_frac` can only be passed as a float or a dict!')
self._check_inj_frac()
self.pos = {}
self.indx = {}
self.nobjects = {}
self.assigned_objects = False
return
def _check_inj_frac(self, final=False):
sum = 0
for frac in self.inj_frac.values():
if frac <=0 or frac >=1:
raise ValueError('Each injection fraction must be 0<frac<1.')
sum += frac
if len(self.inj_frac) == self.N_inj_types:
if sum != 1:
raise ValueError('The sum of injection fractions must equal 1 after '
'all types have been set!')
if (final is True) and (len(self.inj_frac) != self.N_inj_types):
raise ValueError('Cannot continue until all injection fractions are set!')
return
def add_injection(self, inj_type, inj_frac):
self.inj_frac[inj_type] = inj_frac
self.curr_N_types += 1
if self.curr_N_types == self.N_inj_types:
self._assign_objects()
return
def build_grid(self, **kwargs):
self.grid = _build_grid(self.grid_type, **kwargs)
# Only assign objects if all injection fractions are set
if self.curr_N_types == self.N_inj_types:
self._assign_objects()
return
def _assign_objects(self):
if self.curr_N_types != self.N_inj_types:
raise ValueError('Cannot assign injection objects to grid until the MixedGrid has '
'all input types set!')
self._check_inj_frac(final=True)
N = len(self.grid.pos)
Ninj = self.N_inj_types
if N==0:
raise ValueError('The constructed grid has zero objects to assign!')
indx = np.arange(N)
icount = 0
for inj_type, inj_frac in self.inj_frac.items():
icount += 1
# Always rounds down
n = int(self.inj_frac[inj_type] * N)
if icount < Ninj:
nobjs = n
else:
# Grab remaining items
nobjs = len(indx)
assert n <= nobjs <= n+Ninj
self.nobjects[inj_type] = nobjs
i = np.random.choice(indx, nobjs, replace=False)
self.indx[inj_type] = i
self.pos[inj_type] = self.grid.pos[i]
indx = np.setdiff1d(indx, i)
        assert np.sum(list(self.nobjects.values())) == N
        assert len(indx) == 0
self.assigned_objects = True
return
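# A minimal usage sketch of MixedGrid (illustrative only: the grid keyword
# arguments and the injection-type names below are assumptions, not taken from
# a real Balrog configuration):
#
#     mixed = MixedGrid('RectGrid', N_inj_types=2,
#                       inj_frac={'gals': 0.7, 'stars': 0.3})
#     mixed.build_grid(grid_spacing=grid_spacing, wcs=wcs, Npix_x=Npix_x,
#                      Npix_y=Npix_y, pixscale=pixscale)
#     # Once all types are set, positions and indices per injection type are
#     # available in mixed.pos['gals'], mixed.indx['gals'], mixed.nobjects['gals'].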
# TODO: This Grid hasn't been fully implemented yet
class FibonacciGrid(Grid):
def __init__(self, wcs, N=1000000):
pass
def make_fib_grid(self, N=1000000):
self.golden_angle = np.pi * (3 - np.sqrt(5))
theta = self.golden_angle * np.arange(N)
z = np.linspace(1 - 1.0 / N, 1.0 / N - 1, N)
radius = np.sqrt(1 - z * z)
points = np.zeros((N, 3))
points[:, 0] = radius * np.cos(theta) # rad
points[:, 1] = radius * np.sin(theta) # rad
points[:, 2] = z
return points
    def plot_fib_grid(self, points):
fig = plt.figure()
fig.set_size_inches(10,10)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:,0], points[:,1], zs=points[:,2])
return
    def plot_sphere(self, points):
fig = plt.figure()
fig.set_size_inches(10, 10)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:,0], points[:,1], zs=points[:,2])
return
def rotate_grid(self, theta, x, y, offset=[0., 0.]):
c, s = np.cos(theta), np.sin(theta)
R = np.array(((c,-s), (s, c)))
offset_grid = np.array([x - offset[0], y - offset[1]])
translate = np.empty_like(offset_grid)
translate[0,:] = offset[0]
translate[1,:] = offset[1]
        offset_grid = np.dot(R, offset_grid) + translate
        # Return the rotated coordinates rather than discarding them.
        return offset_grid
def _build_grid(grid_type, **kwargs):
if grid_type in GRID_TYPES:
# User-defined grid construction
return GRID_TYPES[grid_type](**kwargs)
else:
raise ValueError('There is not yet an implemented default `BalGrid`!')
# NOTE: In the future, we could implement something like the following:
# # Generic grid construction
# bg = BaseGrid()
# if grid_type not in bg.valid_grid_types:
# raise ValueError('{} is not a valid grid type!'.format(grid_type))
# else:
# raise Exception('{} is a valid grid type but not included in '.format(grid_type) +
# 'grid.GRID_TYPES. Please add this type to allow for automatic '
# 'grid building.')
# return BalGrid(input_type, gsconfig, indx)
GRID_TYPES = {
'RectGrid' : RectGrid,
'HexGrid' : HexGrid
#'FibonacciGrid' : FibonacciGrid
}
| mit |
revoer/keystone-8.0.0 | swift/common/middleware/x_profile/html_viewer.py | 25 | 21039 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import os
import random
import re
import string
import tempfile
from swift import gettext_ as _
from exceptions import PLOTLIBNotInstalled, ODFLIBNotInstalled,\
NotFoundException, MethodNotAllowed, DataLoadFailure, ProfileException
from profile_model import Stats2
PLOTLIB_INSTALLED = True
try:
import matplotlib
# use agg backend for writing to file, not for rendering in a window.
# otherwise some platform will complain "no display name and $DISPLAY
# environment variable"
matplotlib.use('agg')
import matplotlib.pyplot as plt
except ImportError:
PLOTLIB_INSTALLED = False
empty_description = """
The default profile of current process or the profile you requested is
empty. <input type="submit" name="refresh" value="Refresh"/>
"""
profile_tmpl = """
<select name="profile">
<option value="current">current</option>
<option value="all">all</option>
${profile_list}
</select>
"""
sort_tmpl = """
<select name="sort">
<option value="time">time</option>
<option value="cumulative">cumulative</option>
<option value="calls">calls</option>
<option value="pcalls">pcalls</option>
<option value="name">name</option>
<option value="file">file</option>
<option value="module">module</option>
<option value="line">line</option>
<option value="nfl">nfl</option>
<option value="stdname">stdname</option>
</select>
"""
limit_tmpl = """
<select name="limit">
<option value="-1">all</option>
<option value="0.1">10%</option>
<option value="0.2">20%</option>
<option value="0.3">30%</option>
<option value="10">10</option>
<option value="20">20</option>
<option value="30">30</option>
<option value="50">50</option>
<option value="100">100</option>
<option value="200">200</option>
<option value="300">300</option>
<option value="400">400</option>
<option value="500">500</option>
</select>
"""
fulldirs_tmpl = """
<input type="checkbox" name="fulldirs" value="1"
${fulldir_checked}/>
"""
mode_tmpl = """
<select name="mode">
<option value="stats">stats</option>
<option value="callees">callees</option>
<option value="callers">callers</option>
</select>
"""
nfl_filter_tmpl = """
<input type="text" name="nfl_filter" value="${nfl_filter}"
placeholder="filename part" />
"""
formelements_tmpl = """
<div>
<table>
<tr>
<td>
<strong>Profile</strong>
<td>
<strong>Sort</strong>
</td>
<td>
<strong>Limit</strong>
</td>
<td>
<strong>Full Path</strong>
</td>
<td>
<strong>Filter</strong>
</td>
<td>
</td>
<td>
<strong>Plot Metric</strong>
</td>
<td>
<strong>Plot Type</strong>
<td>
</td>
<td>
<strong>Format</strong>
</td>
<td>
<td>
</td>
<td>
</td>
</tr>
<tr>
<td>
${profile}
<td>
${sort}
</td>
<td>
${limit}
</td>
<td>
${fulldirs}
</td>
<td>
${nfl_filter}
</td>
<td>
<input type="submit" name="query" value="query"/>
</td>
<td>
<select name='metric'>
<option value='nc'>call count</option>
<option value='cc'>primitive call count</option>
<option value='tt'>total time</option>
<option value='ct'>cumulative time</option>
</select>
</td>
<td>
<select name='plottype'>
<option value='bar'>bar</option>
<option value='pie'>pie</option>
</select>
<td>
<input type="submit" name="plot" value="plot"/>
</td>
<td>
<select name='format'>
<option value='default'>binary</option>
<option value='json'>json</option>
<option value='csv'>csv</option>
<option value='ods'>ODF.ods</option>
</select>
</td>
<td>
<input type="submit" name="download" value="download"/>
</td>
<td>
<input type="submit" name="clear" value="clear"/>
</td>
</tr>
</table>
</div>
"""
index_tmpl = """
<html>
<head>
<title>profile results</title>
<style>
<!--
tr.normal { background-color: #ffffff }
tr.hover { background-color: #88eeee }
//-->
</style>
</head>
<body>
<form action="${action}" method="POST">
<div class="form-text">
${description}
</div>
<hr />
${formelements}
</form>
<pre>
${profilehtml}
</pre>
</body>
</html>
"""
class HTMLViewer(object):
format_dict = {'default': 'application/octet-stream',
'json': 'application/json',
'csv': 'text/csv',
'ods': 'application/vnd.oasis.opendocument.spreadsheet',
'python': 'text/html'}
def __init__(self, app_path, profile_module, profile_log):
self.app_path = app_path
self.profile_module = profile_module
self.profile_log = profile_log
def _get_param(self, query_dict, key, default=None, multiple=False):
value = query_dict.get(key, default)
if value is None or value == '':
return default
if multiple:
return value
if isinstance(value, list):
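            # Parameters whose default is an int (e.g. `limit`) arrive as strings
            # and are converted via eval(); note that eval() on request input is
            # unsafe if this endpoint is reachable by untrusted clients.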
return eval(value[0]) if isinstance(default, int) else value[0]
else:
return value
def render(self, url, method, path_entry, query_dict, clear_callback):
plot = self._get_param(query_dict, 'plot', None)
download = self._get_param(query_dict, 'download', None)
clear = self._get_param(query_dict, 'clear', None)
action = plot or download or clear
profile_id = self._get_param(query_dict, 'profile', 'current')
sort = self._get_param(query_dict, 'sort', 'time')
limit = self._get_param(query_dict, 'limit', -1)
fulldirs = self._get_param(query_dict, 'fulldirs', 0)
nfl_filter = self._get_param(query_dict, 'nfl_filter', '').strip()
metric_selected = self._get_param(query_dict, 'metric', 'cc')
plot_type = self._get_param(query_dict, 'plottype', 'bar')
download_format = self._get_param(query_dict, 'format', 'default')
content = ''
# GET /__profile, POST /__profile
if len(path_entry) == 2 and method in ['GET', 'POST']:
log_files = self.profile_log.get_logfiles(profile_id)
if action == 'plot':
content, headers = self.plot(log_files, sort, limit,
nfl_filter, metric_selected,
plot_type)
elif action == 'download':
content, headers = self.download(log_files, sort, limit,
nfl_filter, download_format)
else:
if action == 'clear':
self.profile_log.clear(profile_id)
clear_callback and clear_callback()
content, headers = self.index_page(log_files, sort, limit,
fulldirs, nfl_filter,
profile_id, url)
# GET /__profile__/all
# GET /__profile__/current
# GET /__profile__/profile_id
# GET /__profile__/profile_id/
# GET /__profile__/profile_id/account.py:50(GETorHEAD)
# GET /__profile__/profile_id/swift/proxy/controllers
# /account.py:50(GETorHEAD)
# with QUERY_STRING: ?format=[default|json|csv|ods]
elif len(path_entry) > 2 and method == 'GET':
profile_id = path_entry[2]
log_files = self.profile_log.get_logfiles(profile_id)
pids = self.profile_log.get_all_pids()
# return all profiles in a json format by default.
# GET /__profile__/
if profile_id == '':
content = '{"profile_ids": ["' + '","'.join(pids) + '"]}'
headers = [('content-type', self.format_dict['json'])]
else:
if len(path_entry) > 3 and path_entry[3] != '':
nfl_filter = '/'.join(path_entry[3:])
if path_entry[-1].find(':0') == -1:
nfl_filter = '/' + nfl_filter
content, headers = self.download(log_files, sort, -1,
nfl_filter, download_format)
headers.append(('Access-Control-Allow-Origin', '*'))
else:
raise MethodNotAllowed(_('method %s is not allowed.') % method)
return content, headers
def index_page(self, log_files=None, sort='time', limit=-1,
fulldirs=0, nfl_filter='', profile_id='current', url='#'):
headers = [('content-type', 'text/html')]
if len(log_files) == 0:
return empty_description, headers
try:
stats = Stats2(*log_files)
except (IOError, ValueError):
raise DataLoadFailure(_('Can not load profile data from %s.')
% log_files)
if not fulldirs:
stats.strip_dirs()
stats.sort_stats(sort)
nfl_filter_esc =\
nfl_filter.replace('(', '\(').replace(')', '\)')
amount = [nfl_filter_esc, limit] if nfl_filter_esc else [limit]
profile_html = self.generate_stats_html(stats, self.app_path,
profile_id, *amount)
description = "Profiling information is generated by using\
'%s' profiler." % self.profile_module
sort_repl = '<option value="%s">' % sort
sort_selected = '<option value="%s" selected>' % sort
sort = sort_tmpl.replace(sort_repl, sort_selected)
plist = ''.join(['<option value="%s">%s</option>' % (p, p)
for p in self.profile_log.get_all_pids()])
profile_element = string.Template(profile_tmpl).substitute(
{'profile_list': plist})
profile_repl = '<option value="%s">' % profile_id
profile_selected = '<option value="%s" selected>' % profile_id
profile_element = profile_element.replace(profile_repl,
profile_selected)
limit_repl = '<option value="%s">' % limit
limit_selected = '<option value="%s" selected>' % limit
limit = limit_tmpl.replace(limit_repl, limit_selected)
fulldirs_checked = 'checked' if fulldirs else ''
fulldirs_element = string.Template(fulldirs_tmpl).substitute(
{'fulldir_checked': fulldirs_checked})
nfl_filter_element = string.Template(nfl_filter_tmpl).\
substitute({'nfl_filter': nfl_filter})
form_elements = string.Template(formelements_tmpl).substitute(
{'description': description,
'action': url,
'profile': profile_element,
'sort': sort,
'limit': limit,
'fulldirs': fulldirs_element,
'nfl_filter': nfl_filter_element,
}
)
content = string.Template(index_tmpl).substitute(
{'formelements': form_elements,
'action': url,
'description': description,
'profilehtml': profile_html,
})
return content, headers
def download(self, log_files, sort='time', limit=-1, nfl_filter='',
output_format='default'):
if len(log_files) == 0:
raise NotFoundException(_('no log file found'))
try:
nfl_esc = nfl_filter.replace('(', '\(').replace(')', '\)')
# remove the slash that is intentionally added in the URL
# to avoid failure of filtering stats data.
if nfl_esc.startswith('/'):
nfl_esc = nfl_esc[1:]
stats = Stats2(*log_files)
stats.sort_stats(sort)
if output_format == 'python':
data = self.format_source_code(nfl_filter)
elif output_format == 'json':
data = stats.to_json(nfl_esc, limit)
elif output_format == 'csv':
data = stats.to_csv(nfl_esc, limit)
elif output_format == 'ods':
data = stats.to_ods(nfl_esc, limit)
else:
data = stats.print_stats()
return data, [('content-type', self.format_dict[output_format])]
except ODFLIBNotInstalled as ex:
raise ex
except Exception as ex:
raise ProfileException(_('Data download error: %s') % ex)
def plot(self, log_files, sort='time', limit=10, nfl_filter='',
metric_selected='cc', plot_type='bar'):
if not PLOTLIB_INSTALLED:
raise PLOTLIBNotInstalled(_('python-matplotlib not installed.'))
if len(log_files) == 0:
raise NotFoundException(_('no log file found'))
try:
stats = Stats2(*log_files)
stats.sort_stats(sort)
stats_dict = stats.stats
__, func_list = stats.get_print_list([nfl_filter, limit])
nfls = []
performance = []
names = {'nc': 'Total Call Count', 'cc': 'Primitive Call Count',
'tt': 'Total Time', 'ct': 'Cumulative Time'}
for func in func_list:
cc, nc, tt, ct, __ = stats_dict[func]
metric = {'cc': cc, 'nc': nc, 'tt': tt, 'ct': ct}
nfls.append(func[2])
performance.append(metric[metric_selected])
y_pos = range(len(nfls))
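            # The xerr values below are random placeholders rather than real
            # uncertainty estimates; they only add visual jitter to the bar plot.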
error = [random.random() for __ in y_pos]
plt.clf()
if plot_type == 'pie':
plt.pie(x=performance, explode=None, labels=nfls,
autopct='%1.1f%%')
else:
plt.barh(y_pos, performance, xerr=error, align='center',
alpha=0.4)
plt.yticks(y_pos, nfls)
plt.xlabel(names[metric_selected])
plt.title('Profile Statistics (by %s)' % names[metric_selected])
# plt.gcf().tight_layout(pad=1.2)
with tempfile.TemporaryFile() as profile_img:
plt.savefig(profile_img, format='png', dpi=300)
profile_img.seek(0)
data = profile_img.read()
            return data, [('content-type', 'image/png')]
except Exception as ex:
raise ProfileException(_('plotting results failed due to %s') % ex)
def format_source_code(self, nfl):
nfls = re.split('[:()]', nfl)
file_path = nfls[0]
try:
lineno = int(nfls[1])
except (TypeError, ValueError, IndexError):
lineno = 0
        # for security reasons, this needs to be fixed.
if not file_path.endswith('.py'):
return _('The file type are forbidden to access!')
try:
data = []
i = 0
with open(file_path) as f:
lines = f.readlines()
max_width = str(len(str(len(lines))))
fmt = '<span id="L%d" rel="#L%d">%' + max_width\
+ 'd|<code>%s</code></span>'
for line in lines:
l = cgi.escape(line, quote=None)
i = i + 1
if i == lineno:
fmt2 = '<span id="L%d" style="background-color: \
rgb(127,255,127)">%' + max_width +\
'd|<code>%s</code></span>'
data.append(fmt2 % (i, i, l))
else:
data.append(fmt % (i, i, i, l))
data = ''.join(data)
except Exception:
return _('Can not access the file %s.') % file_path
return '<pre>%s</pre>' % data
def generate_stats_html(self, stats, app_path, profile_id, *selection):
html = []
for filename in stats.files:
html.append('<p>%s</p>' % filename)
try:
for func in stats.top_level:
html.append('<p>%s</p>' % func[2])
html.append('%s function calls' % stats.total_calls)
if stats.total_calls != stats.prim_calls:
html.append("(%d primitive calls)" % stats.prim_calls)
html.append('in %.3f seconds' % stats.total_tt)
if stats.fcn_list:
stat_list = stats.fcn_list[:]
msg = "<p>Ordered by: %s</p>" % stats.sort_type
else:
stat_list = stats.stats.keys()
msg = '<p>Random listing order was used</p>'
for sel in selection:
stat_list, msg = stats.eval_print_amount(sel, stat_list, msg)
html.append(msg)
html.append('<table style="border-width: 1px">')
if stat_list:
html.append('<tr><th>#</th><th>Call Count</th>\
<th>Total Time</th><th>Time/Call</th>\
<th>Cumulative Time</th>\
<th>Cumulative Time/Call</th>\
<th>Filename:Lineno(Function)</th>\
<th>JSON</th>\
</tr>')
count = 0
for func in stat_list:
count = count + 1
html.append('<tr onMouseOver="this.className=\'hover\'"\
onMouseOut="this.className=\'normal\'">\
<td>%d)</td>' % count)
cc, nc, tt, ct, __ = stats.stats[func]
c = str(nc)
if nc != cc:
c = c + '/' + str(cc)
html.append('<td>%s</td>' % c)
html.append('<td>%f</td>' % tt)
if nc == 0:
html.append('<td>-</td>')
else:
html.append('<td>%f</td>' % (float(tt) / nc))
html.append('<td>%f</td>' % ct)
if cc == 0:
html.append('<td>-</td>')
else:
html.append('<td>%f</td>' % (float(ct) / cc))
nfls = cgi.escape(stats.func_std_string(func))
if nfls.split(':')[0] not in ['', 'profile'] and\
os.path.isfile(nfls.split(':')[0]):
html.append('<td><a href="%s/%s%s?format=python#L%d">\
%s</a></td>' % (app_path, profile_id,
nfls, func[1], nfls))
else:
html.append('<td>%s</td>' % nfls)
if not nfls.startswith('/'):
nfls = '/' + nfls
html.append('<td><a href="%s/%s%s?format=json">\
--></a></td></tr>' % (app_path,
profile_id, nfls))
except Exception as ex:
html.append("Exception:" % ex.message)
return ''.join(html)
| apache-2.0 |
guillermodeandajauregui/enrichmentator | lib/sif_to_net_islands.py | 1 | 1688 | import networkx as nx
import pandas as pd
import os
import sys
# RUN # python sif_to_net_islands.py NETWORK.sif MIN_COMPONENT_SIZE
########## SYSTEMS ARGUMENTS
NW = sys.argv[1]
tamano = sys.argv[2]  # minimum component size to keep
### GRAPH LOADING FUNCTION
def graphfromsif(sif):
s = pd.read_table(sif,
header =None,
delim_whitespace=True)
s = s[[0,2,1]]
s = s.values.tolist()
def formatfunction(lista):
return "{} {} {}".format(lista[0], lista[1], lista[2])
reformat = []
for elem in s:
reformat.append(formatfunction(elem))
g = nx.parse_edgelist(reformat,
nodetype = str,
data=(('weight',float),)
)
return(g)
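# Note on the expected input (an assumption based on the column reordering
# above): each whitespace-separated line of the .sif file is read as
#     <source> <value> <target>
# and reordered to "<source> <target> <value>" so that the middle column is
# parsed as a numeric edge weight, e.g. the line
#     geneA 0.85 geneB
# becomes an edge geneA -- geneB with weight 0.85.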
#########
#LOAD GRAPH
#########
G = graphfromsif(NW)
NOMBRE = NW.split(".")
#########
#BREAK INTO ISLANDS
#########
if not nx.is_connected(G):
L = sorted(list(nx.connected_components(G)), key = len, reverse=True)
a = len(L)
b = len(L[0])
c = len(L[a-1])
print("There are",a,"components!")
print("Largest has",b,"nodes.")
print("Smallest has",c,"nodes.")
comp_num = 1
if not os.path.isdir(NOMBRE[0]+"/"+"ISLANDS/"):
os.makedirs(NOMBRE[0]+"/"+"ISLANDS/")
#lista_graphs = sorted(list(nx.connected_component_subgraphs(G)))
for g in L:
if len(g)>=int(tamano):
print(len(g), type(g))
sub = G.subgraph(g)
print(sorted(nx.nodes(sub))[1], len(sub))
comp_name = NOMBRE[0]+"/"+"ISLANDS/"+NOMBRE[0]+"_I"+str(comp_num).zfill(3)+".net"
nx.write_pajek(G.subgraph(g), path=comp_name, encoding = 'UTF-8')
comp_num = comp_num + 1
| gpl-2.0 |
TaxIPP-Life/Til-BaseModel | til_base_model/targets/dependance.py | 1 | 2884 | # -*- coding:utf-8 -*-
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy
import os
import pandas
import tables
from til_base_model.tests.base import line_prepender, til_base_model_path
try:
from liam2.importer import array_to_disk_array
except ImportError:
from src.importer import array_to_disk_array
drees_excel_file_path = os.path.join(til_base_model_path, 'param', 'demo', 'drees', 'dss43_horizon_2060.xls')
def build_prevalence_2010():
df = pandas.read_excel(drees_excel_file_path, sheetname ='Tab2', header = 3, parse_cols = 'B:O', skip_footer = 4)
for column in df.columns:
if column.startswith('Unnamed') or column.startswith('Ensemble'):
del df[column]
df.index = [index.year for index in df.index]
df.columns = range(1, 7)
csv_file_path = os.path.join(til_base_model_path, 'param', 'demo', 'dependance_prevalence_2010.csv')
data = pandas.DataFrame(df.xs(2010)).T
data = (data / 200).apply(numpy.round) # TODO fix this
data.astype(int).to_csv(csv_file_path, index = False)
line_prepender(csv_file_path, 'age_category')
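    # The resulting CSV holds a single row of six integer counts (one per
    # dependency category, columns 1-6), with an 'age_category' line prepended
    # by line_prepender (assumed here to insert the given string as the first
    # line of the file).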
def build_prevalence_all_years():
df = pandas.read_excel(drees_excel_file_path, sheetname ='Tab6A', header = 3, parse_cols = 'B:E', skip_footer = 3)
# "Au 1er janvier"
df.columns = ['year', 'dependants_optimiste', 'DEPENDANTS', 'dependants_pessimiste']
df.set_index('year', inplace = True)
data = df.reindex(index = range(2010, 2061)).interpolate(method='polynomial', order = 6)
# On passe en année pleine
data.index = [int(str(year - 1) + "01") for year in data.index]
data.index.name = "PERIOD"
simulation_file_path = os.path.join(til_base_model_path, 'Patrimoine_next_metro_200.h5')
h5file = tables.open_file(simulation_file_path, mode="a")
array_to_disk_array(h5file, '/globals', 'dependance_prevalence_all_years', data.DEPENDANTS.values)
h5file.close()
csv_file_path = os.path.join(til_base_model_path, 'param', 'demo', 'dependance_prevalence_all_years.csv')
data = data.reset_index()[['PERIOD', 'DEPENDANTS']]
data.astype(int).to_csv(csv_file_path, index = False)
| gpl-3.0 |
Aasmi/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, yet accurate to some extent.
The One-Class SVM does not assume any parametric form of the data
distribution and can therefore model the complex shape of the data much
better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
SciTools/nc-time-axis | nc_time_axis/__init__.py | 1 | 13082 | """
Support for cftime axis in matplotlib.
"""
from collections import namedtuple
import cftime
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.units as munits
import numpy as np
from ._version import version as __version__ # noqa: F401
# Lower and upper are in number of days.
FormatOption = namedtuple("FormatOption", ["lower", "upper", "format_string"])
class CalendarDateTime:
"""
Container for :class:`cftime.datetime` object and calendar.
"""
def __init__(self, datetime, calendar):
self.datetime = datetime
self.calendar = calendar
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and self.datetime == other.datetime
and self.calendar == other.calendar
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return (
f"<{type(self).__name__}: datetime={self.datetime}, "
"calendar={self.calendar}>"
)
class NetCDFTimeDateFormatter(mticker.Formatter):
"""
Formatter for cftime.datetime data.
"""
# Some magic numbers. These seem to work pretty well.
format_options = [
FormatOption(0.0, 0.2, "%H:%M:%S"),
FormatOption(0.2, 0.8, "%H:%M"),
FormatOption(0.8, 15, "%Y-%m-%d %H:%M"),
FormatOption(15, 90, "%Y-%m-%d"),
FormatOption(90, 900, "%Y-%m"),
FormatOption(900, 6000000, "%Y"),
]
def __init__(self, locator, calendar, time_units):
#: The locator associated with this formatter. This is used to get hold
#: of the scaling information.
self.locator = locator
self.calendar = calendar
self.time_units = time_units
def pick_format(self, ndays):
"""
Returns a format string for an interval of the given number of days.
"""
for option in self.format_options:
if option.lower < ndays <= option.upper:
return option.format_string
else:
emsg = f"No formatter found for an interval of {ndays} days."
raise ValueError(emsg)
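    # For example, an axis span of 30 days falls in the (15, 90] bucket above
    # and is therefore formatted as "%Y-%m-%d".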
def __call__(self, x, pos=0):
format_string = self.pick_format(ndays=self.locator.ndays)
dt = cftime.num2date(x, self.time_units, calendar=self.calendar)
return dt.strftime(format_string)
class NetCDFTimeDateLocator(mticker.Locator):
"""
Determines tick locations when plotting cftime.datetime data.
"""
real_world_calendars = (
"gregorian",
"julian",
"proleptic_gregorian",
"standard",
)
def __init__(self, max_n_ticks, calendar, date_unit, min_n_ticks=3):
# The date unit must be in the form of days since ...
self.max_n_ticks = max_n_ticks
self.min_n_ticks = min_n_ticks
self._max_n_locator = mticker.MaxNLocator(max_n_ticks, integer=True)
self._max_n_locator_days = mticker.MaxNLocator(
max_n_ticks, integer=True, steps=[1, 2, 4, 7, 10]
)
self.calendar = calendar
self.date_unit = date_unit
if not self.date_unit.lower().startswith("days since"):
emsg = (
"The date unit must be days since for a NetCDF "
"time locator."
)
raise ValueError(emsg)
self._cached_resolution = {}
def compute_resolution(self, num1, num2, date1, date2):
"""
Returns the resolution of the dates (hourly, minutely, yearly), and
an **approximate** number of those units.
"""
num_days = float(np.abs(num1 - num2))
resolution = "SECONDLY"
n = mdates.SEC_PER_DAY
if num_days * mdates.MINUTES_PER_DAY > self.max_n_ticks:
resolution = "MINUTELY"
n = int(num_days / mdates.MINUTES_PER_DAY)
if num_days * mdates.HOURS_PER_DAY > self.max_n_ticks:
resolution = "HOURLY"
n = int(num_days / mdates.HOURS_PER_DAY)
if num_days > self.max_n_ticks:
resolution = "DAILY"
n = int(num_days)
if num_days > 30 * self.max_n_ticks:
resolution = "MONTHLY"
n = num_days // 30
if num_days > 365 * self.max_n_ticks:
resolution = "YEARLY"
n = abs(date1.year - date2.year)
return resolution, n
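        # For example, with max_n_ticks=4 a 2-day span resolves to "HOURLY":
        # it exceeds 4 ticks at hourly granularity (48 hours) but not at daily.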
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
vmin, vmax = mtransforms.nonsingular(
vmin, vmax, expander=1e-7, tiny=1e-13
)
self.ndays = float(abs(vmax - vmin))
lower = cftime.num2date(vmin, self.date_unit, calendar=self.calendar)
upper = cftime.num2date(vmax, self.date_unit, calendar=self.calendar)
resolution, n = self.compute_resolution(vmin, vmax, lower, upper)
def has_year_zero(year):
result = dict()
if self.calendar in self.real_world_calendars and not bool(year):
result = dict(has_year_zero=True)
return result
if resolution == "YEARLY":
            # TODO START AT THE BEGINNING OF A DECADE/CENTURY/MILLENNIUM as
# appropriate.
years = self._max_n_locator.tick_values(lower.year, upper.year)
ticks = [
cftime.datetime(
int(year),
1,
1,
calendar=self.calendar,
**has_year_zero(year),
)
for year in years
]
elif resolution == "MONTHLY":
            # TODO START AT THE BEGINNING OF A DECADE/CENTURY/MILLENNIUM as
# appropriate.
months_offset = self._max_n_locator.tick_values(0, n)
ticks = []
for offset in months_offset:
year = lower.year + np.floor((lower.month + offset) / 12)
month = ((lower.month + offset) % 12) + 1
dt = cftime.datetime(
int(year),
int(month),
1,
calendar=self.calendar,
**has_year_zero(year),
)
ticks.append(dt)
elif resolution == "DAILY":
# TODO: It would be great if this favoured multiples of 7.
days = self._max_n_locator_days.tick_values(vmin, vmax)
ticks = [
cftime.num2date(dt, self.date_unit, calendar=self.calendar)
for dt in days
]
elif resolution == "HOURLY":
hour_unit = "hours since 2000-01-01"
in_hours = cftime.date2num(
[lower, upper], hour_unit, calendar=self.calendar
)
hours = self._max_n_locator.tick_values(in_hours[0], in_hours[1])
ticks = [
cftime.num2date(dt, hour_unit, calendar=self.calendar)
for dt in hours
]
elif resolution == "MINUTELY":
minute_unit = "minutes since 2000-01-01"
in_minutes = cftime.date2num(
[lower, upper], minute_unit, calendar=self.calendar
)
minutes = self._max_n_locator.tick_values(
in_minutes[0], in_minutes[1]
)
ticks = [
cftime.num2date(dt, minute_unit, calendar=self.calendar)
for dt in minutes
]
elif resolution == "SECONDLY":
second_unit = "seconds since 2000-01-01"
in_seconds = cftime.date2num(
[lower, upper], second_unit, calendar=self.calendar
)
seconds = self._max_n_locator.tick_values(
in_seconds[0], in_seconds[1]
)
ticks = [
cftime.num2date(dt, second_unit, calendar=self.calendar)
for dt in seconds
]
else:
emsg = f"Resolution {resolution} not implemented yet."
raise ValueError(emsg)
# Some calenders do not allow a year 0.
# Remove ticks to avoid raising an error.
if self.calendar in [
"proleptic_gregorian",
"gregorian",
"julian",
"standard",
]:
ticks = [t for t in ticks if t.year != 0]
return cftime.date2num(ticks, self.date_unit, calendar=self.calendar)
class NetCDFTimeConverter(mdates.DateConverter):
"""
Converter for cftime.datetime data.
"""
standard_unit = "days since 2000-01-01"
@staticmethod
def axisinfo(unit, axis):
"""
Returns the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
calendar, date_unit, date_type = unit
majloc = NetCDFTimeDateLocator(
4, calendar=calendar, date_unit=date_unit
)
majfmt = NetCDFTimeDateFormatter(
majloc, calendar=calendar, time_units=date_unit
)
if date_type is CalendarDateTime:
datemin = CalendarDateTime(
cftime.datetime(2000, 1, 1), calendar=calendar
)
datemax = CalendarDateTime(
cftime.datetime(2010, 1, 1), calendar=calendar
)
else:
datemin = date_type(2000, 1, 1)
datemax = date_type(2010, 1, 1)
return munits.AxisInfo(
majloc=majloc,
majfmt=majfmt,
label="",
default_limits=(datemin, datemax),
)
@classmethod
def default_units(cls, sample_point, axis):
"""
Computes some units for the given data point.
"""
if hasattr(sample_point, "__iter__"):
# Deal with nD `sample_point` arrays.
if isinstance(sample_point, np.ndarray):
sample_point = sample_point.reshape(-1)
calendars = np.array([point.calendar for point in sample_point])
if np.all(calendars == calendars[0]):
calendar = calendars[0]
else:
raise ValueError("Calendar units are not all equal.")
date_type = type(sample_point[0])
else:
# Deal with a single `sample_point` value.
if not hasattr(sample_point, "calendar"):
msg = (
"Expecting cftimes with an extra " '"calendar" attribute.'
)
raise ValueError(msg)
else:
calendar = sample_point.calendar
date_type = type(sample_point)
return calendar, cls.standard_unit, date_type
@classmethod
def convert(cls, value, unit, axis):
"""
Converts value, if it is not already a number or sequence of numbers,
with :func:`cftime.date2num`.
"""
shape = None
if isinstance(value, np.ndarray):
# Don't do anything with numeric types.
if value.dtype != object:
return value
shape = value.shape
value = value.reshape(-1)
first_value = value[0]
else:
# Don't do anything with numeric types.
if munits.ConversionInterface.is_numlike(value):
return value
first_value = value
if not isinstance(first_value, (CalendarDateTime, cftime.datetime)):
raise ValueError(
"The values must be numbers or instances of "
'"nc_time_axis.CalendarDateTime" or '
'"cftime.datetime".'
)
if isinstance(first_value, CalendarDateTime):
if not isinstance(first_value.datetime, cftime.datetime):
raise ValueError(
"The datetime attribute of the "
"CalendarDateTime object must be of type "
"`cftime.datetime`."
)
if isinstance(first_value, CalendarDateTime):
if isinstance(value, np.ndarray):
value = [v.datetime for v in value]
else:
value = value.datetime
result = cftime.date2num(
value, cls.standard_unit, calendar=first_value.calendar
)
if shape is not None:
result = result.reshape(shape)
return result
# Automatically register NetCDFTimeConverter with matplotlib.unit's converter
# dictionary.
if CalendarDateTime not in munits.registry:
munits.registry[CalendarDateTime] = NetCDFTimeConverter()
CFTIME_TYPES = [
cftime.DatetimeNoLeap,
cftime.DatetimeAllLeap,
cftime.DatetimeProlepticGregorian,
cftime.DatetimeGregorian,
cftime.Datetime360Day,
cftime.DatetimeJulian,
]
for date_type in CFTIME_TYPES:
if date_type not in munits.registry:
munits.registry[date_type] = NetCDFTimeConverter()
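# A minimal usage sketch (assumptions: matplotlib with a working backend and
# cftime are installed). Importing this module registers the converters above,
# after which cftime datetimes can be passed directly to matplotlib:
#
#     import cftime
#     import matplotlib.pyplot as plt
#     import nc_time_axis  # noqa: F401 (import registers the converters)
#
#     times = [cftime.Datetime360Day(2000, 1, day) for day in range(1, 11)]
#     plt.plot(times, range(10))
#     plt.show()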
| bsd-3-clause |
sniemi/SamPy | resolve/misc/findSlitsDirectImage.py | 1 | 30076 | '''
Fits slit positions to a direct image to find sky positions and a rotation angle.
Fits all three slits separately to recover their position on the sky.
All Inputs should be given in a configuration file, only command line argument
needed is the name of the configuration file.
:requires: SamPy
:requires: PyFITS
:requires: NumPy
:requires: matplotlib
:requires: SciPy
:author: Sami-Matias Niemi
:contact: [email protected]
:version: 0.2
'''
import matplotlib
import sys, os
import ConfigParser
from optparse import OptionParser
import pyfits as PF
import pylab as P
import numpy as np
import matplotlib.patches as patches
from matplotlib import cm
import scipy
import scipy.ndimage.interpolation as interpolation
import SamPy.smnIO.write as write
import SamPy.smnIO.read
import SamPy.image.manipulation as m
class FindSlitPositionsInDirectImage():
'''
This class can be used to find slit positions in a direct image.
It fits the slits separately to recover their sky positions.
'''
def __init__(self, configfile, debug, section='DEFAULT'):
'''
Constructor
'''
self.configfile = configfile
self.section = section
self.debug = debug
self._readConfigs()
self._readFITSfiles()
self._processConfigs()
def _readConfigs(self):
'''
Reads the config file information using configParser.
'''
self.config = ConfigParser.RawConfigParser()
self.config.readfp(open(self.configfile))
self.slitfile = self.config.get(self.section, 'slitimage')
self.dirfile = self.config.get(self.section, 'directimage')
def _readFITSfiles(self):
'''
Reads in the slit and direct image FITS files.
'''
#load images
fh = PF.open(self.dirfile, ignore_missing_end=True)
self.dirImageHDR = fh[0].header
img = fh[0].data
fh.close()
if img.shape[0] == 1:
img = img[0]
self.directImage = img
fh = PF.open(self.slitfile, ignore_missing_end=True)
self.slitImageHDR = fh[0].header
slitimage = fh[0].data
fh.close()
if slitimage.shape[0] == 1:
slitimage = slitimage[0]
self.slitImage = slitimage
def _processConfigs(self):
'''
Process configuration information and produce a dictionary
describing slits.
'''
ltolerance = 10
xcoords = [int(a) for a in self.config.get(self.section, 'xcoordinates').strip().split(',')]
ycoords = [int(a) for a in self.config.get(self.section, 'ycoordinates').strip().split(',')]
widths = [float(a) for a in self.config.get(self.section, 'widths').strip().split(',')]
heights = [float(a) for a in self.config.get(self.section, 'heights').strip().split(',')]
thrs = [float(a) for a in self.config.get(self.section, 'throughputs').strip().split(',')]
offseta = self.config.getfloat(self.section, 'offsetalong')
offsetb = self.config.getfloat(self.section, 'offsetbetween')
platescale = self.config.getfloat(self.section, 'platescale')
names = list(self.config.get(self.section, 'names').strip().split(','))
names = [name.strip() for name in names]
out = {}
for n, x, y, w, h, t in zip(names, xcoords, ycoords, widths, heights, thrs):
n = n.strip()
wd = w / platescale
hd = h / platescale
xmin = int(x)
xmax = int(np.floor(xmin + wd))
xmid = int(np.round((2*xmin+wd) / 2.))
ymin = int(y)
ymax = int(np.floor(ymin + hd))
ymid = int(np.round((2*ymin+hd) / 2.))
#slit image values
vals = self.slitImage[ymin:ymax + 1, xmin:xmax + 1].copy()
vals2 = self.slitImage[ymin-ltolerance:ymax + ltolerance + 1, \
xmin-ltolerance:xmax + ltolerance + 1].copy()
#make the dictionary
out[n] = {'xmid': xmid, 'ymid': ymid,
'skywidth': w, 'skyheight': h,
'wd' : wd, 'hd' : hd,
'width': xmax-xmin, 'height': ymax-ymin,
'xmin': xmin, 'xmax': xmax,
'ymin': ymin, 'ymax': ymax,
'xy': (xmin, ymin),
'shape': vals.shape,
'throughput': 1. / t,
'values': vals,
'valuesLarge' : vals2,
'tolerance' : ltolerance,
'pixels': len(vals.ravel())}
self.slits = out
sky = {}
sky['offseta'] = offseta
sky['offsetb'] = offsetb
sky['platescale'] = platescale
self.sky = sky
#fitting related
fitting = {}
fitting['xrange'] = self.config.getint(self.section, 'xrange')
fitting['xstep'] = self.config.getint(self.section, 'xstep')
fitting['yrange'] = self.config.getint(self.section, 'yrange')
fitting['ystep'] = self.config.getint(self.section, 'ystep')
try:
fitting['rotation'] = self.config.getfloat(self.section, 'rotation')
self.rotation = True
except:
self.rotation = False
try:
self.normalize = self.config.getboolean(self.section, 'normalize')
except:
self.normalize = False
fitting['rotstep'] = self.config.getfloat(self.section, 'rotationstep')
self.fitting = fitting
if self.debug:
print '\nslits:'
print self.slits
print '\nsky:'
print self.sky
print '\nfitting:'
print self.fitting
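    # A minimal example of the expected configuration file (a hedged sketch:
    # file names and numbers are illustrative, not taken from real data):
    #
    #   [DEFAULT]
    #   slitimage = slitimage.fits
    #   directimage = directimage.fits
    #   names = low, mid, up
    #   xcoordinates = 100, 200, 300
    #   ycoordinates = 400, 400, 400
    #   widths = 1.0, 1.0, 1.0
    #   heights = 10.0, 10.0, 10.0
    #   throughputs = 1.0, 1.0, 1.0
    #   offsetalong = 20.0
    #   offsetbetween = 5.0
    #   platescale = 0.1
    #   xrange = 50
    #   xstep = 1
    #   yrange = 50
    #   ystep = 1
    #   rotation = 1.0
    #   rotationstep = 0.1
    #   normalize = False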
def generateSlitImages(self, output='slits', type='.pdf'):
'''
Generates diagnostic plots from slit image.
'''
rotText = 40
#generate a separate image of the slit data of each slit image.
for slit in self.slits:
s = self.slits[slit]
fig = P.figure()
ax = fig.add_subplot(111)
#take log10 from the slit data
tmp = s['values'] * s['throughput']
tmp[tmp <= 0.0] = 1
tmp = np.log10(tmp)
ax.imshow(tmp,
origin='lower',
interpolation=None)
#rotate x axis labels
for tl in ax.get_xticklabels():
tl.set_rotation(rotText)
P.savefig(output + slit + type)
P.close()
#make a single slit image
fig = P.figure()
for i, slit in enumerate(self.slits):
s = self.slits[slit]
ax = fig.add_subplot(1, len(self.slits), i + 1)
#take log10 from the slit data
tmp = s['values'] * s['throughput']
tmp[tmp <= 0.0] = 1
tmp = np.log10(tmp)
#take log10 from the slit data
ax.imshow(tmp,
origin='lower', interpolation=None)
#rotate x axis labels
for tl in ax.get_xticklabels():
tl.set_rotation(rotText)
#annotate
ax.annotate(slit, xy=(0.5, 1.05),
xycoords='axes fraction', ha='center', va='center')
P.savefig(output + 'All' + type)
P.close()
def writeDS9RegionFile(self, output='slits.reg'):
'''
Writes a DS9 region file for all the slits.
Draws a rectangle around each slit.
'''
fh = open(output, 'w')
for slit in self.slits:
s = self.slits[slit]
#DS0 box format is x, y, width, height, but x and y are the centre point
string = 'box %i %i %i %i 0\n' % (s['xmid'],
s['ymid'],
s['width'],
s['height'])
fh.write(string)
fh.close()
def approxSkyPosition(self):
'''
Generates an approximated sky position for slits.
:note: this works only if the slits have the same width
'''
yoff = int(np.round(self.sky['offseta'] / self.sky['platescale']))
xoff = int(np.round(self.sky['offsetb'] / self.sky['platescale']))
for slit in self.slits:
if slit == 'up':
self.slits[slit]['ymidSky'] = self.slits['mid']['ymid'] + yoff
self.slits[slit]['yminSky'] = self.slits[slit]['ymidSky'] - self.slits[slit]['height']/2
self.slits[slit]['ymaxSky'] = self.slits[slit]['yminSky'] + self.slits[slit]['height']
self.slits[slit]['xmidSky'] = self.slits['mid']['xmid'] - xoff
self.slits[slit]['xminSky'] = self.slits[slit]['xmidSky'] - self.slits[slit]['width']/2
self.slits[slit]['xmaxSky'] = self.slits[slit]['xminSky'] + self.slits[slit]['width']
elif slit == 'low':
self.slits[slit]['ymidSky'] = self.slits['mid']['ymid'] - yoff
self.slits[slit]['yminSky'] = self.slits[slit]['ymidSky'] - self.slits[slit]['height']/2
self.slits[slit]['ymaxSky'] = self.slits[slit]['yminSky'] + self.slits[slit]['height']
self.slits[slit]['xmidSky'] = self.slits['mid']['xmid'] + xoff
self.slits[slit]['xminSky'] = self.slits[slit]['xmidSky'] - self.slits[slit]['width']/2
self.slits[slit]['xmaxSky'] = self.slits[slit]['xminSky'] + self.slits[slit]['width']
else:
self.slits[slit]['ymidSky'] = self.slits['mid']['ymid']
self.slits[slit]['yminSky'] = self.slits['mid']['ymin']
self.slits[slit]['ymaxSky'] = self.slits['mid']['ymax']
self.slits[slit]['xmidSky'] = self.slits['mid']['xmid']
self.slits[slit]['xminSky'] = self.slits[slit]['xmidSky'] - self.slits[slit]['width']/2
self.slits[slit]['xmaxSky'] = self.slits[slit]['xminSky'] + self.slits[slit]['width']
self.slits[slit]['xySky'] = (self.slits[slit]['xminSky'], self.slits[slit]['yminSky'])
fh = open('skyApproximated.reg', 'w')
for s in self.slits.values():
#DS0 box format is x, y, width, height, but x and y are the centre point
string = 'box %i %i %i %i 0\n' % (s['xmidSky'],
s['ymidSky'],
s['width'],
s['height'])
fh.write(string)
fh.close()
def fitSlitsToDirectImage(self, normalize=False):
'''
        Fits slits to a direct image to recover their position and orientation.
By default the counts are not normalized to a peak count, but this can
be controlled using the optional keyword normalize.
:note: this is a very slow algorithm because of the insanely many nested
for loops...
:rtype: dictionary
'''
#generates a model array from the slit values, takes into account potential
#throughput of a slit
for slit in self.slits:
s = self.slits[slit]
model = s['values'].ravel() * s['throughput']
#mask out negative values
msk = model > 0.0
model = model[msk]
if self.normalize:
model /= np.max(model)
self.slits[slit]['model'] = model
self.slits[slit]['mask'] = msk
#generate rotations
if self.rotation:
rotations = np.arange(-self.fitting['rotation'], self.fitting['rotation'], self.fitting['rotstep'])
rotations[(rotations < 1e-8) & (rotations > -1e-8)] = 0.0
#make a copy of the direct image
origimage = self.directImage.copy()
else:
rotations = [0, ]
out = {}
chmin = {}
cm = {}
minpos = {}
for slit in self.slits:
chmin[slit] = -9.99
cm[slit] = 1e20
out[slit] = []
minpos[slit] = -1e10
#loop over a range of rotations, x and y positions around the nominal position and record x, y and chisquare
for r in rotations:
if self.rotation:
if r != 0.0:
d = interpolation.rotate(origimage, r, reshape=False)
else:
d = origimage.copy()
else:
d = self.directImage.copy()
for x in range(-self.fitting['xrange'], self.fitting['xrange'], self.fitting['xstep']):
for y in range(-self.fitting['yrange'], self.fitting['yrange'], self.fitting['ystep']):
#all slits
for slit in self.slits:
s = self.slits[slit]
#direct image data
dirdat = d[s['yminSky'] + y:s['ymaxSky'] + y + 1,
s['xminSky'] + x:s['xmaxSky'] + x + 1]
dirdata = dirdat.ravel()
#remove the masked pixels
dirdata = dirdata[s['mask']]
if self.normalize:
dirdata /= np.max(dirdata)
                        # chisquare() returns (statistic, p-value); keep only the
                        # statistic, and compare against the per-slit model stored
                        # in self.slits[slit]['model'] above.
                        chisq = scipy.stats.chisquare(dirdata, s['model'])[0]
                        tmp = [r, x, y, chisq, chisq / s['pixels'], slit]
out[slit].append(tmp)
#save the dirdata of the minimum chisqr
if chisq < cm[slit]:
chmin[slit] = dirdat
cm[slit] = chisq
minpos[slit] = tmp
if self.debug:
print r, x, y, chisq / s['pixels'], slit
#results dictionary
r = {}
r['outputs'] = out
r['chiMinData'] = chmin
r['minimaPosition'] = minpos
self.result = r
if self.debug:
print '\nMinima positions:'
print self.result['minimaPosition']
def fitSlitsToDirectImage2(self):
'''
        Fits slits to a direct image to recover their position and orientation.
By default the counts are not normalized to a peak count, but this can
be controlled using the optional keyword normalize.
:note: this is a very slow algorithm because of the insanely many nested
for loops...
:rtype: dictionary
'''
#generates a model array from the slit values, takes into account potential
#throughput of a slit
for slit in self.slits:
s = self.slits[slit]
model = s['values'].ravel() * s['throughput']
#mask out negative values
msk = model > 0.0
model = model[msk]
mean = np.mean(model)
sig = np.sqrt(np.sum((model - mean)**2))
diff = model - mean
self.slits[slit]['model'] = model
self.slits[slit]['mask'] = msk
self.slits[slit]['sig'] = sig
self.slits[slit]['diff'] = diff
#generate rotations
if self.rotation:
rotations = np.arange(-self.fitting['rotation'], self.fitting['rotation'], self.fitting['rotstep'])
rotations[(rotations < 1e-8) & (rotations > -1e-8)] = 0.0
#make a copy of the direct image
origimage = self.directImage.copy()
else:
rotations = [0, ]
out = {}
chmin = {}
cm = {}
minpos = {}
for slit in self.slits:
chmin[slit] = -9.99
cm[slit] = 1e20
out[slit] = []
minpos[slit] = -1e10
#loop over a range of rotations, x and y positions around the nominal position and record x, y and chisquare
for r in rotations:
if self.rotation:
if r != 0.0:
d = interpolation.rotate(origimage, r, reshape=False)
else:
d = origimage.copy()
else:
d = self.directImage.copy()
for x in range(-self.fitting['xrange'], self.fitting['xrange'], self.fitting['xstep']):
for y in range(-self.fitting['yrange'], self.fitting['yrange'], self.fitting['ystep']):
#all slits
for slit in self.slits:
s = self.slits[slit]
#direct image data
dirdat = d[s['yminSky'] + y:s['ymaxSky'] + y + 1,
s['xminSky'] + x:s['xmaxSky'] + x + 1]
dirdata = dirdat.ravel()
#remove the masked pixels
dirdata = dirdata[s['mask']]
mean = np.mean(dirdata)
sig = np.sqrt(np.sum((dirdata - mean)**2))
                        diff = dirdata - mean  # cross-correlate against the direct-image cutout
corr = np.sum(s['diff'] * diff) / s['sig'] / sig
tmp = [r, x, y, 1./corr, 1./corr, slit]
out[slit].append(tmp)
#save the dirdata of the minimum chisqr
                        if 1. / corr < cm[slit]:
chmin[slit] = dirdat
cm[slit] = 1./corr
minpos[slit] = tmp
if self.debug:
print r, x, y, 1./corr, slit
#results dictionary
r = {}
r['outputs'] = out
r['chiMinData'] = chmin
r['minimaPosition'] = minpos
self.result = r
if self.debug:
print '\nMinima positions:'
print self.result['minimaPosition']
def plotMinimalization(self, output='minima', type='.png'):
'''
Generates a two dimensional map of the minimalization
for each slit separately.
:note: When fitting rotation all rotations are plotted on
top, so the plot may not be that useful.
'''
data = self.result['outputs']
for slit in data:
d = np.asarray([[x[1], x[2], x[4]]for x in data[slit]])
P.figure()
P.scatter(d[:, 0],
d[:, 1],
c=1. / np.log10(d[:, 2]),
s=30,
cmap=cm.get_cmap('jet'),
edgecolor='none',
alpha=0.2)
P.xlim(-self.fitting['xrange'], self.fitting['xrange'])
P.ylim(-self.fitting['yrange'], self.fitting['yrange'])
P.xlabel('X [pixels]')
P.ylabel('Y [pixels]')
P.savefig(output + 'Map' + slit + type)
P.close()
def outputMinima(self):
'''
Outputs the results to a file and also the screen if debug = True.
'''
if self.debug:
print '\n\ndirect image slit image \t rot' + \
'\t x \t y \t xoff \t yoff \t chi**2 reduced chi**2 \t slit'
fh1 = open('min.txt', 'a')
fh2 = open('skyFitted.reg', 'w')
fh3 = open('slitmask.txt', 'w')
for res in self.result['minimaPosition'].values():
r = res[0]
x = res[1]
y = res[2]
n = res[5]
#take into account possible trimming of the direct image
try:
xtr = self.dirImageHDR['LTV1']
except:
xtr = 0
try:
ytr = self.dirImageHDR['LTV2']
except:
ytr = 0
#derive the mid positions in a full frame
xpos = x + self.slits[n]['xmidSky'] - xtr
ypos = y + self.slits[n]['ymidSky'] - ytr
#save the positions to result dictionary
self.slits[n]['xminFitted'] = x + self.slits[n]['xminSky'] - xtr
self.slits[n]['xmaxFitted'] = x + self.slits[n]['xmaxSky'] - xtr
self.slits[n]['yminFitted'] = y + self.slits[n]['yminSky'] - ytr
self.slits[n]['ymaxFitted'] = y + self.slits[n]['ymaxSky'] - ytr
#write the file
fh3.write('slit\t\t= %s\n' % (n))
fh3.write('rotation\t= %.3f\n' % -r)
fh3.write('x\t\t= %i\n' % xpos)
fh3.write('y\t\t= %i\n' % ypos)
fh3.write('\n')
tmp = 'box {0:1f} {1:1f} {2:1f} {3:1f} {4:.3f} \n'.format(xpos,
ypos,
self.slits[n]['wd'],
self.slits[n]['hd'],
r)
fh2.write(tmp)
str = '{0:>s}\t{1:>s}\t{2:.2f}\t{3:.0f}\t{4:.0f}\t{5:.0f}\t{6:.0f}\t{7:>s}\t{8:.1f}\t{9:>s}'.format(self.dirfile,
self.slitfile,
-r,
xpos,
ypos,
x,
y,
res[3],
res[4],
n)
fh1.write(str + '\n')
if self.debug:
print str
fh1.close()
fh2.close()
fh3.close()
if self.debug:
print
print xtr, ytr
def overPlotSlits(self, output='overplottedOriginalsLog', type='.pdf', logscale=True):
'''
Overplot the slits to image data. Will overplot both the original slit
positions and the best fitted position. Will also plot residuals.
:note: it looks lie the fitted slit positions are in a wrong place in the
image. Maybe the patch.set_transform is not working as I assume...
:param: output, output file name
:param: type
:param: logscale, whether a log10 should be taken from the image data
'''
#make a copy of the imdata, in case we modify it...
img = self.directImage.copy()
fig = P.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
#show image
img[img < 0] = 0
if logscale:
img[img > 0] = np.log10(img[img > 0])
ax1.imshow(img, origin='lower', interpolation=None)
#original Slits
for slit in self.slits.values():
ax1.add_patch(patches.Rectangle(slit['xySky'],
slit['width'],
slit['height'],
fill=False))
#fitted slit positions
for mins in self.result['minimaPosition'].values():
rot = mins[0]
n = mins[5]
tmp = (self.slits[n]['xminSky'] + mins[1], self.slits[n]['yminSky'] + mins[2])
patch = patches.Rectangle(tmp,
self.slits[n]['wd'],
self.slits[n]['hd'],
fill=False,
ec='red')
t2 = matplotlib.transforms.Affine2D().rotate_deg(rot) + ax1.transData
patch.set_transform(t2)
ax1.add_patch(patch)
#rotate x axis labels
for tl in ax1.get_xticklabels():
tl.set_rotation(40)
#rotate x axis labels
for tl in ax2.get_xticklabels():
tl.set_rotation(40)
#plot residuals
z = np.ones(img.shape)
for mins in self.result['minimaPosition'].values():
x = mins[1]
y = mins[2]
n = mins[5]
s = self.slits[n]
y1 = s['yminSky'] + y
y2 = s['ymaxSky'] + y + 1
x1 = s['xminSky'] + x
x2 = s['xmaxSky'] + x + 1
z[y1:y2, x1:x2] = (s['values']/np.max(s['values'])) / \
(self.result['chiMinData'][n]/np.max(self.result['chiMinData'][n]))
i2 = ax2.imshow(z, origin='lower', interpolation=None,
cmap=cm.get_cmap('binary'), vmin=0.795, vmax=1.205)
c2 = fig.colorbar(i2, ax=ax2, shrink=0.7, fraction=0.05)
c2.set_label('Slit Values / Direct Image Data')
#annotate
ax1.annotate('Fitted Position', xy=(0.5, 1.05),
xycoords='axes fraction', ha='center', va='center')
ax2.annotate('Residuals', xy=(0.5, 1.05),
xycoords='axes fraction', ha='center', va='center')
#save the first image
P.savefig(output + type)
#zoom-in version
ymin = np.min(np.asarray([x['yminSky'] for x in self.slits.values()]))
ymax = np.max(np.asarray([x['ymaxSky'] for x in self.slits.values()]))
xmin = np.min(np.asarray([x['xminSky'] for x in self.slits.values()]))
xmax = np.max(np.asarray([x['xmaxSky'] for x in self.slits.values()]))
ax1.set_xlim(xmin - 200, xmax + 200)
ax2.set_xlim(xmin - 200, xmax + 200)
ax1.set_ylim(ymin - 100, ymax + 100)
ax2.set_ylim(ymin - 100, ymax + 100)
P.savefig(output + 'Zoomed' + type)
P.close()
del img
def outputShiftedImage(self):
'''
Outputs a FITS file in which the slits have been shifted
to the best fitted positions.
'''
outfile1 = 'fittedSlitImage.fits'
outfile2 = 'fittedSlitImageFullFrame.fits'
zeros = np.zeros(self.slitImage.shape)
r = []
for res in self.result['minimaPosition'].values():
r.append(res[0])
x = res[1]
y = res[2]
n = res[5]
d = self.slits[n]['values']
xmin = self.slits[n]['xminSky'] + x
xmax = self.slits[n]['xmaxSky'] + x
ymin = self.slits[n]['yminSky'] + y
ymax = self.slits[n]['ymaxSky'] + y
zeros[ymin:ymax + 1, xmin:xmax + 1] = d
rot = np.median(np.asarray(r))
#note: -rot, because when fitting the direct image was rotated not the slits
img = interpolation.rotate(zeros, -rot, reshape=False)
if self.debug:
print '\n{0:.2f} degree rotation to the fits file'.format(-rot)
#output to a fits file
hdu = PF.PrimaryHDU(img)
if os.path.isfile(outfile1):
os.remove(outfile1)
hdu.writeto(outfile1)
#output a second image
zeros = np.zeros((3096, 3096))
for slit in self.slits:
xmin = self.slits[slit]['xminFitted']
xmax = self.slits[slit]['xmaxFitted']
ymin = self.slits[slit]['yminFitted']
ymax = self.slits[slit]['ymaxFitted']
zeros[ymin - self.slits[slit]['tolerance']:ymax + 1 + self.slits[slit]['tolerance'],\
xmin - self.slits[slit]['tolerance']:xmax + 1 + self.slits[slit]['tolerance']] = \
self.slits[slit]['valuesLarge']
#note: -rot, because when fitting the direct image was rotated not the slits
img = interpolation.rotate(zeros, -rot, reshape=False)
#output to a fits file
hdu = PF.PrimaryHDU(img)
if os.path.isfile(outfile2):
os.remove(outfile2)
hdu.writeto(outfile2)
def pickleVars(self):
'''
This simple method pickles all important variables
'''
write.cPickleDumpDictionary(self.result, 'results.pk')
write.cPickleDumpDictionary(self.slits, 'slits.pk')
write.cPickleDumpDictionary(self.fitting, 'fitting.pk')
def run(self):
'''
Driver function, runs all required steps.
'''
self.generateSlitImages()
self.writeDS9RegionFile()
self.approxSkyPosition()
self.fitSlitsToDirectImage()
#self.fitSlitsToDirectImage2()
self.plotMinimalization()
self.outputMinima()
self.overPlotSlits()
self.outputShiftedImage()
self.pickleVars()
def processArgs(printHelp=False):
'''
Processes command line arguments
'''
parser = OptionParser()
parser.add_option('-c', '--configfile', dest='configfile',
help="Name of the configuration file", metavar="string")
parser.add_option('-s', '--section', dest='section',
help="Name of the section of the config file", metavar="string")
parser.add_option('-d', '--debug', dest='debug', action='store_true',
help='Debugging mode on')
if printHelp:
parser.print_help()
else:
return parser.parse_args()
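#A minimal sketch (illustrative only, not part of the original script). It assumes
#the .pk files written by pickleVars() are ordinary pickle dumps of dictionaries
#(the write.cPickleDumpDictionary helper is not shown here), so they can be read
#back with the standard pickle module for quick inspection.
def loadPickledDictionary(filename='results.pk'):
    '''
    Load one of the dictionaries written by pickleVars() and return it.
    '''
    import pickle
    with open(filename, 'rb') as fh:
        return pickle.load(fh)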
if __name__ == '__main__':
opts, args = processArgs()
if opts.configfile is None:
processArgs(True)
sys.exit(1)
if opts.section is None:
find = FindSlitPositionsInDirectImage(opts.configfile, opts.debug)
else:
find = FindSlitPositionsInDirectImage(opts.configfile, opts.debug, opts.section)
find.run() | bsd-2-clause |
tylerjereddy/scipy | scipy/signal/ltisys.py | 12 | 128865 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Apr 2011: Jeffrey Armstrong <[email protected]>
# Added dlsim, dstep, dimpulse, cont2discrete
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr
# Added pole placement
# Mar 2015: Clancy Rowley
# Rewrote lsim
# May 2015: Felix Berkenkamp
# Split lti class into subclasses
# Merged discrete systems and added dlti
import warnings
# np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7
# use scipy's qr until this is solved
from scipy.linalg import qr as s_qr
from scipy import integrate, interpolate, linalg
from scipy.interpolate import interp1d
from .filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk,
freqz_zpk)
from .lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk,
cont2discrete)
import numpy
import numpy as np
from numpy import (real, atleast_1d, atleast_2d, squeeze, asarray, zeros,
dot, transpose, ones, zeros_like, linspace, nan_to_num)
import copy
__all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace',
'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse',
'dfreqresp', 'dbode']
class LinearTimeInvariant:
def __new__(cls, *system, **kwargs):
"""Create a new object, don't allow direct instances."""
if cls is LinearTimeInvariant:
raise NotImplementedError('The LinearTimeInvariant class is not '
'meant to be used directly, use `lti` '
'or `dlti` instead.')
return super(LinearTimeInvariant, cls).__new__(cls)
def __init__(self):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
super().__init__()
self.inputs = None
self.outputs = None
self._dt = None
@property
def dt(self):
"""Return the sampling time of the system, `None` for `lti` systems."""
return self._dt
@property
def _dt_dict(self):
if self.dt is None:
return {}
else:
return {'dt': self.dt}
@property
def zeros(self):
"""Zeros of the system."""
return self.to_zpk().zeros
@property
def poles(self):
"""Poles of the system."""
return self.to_zpk().poles
def _as_ss(self):
"""Convert to `StateSpace` system, without copying.
Returns
-------
sys: StateSpace
The `StateSpace` system. If the class is already an instance of
`StateSpace` then this instance is returned.
"""
if isinstance(self, StateSpace):
return self
else:
return self.to_ss()
def _as_zpk(self):
"""Convert to `ZerosPolesGain` system, without copying.
Returns
-------
sys: ZerosPolesGain
The `ZerosPolesGain` system. If the class is already an instance of
`ZerosPolesGain` then this instance is returned.
"""
if isinstance(self, ZerosPolesGain):
return self
else:
return self.to_zpk()
def _as_tf(self):
"""Convert to `TransferFunction` system, without copying.
Returns
-------
sys: TransferFunction
The `TransferFunction` system. If the class is already an instance of
`TransferFunction` then this instance is returned.
"""
if isinstance(self, TransferFunction):
return self
else:
return self.to_tf()
class lti(LinearTimeInvariant):
r"""
Continuous-time linear time invariant system base class.
Parameters
----------
*system : arguments
The `lti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of arguments and the corresponding
continuous-time subclass that is created:
* 2: `TransferFunction`: (numerator, denominator)
* 3: `ZerosPolesGain`: (zeros, poles, gain)
* 4: `StateSpace`: (A, B, C, D)
Each argument can be an array or a sequence.
See Also
--------
ZerosPolesGain, StateSpace, TransferFunction, dlti
Notes
-----
`lti` instances do not exist directly. Instead, `lti` creates an instance
of one of its subclasses: `StateSpace`, `TransferFunction` or
`ZerosPolesGain`.
If (numerator, denominator) is passed in for ``*system``, coefficients for
both the numerator and denominator should be specified in descending
exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3,
5]``).
Changing the value of properties that are not directly part of the current
system representation (such as the `zeros` of a `StateSpace` system) is
very inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> signal.lti(1, 2, 3, 4)
StateSpaceContinuous(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: None
)
Construct the transfer function
:math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
>>> signal.lti([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
Construct the transfer function :math:`H(s) = \frac{3s + 4}{1s + 2}`:
>>> signal.lti([3, 4], [1, 2])
TransferFunctionContinuous(
array([3., 4.]),
array([1., 2.]),
dt: None
)
"""
def __new__(cls, *system):
"""Create an instance of the appropriate subclass."""
if cls is lti:
N = len(system)
if N == 2:
return TransferFunctionContinuous.__new__(
TransferFunctionContinuous, *system)
elif N == 3:
return ZerosPolesGainContinuous.__new__(
ZerosPolesGainContinuous, *system)
elif N == 4:
return StateSpaceContinuous.__new__(StateSpaceContinuous,
*system)
else:
raise ValueError("`system` needs to be an instance of `lti` "
"or have 2, 3 or 4 arguments.")
# __new__ was called from a subclass, let it call its own functions
return super(lti, cls).__new__(cls)
def __init__(self, *system):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
super().__init__(*system)
def impulse(self, X0=None, T=None, N=None):
"""
Return the impulse response of a continuous-time system.
See `impulse` for details.
"""
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
"""
Return the step response of a continuous-time system.
See `step` for details.
"""
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
"""
Return the response of a continuous-time system to input `U`.
See `lsim` for details.
"""
return lsim(self, U, T, X0=X0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See `bode` for details.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sys = signal.TransferFunction([1], [1, 1])
>>> w, mag, phase = sys.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return bode(self, w=w, n=n)
def freqresp(self, w=None, n=10000):
"""
Calculate the frequency response of a continuous-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See `freqresp` for details.
"""
return freqresp(self, w=w, n=n)
def to_discrete(self, dt, method='zoh', alpha=None):
"""Return a discretized version of the current system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti`
"""
raise NotImplementedError('to_discrete is not implemented for this '
'system class.')
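# A minimal usage sketch (illustrative only, not part of the original module).
# It uses only names defined in this file to show the argument-count dispatch
# documented above and the "convert first, then inspect" pattern from the Notes.
def _lti_dispatch_sketch():
    """Show which subclass `lti(...)` builds and how to read poles efficiently."""
    sys_tf = lti([1, 3, 3], [1, 2, 1])    # 2 arguments -> TransferFunctionContinuous
    sys_zpk = lti([1, 2], [3, 4], 5)      # 3 arguments -> ZerosPolesGainContinuous
    # Convert once to ZerosPolesGain before repeatedly accessing zeros/poles/gain.
    zpk = sys_tf.to_zpk()
    return type(sys_tf).__name__, type(sys_zpk).__name__, zpk.poles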
class dlti(LinearTimeInvariant):
r"""
Discrete-time linear time invariant system base class.
Parameters
----------
*system: arguments
The `dlti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of arguments and the corresponding
discrete-time subclass that is created:
* 2: `TransferFunction`: (numerator, denominator)
* 3: `ZerosPolesGain`: (zeros, poles, gain)
* 4: `StateSpace`: (A, B, C, D)
Each argument can be an array or a sequence.
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to ``True``
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, TransferFunction, lti
Notes
-----
`dlti` instances do not exist directly. Instead, `dlti` creates an instance
of one of its subclasses: `StateSpace`, `TransferFunction` or
`ZerosPolesGain`.
Changing the value of properties that are not directly part of the current
system representation (such as the `zeros` of a `StateSpace` system) is
very inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
If (numerator, denominator) is passed in for ``*system``, coefficients for
both the numerator and denominator should be specified in descending
exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3,
5]``).
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy import signal
>>> signal.dlti(1, 2, 3, 4)
StateSpaceDiscrete(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: True
)
>>> signal.dlti(1, 2, 3, 4, dt=0.1)
StateSpaceDiscrete(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: 0.1
)
Construct the transfer function
:math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
of 0.1 seconds:
>>> signal.dlti([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
Construct the transfer function :math:`H(z) = \frac{3z + 4}{1z + 2}` with
a sampling time of 0.1 seconds:
>>> signal.dlti([3, 4], [1, 2], dt=0.1)
TransferFunctionDiscrete(
array([3., 4.]),
array([1., 2.]),
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Create an instance of the appropriate subclass."""
if cls is dlti:
N = len(system)
if N == 2:
return TransferFunctionDiscrete.__new__(
TransferFunctionDiscrete, *system, **kwargs)
elif N == 3:
return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete,
*system, **kwargs)
elif N == 4:
return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system,
**kwargs)
else:
raise ValueError("`system` needs to be an instance of `dlti` "
"or have 2, 3 or 4 arguments.")
# __new__ was called from a subclass, let it call its own functions
return super(dlti, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
dt = kwargs.pop('dt', True)
super().__init__(*system, **kwargs)
self.dt = dt
@property
def dt(self):
"""Return the sampling time of the system."""
return self._dt
@dt.setter
def dt(self, dt):
self._dt = dt
def impulse(self, x0=None, t=None, n=None):
"""
Return the impulse response of the discrete-time `dlti` system.
See `dimpulse` for details.
"""
return dimpulse(self, x0=x0, t=t, n=n)
def step(self, x0=None, t=None, n=None):
"""
Return the step response of the discrete-time `dlti` system.
See `dstep` for details.
"""
return dstep(self, x0=x0, t=t, n=n)
def output(self, u, t, x0=None):
"""
Return the response of the discrete-time system to input `u`.
See `dlsim` for details.
"""
return dlsim(self, u, t, x0=x0)
def bode(self, w=None, n=100):
r"""
Calculate Bode magnitude and phase data of a discrete-time system.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See `dbode` for details.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Construct the transfer function :math:`H(z) = \frac{1}{z^2 + 2z + 3}`
with sampling time 0.5s:
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5)
Equivalent: signal.dbode(sys)
>>> w, mag, phase = sys.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return dbode(self, w=w, n=n)
def freqresp(self, w=None, n=10000, whole=False):
"""
Calculate the frequency response of a discrete-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See `dfreqresp` for details.
"""
return dfreqresp(self, w=w, n=n, whole=whole)
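# A minimal usage sketch (illustrative only, not part of the original module).
# It builds a simple discrete-time system with dt=0.1 and evaluates its step
# response through `dstep` (defined later in this file), which `dlti.step` wraps.
def _dlti_step_sketch():
    """Step response of H(z) = 1 / (z - 0.5) sampled at dt = 0.1 s."""
    sys_d = dlti([1.0], [1.0, -0.5], dt=0.1)   # 2 arguments -> TransferFunctionDiscrete
    t, y = dstep(sys_d, n=10)                  # same as sys_d.step(n=10)
    return t, np.squeeze(y)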
class TransferFunction(LinearTimeInvariant):
r"""Linear Time Invariant system class in transfer function form.
Represents the system as the continuous-time transfer function
:math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the
discrete-time transfer function
:math:`H(s)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
`TransferFunction` systems inherit additional
functionality from the `lti` or `dlti` class, depending on which system
representation is used.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, lti, dlti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g. ``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be
represented as ``[1, 3, 5]``)
Examples
--------
Construct the transfer function
:math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`:
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den)
TransferFunctionContinuous(
array([1., 3., 3.]),
array([1., 2., 1.]),
dt: None
)
Construct the transfer function
:math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of
0.1 seconds:
>>> signal.TransferFunction(num, den, dt=0.1)
TransferFunctionDiscrete(
array([1., 3., 3.]),
array([1., 2., 1.]),
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Handle object conversion if input is an instance of lti."""
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_tf()
# Choose whether to inherit from `lti` or from `dlti`
if cls is TransferFunction:
if kwargs.get('dt') is None:
return TransferFunctionContinuous.__new__(
TransferFunctionContinuous,
*system,
**kwargs)
else:
return TransferFunctionDiscrete.__new__(
TransferFunctionDiscrete,
*system,
**kwargs)
# No special conversion needed
return super(TransferFunction, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the state space LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
# Remove system arguments, not needed by parents anymore
super().__init__(**kwargs)
self._num = None
self._den = None
self.num, self.den = normalize(*system)
def __repr__(self):
"""Return representation of the system's transfer function"""
return '{0}(\n{1},\n{2},\ndt: {3}\n)'.format(
self.__class__.__name__,
repr(self.num),
repr(self.den),
repr(self.dt),
)
@property
def num(self):
"""Numerator of the `TransferFunction` system."""
return self._num
@num.setter
def num(self, num):
self._num = atleast_1d(num)
# Update dimensions
if len(self.num.shape) > 1:
self.outputs, self.inputs = self.num.shape
else:
self.outputs = 1
self.inputs = 1
@property
def den(self):
"""Denominator of the `TransferFunction` system."""
return self._den
@den.setter
def den(self, den):
self._den = atleast_1d(den)
def _copy(self, system):
"""
Copy the parameters of another `TransferFunction` object
Parameters
----------
system : `TransferFunction`
The `TransferFunction` system that is to be copied
"""
self.num = system.num
self.den = system.den
def to_tf(self):
"""
Return a copy of the current `TransferFunction` system.
Returns
-------
sys : instance of `TransferFunction`
The current system (copy)
"""
return copy.deepcopy(self)
def to_zpk(self):
"""
Convert system representation to `ZerosPolesGain`.
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*tf2zpk(self.num, self.den),
**self._dt_dict)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*tf2ss(self.num, self.den),
**self._dt_dict)
@staticmethod
def _z_to_zinv(num, den):
"""Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
"""
diff = len(num) - len(den)
if diff > 0:
den = np.hstack((np.zeros(diff), den))
elif diff < 0:
num = np.hstack((np.zeros(-diff), num))
return num, den
@staticmethod
def _zinv_to_z(num, den):
"""Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
"""
diff = len(num) - len(den)
if diff > 0:
den = np.hstack((den, np.zeros(diff)))
elif diff < 0:
num = np.hstack((num, np.zeros(-diff)))
return num, den
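# A minimal sketch (illustrative only, not part of the original module). It
# spells out the zero-padding performed by `_z_to_zinv` above when the
# numerator degree exceeds the denominator degree.
def _z_to_zinv_sketch():
    """Rewrite H(z) = (5z**2 + 3z + 2) / (4z + 1) in powers of z**-1."""
    num = np.array([5.0, 3.0, 2.0])                # 5z**2 + 3z + 2
    den = np.array([4.0, 1.0])                     # 4z + 1
    diff = len(num) - len(den)                     # 1 here
    den_padded = np.hstack((np.zeros(diff), den))  # [0., 4., 1.]
    # Dividing both polynomials by z**2 gives
    # (5 + 3 z**-1 + 2 z**-2) / (0 + 4 z**-1 + 1 z**-2),
    # i.e. the padded coefficient lists in ascending powers of z**-1.
    return num, den_padded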
class TransferFunctionContinuous(TransferFunction, lti):
r"""
Continuous-time Linear Time Invariant system in transfer function form.
Represents the system as the transfer function
:math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
Continuous-time `TransferFunction` systems inherit additional
functionality from the `lti` class.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
See Also
--------
ZerosPolesGain, StateSpace, lti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g. ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``)
Examples
--------
Construct the transfer function
:math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`:
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den)
TransferFunctionContinuous(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `TransferFunction` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `StateSpace`
"""
return TransferFunction(*cont2discrete((self.num, self.den),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
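# A minimal usage sketch (illustrative only, not part of the original module).
# It applies the `to_discrete` method defined above to the first-order low-pass
# H(s) = 1 / (s + 1); internally this goes through `cont2discrete`.
def _tf_to_discrete_sketch(dt=0.1):
    """Return numerator, denominator and dt of the zero-order-hold discretization."""
    sys_c = TransferFunction([1.0], [1.0, 1.0])   # continuous, since no dt is given
    sys_d = sys_c.to_discrete(dt, method='zoh')
    return sys_d.num, sys_d.den, sys_d.dt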
class TransferFunctionDiscrete(TransferFunction, dlti):
r"""
Discrete-time Linear Time Invariant system in transfer function form.
Represents the system as the transfer function
:math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
Discrete-time `TransferFunction` systems inherit additional functionality
from the `dlti` class.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, dlti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g., ``z^2 + 3z + 5`` would be represented as
``[1, 3, 5]``).
Examples
--------
Construct the transfer function
:math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of
0.5 seconds:
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den, 0.5)
TransferFunctionDiscrete(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: 0.5
)
"""
pass
class ZerosPolesGain(LinearTimeInvariant):
r"""
Linear Time Invariant system class in zeros, poles, gain form.
Represents the system as the continuous- or discrete-time transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
`ZerosPolesGain` systems inherit additional functionality from the `lti` or
`dlti` class, depending on which system representation is used.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
TransferFunction, StateSpace, lti, dlti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
Construct the transfer function
:math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
>>> from scipy import signal
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
Construct the transfer function
:math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
of 0.1 seconds:
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Handle object conversion if input is an instance of `lti`"""
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_zpk()
# Choose whether to inherit from `lti` or from `dlti`
if cls is ZerosPolesGain:
if kwargs.get('dt') is None:
return ZerosPolesGainContinuous.__new__(
ZerosPolesGainContinuous,
*system,
**kwargs)
else:
return ZerosPolesGainDiscrete.__new__(
ZerosPolesGainDiscrete,
*system,
**kwargs
)
# No special conversion needed
return super(ZerosPolesGain, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the zeros, poles, gain system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
super().__init__(**kwargs)
self._zeros = None
self._poles = None
self._gain = None
self.zeros, self.poles, self.gain = system
def __repr__(self):
"""Return representation of the `ZerosPolesGain` system."""
return '{0}(\n{1},\n{2},\n{3},\ndt: {4}\n)'.format(
self.__class__.__name__,
repr(self.zeros),
repr(self.poles),
repr(self.gain),
repr(self.dt),
)
@property
def zeros(self):
"""Zeros of the `ZerosPolesGain` system."""
return self._zeros
@zeros.setter
def zeros(self, zeros):
self._zeros = atleast_1d(zeros)
# Update dimensions
if len(self.zeros.shape) > 1:
self.outputs, self.inputs = self.zeros.shape
else:
self.outputs = 1
self.inputs = 1
@property
def poles(self):
"""Poles of the `ZerosPolesGain` system."""
return self._poles
@poles.setter
def poles(self, poles):
self._poles = atleast_1d(poles)
@property
def gain(self):
"""Gain of the `ZerosPolesGain` system."""
return self._gain
@gain.setter
def gain(self, gain):
self._gain = gain
def _copy(self, system):
"""
Copy the parameters of another `ZerosPolesGain` system.
Parameters
----------
system : instance of `ZerosPolesGain`
The zeros, poles gain system that is to be copied
"""
self.poles = system.poles
self.zeros = system.zeros
self.gain = system.gain
def to_tf(self):
"""
Convert system representation to `TransferFunction`.
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain),
**self._dt_dict)
def to_zpk(self):
"""
Return a copy of the current 'ZerosPolesGain' system.
Returns
-------
sys : instance of `ZerosPolesGain`
The current system (copy)
"""
return copy.deepcopy(self)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain),
**self._dt_dict)
class ZerosPolesGainContinuous(ZerosPolesGain, lti):
r"""
Continuous-time Linear Time Invariant system in zeros, poles, gain form.
Represents the system as the continuous-time transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
Continuous-time `ZerosPolesGain` systems inherit additional functionality
from the `lti` class.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
See Also
--------
TransferFunction, StateSpace, lti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
Construct the transfer function
:math:`H(s)=\frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
>>> from scipy import signal
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `ZerosPolesGain` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `ZerosPolesGain`
"""
return ZerosPolesGain(
*cont2discrete((self.zeros, self.poles, self.gain),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
class ZerosPolesGainDiscrete(ZerosPolesGain, dlti):
r"""
Discrete-time Linear Time Invariant system in zeros, poles, gain form.
Represents the system as the discrete-time transfer function
:math:`H(z)=k \prod_i (z - z[i]) / \prod_j (z - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
Discrete-time `ZerosPolesGain` systems inherit additional functionality
from the `dlti` class.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
TransferFunction, StateSpace, dlti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
Construct the transfer function
:math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
>>> from scipy import signal
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
Construct the transfer function
:math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
of 0.1 seconds:
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
"""
pass
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
class StateSpace(LinearTimeInvariant):
r"""
Linear Time Invariant system in state-space form.
Represents the system as the continuous-time, first order differential
equation :math:`\dot{x} = A x + B u` or the discrete-time difference
equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems
inherit additional functionality from the `lti` or `dlti` class, depending
on which system representation is used.
Parameters
----------
*system: arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
TransferFunction, ZerosPolesGain, lti, dlti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[0, 1], [0, 0]])
>>> b = np.array([[0], [1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> sys = signal.StateSpace(a, b, c, d)
>>> print(sys)
StateSpaceContinuous(
array([[0, 1],
[0, 0]]),
array([[0],
[1]]),
array([[1, 0]]),
array([[0]]),
dt: None
)
>>> sys.to_discrete(0.1)
StateSpaceDiscrete(
array([[1. , 0.1],
[0. , 1. ]]),
array([[0.005],
[0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
>>> a = np.array([[1, 0.1], [0, 1]])
>>> b = np.array([[0.005], [0.1]])
>>> signal.StateSpace(a, b, c, d, dt=0.1)
StateSpaceDiscrete(
array([[1. , 0.1],
[0. , 1. ]]),
array([[0.005],
[0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
"""
# Override NumPy binary operations and ufuncs
__array_priority__ = 100.0
__array_ufunc__ = None
def __new__(cls, *system, **kwargs):
"""Create new StateSpace object and settle inheritance."""
# Handle object conversion if input is an instance of `lti`
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_ss()
# Choose whether to inherit from `lti` or from `dlti`
if cls is StateSpace:
if kwargs.get('dt') is None:
return StateSpaceContinuous.__new__(StateSpaceContinuous,
*system, **kwargs)
else:
return StateSpaceDiscrete.__new__(StateSpaceDiscrete,
*system, **kwargs)
# No special conversion needed
return super(StateSpace, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the state space lti/dlti system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
# Remove system arguments, not needed by parents anymore
super().__init__(**kwargs)
self._A = None
self._B = None
self._C = None
self._D = None
self.A, self.B, self.C, self.D = abcd_normalize(*system)
def __repr__(self):
"""Return representation of the `StateSpace` system."""
return '{0}(\n{1},\n{2},\n{3},\n{4},\ndt: {5}\n)'.format(
self.__class__.__name__,
repr(self.A),
repr(self.B),
repr(self.C),
repr(self.D),
repr(self.dt),
)
def _check_binop_other(self, other):
return isinstance(other, (StateSpace, np.ndarray, float, complex,
np.number, int))
def __mul__(self, other):
"""
Post-multiply another system or a scalar
Handles multiplication of systems in the sense of a frequency domain
multiplication. That means, given two systems E1(s) and E2(s), their
multiplication, H(s) = E1(s) * E2(s), means that applying H(s) to U(s)
is equivalent to first applying E2(s), and then E1(s).
Notes
-----
For SISO systems the order of system application does not matter.
However, for MIMO systems, where the two systems are matrices, the
order above ensures standard Matrix multiplication rules apply.
"""
if not self._check_binop_other(other):
return NotImplemented
if isinstance(other, StateSpace):
# Disallow mix of discrete and continuous systems.
if type(other) is not type(self):
return NotImplemented
if self.dt != other.dt:
raise TypeError('Cannot multiply systems with different `dt`.')
n1 = self.A.shape[0]
n2 = other.A.shape[0]
# Interconnection of systems
# x1' = A1 x1 + B1 u1
# y1 = C1 x1 + D1 u1
# x2' = A2 x2 + B2 u2
# y2 = C2 x2 + D2 u2
#
# Plugging in with u1 = y2 yields
# [x1'] [A1 B1*C2 ] [x1] [B1*D2]
# [x2'] = [0 A2 ] [x2] + [B2 ] u2
# [x1]
# y1 = [C1 D1*C2] [x2] + D1*D2 u2
a = np.vstack((np.hstack((self.A, np.dot(self.B, other.C))),
np.hstack((zeros((n2, n1)), other.A))))
b = np.vstack((np.dot(self.B, other.D), other.B))
c = np.hstack((self.C, np.dot(self.D, other.C)))
d = np.dot(self.D, other.D)
else:
# Assume that other is a scalar / matrix
# For post multiplication the input gets scaled
a = self.A
b = np.dot(self.B, other)
c = self.C
d = np.dot(self.D, other)
common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ())
return StateSpace(np.asarray(a, dtype=common_dtype),
np.asarray(b, dtype=common_dtype),
np.asarray(c, dtype=common_dtype),
np.asarray(d, dtype=common_dtype),
**self._dt_dict)
def __rmul__(self, other):
"""Pre-multiply a scalar or matrix (but not StateSpace)"""
if not self._check_binop_other(other) or isinstance(other, StateSpace):
return NotImplemented
# For pre-multiplication only the output gets scaled
a = self.A
b = self.B
c = np.dot(other, self.C)
d = np.dot(other, self.D)
common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ())
return StateSpace(np.asarray(a, dtype=common_dtype),
np.asarray(b, dtype=common_dtype),
np.asarray(c, dtype=common_dtype),
np.asarray(d, dtype=common_dtype),
**self._dt_dict)
def __neg__(self):
"""Negate the system (equivalent to pre-multiplying by -1)."""
return StateSpace(self.A, self.B, -self.C, -self.D, **self._dt_dict)
def __add__(self, other):
"""
Adds two systems in the sense of frequency domain addition.
"""
if not self._check_binop_other(other):
return NotImplemented
if isinstance(other, StateSpace):
# Disallow mix of discrete and continuous systems.
if type(other) is not type(self):
raise TypeError('Cannot add {} and {}'.format(type(self),
type(other)))
if self.dt != other.dt:
raise TypeError('Cannot add systems with different `dt`.')
# Interconnection of systems
# x1' = A1 x1 + B1 u
# y1 = C1 x1 + D1 u
# x2' = A2 x2 + B2 u
# y2 = C2 x2 + D2 u
# y = y1 + y2
#
# Plugging in yields
# [x1'] [A1 0 ] [x1] [B1]
# [x2'] = [0 A2] [x2] + [B2] u
# [x1]
# y = [C1 C2] [x2] + [D1 + D2] u
a = linalg.block_diag(self.A, other.A)
b = np.vstack((self.B, other.B))
c = np.hstack((self.C, other.C))
d = self.D + other.D
else:
other = np.atleast_2d(other)
if self.D.shape == other.shape:
# A scalar/matrix is really just a static system (A=0, B=0, C=0)
a = self.A
b = self.B
c = self.C
d = self.D + other
else:
raise ValueError("Cannot add systems with incompatible "
"dimensions ({} and {})"
.format(self.D.shape, other.shape))
common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ())
return StateSpace(np.asarray(a, dtype=common_dtype),
np.asarray(b, dtype=common_dtype),
np.asarray(c, dtype=common_dtype),
np.asarray(d, dtype=common_dtype),
**self._dt_dict)
def __sub__(self, other):
if not self._check_binop_other(other):
return NotImplemented
return self.__add__(-other)
def __radd__(self, other):
if not self._check_binop_other(other):
return NotImplemented
return self.__add__(other)
def __rsub__(self, other):
if not self._check_binop_other(other):
return NotImplemented
return (-self).__add__(other)
def __truediv__(self, other):
"""
Divide by a scalar
"""
# Division by non-StateSpace scalars
if not self._check_binop_other(other) or isinstance(other, StateSpace):
return NotImplemented
if isinstance(other, np.ndarray) and other.ndim > 0:
# It's ambiguous what this means, so disallow it
raise ValueError("Cannot divide StateSpace by non-scalar numpy arrays")
return self.__mul__(1/other)
@property
def A(self):
"""State matrix of the `StateSpace` system."""
return self._A
@A.setter
def A(self, A):
self._A = _atleast_2d_or_none(A)
@property
def B(self):
"""Input matrix of the `StateSpace` system."""
return self._B
@B.setter
def B(self, B):
self._B = _atleast_2d_or_none(B)
self.inputs = self.B.shape[-1]
@property
def C(self):
"""Output matrix of the `StateSpace` system."""
return self._C
@C.setter
def C(self, C):
self._C = _atleast_2d_or_none(C)
self.outputs = self.C.shape[0]
@property
def D(self):
"""Feedthrough matrix of the `StateSpace` system."""
return self._D
@D.setter
def D(self, D):
self._D = _atleast_2d_or_none(D)
def _copy(self, system):
"""
Copy the parameters of another `StateSpace` system.
Parameters
----------
system : instance of `StateSpace`
The state-space system that is to be copied
"""
self.A = system.A
self.B = system.B
self.C = system.C
self.D = system.D
def to_tf(self, **kwargs):
"""
Convert system representation to `TransferFunction`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2tf`
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D,
**kwargs), **self._dt_dict)
def to_zpk(self, **kwargs):
"""
Convert system representation to `ZerosPolesGain`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2zpk`
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
**kwargs), **self._dt_dict)
def to_ss(self):
"""
Return a copy of the current `StateSpace` system.
Returns
-------
sys : instance of `StateSpace`
The current system (copy)
"""
return copy.deepcopy(self)
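# A minimal sketch (illustrative only, not part of the original module). It
# checks the series interconnection formula used in `StateSpace.__mul__` above:
# for H = H1 * H2 the composite state matrix is [[A1, B1*C2], [0, A2]].
def _ss_series_sketch():
    """Multiply two first-order SISO systems and verify the block structure."""
    sys1 = StateSpace([[-1.0]], [[1.0]], [[1.0]], [[0.0]])
    sys2 = StateSpace([[-2.0]], [[1.0]], [[3.0]], [[0.0]])
    series = sys1 * sys2
    expected_a = np.array([[-1.0, 3.0],    # [A1, B1*C2]
                           [0.0, -2.0]])   # [0,  A2   ]
    assert np.allclose(series.A, expected_a)
    return series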
class StateSpaceContinuous(StateSpace, lti):
r"""
Continuous-time Linear Time Invariant system in state-space form.
Represents the system as the continuous-time, first order differential
equation :math:`\dot{x} = A x + B u`.
Continuous-time `StateSpace` systems inherit additional functionality
from the `lti` class.
Parameters
----------
*system: arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
See Also
--------
TransferFunction, ZerosPolesGain, lti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[0, 1], [0, 0]])
>>> b = np.array([[0], [1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> sys = signal.StateSpace(a, b, c, d)
>>> print(sys)
StateSpaceContinuous(
array([[0, 1],
[0, 0]]),
array([[0],
[1]]),
array([[1, 0]]),
array([[0]]),
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `StateSpace` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `StateSpace`
"""
return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
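# A minimal sketch (illustrative only, not part of the original module). For
# the double integrator used in the docstrings above, a zero-order hold with
# dt = 0.1 should reproduce Ad = [[1, 0.1], [0, 1]] and Bd = [[0.005], [0.1]].
def _ss_to_discrete_sketch(dt=0.1):
    """Discretize the double integrator and return its (A, B) matrices."""
    a = np.array([[0.0, 1.0], [0.0, 0.0]])
    b = np.array([[0.0], [1.0]])
    c = np.array([[1.0, 0.0]])
    d = np.array([[0.0]])
    sys_d = StateSpace(a, b, c, d).to_discrete(dt, method='zoh')
    assert np.allclose(sys_d.A, [[1.0, dt], [0.0, 1.0]])
    return sys_d.A, sys_d.B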
class StateSpaceDiscrete(StateSpace, dlti):
r"""
Discrete-time Linear Time Invariant system in state-space form.
Represents the system as the discrete-time difference equation
:math:`x[k+1] = A x[k] + B u[k]`.
`StateSpace` systems inherit additional functionality from the `dlti`
class.
Parameters
----------
*system: arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
TransferFunction, ZerosPolesGain, dlti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[1, 0.1], [0, 1]])
>>> b = np.array([[0.005], [0.1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> signal.StateSpace(a, b, c, d, dt=0.1)
StateSpaceDiscrete(
array([[ 1. , 0.1],
[ 0. , 1. ]]),
array([[ 0.005],
[ 0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
"""
pass
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the `lti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `lti`)
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
`odeint`. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses `scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for `scipy.integrate.odeint` for the full list of arguments.
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
See Also
--------
lsim
Examples
--------
We'll use `lsim2` to simulate an analog Bessel filter applied to
a signal.
>>> from scipy.signal import bessel, lsim2
>>> import matplotlib.pyplot as plt
Create a low-pass Bessel filter with a cutoff of 12 Hz.
>>> b, a = bessel(N=5, Wn=2*np.pi*12, btype='lowpass', analog=True)
Generate data to which the filter is applied.
>>> t = np.linspace(0, 1.25, 500, endpoint=False)
The input signal is the sum of three sinusoidal curves, with
frequencies 4 Hz, 40 Hz, and 80 Hz. The filter should mostly
eliminate the 40 Hz and 80 Hz components, leaving just the 4 Hz signal.
>>> u = (np.cos(2*np.pi*4*t) + 0.6*np.sin(2*np.pi*40*t) +
... 0.5*np.cos(2*np.pi*80*t))
Simulate the filter with `lsim2`.
>>> tout, yout, xout = lsim2((b, a), U=u, T=t)
Plot the result.
>>> plt.plot(t, u, 'r', alpha=0.5, linewidth=1, label='input')
>>> plt.plot(tout, yout, 'k', linewidth=1.5, label='output')
>>> plt.legend(loc='best', shadow=True, framealpha=1)
>>> plt.grid(alpha=0.3)
>>> plt.xlabel('t')
>>> plt.show()
In a second example, we simulate a double integrator ``y'' = u``, with
a constant input ``u = 1``. We'll use the state space representation
of the integrator.
>>> from scipy.signal import lti
>>> A = np.array([[0, 1], [0, 0]])
>>> B = np.array([[0], [1]])
>>> C = np.array([[1, 0]])
>>> D = 0
>>> system = lti(A, B, C, D)
`t` and `u` define the time and input signal for the system to
be simulated.
>>> t = np.linspace(0, 5, num=50)
>>> u = np.ones_like(t)
Compute the simulation, and then plot `y`. As expected, the plot shows
the curve ``y = 0.5*t**2``.
>>> tout, y, x = lsim2(system, u, t)
>>> plt.plot(t, y)
>>> plt.grid(alpha=0.3)
>>> plt.xlabel('t')
>>> plt.show()
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('lsim2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
# None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1, 1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A, x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C, transpose(xout))
return T, squeeze(transpose(yout)), xout
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.float64):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=True):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `lti`)
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U = 0 or None, a zero input is used.
T : array_like
The time steps at which the input is defined and at which the
output is desired. Must be nonnegative, increasing, and equally spaced.
X0 : array_like, optional
The initial conditions on the state vector (zero by default).
interp : bool, optional
Whether to use linear (True, the default) or zero-order-hold (False)
interpolation for the input array.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time evolution of the state vector.
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
We'll use `lsim` to simulate an analog Bessel filter applied to
a signal.
>>> from scipy.signal import bessel, lsim
>>> import matplotlib.pyplot as plt
Create a low-pass Bessel filter with a cutoff of 12 Hz.
>>> b, a = bessel(N=5, Wn=2*np.pi*12, btype='lowpass', analog=True)
Generate data to which the filter is applied.
>>> t = np.linspace(0, 1.25, 500, endpoint=False)
The input signal is the sum of three sinusoidal curves, with
frequencies 4 Hz, 40 Hz, and 80 Hz. The filter should mostly
eliminate the 40 Hz and 80 Hz components, leaving just the 4 Hz signal.
>>> u = (np.cos(2*np.pi*4*t) + 0.6*np.sin(2*np.pi*40*t) +
... 0.5*np.cos(2*np.pi*80*t))
Simulate the filter with `lsim`.
>>> tout, yout, xout = lsim((b, a), U=u, T=t)
Plot the result.
>>> plt.plot(t, u, 'r', alpha=0.5, linewidth=1, label='input')
>>> plt.plot(tout, yout, 'k', linewidth=1.5, label='output')
>>> plt.legend(loc='best', shadow=True, framealpha=1)
>>> plt.grid(alpha=0.3)
>>> plt.xlabel('t')
>>> plt.show()
In a second example, we simulate a double integrator ``y'' = u``, with
a constant input ``u = 1``. We'll use the state space representation
of the integrator.
>>> from scipy.signal import lti
>>> A = np.array([[0.0, 1.0], [0.0, 0.0]])
>>> B = np.array([[0.0], [1.0]])
>>> C = np.array([[1.0, 0.0]])
>>> D = 0.0
>>> system = lti(A, B, C, D)
`t` and `u` define the time and input signal for the system to
be simulated.
>>> t = np.linspace(0, 5, num=50)
>>> u = np.ones_like(t)
Compute the simulation, and then plot `y`. As expected, the plot shows
the curve ``y = 0.5*t**2``.
>>> tout, y, x = lsim(system, u, t)
>>> plt.plot(t, y)
>>> plt.grid(alpha=0.3)
>>> plt.xlabel('t')
>>> plt.show()
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('lsim can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D))
n_states = A.shape[0]
n_inputs = B.shape[1]
n_steps = T.size
if X0 is None:
X0 = zeros(n_states, sys.A.dtype)
xout = np.empty((n_steps, n_states), sys.A.dtype)
if T[0] == 0:
xout[0] = X0
elif T[0] > 0:
# step forward to initial time, with zero input
xout[0] = dot(X0, linalg.expm(transpose(A) * T[0]))
else:
raise ValueError("Initial time must be nonnegative")
no_input = (U is None or
(isinstance(U, (int, float)) and U == 0.) or
not np.any(U))
if n_steps == 1:
yout = squeeze(dot(xout, transpose(C)))
if not no_input:
yout += squeeze(dot(U, transpose(D)))
return T, squeeze(yout), squeeze(xout)
dt = T[1] - T[0]
if not np.allclose((T[1:] - T[:-1]) / dt, 1.0):
warnings.warn("Non-uniform timesteps are deprecated. Results may be "
"slow and/or inaccurate.", DeprecationWarning)
return lsim2(system, U, T, X0)
if no_input:
# Zero input: just use matrix exponential
# take transpose because state is a row vector
expAT_dt = linalg.expm(transpose(A) * dt)
for i in range(1, n_steps):
xout[i] = dot(xout[i-1], expAT_dt)
yout = squeeze(dot(xout, transpose(C)))
return T, squeeze(yout), squeeze(xout)
# Nonzero input
U = atleast_1d(U)
if U.ndim == 1:
U = U[:, np.newaxis]
if U.shape[0] != n_steps:
raise ValueError("U must have the same number of rows "
"as elements in T.")
if U.shape[1] != n_inputs:
raise ValueError("System does not define that many inputs.")
if not interp:
# Zero-order hold
# Algorithm: to integrate from time 0 to time dt, we solve
# xdot = A x + B u, x(0) = x0
# udot = 0, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 ] [ u0 ]
M = np.vstack([np.hstack([A * dt, B * dt]),
np.zeros((n_inputs, n_states + n_inputs))])
# transpose everything because the state and input are row vectors
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd = expMT[n_states:, :n_states]
for i in range(1, n_steps):
xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd)
else:
# Linear interpolation between steps
# Algorithm: to integrate from time 0 to time dt, with linear
# interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
# xdot = A x + B u, x(0) = x0
# udot = (u1 - u0) / dt, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 I ] [ u0 ]
# [u1 - u0] [ 0 0 0 ] [u1 - u0]
M = np.vstack([np.hstack([A * dt, B * dt,
np.zeros((n_states, n_inputs))]),
np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
np.identity(n_inputs)]),
np.zeros((n_inputs, n_states + 2 * n_inputs))])
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd1 = expMT[n_states+n_inputs:, :n_states]
Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
for i in range(1, n_steps):
xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) + dot(U[i], Bd1))
yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D))))
return T, squeeze(yout), squeeze(xout)
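# A minimal sketch (illustrative only, not part of the original module). It
# reproduces the zero-order-hold trick used inside `lsim` above: exponentiating
# the augmented matrix [[A*dt, B*dt], [0, 0]] yields the discrete-time Ad and Bd
# (`lsim` works with the transpose because its states are row vectors), and the
# result matches `cont2discrete(..., method='zoh')`.
def _lsim_zoh_sketch(dt=0.1):
    """Compare the augmented matrix exponential with cont2discrete('zoh')."""
    A = np.array([[0.0, 1.0], [-2.0, -3.0]])
    B = np.array([[0.0], [1.0]])
    n_states, n_inputs = A.shape[0], B.shape[1]
    M = np.vstack([np.hstack([A * dt, B * dt]),
                   np.zeros((n_inputs, n_states + n_inputs))])
    expM = linalg.expm(M)
    Ad = expM[:n_states, :n_states]
    Bd = expM[:n_states, n_states:]
    Ad_ref, Bd_ref = cont2discrete((A, B, np.eye(2), np.zeros((2, 1))),
                                   dt, method='zoh')[:2]
    assert np.allclose(Ad, Ad_ref) and np.allclose(Bd, Bd_ref)
    return Ad, Bd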
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : array_like
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
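    # tc is the slowest time constant, taken from the eigenvalue closest to
    # the imaginary axis; 7*tc lets a stable first-order mode decay to
    # roughly 0.1% of its initial value.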
t = linspace(0.0, 7 * tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
Compute the impulse response of a second order system with a repeated
root: ``x''(t) + 2*x'(t) + x(t) = u(t)``
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('impulse can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
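    # An impulse at t=0 adds B to the state, so the impulse response is the
    # zero-input response started from X0 + B; lsim below is therefore
    # called with a zero input.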
if X0 is None:
X = squeeze(sys.B)
else:
X = squeeze(sys.B + X0)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
_, h, _ = lsim(sys, 0., T, X, interp=False)
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, scipy.integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.8.0
Examples
--------
Compute the impulse response of a second order system with a repeated
root: ``x''(t) + 2*x'(t) + x(t) = u(t)``
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('impulse2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
ic = B + X0
Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> lti = signal.lti([1.0], [1.0, 1.0])
>>> t, y = signal.step(lti)
>>> plt.plot(t, y)
>>> plt.xlabel('Time [s]')
>>> plt.ylabel('Amplitude')
    >>> plt.title('Step response for 1st-order lowpass')
>>> plt.grid()
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('step can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0, interp=False)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
kwargs : various types
Additional keyword arguments are passed on the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`. See the documentation for
`scipy.integrate.odeint` for information about these arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.8.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> lti = signal.lti([1.0], [1.0, 1.0])
>>> t, y = signal.step2(lti)
>>> plt.plot(t, y)
>>> plt.xlabel('Time [s]')
>>> plt.ylabel('Amplitude')
    >>> plt.title('Step response for 1st-order lowpass')
>>> plt.grid()
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('step2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
def bode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is calculated
for every value in this array. If not given a reasonable set will be
calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sys = signal.TransferFunction([1], [1, 1])
>>> w, mag, phase = signal.bode(sys)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = freqresp(system, w=w, n=n)
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi
return w, mag, phase
def freqresp(system, w=None, n=10000):
r"""Calculate the frequency response of a continuous-time system.
Parameters
----------
system : an instance of the `lti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is
calculated for every value in this array. If not given, a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
H : 1D ndarray
Array of complex magnitude values
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Construct the transfer function :math:`H(s) = \frac{5}{(s-1)^3}`:
>>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5])
>>> w, H = signal.freqresp(s1)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if isinstance(system, lti):
if isinstance(system, (TransferFunction, ZerosPolesGain)):
sys = system
else:
sys = system._as_zpk()
elif isinstance(system, dlti):
raise AttributeError('freqresp can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_zpk()
if sys.inputs != 1 or sys.outputs != 1:
raise ValueError("freqresp() requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
if isinstance(sys, TransferFunction):
# In the call to freqs(), sys.num.ravel() is used because there are
# cases where sys.num is a 2-D array with a single row.
w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
elif isinstance(sys, ZerosPolesGain):
w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN)
return w, h
# This class will be used by place_poles to return its results
# see https://code.activestate.com/recipes/52308/
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
def _valid_inputs(A, B, poles, method, rtol, maxiter):
"""
    Check that the poles come in complex conjugate pairs.
    Check that the shapes of A, B and poles are compatible.
    Check that the chosen method is compatible with the provided poles.
    Return the update method to use and the ordered poles.
"""
poles = np.asarray(poles)
if poles.ndim > 1:
raise ValueError("Poles must be a 1D array like.")
    # Will raise ValueError if poles do not come in complex conjugate pairs
poles = _order_complex_poles(poles)
if A.ndim > 2:
raise ValueError("A must be a 2D array/matrix.")
if B.ndim > 2:
raise ValueError("B must be a 2D array/matrix")
if A.shape[0] != A.shape[1]:
raise ValueError("A must be square")
if len(poles) > A.shape[0]:
raise ValueError("maximum number of poles is %d but you asked for %d" %
(A.shape[0], len(poles)))
if len(poles) < A.shape[0]:
raise ValueError("number of poles is %d but you should provide %d" %
(len(poles), A.shape[0]))
r = np.linalg.matrix_rank(B)
for p in poles:
if sum(p == poles) > r:
raise ValueError("at least one of the requested pole is repeated "
"more than rank(B) times")
# Choose update method
update_loop = _YT_loop
if method not in ('KNV0','YT'):
raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
if method == "KNV0":
update_loop = _KNV0_loop
if not all(np.isreal(poles)):
raise ValueError("Complex poles are not supported by KNV0")
if maxiter < 1:
raise ValueError("maxiter must be at least equal to 1")
# We do not check rtol <= 0 as the user can use a negative rtol to
# force maxiter iterations
if rtol > 1:
raise ValueError("rtol can not be greater than 1")
return update_loop, poles
def _order_complex_poles(poles):
"""
    Check that complex poles come in conjugate pairs and reorder them
    according to YT, i.e. real_poles, complex_i, conjugate complex_i, ...
    The lexicographic sort on the complex poles is added to help the user
    compare sets of poles.
"""
ordered_poles = np.sort(poles[np.isreal(poles)])
im_poles = []
for p in np.sort(poles[np.imag(poles) < 0]):
if np.conj(p) in poles:
im_poles.extend((p, np.conj(p)))
ordered_poles = np.hstack((ordered_poles, im_poles))
if poles.shape[0] != len(ordered_poles):
raise ValueError("Complex poles must come with their conjugates")
return ordered_poles
def _KNV0(B, ker_pole, transfer_matrix, j, poles):
"""
Algorithm "KNV0" Kautsky et Al. Robust pole
assignment in linear state feedback, Int journal of Control
1985, vol 41 p 1129->1155
https://la.epfl.ch/files/content/sites/la/files/
users/105941/public/KautskyNicholsDooren
"""
    # Remove xj from the basis
transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
    # If we QR this matrix in full mode Q=Q0|Q1
    # then Q1 will be a single column orthogonal to
    # Q0, which is what we are looking for!
# After merge of gh-4249 great speed improvements could be achieved
# using QR updates instead of full QR in the line below
# To debug with numpy qr uncomment the line below
# Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
Q, R = s_qr(transfer_matrix_not_j, mode="full")
mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
yj = np.dot(mat_ker_pj, Q[:, -1])
# If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
# projection into ker_pole[j] will yield a vector
# close to 0. As we are looking for a vector in ker_pole[j]
# simply stick with transfer_matrix[:, j] (unless someone provides me with
# a better choice ?)
if not np.allclose(yj, 0):
xj = yj/np.linalg.norm(yj)
transfer_matrix[:, j] = xj
    # KNV does not support complex poles; using the YT technique, the two
    # lines below seem to work 9 times out of 10, but this is not reliable
    # enough:
    # transfer_matrix[:, j]=real(xj)
    # transfer_matrix[:, j+1]=imag(xj)
    # Add this at the beginning of this function if you wish to test
    # complex support:
    # if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])):
    #     return
    # Problems arise when imag(xj) is close to 0; no fix is known for this.
def _YT_real(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.1 page 19 related to real pairs
"""
# step 1 page 19
u = Q[:, -2, np.newaxis]
v = Q[:, -1, np.newaxis]
# step 2 page 19
m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) -
np.dot(v, u.T)), ker_pole[j])
# step 3 page 19
um, sm, vm = np.linalg.svd(m)
    # mu1, mu2: the first two columns of U => the first two rows of U.T
    mu1, mu2 = um.T[:2, :, np.newaxis]
    # vm is V.T (NumPy's convention), so we want its first two rows
nu1, nu2 = vm[:2, :, np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
transfer_matrix_j_mo_transfer_matrix_j = np.vstack((
transfer_matrix[:, i, np.newaxis],
transfer_matrix[:, j, np.newaxis]))
if not np.allclose(sm[0], sm[1]):
ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1)
ker_pole_i_nu1 = np.dot(ker_pole[j], nu1)
ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1))
else:
ker_pole_ij = np.vstack((
np.hstack((ker_pole[i],
np.zeros(ker_pole[i].shape))),
np.hstack((np.zeros(ker_pole[j].shape),
ker_pole[j]))
))
mu_nu_matrix = np.vstack(
(np.hstack((mu1, mu2)), np.hstack((nu1, nu2)))
)
ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix)
transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_ij, 0):
transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij /
np.linalg.norm(transfer_matrix_ij))
transfer_matrix[:, i] = transfer_matrix_ij[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = transfer_matrix_ij[
transfer_matrix[:, i].shape[0]:, 0
]
else:
        # As in _KNV0, if transfer_matrix_j_mo_transfer_matrix_j is orthogonal
        # to span{ker_pole_mu_nu}, assign transfer_matrix_i/transfer_matrix_j
        # to ker_pole_mu_nu and iterate. As we are looking for a vector in
        # span{ker_pole_mu_nu} (see section 6.1 page 19) this might help
        # (that's a guess, not a claim!)
transfer_matrix[:, i] = ker_pole_mu_nu[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = ker_pole_mu_nu[
transfer_matrix[:, i].shape[0]:, 0
]
def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.2 page 20 related to complex pairs
"""
# step 1 page 20
ur = np.sqrt(2)*Q[:, -2, np.newaxis]
ui = np.sqrt(2)*Q[:, -1, np.newaxis]
u = ur + 1j*ui
# step 2 page 20
ker_pole_ij = ker_pole[i]
m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
np.dot(np.conj(u), u.T)), ker_pole_ij)
# step 3 page 20
e_val, e_vec = np.linalg.eig(m)
    # sort eigenvalues according to their modulus
e_val_idx = np.argsort(np.abs(e_val))
mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
mu2 = e_vec[:, e_val_idx[-2], np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
# remember transfer_matrix_i has been split as
# transfer_matrix[i]=real(transfer_matrix_i) and
# transfer_matrix[j]=imag(transfer_matrix_i)
transfer_matrix_j_mo_transfer_matrix_j = (
transfer_matrix[:, i, np.newaxis] +
1j*transfer_matrix[:, j, np.newaxis]
)
if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
np.abs(e_val[e_val_idx[-2]])):
ker_pole_mu = np.dot(ker_pole_ij, mu1)
else:
mu1_mu2_matrix = np.hstack((mu1, mu2))
ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_i_j, 0):
transfer_matrix_i_j = (transfer_matrix_i_j /
np.linalg.norm(transfer_matrix_i_j))
transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
else:
# same idea as in YT_real
transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Algorithm "YT" Tits, Yang. Globally Convergent
Algorithms for Robust Pole Assignment by State Feedback
https://hdl.handle.net/1903/5598
    The poles P have to be sorted according to section 6.2 page 20
"""
# The IEEE edition of the YT paper gives useful information on the
# optimal update order for the real poles in order to minimize the number
# of times we have to loop over all poles, see page 1442
nb_real = poles[np.isreal(poles)].shape[0]
# hnb => Half Nb Real
hnb = nb_real // 2
    # Stick to the 1-based indices used in the paper and subtract one at the
    # end to get NumPy indices; it is a bit easier to link the code to the
    # paper this way, even if it is not very clean. The paper is unclear
    # about what should be done when there is only one real pole => using
    # KNV0 on that real pole seems to work.
if nb_real > 0:
        # update the biggest real pole with the smallest one
update_order = [[nb_real], [1]]
else:
update_order = [[],[]]
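    # update_order collects 1-based (i, j) index pairs that follow the
    # paper's numbering; they are converted to 0-based NumPy indices just
    # before the main loop below.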
r_comp = np.arange(nb_real+1, len(poles)+1, 2)
# step 1.a
r_p = np.arange(1, hnb+nb_real % 2)
update_order[0].extend(2*r_p)
update_order[1].extend(2*r_p+1)
# step 1.b
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 1.c
r_p = np.arange(1, hnb+1)
update_order[0].extend(2*r_p-1)
update_order[1].extend(2*r_p)
# step 1.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.a
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+j)
# step 2.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.c
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(hnb+1, nb_real+1):
idx_1 = i+j
if idx_1 > nb_real:
idx_1 = i+j-nb_real
update_order[0].append(i)
update_order[1].append(idx_1)
# step 2.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 3.a
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+hnb)
# step 3.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
update_order = np.array(update_order).T-1
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for i, j in update_order:
if i == j:
assert i == 0, "i!=0 for KNV call in YT"
assert np.isreal(poles[i]), "calling KNV on a complex pole"
_KNV0(B, ker_pole, transfer_matrix, i, poles)
else:
transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
axis=1)
# after merge of gh-4249 great speed improvements could be
# achieved using QR updates instead of full QR in the line below
#to debug with numpy qr uncomment the line below
#Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")
if np.isreal(poles[i]):
assert np.isreal(poles[j]), "mixing real and complex " + \
"in YT_real" + str(poles)
_YT_real(ker_pole, Q, transfer_matrix, i, j)
else:
                    assert ~np.isreal(poles[i]), "mixing real and complex " + \
                        "in YT_complex" + str(poles)
_YT_complex(ker_pole, Q, transfer_matrix, i, j)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs(
(det_transfer_matrix -
det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Loop over all poles one by one and apply KNV method 0 algorithm
"""
# This method is useful only because we need to be able to call
# _KNV0 from YT without looping over all poles, otherwise it would
# have been fine to mix _KNV0_loop and _KNV0 in a single function
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for j in range(B.shape[0]):
_KNV0(B, ker_pole, transfer_matrix, j, poles)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
"""
    Compute K such that the eigenvalues of ``A - dot(B, K)`` are ``poles``.
    K is the gain matrix such that the plant described by the linear system
    ``AX+BU`` will have its closed-loop poles, i.e. the eigenvalues of
    ``A - B*K``, as close as possible to those asked for in ``poles``.
SISO, MISO and MIMO systems are supported.
Parameters
----------
A, B : ndarray
State-space representation of linear system ``AX + BU``.
poles : array_like
        Desired real poles and/or complex conjugate poles.
Complex poles are only supported with ``method="YT"`` (default).
method: {'YT', 'KNV0'}, optional
Which method to choose to find the gain matrix K. One of:
- 'YT': Yang Tits
- 'KNV0': Kautsky, Nichols, Van Dooren update method 0
See References and Notes for details on the algorithms.
rtol: float, optional
After each iteration the determinant of the eigenvectors of
``A - B*K`` is compared to its previous value, when the relative
error between these two values becomes lower than `rtol` the algorithm
stops. Default is 1e-3.
maxiter: int, optional
Maximum number of iterations to compute the gain matrix.
Default is 30.
Returns
-------
full_state_feedback : Bunch object
full_state_feedback is composed of:
        gain_matrix : 2-D ndarray
            The closed-loop gain matrix K such that the eigenvalues of
            ``A-BK`` are as close as possible to the requested poles.
computed_poles : 1-D ndarray
The poles corresponding to ``A-BK`` sorted as first the real
            poles in increasing order, then the complex conjugates in
lexicographic order.
requested_poles : 1-D ndarray
The poles the algorithm was asked to place sorted as above,
they may differ from what was achieved.
X : 2-D ndarray
            The transfer matrix such that ``X * diag(poles) = (A - B*K)*X``
(see Notes)
rtol : float
The relative tolerance achieved on ``det(X)`` (see Notes).
`rtol` will be NaN if it is possible to solve the system
``diag(poles) = (A - B*K)``, or 0 when the optimization
            algorithms can't do anything, i.e. when ``B.shape[1] == 1``.
nb_iter : int
The number of iterations performed before converging.
`nb_iter` will be NaN if it is possible to solve the system
``diag(poles) = (A - B*K)``, or 0 when the optimization
            algorithms can't do anything, i.e. when ``B.shape[1] == 1``.
Notes
-----
    The Tits and Yang (YT) paper [2]_ is an update of the original Kautsky et
    al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer
matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
rank-2 updates. This yields on average more robust solutions (see [2]_
pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV
does not in its original version. Only update method 0 proposed by KNV has
been implemented here, hence the name ``'KNV0'``.
KNV extended to complex poles is used in Matlab's ``place`` function, YT is
distributed under a non-free licence by Slicot under the name ``robpole``.
It is unclear and undocumented how KNV0 has been extended to complex poles
(Tits and Yang claim on page 14 of their paper that their method can not be
used to extend KNV to complex poles), therefore only YT supports them in
this implementation.
As the solution to the problem of pole placement is not unique for MIMO
systems, both methods start with a tentative transfer matrix which is
    altered in various ways to increase its determinant. Both methods have been
proven to converge to a stable solution, however depending on the way the
initial transfer matrix is chosen they will converge to different
solutions and therefore there is absolutely no guarantee that using
``'KNV0'`` will yield results similar to Matlab's or any other
implementation of these algorithms.
Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
is only provided because it is needed by ``'YT'`` in some specific cases.
Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
when ``abs(det(X))`` is used as a robustness indicator.
[2]_ is available as a technical report on the following URL:
https://hdl.handle.net/1903/5598
References
----------
.. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
in linear state feedback", International Journal of Control, Vol. 41
pp. 1129-1155, 1985.
.. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
pole assignment by state feedback", IEEE Transactions on Automatic
Control, Vol. 41, pp. 1432-1452, 1996.
Examples
--------
A simple example demonstrating real pole placement using both KNV and YT
algorithms. This is example number 1 from section 4 of the reference KNV
publication ([1]_):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ],
... [-0.5814, -4.290, 0, 0.6750 ],
... [ 1.067, 4.273, -6.654, 5.893 ],
... [ 0.0480, 4.273, 1.343, -2.104 ]])
>>> B = np.array([[ 0, 5.679 ],
... [ 1.136, 1.136 ],
... [ 0, 0, ],
... [-3.146, 0 ]])
>>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])
Now compute K with KNV method 0, with the default YT method and with the YT
method while forcing 100 iterations of the algorithm and print some results
after each call.
>>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
>>> fsf1.gain_matrix
array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785],
[ 0.50587268, 0.57779091, 0.51795763, -0.41991442]])
>>> fsf2 = signal.place_poles(A, B, P) # uses YT method
>>> fsf2.computed_poles
array([-8.6659, -5.0566, -0.5 , -0.2 ])
>>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
>>> fsf3.X
array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j],
[-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j],
[-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
[ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])
The absolute value of the determinant of X is a good indicator to check the
robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing
it. Below a comparison of the robustness of the results above:
>>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
True
>>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
True
Now a simple example for complex poles:
>>> A = np.array([[ 0, 7/3., 0, 0 ],
... [ 0, 0, 0, 7/9. ],
... [ 0, 0, 0, 0 ],
... [ 0, 0, 0, 0 ]])
>>> B = np.array([[ 0, 0 ],
... [ 0, 0 ],
... [ 1, 0 ],
... [ 0, 1 ]])
>>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
>>> fsf = signal.place_poles(A, B, P, method='YT')
We can plot the desired and computed poles in the complex plane:
>>> t = np.linspace(0, 2*np.pi, 401)
>>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle
>>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
... 'wo', label='Desired')
>>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
... label='Placed')
>>> plt.grid()
>>> plt.axis('image')
>>> plt.axis([-1.1, 1.1, -1.1, 1.1])
>>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)
"""
    # All input checking lives in _valid_inputs; it would only add noise here
update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)
# The current value of the relative tolerance we achieved
cur_rtol = 0
# The number of iterations needed before converging
nb_iter = 0
    # Step A: QR decomposition of B, page 1132 of KNV
# to debug with numpy qr uncomment the line below
# u, z = np.linalg.qr(B, mode="complete")
u, z = s_qr(B, mode="full")
rankB = np.linalg.matrix_rank(B)
u0 = u[:, :rankB]
u1 = u[:, rankB:]
z = z[:rankB, :]
# If we can use the identity matrix as X the solution is obvious
if B.shape[0] == rankB:
        # if B is square and full rank there is only one solution
        # such that (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0]),
        # i.e. K=inv(B)*(diag(P)-A)
        # if B has as many rows as its rank (but is not square) there are
        # many solutions and we can choose one using least squares
        # => use lstsq in both cases.
# In both cases the transfer matrix X will be eye(A.shape[0]) and I
# can hardly think of a better one so there is nothing to optimize
#
        # for complex poles we use the following trick
        #
        # |a -b| has eigenvalues a+bi and a-bi
        # |b  a|
        #
        # |a+bi 0   | has the obvious eigenvalues a+bi and a-bi
        # |0    a-bi|
        #
        # i.e. solving the first one in R gives the solution
        # for the second one in C
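        # quick check with a=-2, b=1: [[-2, -1], [1, -2]] has characteristic
        # polynomial (lambda+2)**2 + 1, i.e. eigenvalues -2+1j and -2-1j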
diag_poles = np.zeros(A.shape)
idx = 0
while idx < poles.shape[0]:
p = poles[idx]
diag_poles[idx, idx] = np.real(p)
if ~np.isreal(p):
diag_poles[idx, idx+1] = -np.imag(p)
diag_poles[idx+1, idx+1] = np.real(p)
diag_poles[idx+1, idx] = np.imag(p)
idx += 1 # skip next one
idx += 1
gain_matrix = np.linalg.lstsq(B, diag_poles-A, rcond=-1)[0]
transfer_matrix = np.eye(A.shape[0])
cur_rtol = np.nan
nb_iter = np.nan
else:
# step A (p1144 KNV) and beginning of step F: decompose
# dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
# in the same loop
ker_pole = []
# flag to skip the conjugate of a complex pole
skip_conjugate = False
        # select an orthonormal basis ker_pole for each pole and vectors for
        # transfer_matrix
for j in range(B.shape[0]):
if skip_conjugate:
skip_conjugate = False
continue
pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T
# after QR Q=Q0|Q1
# only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
            # Q1 is orthogonal to Q0 and will be multiplied by the zeros in
# R when using mode "complete". In default mode Q1 and the zeros
# in R are not computed
# To debug with numpy qr uncomment the line below
# Q, _ = np.linalg.qr(pole_space_j, mode="complete")
Q, _ = s_qr(pole_space_j, mode="full")
ker_pole_j = Q[:, pole_space_j.shape[1]:]
            # We want to select one vector in ker_pole_j to build the transfer
            # matrix. However, qr sometimes returns vectors with zeros on the
            # same row for each pole, which yields very long convergence
            # times, or sometimes a set of vectors where one has a zero
            # imaginary part and one (or several) have non-zero imaginary
            # parts. After trying many ways to select the best possible one
            # (e.g. ditching vectors with zero imaginary part for complex
            # poles), summing all vectors in ker_pole_j turned out to solve
            # all of these problems and is a valid choice for transfer_matrix.
            # For complex poles this guarantees a non-zero imaginary part,
            # and the problem of rows full of zeros in transfer_matrix is
            # solved too: when one vector from ker_pole_j has a zero there,
            # the other one(s) (when ker_pole_j.shape[1] > 1) won't.
transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
transfer_matrix_j = (transfer_matrix_j /
np.linalg.norm(transfer_matrix_j))
if ~np.isreal(poles[j]): # complex pole
transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
np.imag(transfer_matrix_j)])
ker_pole.extend([ker_pole_j, ker_pole_j])
# Skip next pole as it is the conjugate
skip_conjugate = True
else: # real pole, nothing to do
ker_pole.append(ker_pole_j)
if j == 0:
transfer_matrix = transfer_matrix_j
else:
transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))
if rankB > 1: # otherwise there is nothing we can optimize
stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
poles, B, maxiter, rtol)
if not stop and rtol > 0:
# if rtol<=0 the user has probably done that on purpose,
# don't annoy him
err_msg = (
"Convergence was not reached after maxiter iterations.\n"
"You asked for a relative tolerance of %f we got %f" %
(rtol, cur_rtol)
)
warnings.warn(err_msg)
    # reconstruct transfer_matrix to match complex conjugate pairs,
    # i.e. transfer_matrix_j/transfer_matrix_j+1 currently hold
    # Re(complex_pole), Im(complex_pole) and will hold Re-1j*Im / Re+1j*Im
    # afterwards
transfer_matrix = transfer_matrix.astype(complex)
idx = 0
while idx < poles.shape[0]-1:
if ~np.isreal(poles[idx]):
rel = transfer_matrix[:, idx].copy()
img = transfer_matrix[:, idx+1]
            # rel references a column of transfer_matrix; without copy() it
            # would change after the next line, and the line after that
            # would not yield the correct value
transfer_matrix[:, idx] = rel-1j*img
transfer_matrix[:, idx+1] = rel+1j*img
idx += 1 # skip next one
idx += 1
try:
m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
transfer_matrix.T)).T
gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
except np.linalg.LinAlgError as e:
raise ValueError("The poles you've chosen can't be placed. "
"Check the controllability matrix and try "
"another set of poles") from e
# Beware: Kautsky solves A+BK but the usual form is A-BK
gain_matrix = -gain_matrix
    # K still contains complex entries with ~0j imaginary parts; get rid of them
gain_matrix = np.real(gain_matrix)
full_state_feedback = Bunch()
full_state_feedback.gain_matrix = gain_matrix
full_state_feedback.computed_poles = _order_complex_poles(
np.linalg.eig(A - np.dot(B, gain_matrix))[0]
)
full_state_feedback.requested_poles = poles
full_state_feedback.X = transfer_matrix
full_state_feedback.rtol = cur_rtol
full_state_feedback.nb_iter = nb_iter
return full_state_feedback
def dlsim(system, u, t=None, x0=None):
"""
Simulate output of a discrete-time linear system.
Parameters
----------
system : tuple of array_like or instance of `dlti`
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
u : array_like
An input array describing the input at each time `t` (interpolation is
assumed between given times). If there are multiple inputs, then each
column of the rank-2 array represents an input.
t : array_like, optional
The time steps at which the input is defined. If `t` is given, it
must be the same length as `u`, and the final value in `t` determines
the number of steps returned in the output.
x0 : array_like, optional
The initial conditions on the state vector (zero by default).
Returns
-------
tout : ndarray
Time values for the output, as a 1-D array.
yout : ndarray
System response, as a 1-D array.
xout : ndarray, optional
Time-evolution of the state-vector. Only generated if the input is a
`StateSpace` system.
See Also
--------
lsim, dstep, dimpulse, cont2discrete
Examples
--------
A simple integrator transfer function with a discrete time step of 1.0
could be implemented as:
>>> from scipy import signal
>>> tf = ([1.0,], [1.0, -1.0], 1.0)
>>> t_in = [0.0, 1.0, 2.0, 3.0]
>>> u = np.asarray([0.0, 0.0, 1.0, 1.0])
>>> t_out, y = signal.dlsim(tf, u, t=t_in)
>>> y.T
array([[ 0., 0., 0., 1.]])
"""
# Convert system to dlti-StateSpace
if isinstance(system, lti):
raise AttributeError('dlsim can only be used with discrete-time dlti '
'systems.')
elif not isinstance(system, dlti):
system = dlti(*system[:-1], dt=system[-1])
# Condition needed to ensure output remains compatible
is_ss_input = isinstance(system, StateSpace)
system = system._as_ss()
u = np.atleast_1d(u)
if u.ndim == 1:
u = np.atleast_2d(u).T
if t is None:
out_samples = len(u)
stoptime = (out_samples - 1) * system.dt
else:
stoptime = t[-1]
out_samples = int(np.floor(stoptime / system.dt)) + 1
# Pre-build output arrays
xout = np.zeros((out_samples, system.A.shape[0]))
yout = np.zeros((out_samples, system.C.shape[0]))
tout = np.linspace(0.0, stoptime, num=out_samples)
# Check initial condition
if x0 is None:
xout[0, :] = np.zeros((system.A.shape[1],))
else:
xout[0, :] = np.asarray(x0)
# Pre-interpolate inputs into the desired time steps
if t is None:
u_dt = u
else:
if len(u.shape) == 1:
u = u[:, np.newaxis]
u_dt_interp = interp1d(t, u.transpose(), copy=False, bounds_error=True)
u_dt = u_dt_interp(tout).transpose()
# Simulate the system
for i in range(0, out_samples - 1):
xout[i+1, :] = (np.dot(system.A, xout[i, :]) +
np.dot(system.B, u_dt[i, :]))
yout[i, :] = (np.dot(system.C, xout[i, :]) +
np.dot(system.D, u_dt[i, :]))
# Last point
yout[out_samples-1, :] = (np.dot(system.C, xout[out_samples-1, :]) +
np.dot(system.D, u_dt[out_samples-1, :]))
if is_ss_input:
return tout, yout, xout
else:
return tout, yout
def dimpulse(system, x0=None, t=None, n=None):
"""
Impulse response of discrete-time system.
Parameters
----------
system : tuple of array_like or instance of `dlti`
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
x0 : array_like, optional
Initial state-vector. Defaults to zero.
t : array_like, optional
Time points. Computed if not given.
n : int, optional
The number of time points to compute (if `t` is not given).
Returns
-------
tout : ndarray
Time values for the output, as a 1-D array.
yout : tuple of ndarray
Impulse response of system. Each element of the tuple represents
the output of the system based on an impulse in each input.
See Also
--------
impulse, dstep, dlsim, cont2discrete
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> butter = signal.dlti(*signal.butter(3, 0.5))
>>> t, y = signal.dimpulse(butter, n=25)
>>> plt.step(t, np.squeeze(y))
>>> plt.grid()
>>> plt.xlabel('n [samples]')
>>> plt.ylabel('Amplitude')
"""
# Convert system to dlti-StateSpace
if isinstance(system, dlti):
system = system._as_ss()
elif isinstance(system, lti):
raise AttributeError('dimpulse can only be used with discrete-time '
'dlti systems.')
else:
system = dlti(*system[:-1], dt=system[-1])._as_ss()
# Default to 100 samples if unspecified
if n is None:
n = 100
# If time is not specified, use the number of samples
# and system dt
if t is None:
t = np.linspace(0, n * system.dt, n, endpoint=False)
else:
t = np.asarray(t)
    # For each input, apply a unit impulse at the first sample
yout = None
for i in range(0, system.inputs):
u = np.zeros((t.shape[0], system.inputs))
u[0, i] = 1.0
one_output = dlsim(system, u, t=t, x0=x0)
if yout is None:
yout = (one_output[1],)
else:
yout = yout + (one_output[1],)
tout = one_output[0]
return tout, yout
def dstep(system, x0=None, t=None, n=None):
"""
Step response of discrete-time system.
Parameters
----------
system : tuple of array_like
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
x0 : array_like, optional
Initial state-vector. Defaults to zero.
t : array_like, optional
Time points. Computed if not given.
n : int, optional
The number of time points to compute (if `t` is not given).
Returns
-------
tout : ndarray
Output time points, as a 1-D array.
yout : tuple of ndarray
Step response of system. Each element of the tuple represents
the output of the system based on a step response to each input.
See Also
--------
step, dimpulse, dlsim, cont2discrete
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> butter = signal.dlti(*signal.butter(3, 0.5))
>>> t, y = signal.dstep(butter, n=25)
>>> plt.step(t, np.squeeze(y))
>>> plt.grid()
>>> plt.xlabel('n [samples]')
>>> plt.ylabel('Amplitude')
"""
# Convert system to dlti-StateSpace
if isinstance(system, dlti):
system = system._as_ss()
elif isinstance(system, lti):
raise AttributeError('dstep can only be used with discrete-time dlti '
'systems.')
else:
system = dlti(*system[:-1], dt=system[-1])._as_ss()
# Default to 100 samples if unspecified
if n is None:
n = 100
# If time is not specified, use the number of samples
# and system dt
if t is None:
t = np.linspace(0, n * system.dt, n, endpoint=False)
else:
t = np.asarray(t)
# For each input, implement a step change
yout = None
for i in range(0, system.inputs):
u = np.zeros((t.shape[0], system.inputs))
u[:, i] = np.ones((t.shape[0],))
one_output = dlsim(system, u, t=t, x0=x0)
if yout is None:
yout = (one_output[1],)
else:
yout = yout + (one_output[1],)
tout = one_output[0]
return tout, yout
def dfreqresp(system, w=None, n=10000, whole=False):
r"""
Calculate the frequency response of a discrete-time system.
Parameters
----------
system : an instance of the `dlti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
        * 1 (instance of `dlti`)
        * 3 (numerator, denominator, dt)
        * 4 (zeros, poles, gain, dt)
        * 5 (A, B, C, D, dt)
w : array_like, optional
Array of frequencies (in radians/sample). Magnitude and phase data is
calculated for every value in this array. If not given a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
whole : bool, optional
Normally, if 'w' is not given, frequencies are computed from 0 to the
Nyquist frequency, pi radians/sample (upper-half of unit-circle). If
`whole` is True, compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : 1D ndarray
Frequency array [radians/sample]
H : 1D ndarray
Array of complex magnitude values
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.18.0
Examples
--------
Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Construct the transfer function
:math:`H(z) = \frac{1}{z^2 + 2z + 3}` with a sampling time of 0.05
seconds:
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
>>> w, H = signal.dfreqresp(sys)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if not isinstance(system, dlti):
if isinstance(system, lti):
raise AttributeError('dfreqresp can only be used with '
'discrete-time systems.')
system = dlti(*system[:-1], dt=system[-1])
if isinstance(system, StateSpace):
# No SS->ZPK code exists right now, just SS->TF->ZPK
system = system._as_tf()
if not isinstance(system, (TransferFunction, ZerosPolesGain)):
raise ValueError('Unknown system type')
if system.inputs != 1 or system.outputs != 1:
raise ValueError("dfreqresp requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
if isinstance(system, TransferFunction):
# Convert numerator and denominator from polynomials in the variable
# 'z' to polynomials in the variable 'z^-1', as freqz expects.
num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den)
w, h = freqz(num, den, worN=worN, whole=whole)
elif isinstance(system, ZerosPolesGain):
w, h = freqz_zpk(system.zeros, system.poles, system.gain, worN=worN,
whole=whole)
return w, h
def dbode(system, w=None, n=100):
r"""
Calculate Bode magnitude and phase data of a discrete-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
        * 1 (instance of `dlti`)
        * 3 (num, den, dt)
        * 4 (zeros, poles, gain, dt)
        * 5 (A, B, C, D, dt)
w : array_like, optional
Array of frequencies (in radians/sample). Magnitude and phase data is
calculated for every value in this array. If not given a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/time_unit]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Construct the transfer function :math:`H(z) = \frac{1}{z^2 + 2z + 3}` with
a sampling time of 0.05 seconds:
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
Equivalent: sys.bode()
>>> w, mag, phase = signal.dbode(sys)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = dfreqresp(system, w=w, n=n)
if isinstance(system, dlti):
dt = system.dt
else:
dt = system[-1]
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.rad2deg(numpy.unwrap(numpy.angle(y)))
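    # dfreqresp returns frequencies in rad/sample; dividing by dt converts
    # them to rad per unit time (matching the units of dt).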
return w / dt, mag, phase
| bsd-3-clause |
qtekfun/htcDesire820Kernel | external/chromium_org/ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py | 154 | 8545 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
dump_info = {}
fh = open(filename, 'r')
for line in fh:
if ':' in line:
key, value = line.rstrip().split(':', 1)
dump_info[key] = value
fh.close()
return dump_info
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, crash_service_exe,
skip_if_missing=False):
# Find crash_service.exe relative to chrome.exe. This is a bit icky.
browser_dir = os.path.dirname(browser_path)
crash_service_path = os.path.join(browser_dir, crash_service_exe)
if skip_if_missing and not os.path.exists(crash_service_path):
return
proc = subprocess.Popen([crash_service_path,
'--v=1', # Verbose output for debugging failures
'--dumps-dir=%s' % dumps_dir,
'--pipe-name=%s' % windows_pipe_name])
def Cleanup():
# Note that if the process has already exited, this will raise
# an 'Access is denied' WindowsError exception, but
# crash_service.exe is not supposed to do this and such
# behaviour should make the test fail.
proc.terminate()
status = proc.wait()
sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
% (crash_service_exe, status))
cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
if os.path.exists(dir_path):
return [os.path.join(dir_path, name)
for name in os.listdir(dir_path)]
else:
return []
def GetDumpFiles(dumps_dirs):
all_files = [filename
for dumps_dir in dumps_dirs
for filename in ListPathsInDir(dumps_dir)]
sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
for dump_file in all_files:
sys.stdout.write(' %s (size %i)\n'
% (dump_file, os.stat(dump_file).st_size))
return [dump_file for dump_file in all_files
if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
parser = browser_tester.BuildArgParser()
parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
type=int, default=0,
help='The number of crash dumps that we should expect')
parser.add_option('--expected_process_type_for_crash',
dest='expected_process_type_for_crash',
type=str, default='nacl-loader',
help='The type of Chromium process that we expect the '
'crash dump to be for')
# Ideally we would just query the OS here to find out whether we are
# running x86-32 or x86-64 Windows, but Python's win32api module
# does not contain a wrapper for GetNativeSystemInfo(), which is
# what NaCl uses to check this, or for IsWow64Process(), which is
# what Chromium uses. Instead, we just rely on the build system to
# tell us.
parser.add_option('--win64', dest='win64', action='store_true',
help='Pass this if we are running tests for x86-64 Windows')
options, args = parser.parse_args()
temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
def CleanUpTempDir():
browsertester.browserlauncher.RemoveDirectory(temp_dir)
cleanup_funcs.append(CleanUpTempDir)
# To get a guaranteed unique pipe name, use the base name of the
# directory we just created.
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
# This environment variable enables Breakpad crash dumping in
# non-official builds of Chromium.
os.environ['CHROME_HEADLESS'] = '1'
if sys.platform == 'win32':
dumps_dir = temp_dir
# Override the default (global) Windows pipe name that Chromium will
# use for out-of-process crash reporting.
os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
# Launch the x86-32 crash service so that we can handle crashes in
# the browser process.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service.exe')
if options.win64:
# Launch the x86-64 crash service so that we can handle crashes
# in the NaCl loader process (nacl64.exe).
# Skip if missing, since in win64 builds crash_service.exe is 64-bit
# and crash_service64.exe does not exist.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service64.exe',
skip_if_missing=True)
# We add a delay because there is probably a race condition:
# crash_service.exe might not have finished doing
# CreateNamedPipe() before NaCl does a crash dump and tries to
# connect to that pipe.
# TODO(mseaborn): We could change crash_service.exe to report when
# it has successfully created the named pipe.
time.sleep(1)
elif sys.platform == 'darwin':
dumps_dir = temp_dir
os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
elif sys.platform.startswith('linux'):
# The "--user-data-dir" option is not effective for the Breakpad
# setup in Linux Chromium, because Breakpad is initialized before
# "--user-data-dir" is read. So we set HOME to redirect the crash
# dumps to a temporary directory.
home_dir = temp_dir
os.environ['HOME'] = home_dir
options.enable_crash_reporter = True
result = browser_tester.Run(options.url, options)
# Find crash dump results.
if sys.platform.startswith('linux'):
# Look in "~/.config/*/Crash Reports". This will find crash
# reports under ~/.config/chromium or ~/.config/google-chrome, or
# under other subdirectories in case the branding is changed.
dumps_dirs = [os.path.join(path, 'Crash Reports')
for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
else:
dumps_dirs = [dumps_dir]
dmp_files = GetDumpFiles(dumps_dirs)
failed = False
msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
(len(dmp_files), options.expected_crash_dumps))
if len(dmp_files) != options.expected_crash_dumps:
sys.stdout.write(msg)
failed = True
for dump_file in dmp_files:
# Sanity check: Make sure dumping did not fail after opening the file.
msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
if os.stat(dump_file).st_size == 0:
sys.stdout.write(msg)
failed = True
# On Windows, the crash dumps should come in pairs of a .dmp and
# .txt file.
if sys.platform == 'win32':
second_file = dump_file[:-4] + '.txt'
msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
'%r file\n' % (dump_file, second_file))
if not os.path.exists(second_file):
sys.stdout.write(msg)
failed = True
continue
# Check that the crash dump comes from the NaCl process.
dump_info = ReadDumpTxtFile(second_file)
if 'ptype' in dump_info:
msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
% (dump_info['ptype'], options.expected_process_type_for_crash))
if dump_info['ptype'] != options.expected_process_type_for_crash:
sys.stdout.write(msg)
failed = True
else:
sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
failed = True
# TODO(mseaborn): Ideally we would also check that a backtrace
# containing an expected function name can be extracted from the
# crash dump.
if failed:
sys.stdout.write('crash_dump_tester: FAILED\n')
result = 1
else:
sys.stdout.write('crash_dump_tester: PASSED\n')
return result
def MainWrapper():
cleanup_funcs = []
try:
return Main(cleanup_funcs)
finally:
for func in cleanup_funcs:
func()
if __name__ == '__main__':
sys.exit(MainWrapper())
| gpl-2.0 |
venus2247/PyS | Test for DM&ML/DigitRecognize/Operation.py | 1 | 1334 | import pandas as pd # Dataframe
from sklearn.ensemble import RandomForestClassifier # Classification algorithm - random forest
from sklearn import metrics, grid_search
from sklearn.metrics import classification_report
from sklearn.cross_validation import train_test_split
import numpy as np
import math
import random as rd
import pylab as pl
import matplotlib.pyplot as plt
#matplotlib inline
import os
os.chdir('c:\\TEMP')
print(os.getcwd())  # 'pwd' is an IPython/shell command, not valid plain Python
train_df = pd.read_csv('train.csv')
test_df = pd.read_csv('test.csv')
train_data = train_df.drop('label',axis=1).values
train_labels = train_df['label'].values
test_data = test_df.values
print ("Loading finished.")
print ("Train size:", train_df.shape)
print ("Test size:", test_df.shape)
train_df.head(5)
train_images = []
for image in train_data:
train_images.append(image.reshape(28,28))
train_images = np.array(train_images)
plt.figure(figsize=(20,10), dpi=600)
for i in range(10):
plt.subplot(1,10,(i+1))
print(train_labels[i],
pl.imshow(train_images[i],cmap=pl.cm.gray_r))
pl.show()
clf = RandomForestClassifier()
clf.fit(train_data,train_labels)
predictions = clf.predict(test_data)
print ("Predicting finished.")
submission = pd.DataFrame({"ImageId": np.arange(1,28001),"Label": predictions})
submission.to_csv('./submission.csv',index=False)
print ("Submission created.") | mit |
mne-tools/mne-python | examples/inverse/psf_ctf_label_leakage.py | 15 | 8034 | """
============================================================
Visualize source leakage among labels using a circular graph
============================================================
This example computes all-to-all pairwise leakage among 68 regions in
source space based on MNE inverse solutions and a FreeSurfer cortical
parcellation. Label-to-label leakage is estimated as the correlation among the
labels' point-spread functions (PSFs). It is visualized using a circular graph
which is ordered based on the locations of the regions in the axial plane.
"""
# Authors: Olaf Hauk <[email protected]>
# Martin Luessi <[email protected]>
# Alexandre Gramfort <[email protected]>
# Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import (read_inverse_operator,
make_inverse_resolution_matrix,
get_point_spread)
from mne.viz import circular_layout, plot_connectivity_circle
print(__doc__)
###############################################################################
# Load forward solution and inverse operator
# ------------------------------------------
#
# We need a matching forward solution and inverse operator to compute
# resolution matrices for different methods.
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-fixed-inv.fif'
forward = mne.read_forward_solution(fname_fwd)
# Convert forward solution to fixed source orientations
mne.convert_forward_solution(
forward, surf_ori=True, force_fixed=True, copy=False)
inverse_operator = read_inverse_operator(fname_inv)
# Compute resolution matrices for MNE
rm_mne = make_inverse_resolution_matrix(forward, inverse_operator,
method='MNE', lambda2=1. / 3.**2)
src = inverse_operator['src']
del forward, inverse_operator # save memory
###############################################################################
# Read and organise labels for cortical parcellation
# --------------------------------------------------
#
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels = mne.read_labels_from_annot('sample', parc='aparc',
subjects_dir=subjects_dir)
n_labels = len(labels)
label_colors = [label.color for label in labels]
# First, we reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]
lh_labels = [name for name in label_names if name.endswith('lh')]
# Get the y-location of the label
label_ypos = list()
for name in lh_labels:
idx = label_names.index(name)
ypos = np.mean(labels[idx].pos[:, 1])
label_ypos.append(ypos)
# Reorder the labels based on their location
lh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))]
# For the right hemi
rh_labels = [label[:-2] + 'rh' for label in lh_labels]
###############################################################################
# Compute point-spread function summaries (PCA) for all labels
# ------------------------------------------------------------
#
# We summarise the PSFs per label by their first five principal components, and
# use the first component to evaluate label-to-label leakage below.
# Compute first PCA component across PSFs within labels.
# Note the differences in explained variance, probably due to different
# spatial extents of labels.
n_comp = 5
stcs_psf_mne, pca_vars_mne = get_point_spread(
rm_mne, src, labels, mode='pca', n_comp=n_comp, norm=None,
return_pca_vars=True)
n_verts = rm_mne.shape[0]
del rm_mne
###############################################################################
# We can show the explained variances of principal components per label. Note
# how they differ across labels, most likely due to their varying spatial
# extent.
with np.printoptions(precision=1):
for [name, var] in zip(label_names, pca_vars_mne):
print(f'{name}: {var.sum():.1f}% {var}')
###############################################################################
# The output shows the summed variance explained by the first five principal
# components as well as the explained variances of the individual components.
#
# Evaluate leakage based on label-to-label PSF correlations
# ---------------------------------------------------------
#
# Note that correlations ignore the overall amplitude of PSFs, i.e. they do
# not show which region will potentially be the bigger "leaker".
# get PSFs from Source Estimate objects into matrix
psfs_mat = np.zeros([n_labels, n_verts])
# Leakage matrix for MNE, get first principal component per label
for [i, s] in enumerate(stcs_psf_mne):
psfs_mat[i, :] = s.data[:, 0]
# Compute label-to-label leakage as Pearson correlation of PSFs
# Sign of correlation is arbitrary, so take absolute values
leakage_mne = np.abs(np.corrcoef(psfs_mat))
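# Quick self-check (added for illustration, not in the original example): the
# leakage matrix is square, with one row/column per label, and its diagonal is
# 1 because every PSF correlates perfectly with itself.
assert leakage_mne.shape == (n_labels, n_labels)
assert np.allclose(np.diag(leakage_mne), 1.0)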
# Save the plot order and create a circular layout
node_order = lh_labels[::-1] + rh_labels # mirror label order across hemis
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=[0, len(label_names) / 2])
# Plot the graph using node colors from the FreeSurfer parcellation. We only
# show the 200 strongest connections.
fig = plt.figure(num=None, figsize=(8, 8), facecolor='black')
plot_connectivity_circle(leakage_mne, label_names, n_lines=200,
node_angles=node_angles, node_colors=label_colors,
title='MNE Leakage', fig=fig)
###############################################################################
# Most leakage occurs for neighbouring regions, but also for deeper regions
# across hemispheres.
#
# Save the figure (optional)
# --------------------------
#
# Matplotlib controls figure facecolor separately for interactive display
# versus for saved figures. Thus when saving you must specify ``facecolor``,
# else your labels, title, etc will not be visible::
#
# >>> fname_fig = data_path + '/MEG/sample/plot_label_leakage.png'
# >>> fig.savefig(fname_fig, facecolor='black')
#
# Plot PSFs for individual labels
# -------------------------------
#
# Let us confirm for left and right lateral occipital lobes that there is
# indeed no leakage between them, as indicated by the correlation graph.
# We can plot the summary PSFs for both labels to examine the spatial extent of
# their leakage.
# left and right lateral occipital
idx = [22, 23]
stc_lh = stcs_psf_mne[idx[0]]
stc_rh = stcs_psf_mne[idx[1]]
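# Print the selected label names (illustration only) to confirm that the
# indices above do pick the left and right lateral occipital labels.
print([label_names[i] for i in idx])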
# Maximum for scaling across plots
max_val = np.max([stc_lh.data, stc_rh.data])
###############################################################################
# Point-spread function for the lateral occipital label in the left hemisphere
brain_lh = stc_lh.plot(subjects_dir=subjects_dir, subject='sample',
hemi='both', views='caudal',
clim=dict(kind='value',
pos_lims=(0, max_val / 2., max_val)))
brain_lh.add_text(0.1, 0.9, label_names[idx[0]], 'title', font_size=16)
###############################################################################
# and in the right hemisphere.
brain_rh = stc_rh.plot(subjects_dir=subjects_dir, subject='sample',
hemi='both', views='caudal',
clim=dict(kind='value',
pos_lims=(0, max_val / 2., max_val)))
brain_rh.add_text(0.1, 0.9, label_names[idx[1]], 'title', font_size=16)
###############################################################################
# Both summary PSFs are confined to their respective hemispheres, indicating
# that there is indeed low leakage between these two regions.
| bsd-3-clause |
frank-tancf/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 73 | 1232 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
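# Spot check of the piecewise definition above (added for illustration):
# z = y_true * y_pred, so z = -2 stays in the linear branch -4 * z = 8,
# z = 0 falls in the quadratic branch (1 - z) ** 2 = 1, and z >= 1 gives 0.
print(modified_huber_loss(np.array([-2.0, 0.0, 2.0]), 1))  # -> [8. 1. 0.]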
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
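# A brief, hedged illustration (not part of the original example): fitting
# SGDClassifier with one of the losses compared above. The synthetic dataset
# and hyperparameters are arbitrary choices for demonstration only.
from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier
X_demo, y_demo = make_classification(n_samples=200, random_state=0)
clf_demo = SGDClassifier(loss="modified_huber", max_iter=1000, tol=1e-3)
clf_demo.fit(X_demo, y_demo)
print("demo training accuracy:", clf_demo.score(X_demo, y_demo))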
| bsd-3-clause |
xzturn/tensorflow | tensorflow/tools/compatibility/renames_v2.py | 3 | 59978 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
pyformat --in_place third_party/tensorflow/tools/compatibility/renames_v2.py
This file should be updated whenever endpoints are deprecated.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
renames = {
'tf.AUTO_REUSE':
'tf.compat.v1.AUTO_REUSE',
'tf.AttrValue':
'tf.compat.v1.AttrValue',
'tf.COMPILER_VERSION':
'tf.version.COMPILER_VERSION',
'tf.CXX11_ABI_FLAG':
'tf.sysconfig.CXX11_ABI_FLAG',
'tf.ConditionalAccumulator':
'tf.compat.v1.ConditionalAccumulator',
'tf.ConditionalAccumulatorBase':
'tf.compat.v1.ConditionalAccumulatorBase',
'tf.ConfigProto':
'tf.compat.v1.ConfigProto',
'tf.Dimension':
'tf.compat.v1.Dimension',
'tf.Event':
'tf.compat.v1.Event',
'tf.FIFOQueue':
'tf.queue.FIFOQueue',
'tf.FixedLenFeature':
'tf.io.FixedLenFeature',
'tf.FixedLenSequenceFeature':
'tf.io.FixedLenSequenceFeature',
'tf.FixedLengthRecordReader':
'tf.compat.v1.FixedLengthRecordReader',
'tf.GIT_VERSION':
'tf.version.GIT_VERSION',
'tf.GPUOptions':
'tf.compat.v1.GPUOptions',
'tf.GRAPH_DEF_VERSION':
'tf.version.GRAPH_DEF_VERSION',
'tf.GRAPH_DEF_VERSION_MIN_CONSUMER':
'tf.version.GRAPH_DEF_VERSION_MIN_CONSUMER',
'tf.GRAPH_DEF_VERSION_MIN_PRODUCER':
'tf.version.GRAPH_DEF_VERSION_MIN_PRODUCER',
'tf.GraphDef':
'tf.compat.v1.GraphDef',
'tf.GraphKeys':
'tf.compat.v1.GraphKeys',
'tf.GraphOptions':
'tf.compat.v1.GraphOptions',
'tf.HistogramProto':
'tf.compat.v1.HistogramProto',
'tf.IdentityReader':
'tf.compat.v1.IdentityReader',
'tf.InteractiveSession':
'tf.compat.v1.InteractiveSession',
'tf.LMDBReader':
'tf.compat.v1.LMDBReader',
'tf.LogMessage':
'tf.compat.v1.LogMessage',
'tf.MONOLITHIC_BUILD':
'tf.sysconfig.MONOLITHIC_BUILD',
'tf.MetaGraphDef':
'tf.compat.v1.MetaGraphDef',
'tf.NameAttrList':
'tf.compat.v1.NameAttrList',
'tf.NoGradient':
'tf.no_gradient',
'tf.NodeDef':
'tf.compat.v1.NodeDef',
'tf.NotDifferentiable':
'tf.no_gradient',
'tf.OpError':
'tf.errors.OpError',
'tf.OptimizerOptions':
'tf.compat.v1.OptimizerOptions',
'tf.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.Print':
'tf.compat.v1.Print',
'tf.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.QUANTIZED_DTYPES':
'tf.dtypes.QUANTIZED_DTYPES',
'tf.QueueBase':
'tf.queue.QueueBase',
'tf.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.ReaderBase':
'tf.compat.v1.ReaderBase',
'tf.RunMetadata':
'tf.compat.v1.RunMetadata',
'tf.RunOptions':
'tf.compat.v1.RunOptions',
'tf.Session':
'tf.compat.v1.Session',
'tf.SessionLog':
'tf.compat.v1.SessionLog',
'tf.SparseConditionalAccumulator':
'tf.compat.v1.SparseConditionalAccumulator',
'tf.SparseFeature':
'tf.io.SparseFeature',
'tf.SparseTensorValue':
'tf.compat.v1.SparseTensorValue',
'tf.Summary':
'tf.compat.v1.Summary',
'tf.SummaryMetadata':
'tf.compat.v1.SummaryMetadata',
'tf.TFRecordReader':
'tf.compat.v1.TFRecordReader',
'tf.TensorInfo':
'tf.compat.v1.TensorInfo',
'tf.TextLineReader':
'tf.compat.v1.TextLineReader',
'tf.VERSION':
'tf.version.VERSION',
'tf.VarLenFeature':
'tf.io.VarLenFeature',
'tf.VariableScope':
'tf.compat.v1.VariableScope',
'tf.WholeFileReader':
'tf.compat.v1.WholeFileReader',
'tf.accumulate_n':
'tf.math.accumulate_n',
'tf.add_check_numerics_ops':
'tf.compat.v1.add_check_numerics_ops',
'tf.add_to_collection':
'tf.compat.v1.add_to_collection',
'tf.add_to_collections':
'tf.compat.v1.add_to_collections',
'tf.all_variables':
'tf.compat.v1.all_variables',
'tf.angle':
'tf.math.angle',
'tf.app.run':
'tf.compat.v1.app.run',
'tf.assert_greater_equal':
'tf.compat.v1.assert_greater_equal',
'tf.assert_integer':
'tf.compat.v1.assert_integer',
'tf.assert_less_equal':
'tf.compat.v1.assert_less_equal',
'tf.assert_near':
'tf.compat.v1.assert_near',
'tf.assert_negative':
'tf.compat.v1.assert_negative',
'tf.assert_non_negative':
'tf.compat.v1.assert_non_negative',
'tf.assert_non_positive':
'tf.compat.v1.assert_non_positive',
'tf.assert_none_equal':
'tf.compat.v1.assert_none_equal',
'tf.assert_positive':
'tf.compat.v1.assert_positive',
'tf.assert_proper_iterable':
'tf.debugging.assert_proper_iterable',
'tf.assert_rank_at_least':
'tf.compat.v1.assert_rank_at_least',
'tf.assert_rank_in':
'tf.compat.v1.assert_rank_in',
'tf.assert_same_float_dtype':
'tf.debugging.assert_same_float_dtype',
'tf.assert_scalar':
'tf.compat.v1.assert_scalar',
'tf.assert_type':
'tf.compat.v1.assert_type',
'tf.assert_variables_initialized':
'tf.compat.v1.assert_variables_initialized',
'tf.assign':
'tf.compat.v1.assign',
'tf.assign_add':
'tf.compat.v1.assign_add',
'tf.assign_sub':
'tf.compat.v1.assign_sub',
'tf.batch_scatter_update':
'tf.compat.v1.batch_scatter_update',
'tf.betainc':
'tf.math.betainc',
'tf.ceil':
'tf.math.ceil',
'tf.check_numerics':
'tf.debugging.check_numerics',
'tf.cholesky':
'tf.linalg.cholesky',
'tf.cholesky_solve':
'tf.linalg.cholesky_solve',
'tf.clip_by_average_norm':
'tf.compat.v1.clip_by_average_norm',
'tf.colocate_with':
'tf.compat.v1.colocate_with',
'tf.conj':
'tf.math.conj',
'tf.container':
'tf.compat.v1.container',
'tf.control_flow_v2_enabled':
'tf.compat.v1.control_flow_v2_enabled',
'tf.convert_to_tensor_or_indexed_slices':
'tf.compat.v1.convert_to_tensor_or_indexed_slices',
'tf.convert_to_tensor_or_sparse_tensor':
'tf.compat.v1.convert_to_tensor_or_sparse_tensor',
'tf.count_up_to':
'tf.compat.v1.count_up_to',
'tf.create_partitioned_variables':
'tf.compat.v1.create_partitioned_variables',
'tf.cross':
'tf.linalg.cross',
'tf.cumprod':
'tf.math.cumprod',
'tf.data.get_output_classes':
'tf.compat.v1.data.get_output_classes',
'tf.data.get_output_shapes':
'tf.compat.v1.data.get_output_shapes',
'tf.data.get_output_types':
'tf.compat.v1.data.get_output_types',
'tf.data.make_initializable_iterator':
'tf.compat.v1.data.make_initializable_iterator',
'tf.data.make_one_shot_iterator':
'tf.compat.v1.data.make_one_shot_iterator',
'tf.debugging.is_finite':
'tf.math.is_finite',
'tf.debugging.is_inf':
'tf.math.is_inf',
'tf.debugging.is_nan':
'tf.math.is_nan',
'tf.debugging.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.debugging.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.decode_base64':
'tf.io.decode_base64',
'tf.decode_compressed':
'tf.io.decode_compressed',
'tf.decode_json_example':
'tf.io.decode_json_example',
'tf.delete_session_tensor':
'tf.compat.v1.delete_session_tensor',
'tf.depth_to_space':
'tf.compat.v1.depth_to_space',
'tf.dequantize':
'tf.quantization.dequantize',
'tf.deserialize_many_sparse':
'tf.io.deserialize_many_sparse',
'tf.diag':
'tf.linalg.tensor_diag',
'tf.diag_part':
'tf.linalg.tensor_diag_part',
'tf.digamma':
'tf.math.digamma',
'tf.dimension_at_index':
'tf.compat.dimension_at_index',
'tf.dimension_value':
'tf.compat.dimension_value',
'tf.disable_control_flow_v2':
'tf.compat.v1.disable_control_flow_v2',
'tf.disable_eager_execution':
'tf.compat.v1.disable_eager_execution',
'tf.disable_resource_variables':
'tf.compat.v1.disable_resource_variables',
'tf.disable_tensor_equality':
'tf.compat.v1.disable_tensor_equality',
'tf.disable_v2_behavior':
'tf.compat.v1.disable_v2_behavior',
'tf.disable_v2_tensorshape':
'tf.compat.v1.disable_v2_tensorshape',
'tf.distribute.get_loss_reduction':
'tf.compat.v1.distribute.get_loss_reduction',
'tf.distributions.Bernoulli':
'tf.compat.v1.distributions.Bernoulli',
'tf.distributions.Beta':
'tf.compat.v1.distributions.Beta',
'tf.distributions.Categorical':
'tf.compat.v1.distributions.Categorical',
'tf.distributions.Dirichlet':
'tf.compat.v1.distributions.Dirichlet',
'tf.distributions.DirichletMultinomial':
'tf.compat.v1.distributions.DirichletMultinomial',
'tf.distributions.Distribution':
'tf.compat.v1.distributions.Distribution',
'tf.distributions.Exponential':
'tf.compat.v1.distributions.Exponential',
'tf.distributions.FULLY_REPARAMETERIZED':
'tf.compat.v1.distributions.FULLY_REPARAMETERIZED',
'tf.distributions.Gamma':
'tf.compat.v1.distributions.Gamma',
'tf.distributions.Laplace':
'tf.compat.v1.distributions.Laplace',
'tf.distributions.Multinomial':
'tf.compat.v1.distributions.Multinomial',
'tf.distributions.NOT_REPARAMETERIZED':
'tf.compat.v1.distributions.NOT_REPARAMETERIZED',
'tf.distributions.Normal':
'tf.compat.v1.distributions.Normal',
'tf.distributions.RegisterKL':
'tf.compat.v1.distributions.RegisterKL',
'tf.distributions.ReparameterizationType':
'tf.compat.v1.distributions.ReparameterizationType',
'tf.distributions.StudentT':
'tf.compat.v1.distributions.StudentT',
'tf.distributions.Uniform':
'tf.compat.v1.distributions.Uniform',
'tf.distributions.kl_divergence':
'tf.compat.v1.distributions.kl_divergence',
'tf.div':
'tf.compat.v1.div',
'tf.div_no_nan':
'tf.math.divide_no_nan',
'tf.dtypes.as_string':
'tf.strings.as_string',
'tf.enable_control_flow_v2':
'tf.compat.v1.enable_control_flow_v2',
'tf.enable_eager_execution':
'tf.compat.v1.enable_eager_execution',
'tf.enable_resource_variables':
'tf.compat.v1.enable_resource_variables',
'tf.enable_tensor_equality':
'tf.compat.v1.enable_tensor_equality',
'tf.enable_v2_behavior':
'tf.compat.v1.enable_v2_behavior',
'tf.enable_v2_tensorshape':
'tf.compat.v1.enable_v2_tensorshape',
'tf.encode_base64':
'tf.io.encode_base64',
'tf.erf':
'tf.math.erf',
'tf.erfc':
'tf.math.erfc',
'tf.estimator.experimental.KMeans':
'tf.compat.v1.estimator.experimental.KMeans',
'tf.estimator.experimental.dnn_logit_fn_builder':
'tf.compat.v1.estimator.experimental.dnn_logit_fn_builder',
'tf.estimator.experimental.linear_logit_fn_builder':
'tf.compat.v1.estimator.experimental.linear_logit_fn_builder',
'tf.estimator.inputs.numpy_input_fn':
'tf.compat.v1.estimator.inputs.numpy_input_fn',
'tf.estimator.inputs.pandas_input_fn':
'tf.compat.v1.estimator.inputs.pandas_input_fn',
'tf.estimator.tpu.InputPipelineConfig':
'tf.compat.v1.estimator.tpu.InputPipelineConfig',
'tf.estimator.tpu.RunConfig':
'tf.compat.v1.estimator.tpu.RunConfig',
'tf.estimator.tpu.TPUConfig':
'tf.compat.v1.estimator.tpu.TPUConfig',
'tf.estimator.tpu.TPUEstimator':
'tf.compat.v1.estimator.tpu.TPUEstimator',
'tf.estimator.tpu.TPUEstimatorSpec':
'tf.compat.v1.estimator.tpu.TPUEstimatorSpec',
'tf.estimator.tpu.experimental.EmbeddingConfigSpec':
'tf.compat.v1.estimator.tpu.experimental.EmbeddingConfigSpec',
'tf.executing_eagerly_outside_functions':
'tf.compat.v1.executing_eagerly_outside_functions',
'tf.experimental.output_all_intermediates':
'tf.compat.v1.experimental.output_all_intermediates',
'tf.expm1':
'tf.math.expm1',
'tf.fake_quant_with_min_max_args':
'tf.quantization.fake_quant_with_min_max_args',
'tf.fake_quant_with_min_max_args_gradient':
'tf.quantization.fake_quant_with_min_max_args_gradient',
'tf.fake_quant_with_min_max_vars':
'tf.quantization.fake_quant_with_min_max_vars',
'tf.fake_quant_with_min_max_vars_gradient':
'tf.quantization.fake_quant_with_min_max_vars_gradient',
'tf.fake_quant_with_min_max_vars_per_channel':
'tf.quantization.fake_quant_with_min_max_vars_per_channel',
'tf.fake_quant_with_min_max_vars_per_channel_gradient':
'tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient',
'tf.feature_column.input_layer':
'tf.compat.v1.feature_column.input_layer',
'tf.feature_column.linear_model':
'tf.compat.v1.feature_column.linear_model',
'tf.feature_column.shared_embedding_columns':
'tf.compat.v1.feature_column.shared_embedding_columns',
'tf.fft':
'tf.signal.fft',
'tf.fft2d':
'tf.signal.fft2d',
'tf.fft3d':
'tf.signal.fft3d',
'tf.fixed_size_partitioner':
'tf.compat.v1.fixed_size_partitioner',
'tf.floordiv':
'tf.math.floordiv',
'tf.floormod':
'tf.math.floormod',
'tf.get_collection':
'tf.compat.v1.get_collection',
'tf.get_collection_ref':
'tf.compat.v1.get_collection_ref',
'tf.get_default_graph':
'tf.compat.v1.get_default_graph',
'tf.get_default_session':
'tf.compat.v1.get_default_session',
'tf.get_local_variable':
'tf.compat.v1.get_local_variable',
'tf.get_seed':
'tf.compat.v1.get_seed',
'tf.get_session_handle':
'tf.compat.v1.get_session_handle',
'tf.get_session_tensor':
'tf.compat.v1.get_session_tensor',
'tf.get_variable':
'tf.compat.v1.get_variable',
'tf.get_variable_scope':
'tf.compat.v1.get_variable_scope',
'tf.gfile.FastGFile':
'tf.compat.v1.gfile.FastGFile',
'tf.global_norm':
'tf.linalg.global_norm',
'tf.global_variables':
'tf.compat.v1.global_variables',
'tf.global_variables_initializer':
'tf.compat.v1.global_variables_initializer',
'tf.graph_util.convert_variables_to_constants':
'tf.compat.v1.graph_util.convert_variables_to_constants',
'tf.graph_util.extract_sub_graph':
'tf.compat.v1.graph_util.extract_sub_graph',
'tf.graph_util.must_run_on_cpu':
'tf.compat.v1.graph_util.must_run_on_cpu',
'tf.graph_util.remove_training_nodes':
'tf.compat.v1.graph_util.remove_training_nodes',
'tf.graph_util.tensor_shape_from_node_def_name':
'tf.compat.v1.graph_util.tensor_shape_from_node_def_name',
'tf.ifft':
'tf.signal.ifft',
'tf.ifft2d':
'tf.signal.ifft2d',
'tf.ifft3d':
'tf.signal.ifft3d',
'tf.igamma':
'tf.math.igamma',
'tf.igammac':
'tf.math.igammac',
'tf.imag':
'tf.math.imag',
'tf.image.resize_area':
'tf.compat.v1.image.resize_area',
'tf.image.resize_bicubic':
'tf.compat.v1.image.resize_bicubic',
'tf.image.resize_bilinear':
'tf.compat.v1.image.resize_bilinear',
'tf.image.resize_image_with_crop_or_pad':
'tf.image.resize_with_crop_or_pad',
'tf.image.resize_image_with_pad':
'tf.compat.v1.image.resize_image_with_pad',
'tf.image.resize_nearest_neighbor':
'tf.compat.v1.image.resize_nearest_neighbor',
'tf.image.transpose_image':
'tf.image.transpose',
'tf.initialize_all_tables':
'tf.compat.v1.initialize_all_tables',
'tf.initialize_all_variables':
'tf.compat.v1.initialize_all_variables',
'tf.initialize_local_variables':
'tf.compat.v1.initialize_local_variables',
'tf.initialize_variables':
'tf.compat.v1.initialize_variables',
'tf.initializers.global_variables':
'tf.compat.v1.initializers.global_variables',
'tf.initializers.local_variables':
'tf.compat.v1.initializers.local_variables',
'tf.initializers.tables_initializer':
'tf.compat.v1.initializers.tables_initializer',
'tf.initializers.uniform_unit_scaling':
'tf.compat.v1.initializers.uniform_unit_scaling',
'tf.initializers.variables':
'tf.compat.v1.initializers.variables',
'tf.invert_permutation':
'tf.math.invert_permutation',
'tf.io.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.io.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.io.QueueBase':
'tf.queue.QueueBase',
'tf.io.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.io.TFRecordCompressionType':
'tf.compat.v1.io.TFRecordCompressionType',
'tf.io.tf_record_iterator':
'tf.compat.v1.io.tf_record_iterator',
'tf.is_finite':
'tf.math.is_finite',
'tf.is_inf':
'tf.math.is_inf',
'tf.is_nan':
'tf.math.is_nan',
'tf.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.is_numeric_tensor':
'tf.debugging.is_numeric_tensor',
'tf.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.is_variable_initialized':
'tf.compat.v1.is_variable_initialized',
'tf.keras.backend.get_session':
'tf.compat.v1.keras.backend.get_session',
'tf.keras.backend.set_session':
'tf.compat.v1.keras.backend.set_session',
'tf.keras.experimental.export_saved_model':
'tf.compat.v1.keras.experimental.export_saved_model',
'tf.keras.experimental.load_from_saved_model':
'tf.compat.v1.keras.experimental.load_from_saved_model',
'tf.keras.layers.CuDNNGRU':
'tf.compat.v1.keras.layers.CuDNNGRU',
'tf.keras.layers.CuDNNLSTM':
'tf.compat.v1.keras.layers.CuDNNLSTM',
'tf.keras.losses.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.losses.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.layers.AveragePooling1D':
'tf.compat.v1.layers.AveragePooling1D',
'tf.layers.AveragePooling2D':
'tf.compat.v1.layers.AveragePooling2D',
'tf.layers.AveragePooling3D':
'tf.compat.v1.layers.AveragePooling3D',
'tf.layers.BatchNormalization':
'tf.compat.v1.layers.BatchNormalization',
'tf.layers.Conv1D':
'tf.compat.v1.layers.Conv1D',
'tf.layers.Conv2D':
'tf.compat.v1.layers.Conv2D',
'tf.layers.Conv2DTranspose':
'tf.compat.v1.layers.Conv2DTranspose',
'tf.layers.Conv3D':
'tf.compat.v1.layers.Conv3D',
'tf.layers.Conv3DTranspose':
'tf.compat.v1.layers.Conv3DTranspose',
'tf.layers.Dense':
'tf.compat.v1.layers.Dense',
'tf.layers.Dropout':
'tf.compat.v1.layers.Dropout',
'tf.layers.Flatten':
'tf.compat.v1.layers.Flatten',
'tf.layers.InputSpec':
'tf.keras.layers.InputSpec',
'tf.layers.Layer':
'tf.compat.v1.layers.Layer',
'tf.layers.MaxPooling1D':
'tf.compat.v1.layers.MaxPooling1D',
'tf.layers.MaxPooling2D':
'tf.compat.v1.layers.MaxPooling2D',
'tf.layers.MaxPooling3D':
'tf.compat.v1.layers.MaxPooling3D',
'tf.layers.SeparableConv1D':
'tf.compat.v1.layers.SeparableConv1D',
'tf.layers.SeparableConv2D':
'tf.compat.v1.layers.SeparableConv2D',
'tf.layers.average_pooling1d':
'tf.compat.v1.layers.average_pooling1d',
'tf.layers.average_pooling2d':
'tf.compat.v1.layers.average_pooling2d',
'tf.layers.average_pooling3d':
'tf.compat.v1.layers.average_pooling3d',
'tf.layers.batch_normalization':
'tf.compat.v1.layers.batch_normalization',
'tf.layers.conv1d':
'tf.compat.v1.layers.conv1d',
'tf.layers.conv2d':
'tf.compat.v1.layers.conv2d',
'tf.layers.conv2d_transpose':
'tf.compat.v1.layers.conv2d_transpose',
'tf.layers.conv3d':
'tf.compat.v1.layers.conv3d',
'tf.layers.conv3d_transpose':
'tf.compat.v1.layers.conv3d_transpose',
'tf.layers.dense':
'tf.compat.v1.layers.dense',
'tf.layers.dropout':
'tf.compat.v1.layers.dropout',
'tf.layers.experimental.keras_style_scope':
'tf.compat.v1.layers.experimental.keras_style_scope',
'tf.layers.experimental.set_keras_style':
'tf.compat.v1.layers.experimental.set_keras_style',
'tf.layers.flatten':
'tf.compat.v1.layers.flatten',
'tf.layers.max_pooling1d':
'tf.compat.v1.layers.max_pooling1d',
'tf.layers.max_pooling2d':
'tf.compat.v1.layers.max_pooling2d',
'tf.layers.max_pooling3d':
'tf.compat.v1.layers.max_pooling3d',
'tf.layers.separable_conv1d':
'tf.compat.v1.layers.separable_conv1d',
'tf.layers.separable_conv2d':
'tf.compat.v1.layers.separable_conv2d',
'tf.lbeta':
'tf.math.lbeta',
'tf.lgamma':
'tf.math.lgamma',
'tf.lin_space':
'tf.linspace',
'tf.linalg.transpose':
'tf.linalg.matrix_transpose',
'tf.lite.OpHint':
'tf.compat.v1.lite.OpHint',
'tf.lite.TocoConverter':
'tf.compat.v1.lite.TocoConverter',
'tf.lite.constants.FLOAT16':
'tf.compat.v1.lite.constants.FLOAT16',
'tf.lite.constants.GRAPHVIZ_DOT':
'tf.compat.v1.lite.constants.GRAPHVIZ_DOT',
'tf.lite.constants.INT8':
'tf.compat.v1.lite.constants.INT8',
'tf.lite.constants.TFLITE':
'tf.compat.v1.lite.constants.TFLITE',
'tf.lite.experimental.convert_op_hints_to_stubs':
'tf.compat.v1.lite.experimental.convert_op_hints_to_stubs',
'tf.lite.experimental.get_potentially_supported_ops':
'tf.compat.v1.lite.experimental.get_potentially_supported_ops',
'tf.lite.experimental.nn.TFLiteLSTMCell':
'tf.compat.v1.lite.experimental.nn.TFLiteLSTMCell',
'tf.lite.experimental.nn.TfLiteRNNCell':
'tf.compat.v1.lite.experimental.nn.TfLiteRNNCell',
'tf.lite.experimental.nn.dynamic_rnn':
'tf.compat.v1.lite.experimental.nn.dynamic_rnn',
'tf.lite.toco_convert':
'tf.compat.v1.lite.toco_convert',
'tf.local_variables':
'tf.compat.v1.local_variables',
'tf.local_variables_initializer':
'tf.compat.v1.local_variables_initializer',
'tf.log':
'tf.math.log',
'tf.log1p':
'tf.math.log1p',
'tf.log_sigmoid':
'tf.math.log_sigmoid',
'tf.logging.DEBUG':
'tf.compat.v1.logging.DEBUG',
'tf.logging.ERROR':
'tf.compat.v1.logging.ERROR',
'tf.logging.FATAL':
'tf.compat.v1.logging.FATAL',
'tf.logging.INFO':
'tf.compat.v1.logging.INFO',
'tf.logging.TaskLevelStatusMessage':
'tf.compat.v1.logging.TaskLevelStatusMessage',
'tf.logging.WARN':
'tf.compat.v1.logging.WARN',
'tf.logging.debug':
'tf.compat.v1.logging.debug',
'tf.logging.error':
'tf.compat.v1.logging.error',
'tf.logging.fatal':
'tf.compat.v1.logging.fatal',
'tf.logging.flush':
'tf.compat.v1.logging.flush',
'tf.logging.get_verbosity':
'tf.compat.v1.logging.get_verbosity',
'tf.logging.info':
'tf.compat.v1.logging.info',
'tf.logging.log':
'tf.compat.v1.logging.log',
'tf.logging.log_every_n':
'tf.compat.v1.logging.log_every_n',
'tf.logging.log_first_n':
'tf.compat.v1.logging.log_first_n',
'tf.logging.log_if':
'tf.compat.v1.logging.log_if',
'tf.logging.set_verbosity':
'tf.compat.v1.logging.set_verbosity',
'tf.logging.vlog':
'tf.compat.v1.logging.vlog',
'tf.logging.warn':
'tf.compat.v1.logging.warn',
'tf.logging.warning':
'tf.compat.v1.logging.warning',
'tf.logical_xor':
'tf.math.logical_xor',
'tf.losses.Reduction':
'tf.compat.v1.losses.Reduction',
'tf.losses.absolute_difference':
'tf.compat.v1.losses.absolute_difference',
'tf.losses.add_loss':
'tf.compat.v1.losses.add_loss',
'tf.losses.compute_weighted_loss':
'tf.compat.v1.losses.compute_weighted_loss',
'tf.losses.cosine_distance':
'tf.compat.v1.losses.cosine_distance',
'tf.losses.get_losses':
'tf.compat.v1.losses.get_losses',
'tf.losses.get_regularization_loss':
'tf.compat.v1.losses.get_regularization_loss',
'tf.losses.get_regularization_losses':
'tf.compat.v1.losses.get_regularization_losses',
'tf.losses.get_total_loss':
'tf.compat.v1.losses.get_total_loss',
'tf.losses.hinge_loss':
'tf.compat.v1.losses.hinge_loss',
'tf.losses.huber_loss':
'tf.compat.v1.losses.huber_loss',
'tf.losses.log_loss':
'tf.compat.v1.losses.log_loss',
'tf.losses.mean_pairwise_squared_error':
'tf.compat.v1.losses.mean_pairwise_squared_error',
'tf.losses.mean_squared_error':
'tf.compat.v1.losses.mean_squared_error',
'tf.losses.sigmoid_cross_entropy':
'tf.compat.v1.losses.sigmoid_cross_entropy',
'tf.losses.softmax_cross_entropy':
'tf.compat.v1.losses.softmax_cross_entropy',
'tf.losses.sparse_softmax_cross_entropy':
'tf.compat.v1.losses.sparse_softmax_cross_entropy',
'tf.make_template':
'tf.compat.v1.make_template',
'tf.manip.gather_nd':
'tf.compat.v1.manip.gather_nd',
'tf.manip.reshape':
'tf.reshape',
'tf.manip.reverse':
'tf.reverse',
'tf.manip.roll':
'tf.roll',
'tf.manip.scatter_nd':
'tf.scatter_nd',
'tf.manip.space_to_batch_nd':
'tf.space_to_batch_nd',
'tf.manip.tile':
'tf.tile',
'tf.matching_files':
'tf.io.matching_files',
'tf.matrix_band_part':
'tf.linalg.band_part',
'tf.matrix_determinant':
'tf.linalg.det',
'tf.matrix_diag':
'tf.linalg.diag',
'tf.matrix_diag_part':
'tf.linalg.diag_part',
'tf.matrix_inverse':
'tf.linalg.inv',
'tf.matrix_set_diag':
'tf.linalg.set_diag',
'tf.matrix_solve':
'tf.linalg.solve',
'tf.matrix_solve_ls':
'tf.linalg.lstsq',
'tf.matrix_transpose':
'tf.linalg.matrix_transpose',
'tf.matrix_triangular_solve':
'tf.linalg.triangular_solve',
'tf.metrics.accuracy':
'tf.compat.v1.metrics.accuracy',
'tf.metrics.auc':
'tf.compat.v1.metrics.auc',
'tf.metrics.average_precision_at_k':
'tf.compat.v1.metrics.average_precision_at_k',
'tf.metrics.false_negatives':
'tf.compat.v1.metrics.false_negatives',
'tf.metrics.false_negatives_at_thresholds':
'tf.compat.v1.metrics.false_negatives_at_thresholds',
'tf.metrics.false_positives':
'tf.compat.v1.metrics.false_positives',
'tf.metrics.false_positives_at_thresholds':
'tf.compat.v1.metrics.false_positives_at_thresholds',
'tf.metrics.mean':
'tf.compat.v1.metrics.mean',
'tf.metrics.mean_absolute_error':
'tf.compat.v1.metrics.mean_absolute_error',
'tf.metrics.mean_cosine_distance':
'tf.compat.v1.metrics.mean_cosine_distance',
'tf.metrics.mean_iou':
'tf.compat.v1.metrics.mean_iou',
'tf.metrics.mean_per_class_accuracy':
'tf.compat.v1.metrics.mean_per_class_accuracy',
'tf.metrics.mean_relative_error':
'tf.compat.v1.metrics.mean_relative_error',
'tf.metrics.mean_squared_error':
'tf.compat.v1.metrics.mean_squared_error',
'tf.metrics.mean_tensor':
'tf.compat.v1.metrics.mean_tensor',
'tf.metrics.percentage_below':
'tf.compat.v1.metrics.percentage_below',
'tf.metrics.precision':
'tf.compat.v1.metrics.precision',
'tf.metrics.precision_at_k':
'tf.compat.v1.metrics.precision_at_k',
'tf.metrics.precision_at_thresholds':
'tf.compat.v1.metrics.precision_at_thresholds',
'tf.metrics.precision_at_top_k':
'tf.compat.v1.metrics.precision_at_top_k',
'tf.metrics.recall':
'tf.compat.v1.metrics.recall',
'tf.metrics.recall_at_k':
'tf.compat.v1.metrics.recall_at_k',
'tf.metrics.recall_at_thresholds':
'tf.compat.v1.metrics.recall_at_thresholds',
'tf.metrics.recall_at_top_k':
'tf.compat.v1.metrics.recall_at_top_k',
'tf.metrics.root_mean_squared_error':
'tf.compat.v1.metrics.root_mean_squared_error',
'tf.metrics.sensitivity_at_specificity':
'tf.compat.v1.metrics.sensitivity_at_specificity',
'tf.metrics.sparse_average_precision_at_k':
'tf.compat.v1.metrics.sparse_average_precision_at_k',
'tf.metrics.sparse_precision_at_k':
'tf.compat.v1.metrics.sparse_precision_at_k',
'tf.metrics.specificity_at_sensitivity':
'tf.compat.v1.metrics.specificity_at_sensitivity',
'tf.metrics.true_negatives':
'tf.compat.v1.metrics.true_negatives',
'tf.metrics.true_negatives_at_thresholds':
'tf.compat.v1.metrics.true_negatives_at_thresholds',
'tf.metrics.true_positives':
'tf.compat.v1.metrics.true_positives',
'tf.metrics.true_positives_at_thresholds':
'tf.compat.v1.metrics.true_positives_at_thresholds',
'tf.min_max_variable_partitioner':
'tf.compat.v1.min_max_variable_partitioner',
'tf.mod':
'tf.math.floormod',
'tf.model_variables':
'tf.compat.v1.model_variables',
'tf.moving_average_variables':
'tf.compat.v1.moving_average_variables',
'tf.nn.avg_pool_v2':
'tf.nn.avg_pool',
'tf.nn.bidirectional_dynamic_rnn':
'tf.compat.v1.nn.bidirectional_dynamic_rnn',
'tf.nn.conv2d_backprop_filter':
'tf.compat.v1.nn.conv2d_backprop_filter',
'tf.nn.conv3d_backprop_filter':
'tf.compat.v1.nn.conv3d_backprop_filter',
'tf.nn.conv3d_backprop_filter_v2':
'tf.compat.v1.nn.conv3d_backprop_filter_v2',
'tf.nn.ctc_beam_search_decoder_v2':
'tf.nn.ctc_beam_search_decoder',
'tf.nn.ctc_loss_v2':
'tf.compat.v1.nn.ctc_loss_v2',
'tf.nn.depthwise_conv2d_native':
'tf.compat.v1.nn.depthwise_conv2d_native',
'tf.nn.depthwise_conv2d_native_backprop_filter':
'tf.nn.depthwise_conv2d_backprop_filter',
'tf.nn.depthwise_conv2d_native_backprop_input':
'tf.nn.depthwise_conv2d_backprop_input',
'tf.nn.dynamic_rnn':
'tf.compat.v1.nn.dynamic_rnn',
'tf.nn.log_uniform_candidate_sampler':
'tf.random.log_uniform_candidate_sampler',
'tf.nn.max_pool_v2':
'tf.nn.max_pool',
'tf.nn.quantized_avg_pool':
'tf.compat.v1.nn.quantized_avg_pool',
'tf.nn.quantized_conv2d':
'tf.compat.v1.nn.quantized_conv2d',
'tf.nn.quantized_max_pool':
'tf.compat.v1.nn.quantized_max_pool',
'tf.nn.quantized_relu_x':
'tf.compat.v1.nn.quantized_relu_x',
'tf.nn.raw_rnn':
'tf.compat.v1.nn.raw_rnn',
'tf.nn.relu_layer':
'tf.compat.v1.nn.relu_layer',
'tf.nn.rnn_cell.BasicLSTMCell':
'tf.compat.v1.nn.rnn_cell.BasicLSTMCell',
'tf.nn.rnn_cell.BasicRNNCell':
'tf.compat.v1.nn.rnn_cell.BasicRNNCell',
'tf.nn.rnn_cell.DeviceWrapper':
'tf.compat.v1.nn.rnn_cell.DeviceWrapper',
'tf.nn.rnn_cell.DropoutWrapper':
'tf.compat.v1.nn.rnn_cell.DropoutWrapper',
'tf.nn.rnn_cell.GRUCell':
'tf.compat.v1.nn.rnn_cell.GRUCell',
'tf.nn.rnn_cell.LSTMCell':
'tf.compat.v1.nn.rnn_cell.LSTMCell',
'tf.nn.rnn_cell.LSTMStateTuple':
'tf.compat.v1.nn.rnn_cell.LSTMStateTuple',
'tf.nn.rnn_cell.MultiRNNCell':
'tf.compat.v1.nn.rnn_cell.MultiRNNCell',
'tf.nn.rnn_cell.RNNCell':
'tf.compat.v1.nn.rnn_cell.RNNCell',
'tf.nn.rnn_cell.ResidualWrapper':
'tf.compat.v1.nn.rnn_cell.ResidualWrapper',
'tf.nn.static_bidirectional_rnn':
'tf.compat.v1.nn.static_bidirectional_rnn',
'tf.nn.static_rnn':
'tf.compat.v1.nn.static_rnn',
'tf.nn.static_state_saving_rnn':
'tf.compat.v1.nn.static_state_saving_rnn',
'tf.nn.uniform_candidate_sampler':
'tf.random.uniform_candidate_sampler',
'tf.nn.xw_plus_b':
'tf.compat.v1.nn.xw_plus_b',
'tf.no_regularizer':
'tf.compat.v1.no_regularizer',
'tf.op_scope':
'tf.compat.v1.op_scope',
'tf.parse_single_sequence_example':
'tf.io.parse_single_sequence_example',
'tf.parse_tensor':
'tf.io.parse_tensor',
'tf.placeholder':
'tf.compat.v1.placeholder',
'tf.placeholder_with_default':
'tf.compat.v1.placeholder_with_default',
'tf.polygamma':
'tf.math.polygamma',
'tf.profiler.AdviceProto':
'tf.compat.v1.profiler.AdviceProto',
'tf.profiler.GraphNodeProto':
'tf.compat.v1.profiler.GraphNodeProto',
'tf.profiler.MultiGraphNodeProto':
'tf.compat.v1.profiler.MultiGraphNodeProto',
'tf.profiler.OpLogProto':
'tf.compat.v1.profiler.OpLogProto',
'tf.profiler.ProfileOptionBuilder':
'tf.compat.v1.profiler.ProfileOptionBuilder',
'tf.profiler.Profiler':
'tf.compat.v1.profiler.Profiler',
'tf.profiler.advise':
'tf.compat.v1.profiler.advise',
'tf.profiler.profile':
'tf.compat.v1.profiler.profile',
'tf.profiler.write_op_log':
'tf.compat.v1.profiler.write_op_log',
'tf.py_func':
'tf.compat.v1.py_func',
'tf.python_io.TFRecordCompressionType':
'tf.compat.v1.python_io.TFRecordCompressionType',
'tf.python_io.TFRecordOptions':
'tf.io.TFRecordOptions',
'tf.python_io.TFRecordWriter':
'tf.io.TFRecordWriter',
'tf.python_io.tf_record_iterator':
'tf.compat.v1.python_io.tf_record_iterator',
'tf.qr':
'tf.linalg.qr',
'tf.quantize':
'tf.quantization.quantize',
'tf.quantized_concat':
'tf.quantization.quantized_concat',
'tf.ragged.RaggedTensorValue':
'tf.compat.v1.ragged.RaggedTensorValue',
'tf.ragged.constant_value':
'tf.compat.v1.ragged.constant_value',
'tf.ragged.placeholder':
'tf.compat.v1.ragged.placeholder',
'tf.random.get_seed':
'tf.compat.v1.random.get_seed',
'tf.random.set_random_seed':
'tf.compat.v1.random.set_random_seed',
'tf.random_crop':
'tf.image.random_crop',
'tf.random_gamma':
'tf.random.gamma',
'tf.random_normal':
'tf.random.normal',
'tf.random_shuffle':
'tf.random.shuffle',
'tf.random_uniform':
'tf.random.uniform',
'tf.read_file':
'tf.io.read_file',
'tf.real':
'tf.math.real',
'tf.reciprocal':
'tf.math.reciprocal',
'tf.regex_replace':
'tf.strings.regex_replace',
'tf.report_uninitialized_variables':
'tf.compat.v1.report_uninitialized_variables',
'tf.reset_default_graph':
'tf.compat.v1.reset_default_graph',
'tf.resource_loader.get_data_files_path':
'tf.compat.v1.resource_loader.get_data_files_path',
'tf.resource_loader.get_path_to_datafile':
'tf.compat.v1.resource_loader.get_path_to_datafile',
'tf.resource_loader.get_root_dir_with_all_resources':
'tf.compat.v1.resource_loader.get_root_dir_with_all_resources',
'tf.resource_loader.load_resource':
'tf.compat.v1.resource_loader.load_resource',
'tf.resource_loader.readahead_file_path':
'tf.compat.v1.resource_loader.readahead_file_path',
'tf.resource_variables_enabled':
'tf.compat.v1.resource_variables_enabled',
'tf.reverse_v2':
'tf.reverse',
'tf.rint':
'tf.math.rint',
'tf.rsqrt':
'tf.math.rsqrt',
'tf.saved_model.Builder':
'tf.compat.v1.saved_model.Builder',
'tf.saved_model.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.LEGACY_INIT_OP_KEY',
'tf.saved_model.MAIN_OP_KEY':
'tf.compat.v1.saved_model.MAIN_OP_KEY',
'tf.saved_model.build_signature_def':
'tf.compat.v1.saved_model.build_signature_def',
'tf.saved_model.build_tensor_info':
'tf.compat.v1.saved_model.build_tensor_info',
'tf.saved_model.builder.SavedModelBuilder':
'tf.compat.v1.saved_model.builder.SavedModelBuilder',
'tf.saved_model.classification_signature_def':
'tf.compat.v1.saved_model.classification_signature_def',
'tf.saved_model.constants.ASSETS_DIRECTORY':
'tf.saved_model.ASSETS_DIRECTORY',
'tf.saved_model.constants.ASSETS_KEY':
'tf.saved_model.ASSETS_KEY',
'tf.saved_model.constants.DEBUG_DIRECTORY':
'tf.saved_model.DEBUG_DIRECTORY',
'tf.saved_model.constants.DEBUG_INFO_FILENAME_PB':
'tf.saved_model.DEBUG_INFO_FILENAME_PB',
'tf.saved_model.constants.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.constants.LEGACY_INIT_OP_KEY',
'tf.saved_model.constants.MAIN_OP_KEY':
'tf.compat.v1.saved_model.constants.MAIN_OP_KEY',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PB':
'tf.saved_model.SAVED_MODEL_FILENAME_PB',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PBTXT':
'tf.saved_model.SAVED_MODEL_FILENAME_PBTXT',
'tf.saved_model.constants.SAVED_MODEL_SCHEMA_VERSION':
'tf.saved_model.SAVED_MODEL_SCHEMA_VERSION',
'tf.saved_model.constants.VARIABLES_DIRECTORY':
'tf.saved_model.VARIABLES_DIRECTORY',
'tf.saved_model.constants.VARIABLES_FILENAME':
'tf.saved_model.VARIABLES_FILENAME',
'tf.saved_model.experimental.save':
'tf.saved_model.save',
'tf.saved_model.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.get_tensor_from_tensor_info',
'tf.saved_model.is_valid_signature':
'tf.compat.v1.saved_model.is_valid_signature',
'tf.saved_model.loader.load':
'tf.compat.v1.saved_model.loader.load',
'tf.saved_model.loader.maybe_saved_model_directory':
'tf.compat.v1.saved_model.loader.maybe_saved_model_directory',
'tf.saved_model.main_op.main_op':
'tf.compat.v1.saved_model.main_op.main_op',
'tf.saved_model.main_op.main_op_with_restore':
'tf.compat.v1.saved_model.main_op.main_op_with_restore',
'tf.saved_model.main_op_with_restore':
'tf.compat.v1.saved_model.main_op_with_restore',
'tf.saved_model.maybe_saved_model_directory':
'tf.compat.v1.saved_model.maybe_saved_model_directory',
'tf.saved_model.predict_signature_def':
'tf.compat.v1.saved_model.predict_signature_def',
'tf.saved_model.regression_signature_def':
'tf.compat.v1.saved_model.regression_signature_def',
'tf.saved_model.signature_constants.CLASSIFY_INPUTS':
'tf.saved_model.CLASSIFY_INPUTS',
'tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME':
'tf.saved_model.CLASSIFY_METHOD_NAME',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES':
'tf.saved_model.CLASSIFY_OUTPUT_CLASSES',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES':
'tf.saved_model.CLASSIFY_OUTPUT_SCORES',
'tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY':
'tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY',
'tf.saved_model.signature_constants.PREDICT_INPUTS':
'tf.saved_model.PREDICT_INPUTS',
'tf.saved_model.signature_constants.PREDICT_METHOD_NAME':
'tf.saved_model.PREDICT_METHOD_NAME',
'tf.saved_model.signature_constants.PREDICT_OUTPUTS':
'tf.saved_model.PREDICT_OUTPUTS',
'tf.saved_model.signature_constants.REGRESS_INPUTS':
'tf.saved_model.REGRESS_INPUTS',
'tf.saved_model.signature_constants.REGRESS_METHOD_NAME':
'tf.saved_model.REGRESS_METHOD_NAME',
'tf.saved_model.signature_constants.REGRESS_OUTPUTS':
'tf.saved_model.REGRESS_OUTPUTS',
'tf.saved_model.signature_def_utils.MethodNameUpdater':
'tf.compat.v1.saved_model.signature_def_utils.MethodNameUpdater',
'tf.saved_model.signature_def_utils.build_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.build_signature_def',
'tf.saved_model.signature_def_utils.classification_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.classification_signature_def',
'tf.saved_model.signature_def_utils.is_valid_signature':
'tf.compat.v1.saved_model.signature_def_utils.is_valid_signature',
'tf.saved_model.signature_def_utils.predict_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.predict_signature_def',
'tf.saved_model.signature_def_utils.regression_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.regression_signature_def',
'tf.saved_model.simple_save':
'tf.compat.v1.saved_model.simple_save',
'tf.saved_model.tag_constants.GPU':
'tf.saved_model.GPU',
'tf.saved_model.tag_constants.SERVING':
'tf.saved_model.SERVING',
'tf.saved_model.tag_constants.TPU':
'tf.saved_model.TPU',
'tf.saved_model.tag_constants.TRAINING':
'tf.saved_model.TRAINING',
'tf.saved_model.utils.build_tensor_info':
'tf.compat.v1.saved_model.utils.build_tensor_info',
'tf.saved_model.utils.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info',
'tf.scatter_add':
'tf.compat.v1.scatter_add',
'tf.scatter_div':
'tf.compat.v1.scatter_div',
'tf.scatter_max':
'tf.compat.v1.scatter_max',
'tf.scatter_min':
'tf.compat.v1.scatter_min',
'tf.scatter_mul':
'tf.compat.v1.scatter_mul',
'tf.scatter_nd_add':
'tf.compat.v1.scatter_nd_add',
'tf.scatter_nd_sub':
'tf.compat.v1.scatter_nd_sub',
'tf.scatter_nd_update':
'tf.compat.v1.scatter_nd_update',
'tf.scatter_sub':
'tf.compat.v1.scatter_sub',
'tf.scatter_update':
'tf.compat.v1.scatter_update',
'tf.segment_max':
'tf.math.segment_max',
'tf.segment_mean':
'tf.math.segment_mean',
'tf.segment_min':
'tf.math.segment_min',
'tf.segment_prod':
'tf.math.segment_prod',
'tf.segment_sum':
'tf.math.segment_sum',
'tf.self_adjoint_eig':
'tf.linalg.eigh',
'tf.self_adjoint_eigvals':
'tf.linalg.eigvalsh',
'tf.serialize_many_sparse':
'tf.compat.v1.serialize_many_sparse',
'tf.serialize_sparse':
'tf.compat.v1.serialize_sparse',
'tf.serialize_tensor':
'tf.io.serialize_tensor',
'tf.set_random_seed':
'tf.compat.v1.set_random_seed',
'tf.setdiff1d':
'tf.compat.v1.setdiff1d',
'tf.sets.set_difference':
'tf.sets.difference',
'tf.sets.set_intersection':
'tf.sets.intersection',
'tf.sets.set_size':
'tf.sets.size',
'tf.sets.set_union':
'tf.sets.union',
'tf.space_to_depth':
'tf.compat.v1.space_to_depth',
'tf.sparse.SparseConditionalAccumulator':
'tf.compat.v1.sparse.SparseConditionalAccumulator',
'tf.sparse.matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse.merge':
'tf.compat.v1.sparse.merge',
'tf.sparse.placeholder':
'tf.compat.v1.sparse.placeholder',
'tf.sparse.reduce_max_sparse':
'tf.compat.v1.sparse.reduce_max_sparse',
'tf.sparse.reduce_sum_sparse':
'tf.compat.v1.sparse.reduce_sum_sparse',
'tf.sparse_fill_empty_rows':
'tf.sparse.fill_empty_rows',
'tf.sparse_mask':
'tf.sparse.mask',
'tf.sparse_maximum':
'tf.sparse.maximum',
'tf.sparse_merge':
'tf.compat.v1.sparse_merge',
'tf.sparse_minimum':
'tf.sparse.minimum',
'tf.sparse_placeholder':
'tf.compat.v1.sparse_placeholder',
'tf.sparse_reduce_max_sparse':
'tf.compat.v1.sparse_reduce_max_sparse',
'tf.sparse_reduce_sum_sparse':
'tf.compat.v1.sparse_reduce_sum_sparse',
'tf.sparse_reorder':
'tf.sparse.reorder',
'tf.sparse_reset_shape':
'tf.sparse.reset_shape',
'tf.sparse_reshape':
'tf.sparse.reshape',
'tf.sparse_retain':
'tf.sparse.retain',
'tf.sparse_segment_mean':
'tf.compat.v1.sparse_segment_mean',
'tf.sparse_segment_sqrt_n':
'tf.compat.v1.sparse_segment_sqrt_n',
'tf.sparse_segment_sum':
'tf.compat.v1.sparse_segment_sum',
'tf.sparse_slice':
'tf.sparse.slice',
'tf.sparse_softmax':
'tf.sparse.softmax',
'tf.sparse_tensor_dense_matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse_tensor_to_dense':
'tf.sparse.to_dense',
'tf.sparse_to_dense':
'tf.compat.v1.sparse_to_dense',
'tf.sparse_to_indicator':
'tf.sparse.to_indicator',
'tf.sparse_transpose':
'tf.sparse.transpose',
'tf.spectral.dct':
'tf.signal.dct',
'tf.spectral.fft':
'tf.signal.fft',
'tf.spectral.fft2d':
'tf.signal.fft2d',
'tf.spectral.fft3d':
'tf.signal.fft3d',
'tf.spectral.idct':
'tf.signal.idct',
'tf.spectral.ifft':
'tf.signal.ifft',
'tf.spectral.ifft2d':
'tf.signal.ifft2d',
'tf.spectral.ifft3d':
'tf.signal.ifft3d',
'tf.spectral.irfft':
'tf.signal.irfft',
'tf.spectral.irfft2d':
'tf.signal.irfft2d',
'tf.spectral.irfft3d':
'tf.signal.irfft3d',
'tf.spectral.rfft':
'tf.signal.rfft',
'tf.spectral.rfft2d':
'tf.signal.rfft2d',
'tf.spectral.rfft3d':
'tf.signal.rfft3d',
'tf.squared_difference':
'tf.math.squared_difference',
'tf.string_join':
'tf.strings.join',
'tf.string_strip':
'tf.strings.strip',
'tf.string_to_hash_bucket_fast':
'tf.strings.to_hash_bucket_fast',
'tf.string_to_hash_bucket_strong':
'tf.strings.to_hash_bucket_strong',
'tf.summary.Event':
'tf.compat.v1.summary.Event',
'tf.summary.FileWriter':
'tf.compat.v1.summary.FileWriter',
'tf.summary.FileWriterCache':
'tf.compat.v1.summary.FileWriterCache',
'tf.summary.SessionLog':
'tf.compat.v1.summary.SessionLog',
'tf.summary.Summary':
'tf.compat.v1.summary.Summary',
'tf.summary.SummaryDescription':
'tf.compat.v1.summary.SummaryDescription',
'tf.summary.TaggedRunMetadata':
'tf.compat.v1.summary.TaggedRunMetadata',
'tf.summary.all_v2_summary_ops':
'tf.compat.v1.summary.all_v2_summary_ops',
'tf.summary.audio':
'tf.compat.v1.summary.audio',
'tf.summary.get_summary_description':
'tf.compat.v1.summary.get_summary_description',
'tf.summary.histogram':
'tf.compat.v1.summary.histogram',
'tf.summary.image':
'tf.compat.v1.summary.image',
'tf.summary.initialize':
'tf.compat.v1.summary.initialize',
'tf.summary.merge':
'tf.compat.v1.summary.merge',
'tf.summary.merge_all':
'tf.compat.v1.summary.merge_all',
'tf.summary.scalar':
'tf.compat.v1.summary.scalar',
'tf.summary.tensor_summary':
'tf.compat.v1.summary.tensor_summary',
'tf.summary.text':
'tf.compat.v1.summary.text',
'tf.svd':
'tf.linalg.svd',
'tf.tables_initializer':
'tf.compat.v1.tables_initializer',
'tf.tensor_scatter_add':
'tf.tensor_scatter_nd_add',
'tf.tensor_scatter_sub':
'tf.tensor_scatter_nd_sub',
'tf.tensor_scatter_update':
'tf.tensor_scatter_nd_update',
'tf.test.StubOutForTesting':
'tf.compat.v1.test.StubOutForTesting',
'tf.test.compute_gradient_error':
'tf.compat.v1.test.compute_gradient_error',
'tf.test.get_temp_dir':
'tf.compat.v1.test.get_temp_dir',
'tf.test.mock':
'tf.compat.v1.test.mock',
'tf.test.test_src_dir_path':
'tf.compat.v1.test.test_src_dir_path',
'tf.to_bfloat16':
'tf.compat.v1.to_bfloat16',
'tf.to_complex128':
'tf.compat.v1.to_complex128',
'tf.to_complex64':
'tf.compat.v1.to_complex64',
'tf.to_double':
'tf.compat.v1.to_double',
'tf.to_float':
'tf.compat.v1.to_float',
'tf.to_int32':
'tf.compat.v1.to_int32',
'tf.to_int64':
'tf.compat.v1.to_int64',
'tf.tpu.CrossShardOptimizer':
'tf.compat.v1.tpu.CrossShardOptimizer',
'tf.tpu.PaddingSpec':
'tf.compat.v1.tpu.PaddingSpec',
'tf.tpu.batch_parallel':
'tf.compat.v1.tpu.batch_parallel',
'tf.tpu.bfloat16_scope':
'tf.compat.v1.tpu.bfloat16_scope',
'tf.tpu.core':
'tf.compat.v1.tpu.core',
'tf.tpu.cross_replica_sum':
'tf.compat.v1.tpu.cross_replica_sum',
'tf.tpu.experimental.AdagradParameters':
'tf.compat.v1.tpu.experimental.AdagradParameters',
'tf.tpu.experimental.AdamParameters':
'tf.compat.v1.tpu.experimental.AdamParameters',
'tf.tpu.experimental.FtrlParameters':
'tf.compat.v1.tpu.experimental.FtrlParameters',
'tf.tpu.experimental.StochasticGradientDescentParameters':
'tf.compat.v1.tpu.experimental.StochasticGradientDescentParameters',
'tf.tpu.experimental.embedding_column':
'tf.compat.v1.tpu.experimental.embedding_column',
'tf.tpu.experimental.shared_embedding_columns':
'tf.compat.v1.tpu.experimental.shared_embedding_columns',
'tf.tpu.initialize_system':
'tf.compat.v1.tpu.initialize_system',
'tf.tpu.outside_compilation':
'tf.compat.v1.tpu.outside_compilation',
'tf.tpu.replicate':
'tf.compat.v1.tpu.replicate',
'tf.tpu.rewrite':
'tf.compat.v1.tpu.rewrite',
'tf.tpu.shard':
'tf.compat.v1.tpu.shard',
'tf.tpu.shutdown_system':
'tf.compat.v1.tpu.shutdown_system',
'tf.trace':
'tf.linalg.trace',
'tf.train.AdadeltaOptimizer':
'tf.compat.v1.train.AdadeltaOptimizer',
'tf.train.AdagradDAOptimizer':
'tf.compat.v1.train.AdagradDAOptimizer',
'tf.train.AdagradOptimizer':
'tf.compat.v1.train.AdagradOptimizer',
'tf.train.AdamOptimizer':
'tf.compat.v1.train.AdamOptimizer',
'tf.train.CheckpointSaverHook':
'tf.estimator.CheckpointSaverHook',
'tf.train.CheckpointSaverListener':
'tf.estimator.CheckpointSaverListener',
'tf.train.ChiefSessionCreator':
'tf.compat.v1.train.ChiefSessionCreator',
'tf.train.FeedFnHook':
'tf.estimator.FeedFnHook',
'tf.train.FinalOpsHook':
'tf.estimator.FinalOpsHook',
'tf.train.FtrlOptimizer':
'tf.compat.v1.train.FtrlOptimizer',
'tf.train.GlobalStepWaiterHook':
'tf.estimator.GlobalStepWaiterHook',
'tf.train.GradientDescentOptimizer':
'tf.compat.v1.train.GradientDescentOptimizer',
'tf.train.LoggingTensorHook':
'tf.estimator.LoggingTensorHook',
'tf.train.LooperThread':
'tf.compat.v1.train.LooperThread',
'tf.train.MomentumOptimizer':
'tf.compat.v1.train.MomentumOptimizer',
'tf.train.MonitoredSession':
'tf.compat.v1.train.MonitoredSession',
'tf.train.MonitoredTrainingSession':
'tf.compat.v1.train.MonitoredTrainingSession',
'tf.train.NanLossDuringTrainingError':
'tf.estimator.NanLossDuringTrainingError',
'tf.train.NanTensorHook':
'tf.estimator.NanTensorHook',
'tf.train.NewCheckpointReader':
'tf.compat.v1.train.NewCheckpointReader',
'tf.train.Optimizer':
'tf.compat.v1.train.Optimizer',
'tf.train.ProfilerHook':
'tf.estimator.ProfilerHook',
'tf.train.ProximalAdagradOptimizer':
'tf.compat.v1.train.ProximalAdagradOptimizer',
'tf.train.ProximalGradientDescentOptimizer':
'tf.compat.v1.train.ProximalGradientDescentOptimizer',
'tf.train.QueueRunner':
'tf.compat.v1.train.QueueRunner',
'tf.train.RMSPropOptimizer':
'tf.compat.v1.train.RMSPropOptimizer',
'tf.train.Saver':
'tf.compat.v1.train.Saver',
'tf.train.SaverDef':
'tf.compat.v1.train.SaverDef',
'tf.train.Scaffold':
'tf.compat.v1.train.Scaffold',
'tf.train.SecondOrStepTimer':
'tf.estimator.SecondOrStepTimer',
'tf.train.Server':
'tf.distribute.Server',
'tf.train.SessionCreator':
'tf.compat.v1.train.SessionCreator',
'tf.train.SessionManager':
'tf.compat.v1.train.SessionManager',
'tf.train.SessionRunArgs':
'tf.estimator.SessionRunArgs',
'tf.train.SessionRunContext':
'tf.estimator.SessionRunContext',
'tf.train.SessionRunHook':
'tf.estimator.SessionRunHook',
'tf.train.SessionRunValues':
'tf.estimator.SessionRunValues',
'tf.train.SingularMonitoredSession':
'tf.compat.v1.train.SingularMonitoredSession',
'tf.train.StepCounterHook':
'tf.estimator.StepCounterHook',
'tf.train.StopAtStepHook':
'tf.estimator.StopAtStepHook',
'tf.train.SummarySaverHook':
'tf.estimator.SummarySaverHook',
'tf.train.Supervisor':
'tf.compat.v1.train.Supervisor',
'tf.train.SyncReplicasOptimizer':
'tf.compat.v1.train.SyncReplicasOptimizer',
'tf.train.VocabInfo':
'tf.estimator.VocabInfo',
'tf.train.WorkerSessionCreator':
'tf.compat.v1.train.WorkerSessionCreator',
'tf.train.add_queue_runner':
'tf.compat.v1.train.add_queue_runner',
'tf.train.assert_global_step':
'tf.compat.v1.train.assert_global_step',
'tf.train.basic_train_loop':
'tf.compat.v1.train.basic_train_loop',
'tf.train.batch':
'tf.compat.v1.train.batch',
'tf.train.batch_join':
'tf.compat.v1.train.batch_join',
'tf.train.checkpoint_exists':
'tf.compat.v1.train.checkpoint_exists',
'tf.train.cosine_decay':
'tf.compat.v1.train.cosine_decay',
'tf.train.cosine_decay_restarts':
'tf.compat.v1.train.cosine_decay_restarts',
'tf.train.create_global_step':
'tf.compat.v1.train.create_global_step',
'tf.train.do_quantize_training_on_graphdef':
'tf.compat.v1.train.do_quantize_training_on_graphdef',
'tf.train.experimental.MixedPrecisionLossScaleOptimizer':
'tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer',
'tf.train.exponential_decay':
'tf.compat.v1.train.exponential_decay',
'tf.train.export_meta_graph':
'tf.compat.v1.train.export_meta_graph',
'tf.train.generate_checkpoint_state_proto':
'tf.compat.v1.train.generate_checkpoint_state_proto',
'tf.train.get_checkpoint_mtimes':
'tf.compat.v1.train.get_checkpoint_mtimes',
'tf.train.get_global_step':
'tf.compat.v1.train.get_global_step',
'tf.train.get_or_create_global_step':
'tf.compat.v1.train.get_or_create_global_step',
'tf.train.global_step':
'tf.compat.v1.train.global_step',
'tf.train.import_meta_graph':
'tf.compat.v1.train.import_meta_graph',
'tf.train.init_from_checkpoint':
'tf.compat.v1.train.init_from_checkpoint',
'tf.train.input_producer':
'tf.compat.v1.train.input_producer',
'tf.train.inverse_time_decay':
'tf.compat.v1.train.inverse_time_decay',
'tf.train.limit_epochs':
'tf.compat.v1.train.limit_epochs',
'tf.train.linear_cosine_decay':
'tf.compat.v1.train.linear_cosine_decay',
'tf.train.match_filenames_once':
'tf.io.match_filenames_once',
'tf.train.maybe_batch':
'tf.compat.v1.train.maybe_batch',
'tf.train.maybe_batch_join':
'tf.compat.v1.train.maybe_batch_join',
'tf.train.maybe_shuffle_batch':
'tf.compat.v1.train.maybe_shuffle_batch',
'tf.train.maybe_shuffle_batch_join':
'tf.compat.v1.train.maybe_shuffle_batch_join',
'tf.train.natural_exp_decay':
'tf.compat.v1.train.natural_exp_decay',
'tf.train.noisy_linear_cosine_decay':
'tf.compat.v1.train.noisy_linear_cosine_decay',
'tf.train.piecewise_constant':
'tf.compat.v1.train.piecewise_constant',
'tf.train.piecewise_constant_decay':
'tf.compat.v1.train.piecewise_constant_decay',
'tf.train.polynomial_decay':
'tf.compat.v1.train.polynomial_decay',
'tf.train.queue_runner.QueueRunner':
'tf.compat.v1.train.queue_runner.QueueRunner',
'tf.train.queue_runner.add_queue_runner':
'tf.compat.v1.train.queue_runner.add_queue_runner',
'tf.train.queue_runner.start_queue_runners':
'tf.compat.v1.train.queue_runner.start_queue_runners',
'tf.train.range_input_producer':
'tf.compat.v1.train.range_input_producer',
'tf.train.remove_checkpoint':
'tf.compat.v1.train.remove_checkpoint',
'tf.train.replica_device_setter':
'tf.compat.v1.train.replica_device_setter',
'tf.train.shuffle_batch':
'tf.compat.v1.train.shuffle_batch',
'tf.train.shuffle_batch_join':
'tf.compat.v1.train.shuffle_batch_join',
'tf.train.slice_input_producer':
'tf.compat.v1.train.slice_input_producer',
'tf.train.start_queue_runners':
'tf.compat.v1.train.start_queue_runners',
'tf.train.string_input_producer':
'tf.compat.v1.train.string_input_producer',
'tf.train.summary_iterator':
'tf.compat.v1.train.summary_iterator',
'tf.train.update_checkpoint_state':
'tf.compat.v1.train.update_checkpoint_state',
'tf.train.warm_start':
'tf.compat.v1.train.warm_start',
'tf.train.write_graph':
'tf.io.write_graph',
'tf.trainable_variables':
'tf.compat.v1.trainable_variables',
'tf.truncated_normal':
'tf.random.truncated_normal',
'tf.uniform_unit_scaling_initializer':
'tf.compat.v1.uniform_unit_scaling_initializer',
'tf.unsorted_segment_max':
'tf.math.unsorted_segment_max',
'tf.unsorted_segment_mean':
'tf.math.unsorted_segment_mean',
'tf.unsorted_segment_min':
'tf.math.unsorted_segment_min',
'tf.unsorted_segment_prod':
'tf.math.unsorted_segment_prod',
'tf.unsorted_segment_sqrt_n':
'tf.math.unsorted_segment_sqrt_n',
'tf.unsorted_segment_sum':
'tf.math.unsorted_segment_sum',
'tf.variable_axis_size_partitioner':
'tf.compat.v1.variable_axis_size_partitioner',
'tf.variable_op_scope':
'tf.compat.v1.variable_op_scope',
'tf.variable_scope':
'tf.compat.v1.variable_scope',
'tf.variables_initializer':
'tf.compat.v1.variables_initializer',
'tf.verify_tensor_all_finite':
'tf.compat.v1.verify_tensor_all_finite',
'tf.wrap_function':
'tf.compat.v1.wrap_function',
'tf.write_file':
'tf.io.write_file',
'tf.zeta':
'tf.math.zeta'
}
| apache-2.0 |
leofdecarvalho/MachineLearning | 2. Modeling/3. Clustering/14. K_Means/kmeans.py | 1 | 1965 | # K-Means Clustering
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values
# y = dataset.iloc[:, 3].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Using the elbow method to find the optimal number of clusters
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42)
kmeans.fit(X)
wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()
# Fitting K-Means to the dataset
kmeans = KMeans(n_clusters = 5, init = 'k-means++', random_state = 42)
y_kmeans = kmeans.fit_predict(X)
# Visualising the clusters
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, c = 'red', label = 'Careful')
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, c = 'blue', label = 'Standard')
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 100, c = 'green', label = 'Target')
plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 100, c = 'cyan', label = 'Careless')
plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 100, c = 'magenta', label = 'Sensible')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 300, c = 'yellow', label = 'Centroids')
plt.title('Clusters of customers')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show() | mit |
biln/airflow | airflow/www/views.py | 1 | 80316 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pkg_resources
import socket
import importlib
from functools import wraps
from datetime import datetime, timedelta
import dateutil.parser
import copy
from itertools import chain, product
import json
from past.utils import old_div
from past.builtins import basestring
import inspect
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_, desc, and_
from flask import (
redirect, url_for, request, Markup, Response, current_app, render_template)
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.actions import action
from flask_admin.tools import iterdecode
from flask_login import flash
from flask._compat import PY2
import jinja2
import markdown
import nvd3
from wtforms import (
Form, SelectField, TextAreaField, PasswordField, StringField)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.settings import Session
from airflow.models import XCom
from airflow.operators import BaseOperator, SubDagOperator
from airflow.utils.logging import LoggingMixin
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.utils.db import provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils import logging as log_utils
from airflow.www import utils as wwwutils
from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
dagbag = models.DagBag(os.path.expanduser(conf.get('core', 'DAGS_FOLDER')))
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
DEFAULT_SENSITIVE_VARIABLE_FIELDS = (
'password',
'secret',
'passwd',
'authorization',
'api_key',
'apikey',
'access_token',
)
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
url = url_for(
'airflow.graph',
dag_id=m.dag_id)
return Markup(
'<a href="{url}">{m.dag_id}</a>'.format(**locals()))
def log_url_formatter(v, c, m, p):
return Markup(
'<a href="{m.log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
url = url_for(
'airflow.task',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=m.dag_id,
root=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{m.task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.now().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
def data_profiling_required(f):
'''
Decorator for views requiring data profiling access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
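    """Recursively collect task ids and sub-DAG ids from ``tasks``.

    SubDagOperators are expanded so that ``task_ids``, ``dag_ids`` and
    ``task_id_to_dag`` are populated in place for the whole hierarchy.
    """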
if isinstance(tasks, list):
for task in tasks:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
return
if isinstance(tasks, SubDagOperator):
subtasks = tasks.subdag.tasks
dag_ids.append(tasks.subdag.dag_id)
for subtask in subtasks:
if subtask.task_id not in task_ids:
task_ids.append(subtask.task_id)
task_id_to_dag[subtask.task_id] = tasks.subdag
recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
if isinstance(tasks, BaseOperator):
task_id_to_dag[tasks.task_id] = tasks.dag
def should_hide_value_for_key(key_name):
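    # Hide the value when the key contains one of the sensitive tokens listed
    # in DEFAULT_SENSITIVE_VARIABLE_FIELDS and hiding is enabled in the config.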
return any(s in key_name for s in DEFAULT_SENSITIVE_VARIABLE_FIELDS) \
and conf.getboolean('admin', 'hide_sensitive_variable_fields')
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
from airflow import macros
import pandas as pd
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
session.expunge_all()
session.commit()
session.close()
payload = {}
payload['state'] = 'ERROR'
payload['error'] = ''
# Processing templated fields
try:
args = eval(chart.default_params)
if type(args) is not type(dict()):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
args.update(request_dict)
args['macros'] = macros
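        # The chart SQL and label are Jinja templates, rendered with the merged
        # default/request parameters plus the airflow macros module.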
sql = jinja2.Template(chart.sql).render(**args)
label = jinja2.Template(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(
wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
                chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart.show_datatable or chart_type == "datatable":
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
payload['data'] = data
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
except Exception as e:
payload['error'] = "Time conversion failed"
if chart_type == 'datatable':
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
df = df.fillna(0)
NVd3ChartClass = chart_mapping.get(chart.chart_type)
NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
for col in df.columns:
nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
try:
nvd3_chart.buildcontent()
payload['chart_type'] = nvd3_chart.__class__.__name__
payload['htmlcontent'] = nvd3_chart.htmlcontent
except Exception as e:
payload['error'] = str(e)
payload['state'] = 'SUCCESS'
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
session.expunge_all()
session.commit()
session.close()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
#@login_required
def dag_stats(self):
states = [
State.SUCCESS,
State.RUNNING,
State.FAILED,
State.UPSTREAM_FAILED,
State.UP_FOR_RETRY,
State.QUEUED,
]
task_ids = []
dag_ids = []
for dag in dagbag.dags.values():
task_ids += dag.task_ids
if not dag.is_subdag:
dag_ids.append(dag.dag_id)
TI = models.TaskInstance
DagRun = models.DagRun
session = Session()
LastDagRun = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.filter(DagRun.state == State.RUNNING)
.subquery('running_dag_run')
)
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
qry = (
session.query(TI.dag_id, TI.state, sqla.func.count(TI.task_id))
.outerjoin(RunningDagRun, and_(
RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date)
)
.outerjoin(LastDagRun, and_(
LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date)
)
.filter(TI.task_id.in_(task_ids))
.filter(TI.dag_id.in_(dag_ids))
.filter(or_(
RunningDagRun.c.dag_id != None,
LastDagRun.c.dag_id != None
))
.group_by(TI.dag_id, TI.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
m = importlib.import_module(dag.module_name)
code = inspect.getsource(m)
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
def dag_details(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
session = settings.Session()
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=socket.getfqdn()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.getfqdn(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/sandbox')
@login_required
def sandbox(self):
title = "Sandbox Suggested Configuration"
cfg_loc = conf.AIRFLOW_CONFIG + '.sandbox'
f = open(cfg_loc, 'r')
config = f.read()
f.close()
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
code_html=code_html, title=title, subtitle=cfg_loc)
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/headers')
def headers(self):
d = {
'headers': {k: v for k, v in request.headers},
}
if hasattr(current_user, 'is_superuser'):
d['is_superuser'] = current_user.is_superuser()
d['data_profiling'] = current_user.data_profiling()
d['is_anonymous'] = current_user.is_anonymous()
d['is_authenticated'] = current_user.is_authenticated()
if hasattr(current_user, 'username'):
d['username'] = current_user.username
return wwwutils.json_response(d)
@expose('/pickle_info')
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
flash('You have been logged out.')
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
@expose('/log')
@login_required
@wwwutils.action_logging
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
conf.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = os.path.join(BASE_LOG_FOLDER, log_relative)
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
session = Session()
dttm = dateutil.parser.parse(execution_date)
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
if ti:
host = ti.hostname
log_loaded = False
if os.path.exists(loc):
try:
f = open(loc)
log += "".join(f.readlines())
f.close()
log_loaded = True
except:
log = "*** Failed to load local log file: {0}.\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
conf.get('celery', 'WORKER_LOG_SERVER_PORT')
url = os.path.join(
"http://{host}:{WORKER_LOG_SERVER_PORT}/log", log_relative
).format(**locals())
log += "*** Log file isn't local.\n"
log += "*** Fetching here: {url}\n".format(**locals())
try:
import requests
response = requests.get(url)
response.raise_for_status()
log += '\n' + response.text
log_loaded = True
except:
log += "*** Failed to fetch log file from worker.\n".format(
**locals())
if not log_loaded:
# load remote logs
remote_log_base = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
remote_log = os.path.join(remote_log_base, log_relative)
log += '\n*** Reading remote logs...\n'
# S3
if remote_log.startswith('s3:/'):
log += log_utils.S3Log().read(remote_log, return_error=True)
# GCS
elif remote_log.startswith('gs:/'):
log += log_utils.GCSLog().read(remote_log, return_error=True)
# unsupported
elif remote_log:
log += '*** Unsupported remote log location.'
session.commit()
session.close()
if PY2 and not isinstance(log, unicode):
log = log.decode('utf-8')
title = "Log"
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title=title, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = dag.get_task(task_id)
task = copy.copy(task)
task.resolve_template_files()
attributes = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
attributes.append((attr_name, str(attr)))
title = "Task Details"
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
return self.render(
'airflow/task.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
def xcom(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
session = Session()
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
            dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
force = request.args.get('force') == "true"
deps = request.args.get('deps') == "true"
try:
from airflow.executors import DEFAULT_EXECUTOR as executor
from airflow.executors import CeleryExecutor
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
except ImportError:
# in case CeleryExecutor cannot be imported it is not active either
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
executor.start()
executor.queue_task_instance(
ti, force=force, ignore_dependencies=deps)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
else:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to clear:"),
details=details,)
return response
@expose('/blocked')
@login_required
def blocked(self):
session = settings.Session()
DR = models.DagRun
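        # Count currently running DAG runs per dag_id and report each DAG's
        # max_active_runs so the UI can show which DAGs are blocked.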
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
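        # Cap on how many task instances may be altered in a single request.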
MAX_PERIODS = 1000
# Flagging tasks as successful
session = settings.Session()
task_ids = [task_id]
dag_ids = [dag_id]
task_id_to_dag = {
task_id: dag
}
end_date = ((dag.latest_execution_date or datetime.now())
if future else execution_date)
if 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
elif dag.start_date:
start_date = dag.start_date
else:
start_date = execution_date
start_date = execution_date if not past else start_date
if recursive:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
if downstream:
relatives = task.get_flat_relatives(upstream=False)
task_ids += [t.task_id for t in relatives]
if recursive:
recurse_tasks(relatives, task_ids, dag_ids, task_id_to_dag)
if upstream:
            relatives = task.get_flat_relatives(upstream=True)
task_ids += [t.task_id for t in relatives]
if recursive:
recurse_tasks(relatives, task_ids, dag_ids, task_id_to_dag)
TI = models.TaskInstance
if dag.schedule_interval == '@once':
dates = [start_date]
else:
dates = dag.date_range(start_date, end_date=end_date)
tis = session.query(TI).filter(
TI.dag_id.in_(dag_ids),
TI.execution_date.in_(dates),
TI.task_id.in_(task_ids)).all()
tis_to_change = session.query(TI).filter(
TI.dag_id.in_(dag_ids),
TI.execution_date.in_(dates),
TI.task_id.in_(task_ids),
TI.state != State.SUCCESS).all()
tasks = list(product(task_ids, dates))
tis_to_create = list(
set(tasks) -
set([(ti.task_id, ti.execution_date) for ti in tis]))
tis_all_altered = list(chain(
[(ti.task_id, ti.execution_date) for ti in tis_to_change],
tis_to_create))
if len(tis_all_altered) > MAX_PERIODS:
flash("Too many tasks at once (>{0})".format(
MAX_PERIODS), 'error')
return redirect(origin)
if confirmed:
for ti in tis_to_change:
ti.state = State.SUCCESS
session.commit()
for task_id, task_execution_date in tis_to_create:
ti = TI(
task=task_id_to_dag[task_id].get_task(task_id),
execution_date=task_execution_date,
state=State.SUCCESS)
session.add(ti)
session.commit()
session.commit()
session.close()
flash("Marked success on {} task instances".format(
len(tis_all_altered)))
return redirect(origin)
else:
if not tis_all_altered:
flash("No task instances to mark as successful", 'error')
response = redirect(origin)
else:
tis = []
for task_id, task_execution_date in tis_all_altered:
tis.append(TI(
task=task_id_to_dag[task_id].get_task(task_id),
execution_date=task_execution_date,
state=State.SUCCESS))
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to mark as successful:"),
details=details,)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
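        # A negative num makes date_range walk backwards from base_date, giving
        # the schedule dates of the most recent num_runs runs.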
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
                DR.dag_id == dag.dag_id,
                DR.execution_date <= base_date,
                DR.execution_date >= min_date)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
task_instances = {}
for ti in tis:
tid = alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
def set_duration(tid):
if isinstance(tid, dict) and tid.get("state") == State.RUNNING:
d = datetime.now() - dateutil.parser.parse(tid["start_date"])
tid["duration"] = d.total_seconds()
return tid
return {
'name': task.task_id,
'instances': [
set_duration(task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
data = json.dumps(data, indent=4, default=json_ser)
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
arrange = request.args.get('arrange', "LR")
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
DR = models.DagRun
drs = (
session.query(DR)
.filter_by(dag_id=dag_id)
.order_by(desc(DR.execution_date)).all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
class GraphForm(Form):
execution_date = SelectField("DAG run", choices=dr_choices)
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(
data={'execution_date': dttm.isoformat(), 'arrange': arrange})
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dr_state),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2),)
@expose('/duration')
@login_required
@wwwutils.action_logging
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
max_duration = 0
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=600, width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
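                # ti.duration is stored in seconds; the y axis plots hours.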
if ti.duration:
if max_duration < ti.duration:
max_duration = ti.duration
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
y.append(float(ti.duration) / (60*60))
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildhtml()
return self.render(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart,
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=600, width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
ts = ti.execution_date
if dag.schedule_interval:
ts = dag.following_schedule(ts)
if ti.end_date:
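                    # Landing time: hours between the end of the schedule
                    # interval and when the task actually finished.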
dttm = wwwutils.epoch(ti.execution_date)
secs = old_div((ti.end_date - ts).total_seconds(), 60*60)
x.append(dttm)
y.append(secs)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/chart.html',
dag=dag,
chart=chart,
height="700px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused')
@login_required
@wwwutils.action_logging
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.now()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect('/')
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
for ti in tis:
tasks.append({
'startDate': wwwutils.epoch(ti.start_date),
'endDate': wwwutils.epoch(ti.end_date or datetime.now()),
'isoStart': ti.start_date.isoformat()[:-4],
'isoEnd': ti.end_date.isoformat()[:-4],
'taskName': ti.task_id,
'duration': "{}".format(ti.end_date - ti.start_date)[:-4],
'status': ti.state,
'executionDate': ti.execution_date.isoformat(),
})
states = {ti.state:ti.state for ti in tis}
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'taskStatus': states,
'height': len(tis) * 25,
}
session.commit()
session.close()
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=json.dumps(data, indent=2),
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
def task_instances(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
qry = None
# filter the dags if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
if do_filter:
qry = (
session.query(DM)
.filter(
~DM.is_subdag, DM.is_active,
DM.owners.like('%' + current_user.username + '%'))
.all()
)
else:
qry = session.query(DM).filter(~DM.is_subdag, DM.is_active).all()
orm_dags = {dag.dag_id: dag for dag in qry}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
dags = dagbag.dags.values()
if do_filter:
dags = {
dag.dag_id: dag
for dag in dags
if (
dag.owner == current_user.username and (not dag.parent_dag)
)
}
else:
dags = {dag.dag_id: dag for dag in dags if not dag.parent_dag}
all_dag_ids = sorted(set(orm_dags.keys()) | set(dags.keys()))
return self.render(
'airflow/dags.html',
dags=dags,
orm_dags=orm_dags,
all_dag_ids=all_dag_ids)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/')
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.args.get('conn_id')
csv = request.args.get('csv') == "true"
sql = request.args.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
column_display_actions = True
page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
Modifying the base ModelView class for non edit, browse only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',)
column_list = (
'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = datetime.now()
chart_mapping = (
('line', 'lineChart'),
('spline', 'lineChart'),
('bar', 'multiBarChart'),
('column', 'multiBarChart'),
('area', 'stackedAreaChart'),
('stacked_area', 'stackedAreaChart'),
('percent_area', 'stackedAreaChart'),
('datatable', 'datatable'),
)
chart_mapping = dict(chart_mapping)
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description')
column_list = (
'label', 'event_type', 'start_date', 'end_date', 'reported_by')
column_default_sort = ("start_date", True)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
'''
# For debugging / troubleshooting
mv = KnowEventTypeView(
models.KnownEventType,
Session, name="Known Event Types", category="Manage")
admin.add_view(mv)
class DagPickleView(SuperUserMixin, ModelView):
pass
mv = DagPickleView(
models.DagPickle,
Session, name="Pickles", category="Manage")
admin.add_view(mv)
'''
class VariableView(wwwutils.LoginMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
def hidden_field_formatter(view, context, model, name):
if should_hide_value_for_key(model.key):
return Markup('*' * 8)
return getattr(model, name)
form_columns = (
'key',
'val',
)
column_list = ('key', 'val', 'is_encrypted',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
form_widget_args = {
'is_encrypted': {'disabled': True},
'val': {
'rows': 20,
}
}
column_sortable_list = (
'key',
'val',
'is_encrypted',
)
column_formatters = {
'val': hidden_field_formatter
}
def on_form_prefill(self, form, id):
if should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_delete = True
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link)
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_dagrun_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_dagrun_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_dagrun_state(ids, State.SUCCESS)
@provide_session
def set_dagrun_state(self, ids, target_state, session=None):
try:
DR = models.DagRun
count = 0
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
count += 1
dr.state = target_state
if target_state == State.RUNNING:
dr.start_date = datetime.now()
else:
dr.end_date = datetime.now()
session.commit()
flash(
"{count} dag runs were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
named_filter_urls = True
column_formatters = dict(
log_url=log_url_formatter,
task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('start_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url')
can_delete = True
page_size = 500
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_task_instance_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_task_instance_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_task_instance_state(ids, State.SUCCESS)
@action('set_retry', "Set state to 'up_for_retry'", None)
def action_set_retry(self, ids):
self.set_task_instance_state(ids, State.UP_FOR_RETRY)
@provide_session
def set_task_instance_state(self, ids, target_state, session=None):
try:
TI = models.TaskInstance
for count, id in enumerate(ids):
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
ti.state = target_state
count += 1
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
def get_one(self, id):
"""
As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
task_id, dag_id, execution_date = iterdecode(id)
execution_date = dateutil.parser.parse(execution_date)
return self.session.query(self.model).get((task_id, dag_id, execution_date))
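# A hedged sketch of the composite-key format consumed by
# TaskInstanceModelView.set_task_instance_state above; the helper name and the
# date format shown are illustrative, mirroring the parsing done in that method.
def _split_task_instance_id(id_str):
    # 'task_id,dag_id,execution_date' -> (task_id, dag_id, datetime)
    task_id, dag_id, execution_date = id_str.split(',')
    return task_id, dag_id, datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')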
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__scope',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
form_overrides = dict(_password=PasswordField)
form_widget_args = {
'is_extra_encrypted': {'disabled': True},
'is_encrypted': {'disabled': True},
}
    # Used to customize the form: the form's elements get rendered
    # and the results are stored in the extra field as JSON. All of these
    # need to be prefixed with extra__ and then the conn_type, as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file
form_extra_fields = {
'extra__jdbc__drv_path' : StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
'extra__google_cloud_platform__project': StringField('Project Id'),
'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
        'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
}
form_choices = {
'conn_type': [
('fs', 'File (path)'),
('ftp', 'FTP',),
('google_cloud_platform', 'Google Cloud Platform'),
('hdfs', 'HDFS',),
('http', 'HTTP',),
('hive_cli', 'Hive Client Wrapper',),
('hive_metastore', 'Hive Metastore Thrift',),
('hiveserver2', 'Hive Server 2 Thrift',),
('jdbc', 'Jdbc Connection',),
('mysql', 'MySQL',),
('postgres', 'Postgres',),
('oracle', 'Oracle',),
('vertica', 'Vertica',),
('presto', 'Presto',),
('s3', 'S3',),
('samba', 'Samba',),
('sqlite', 'Sqlite',),
('ssh', 'SSH',),
('cloudant', 'IBM Cloudant',),
('mssql', 'Microsoft SQL Server'),
('mesos_framework-id', 'Mesos Framework ID'),
]
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
extra = {
key:formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
fk = None
try:
fk = conf.get('core', 'fernet_key')
except:
pass
return fk is None
@classmethod
    def is_secure(cls):
"""
Used to display a message in the Connection list view making it clear
that the passwords and `extra` field can't be encrypted.
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
except:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception as e:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
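# A hedged sketch of the ``extra__{conn_type}__name`` convention described in
# ConnectionModelView above: prefixed form fields are collected into a dict and
# serialized into the connection's ``extra`` column as JSON. The helper name and
# the prefix-based filtering below are illustrative, not part of the view itself.
def _pack_extra_fields(formdata, conn_type):
    prefix = 'extra__{}__'.format(conn_type)
    extra = {key: value for key, value in formdata.items() if key.startswith(prefix)}
    return json.dumps(extra)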
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, LoggingMixin, BaseView):
@expose('/')
def version(self):
# Look at the version from setup.py
try:
airflow_version = pkg_resources.require("airflow")[0].version
except Exception as e:
airflow_version = None
self.logger.error(e)
# Get the Git repo and git hash
git_version = None
try:
with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
git_version = f.readline()
except Exception as e:
self.logger.error(e)
# Render information
title = "Version Info"
return self.render('airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = conf.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(conf.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
else:
config = (
"# You Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = 50
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
| apache-2.0 |
zedoul/AnomalyDetection | test_data.py | 1 | 3339 | # -*- coding: utf-8 -*-
"""
http://www.astroml.org/sklearn_tutorial/dimensionality_reduction.html
This document also helped me a lot.
https://www.kaggle.com/c/titanic-gettingStarted/details/getting-started-with-python-ii
"""
print (__doc__)
import numpy as np
import copy
from sklearn.cluster import KMeans
from sklearn.cluster import k_means
from sklearn.manifold import spectral_embedding
from sklearn.utils import check_random_state
import nslkdd.preprocessing as preprocessing
def show_one_row():
df_train = copy.deepcopy(df)
print len(gmms[0])
print len(gmms[1])
print len(gmms[0][0])
# select only normal data
df_train = df_train[(df_train["attack"] == 11)]
# remove every attack and difficulty
df_train.drop('attack',1,inplace=True)
df_train.drop('difficulty',1,inplace=True)
# select one row
print df_train[0:1]
for i, r in df_train.iterrows() :
# show one value
print "value " + str(r['duration'])
def show_classes():
import os
from nslkdd.data import model
workpath = os.path.dirname(os.path.abspath(__file__))
datafile_20 = workpath + '/nslkdd/data/KDDTrain+_20Percent.txt'
datafile_full = workpath + '/nslkdd/data/KDDTrain+.txt'
datafile_21 = workpath + '/nslkdd/data/KDDTest-21.txt'
datafile_plus = workpath + '/nslkdd/data/KDDTest+.txt'
headers, _ = preprocessing.get_header_data()
dfs = []
dfs.append(model.load_dataframe(datafile_20,headers))
dfs.append(model.load_dataframe(datafile_full,headers))
dfs.append(model.load_dataframe(datafile_21,headers))
dfs.append(model.load_dataframe(datafile_plus,headers))
df = dfs[0]
df = df.iloc[[1,3],:]
print df
exit()
# it shows every headers
# for di, df in enumerate(dfs[0]) :
# print df
attacks = []
for df in dfs :
attacks.append( list(set(df['attack'])) )
# print attacks[-1]
only_in_test_data = []
for i in attacks[3] :
if i in attacks[1] :
pass
else :
only_in_test_data.append(i)
total_test_set = attacks[1] + only_in_test_data
print total_test_set
# basic
for di, df in enumerate(dfs) :
print "=====" + str(di) + "======="
s = 0
for i in total_test_set :
det = len(df[df['attack']==i])
s = s + det
print i + " : " + str(len (df[df['attack']==i]))
print "------------------"
print "total : " + str(s)
print "============================"
# for tiddly
df_names = ["Training_20", "Training_full", "Test_21", "Test_plus"]
import copy
for attack_type in total_test_set :
for di, df_orig in enumerate(dfs) :
df = copy.deepcopy(df_orig)
df = df[df['attack'] == attack_type]
category_name = str(list(set(df['protocol_type'])))
df_name = df_names[di]
perc = len(df) / (len(dfs[di])*1.0) * 100.0
count = str(len(df)) + " / " + str(len(dfs[di])) + " (" + "{0:.3f}%".format(perc) + ")"
bg = " "
if perc == 0 :
bg = "bgcolor(#cd5c5c): "
print "| ! " + attack_type + " |" + bg + category_name + " |" + bg + df_name + " |" + bg + str(count) + " |" + bg + " |"
if __name__ == '__main__':
show_classes()
| mit |
Prooffreader/pyprooff | pyprooff/kmedoids.py | 1 | 1959 | import numpy as np
import random
def kMedoids(distance_matrix, k, tmax=100):
"""Clusters items according to a distance matrix
Args:
        distance_matrix: a square 2-D numpy array of pairwise distances
        k = number of clusters
        tmax = maximum number of iterations of the medoid-update loop
Returns: tuple of:
- array of cluster 'names', the indices of distance_matrix members
closest to the center of the cluster
- dict of arrays of cluster indices from distance_matrix
note that the keys of the dict are serial, corresponding to the
index of the cluster names, not their values
Note: pyprooff.similar.pairwise_distance_matrix is a good adjunct to this if
you want to use arbitrary distance functions. Otherwise scipy (euclidean, etc.) or
sklearn (cosine distance) are best.
"""
# determine dimensions of distance matrix D
D = distance_matrix # legacy variable name
m, n = D.shape
# randomly initialize an array of k medoid indices
M = np.sort(np.random.choice(n, k))
# create a copy of the array of medoid indices
Mnew = np.copy(M)
# initialize a dictionary to represent clusters
C = {}
for t in range(tmax):
# determine clusters, i. e. arrays of data indices
J = np.argmin(D[:,M], axis=1)
for kappa in range(k):
C[kappa] = np.where(J==kappa)[0]
# update cluster medoids
for kappa in range(k):
J = np.mean(D[np.ix_(C[kappa],C[kappa])],axis=1)
j = np.argmin(J)
Mnew[kappa] = C[kappa][j]
        Mnew.sort()  # sort in place; a bare np.sort(Mnew) would discard its result
# check for convergence
if np.array_equal(M, Mnew):
break
M = np.copy(Mnew)
else:
# final update of cluster memberships
J = np.argmin(D[:,M], axis=1)
for kappa in range(k):
C[kappa] = np.where(J==kappa)[0]
# return results
return M, C | mit |
mraspaud/dask | dask/dataframe/tests/test_indexing.py | 1 | 12618 | import pandas as pd
import pandas.util.testing as tm
import numpy as np
import pytest
import dask
import dask.dataframe as dd
from dask.dataframe.indexing import _coerce_loc_index
from dask.dataframe.utils import assert_eq, make_meta
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
meta = make_meta({'a': 'i8', 'b': 'i8'}, index=pd.Index([], 'i8'))
d = dd.DataFrame(dsk, 'x', meta, [0, 5, 9, 9])
full = d.compute()
def test_loc():
assert d.loc[3:8].divisions[0] == 3
assert d.loc[3:8].divisions[-1] == 8
assert d.loc[5].divisions == (5, 5)
assert_eq(d.loc[5], full.loc[5:5])
assert_eq(d.loc[3:8], full.loc[3:8])
assert_eq(d.loc[:8], full.loc[:8])
assert_eq(d.loc[3:], full.loc[3:])
assert_eq(d.loc[[5]], full.loc[[5]])
assert_eq(d.loc[[3, 4, 1, 8]], full.loc[[3, 4, 1, 8]])
assert_eq(d.a.loc[5], full.a.loc[5:5])
assert_eq(d.a.loc[3:8], full.a.loc[3:8])
assert_eq(d.a.loc[:8], full.a.loc[:8])
assert_eq(d.a.loc[3:], full.a.loc[3:])
assert_eq(d.a.loc[[5]], full.a.loc[[5]])
assert_eq(d.a.loc[[3, 4, 1, 8]], full.a.loc[[3, 4, 1, 8]])
pytest.raises(KeyError, lambda: d.loc[1000])
assert_eq(d.loc[1000:], full.loc[1000:])
assert_eq(d.loc[-2000:-1000], full.loc[-2000:-1000])
assert sorted(d.loc[5].dask) == sorted(d.loc[5].dask)
assert sorted(d.loc[5].dask) != sorted(d.loc[6].dask)
def test_loc_non_informative_index():
df = pd.DataFrame({'x': [1, 2, 3, 4]}, index=[10, 20, 30, 40])
ddf = dd.from_pandas(df, npartitions=2, sort=True)
ddf.divisions = (None,) * 3
assert not ddf.known_divisions
ddf.loc[20:30].compute(get=dask.get)
assert_eq(ddf.loc[20:30], df.loc[20:30])
df = pd.DataFrame({'x': [1, 2, 3, 4]}, index=[10, 20, 20, 40])
ddf = dd.from_pandas(df, npartitions=2, sort=True)
assert_eq(ddf.loc[20], df.loc[20:20])
def test_loc_with_text_dates():
A = tm.makeTimeSeries(10).iloc[:5]
B = tm.makeTimeSeries(10).iloc[5:]
s = dd.Series({('df', 0): A, ('df', 1): B}, 'df', A,
[A.index.min(), B.index.min(), B.index.max()])
assert s.loc['2000': '2010'].divisions == s.divisions
assert_eq(s.loc['2000': '2010'], s)
assert len(s.loc['2000-01-03': '2000-01-05'].compute()) == 3
def test_loc_with_series():
assert_eq(d.loc[d.a % 2 == 0], full.loc[full.a % 2 == 0])
assert sorted(d.loc[d.a % 2].dask) == sorted(d.loc[d.a % 2].dask)
assert sorted(d.loc[d.a % 2].dask) != sorted(d.loc[d.a % 3].dask)
def test_loc_with_series_different_partition():
df = pd.DataFrame(np.random.randn(20, 5),
index=list('abcdefghijklmnopqrst'),
columns=list('ABCDE'))
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc[ddf.A > 0], df.loc[df.A > 0])
assert_eq(ddf.loc[(ddf.A > 0).repartition(['a', 'g', 'k', 'o', 't'])],
df.loc[df.A > 0])
def test_loc2d():
# index indexer is always regarded as slice for duplicated values
assert_eq(d.loc[5, 'a'], full.loc[5:5, 'a'])
# assert_eq(d.loc[[5], 'a'], full.loc[[5], 'a'])
assert_eq(d.loc[5, ['a']], full.loc[5:5, ['a']])
# assert_eq(d.loc[[5], ['a']], full.loc[[5], ['a']])
assert_eq(d.loc[3:8, 'a'], full.loc[3:8, 'a'])
assert_eq(d.loc[:8, 'a'], full.loc[:8, 'a'])
assert_eq(d.loc[3:, 'a'], full.loc[3:, 'a'])
assert_eq(d.loc[[8], 'a'], full.loc[[8], 'a'])
assert_eq(d.loc[3:8, ['a']], full.loc[3:8, ['a']])
assert_eq(d.loc[:8, ['a']], full.loc[:8, ['a']])
assert_eq(d.loc[3:, ['a']], full.loc[3:, ['a']])
assert_eq(d.loc[[3, 4, 3], ['a']], full.loc[[3, 4, 3], ['a']])
# 3d
with pytest.raises(pd.core.indexing.IndexingError):
d.loc[3, 3, 3]
# Series should raise
with pytest.raises(pd.core.indexing.IndexingError):
d.a.loc[3, 3]
with pytest.raises(pd.core.indexing.IndexingError):
d.a.loc[3:, 3]
with pytest.raises(pd.core.indexing.IndexingError):
d.a.loc[d.a % 2 == 0, 3]
def test_loc2d_with_known_divisions():
df = pd.DataFrame(np.random.randn(20, 5),
index=list('abcdefghijklmnopqrst'),
columns=list('ABCDE'))
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc['a', 'A'], df.loc[['a'], 'A'])
assert_eq(ddf.loc['a', ['A']], df.loc[['a'], ['A']])
assert_eq(ddf.loc['a':'o', 'A'], df.loc['a':'o', 'A'])
assert_eq(ddf.loc['a':'o', ['A']], df.loc['a':'o', ['A']])
assert_eq(ddf.loc[['n'], ['A']], df.loc[['n'], ['A']])
assert_eq(ddf.loc[['a', 'c', 'n'], ['A']], df.loc[['a', 'c', 'n'], ['A']])
assert_eq(ddf.loc[['t', 'b'], ['A']], df.loc[['t', 'b'], ['A']])
assert_eq(ddf.loc[['r', 'r', 'c', 'g', 'h'], ['A']],
df.loc[['r', 'r', 'c', 'g', 'h'], ['A']])
def test_loc2d_with_unknown_divisions():
df = pd.DataFrame(np.random.randn(20, 5),
index=list('abcdefghijklmnopqrst'),
columns=list('ABCDE'))
ddf = dd.from_pandas(df, 3)
ddf.divisions = (None, ) * len(ddf.divisions)
assert ddf.known_divisions is False
assert_eq(ddf.loc['a', 'A'], df.loc[['a'], 'A'])
assert_eq(ddf.loc['a', ['A']], df.loc[['a'], ['A']])
assert_eq(ddf.loc['a':'o', 'A'], df.loc['a':'o', 'A'])
assert_eq(ddf.loc['a':'o', ['A']], df.loc['a':'o', ['A']])
def test_loc2d_duplicated_columns():
df = pd.DataFrame(np.random.randn(20, 5),
index=list('abcdefghijklmnopqrst'),
columns=list('AABCD'))
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc['a', 'A'], df.loc[['a'], 'A'])
assert_eq(ddf.loc['a', ['A']], df.loc[['a'], ['A']])
assert_eq(ddf.loc['j', 'B'], df.loc[['j'], 'B'])
assert_eq(ddf.loc['j', ['B']], df.loc[['j'], ['B']])
assert_eq(ddf.loc['a':'o', 'A'], df.loc['a':'o', 'A'])
assert_eq(ddf.loc['a':'o', ['A']], df.loc['a':'o', ['A']])
assert_eq(ddf.loc['j':'q', 'B'], df.loc['j':'q', 'B'])
assert_eq(ddf.loc['j':'q', ['B']], df.loc['j':'q', ['B']])
assert_eq(ddf.loc['a':'o', 'B':'D'], df.loc['a':'o', 'B':'D'])
assert_eq(ddf.loc['a':'o', 'B':'D'], df.loc['a':'o', 'B':'D'])
assert_eq(ddf.loc['j':'q', 'B':'A'], df.loc['j':'q', 'B':'A'])
assert_eq(ddf.loc['j':'q', 'B':'A'], df.loc['j':'q', 'B':'A'])
assert_eq(ddf.loc[ddf.B > 0, 'B'], df.loc[df.B > 0, 'B'])
assert_eq(ddf.loc[ddf.B > 0, ['A', 'C']], df.loc[df.B > 0, ['A', 'C']])
def test_getitem():
df = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'B': [9, 8, 7, 6, 5, 4, 3, 2, 1],
'C': [True, False, True] * 3},
columns=list('ABC'))
ddf = dd.from_pandas(df, 2)
assert_eq(ddf['A'], df['A'])
# check cache consistency
tm.assert_series_equal(ddf['A']._meta, ddf._meta['A'])
assert_eq(ddf[['A', 'B']], df[['A', 'B']])
tm.assert_frame_equal(ddf[['A', 'B']]._meta, ddf._meta[['A', 'B']])
assert_eq(ddf[ddf.C], df[df.C])
tm.assert_series_equal(ddf.C._meta, ddf._meta.C)
assert_eq(ddf[ddf.C.repartition([0, 2, 5, 8])], df[df.C])
pytest.raises(KeyError, lambda: df['X'])
pytest.raises(KeyError, lambda: df[['A', 'X']])
pytest.raises(AttributeError, lambda: df.X)
# not str/unicode
df = pd.DataFrame(np.random.randn(10, 5))
ddf = dd.from_pandas(df, 2)
assert_eq(ddf[0], df[0])
assert_eq(ddf[[1, 2]], df[[1, 2]])
pytest.raises(KeyError, lambda: df[8])
pytest.raises(KeyError, lambda: df[[1, 8]])
def test_getitem_slice():
df = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'B': [9, 8, 7, 6, 5, 4, 3, 2, 1],
'C': [True, False, True] * 3},
index=list('abcdefghi'))
ddf = dd.from_pandas(df, 3)
assert_eq(ddf['a':'e'], df['a':'e'])
assert_eq(ddf['a':'b'], df['a':'b'])
assert_eq(ddf['f':], df['f':])
def test_loc_on_numpy_datetimes():
df = pd.DataFrame({'x': [1, 2, 3]},
index=list(map(np.datetime64, ['2014', '2015', '2016'])))
a = dd.from_pandas(df, 2)
a.divisions = list(map(np.datetime64, a.divisions))
assert_eq(a.loc['2014': '2015'], a.loc['2014': '2015'])
def test_loc_on_pandas_datetimes():
df = pd.DataFrame({'x': [1, 2, 3]},
index=list(map(pd.Timestamp, ['2014', '2015', '2016'])))
a = dd.from_pandas(df, 2)
a.divisions = list(map(pd.Timestamp, a.divisions))
assert_eq(a.loc['2014': '2015'], a.loc['2014': '2015'])
def test_coerce_loc_index():
for t in [pd.Timestamp, np.datetime64]:
assert isinstance(_coerce_loc_index([t('2014')], '2014'), t)
def test_loc_timestamp_str():
df = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.date_range('2011-01-01', freq='H', periods=100))
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df.loc['2011-01-02'],
ddf.loc['2011-01-02'])
assert_eq(df.loc['2011-01-02':'2011-01-10'],
ddf.loc['2011-01-02':'2011-01-10'])
# same reso, dask result is always DataFrame
assert_eq(df.loc['2011-01-02 10:00'].to_frame().T,
ddf.loc['2011-01-02 10:00'])
# series
assert_eq(df.A.loc['2011-01-02'],
ddf.A.loc['2011-01-02'])
assert_eq(df.A.loc['2011-01-02':'2011-01-10'],
ddf.A.loc['2011-01-02':'2011-01-10'])
# slice with timestamp (dask result must be DataFrame)
assert_eq(df.loc[pd.Timestamp('2011-01-02')].to_frame().T,
ddf.loc[pd.Timestamp('2011-01-02')])
assert_eq(df.loc[pd.Timestamp('2011-01-02'):pd.Timestamp('2011-01-10')],
ddf.loc[pd.Timestamp('2011-01-02'):pd.Timestamp('2011-01-10')])
assert_eq(df.loc[pd.Timestamp('2011-01-02 10:00')].to_frame().T,
ddf.loc[pd.Timestamp('2011-01-02 10:00')])
df = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.date_range('2011-01-01', freq='M', periods=100))
ddf = dd.from_pandas(df, 50)
assert_eq(df.loc['2011-01'], ddf.loc['2011-01'])
assert_eq(df.loc['2011'], ddf.loc['2011'])
assert_eq(df.loc['2011-01':'2012-05'], ddf.loc['2011-01':'2012-05'])
assert_eq(df.loc['2011':'2015'], ddf.loc['2011':'2015'])
# series
assert_eq(df.B.loc['2011-01'], ddf.B.loc['2011-01'])
assert_eq(df.B.loc['2011'], ddf.B.loc['2011'])
assert_eq(df.B.loc['2011-01':'2012-05'], ddf.B.loc['2011-01':'2012-05'])
assert_eq(df.B.loc['2011':'2015'], ddf.B.loc['2011':'2015'])
def test_getitem_timestamp_str():
df = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.date_range('2011-01-01', freq='H', periods=100))
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df['2011-01-02'],
ddf['2011-01-02'])
assert_eq(df['2011-01-02':'2011-01-10'],
df['2011-01-02':'2011-01-10'])
df = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.date_range('2011-01-01', freq='D', periods=100))
ddf = dd.from_pandas(df, 50)
assert_eq(df['2011-01'], ddf['2011-01'])
assert_eq(df['2011'], ddf['2011'])
assert_eq(df['2011-01':'2012-05'], ddf['2011-01':'2012-05'])
assert_eq(df['2011':'2015'], ddf['2011':'2015'])
def test_loc_period_str():
# .loc with PeriodIndex doesn't support partial string indexing
# https://github.com/pydata/pandas/issues/13429
pass
def test_getitem_period_str():
df = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.period_range('2011-01-01', freq='H', periods=100))
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df['2011-01-02'],
ddf['2011-01-02'])
assert_eq(df['2011-01-02':'2011-01-10'],
df['2011-01-02':'2011-01-10'])
# same reso, dask result is always DataFrame
df = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.period_range('2011-01-01', freq='D', periods=100))
ddf = dd.from_pandas(df, 50)
assert_eq(df['2011-01'], ddf['2011-01'])
assert_eq(df['2011'], ddf['2011'])
assert_eq(df['2011-01':'2012-05'], ddf['2011-01':'2012-05'])
assert_eq(df['2011':'2015'], ddf['2011':'2015'])
| bsd-3-clause |
imranyousuf/project-kappa | code/utils/design_matrix.py | 5 | 1958 | import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
import nibabel as nib
from dipy.segment.mask import median_otsu
from scipy.ndimage import gaussian_filter
from matplotlib import colors
from scipy.misc import *
from loading_data import *
from harmonic import *
from pre_process import *
from convolve import *
from events2neural_fixed import events2neural_fixed
import numpy.linalg as npl
def design_matrix(subject, run, TR = 2.5):
data = bold_data(subject,run)
vol_shape, n_trs = data.shape[:-1], data.shape[-1]
tr_times = np.arange(0,30,TR)
hrf_at_trs = hrf(tr_times)
col = 0
X = np.ones((n_trs,14))
#Smoothed and masked data
mean_data = np.mean(data,axis=-1)
masked, mask = median_otsu(mean_data,2,1)
# smooth_data = gaussian_filter(data,[2,2,2,0])
# Y = smooth_data[mask].T
#omitted smoothing for now
Y = data[mask].T
#Adding onsets to design matrix
for i in list_cond_file(subject,run):
neural_prediction = events2neural_fixed(i, TR, n_trs)
convolved = convolve(neural_prediction, hrf_at_trs)
X[:,col] = convolved
col = col+1
##PCA
Y_demeaned = Y - np.mean(Y,axis=1).reshape([-1,1])
unscaled_cov = Y_demeaned.dot(Y_demeaned.T)
U, S, V = npl.svd(unscaled_cov)
X[:,8] = U[:,0]
X[:,9:11] = U[:,6:8]
linear_drift = np.linspace(-1,1,n_trs)
X[:,11] = linear_drift
quadratic_drift = linear_drift ** 2
quadratic_drift -= np.mean(quadratic_drift)
X[:,12]= quadratic_drift
betas = npl.pinv(X).dot(Y)
betas_vols = np.zeros(vol_shape+(14,))
betas_vols[mask,:] = betas.T
projections = U.T.dot(Y_demeaned)
projection_vols = np.zeros(data.shape)
projection_vols[mask,:] = projections.T
return X, Y, betas_vols, mask, U, Y_demeaned, mean_data, projection_vols
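def _glm_fit_sketch():
    """A minimal sketch (not part of the pipeline) of the estimation step used in
    design_matrix above: ordinary least squares via the pseudo-inverse,
    betas = pinv(X).dot(Y), on small synthetic data. All values are illustrative."""
    rng = np.random.RandomState(0)
    n_trs = 100
    X = np.ones((n_trs, 3))
    X[:, 1] = np.linspace(-1, 1, n_trs)       # linear drift regressor
    X[:, 2] = rng.randn(n_trs)                # toy task regressor
    true_betas = np.array([[2.0], [0.5], [1.5]])
    Y = X.dot(true_betas) + 0.1 * rng.randn(n_trs, 1)
    return npl.pinv(X).dot(Y)                 # same estimator as design_matrix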
| bsd-3-clause |
jskDr/jamespy | j3/jquinone.py | 1 | 26743 | import pandas as pd
import numpy as np
import re #regular expression
import itertools
import matplotlib.pyplot as plt
from mpltools import color
from collections import OrderedDict
from rdkit import Chem
# The following modules were written by Sungjin Kim
from . import jchem
from . import jutil
def gen_14BQ_OH():
"""
return 1,4BQ species with OH functionals.
"""
q_smiles_base = {}
q_smiles_mid = {}
q_smiles_base['1,4-BQ,2-OH'] = '[H]OC1=C([H])C(=O)C([H])=C([H])C1=O'
q_smiles_base['1,4-BQ,Full-OH'] = 'OC1=C(O)C(=O)C(O)=C(O)C1=O'
q_smiles_base['1,4-BQ'] = 'O=C1C=CC(=O)C=C1'
q_smiles_mid['1,4-BQ'] = 'O=C1C=CC(=O)C=C1'
q_smiles_mid['1,4-BQ,2-OH'] = 'OC1=CC(=O)C=CC1=O'
q_smiles_mid['1,4-BQ,2,3-OH'] = 'OC1=C(O)C(=O)C=CC1=O'
q_smiles_mid['1,4-BQ,2,3,5-OH'] = 'OC1=CC(=O)C(O)=C(O)C1=O'
q_smiles_mid['1,4-BQ,Full-OH'] = 'OC1=C(O)C(=O)C(O)=C(O)C1=O'
return q_smiles_base, q_smiles_mid
def gen_910AQ_SO3H():
"""
return 9,10AQ species with SO3H functionals.
"""
q_smiles_base = {}
q_smiles_mid = {}
q_smiles_base['9,10AQ'] = 'O=C1C2C=CC=CC2C(=O)C2=C1C=CC=C2'
q_smiles_base['9,10AQ,1-OH'] = 'OS(=O)(=O)C1=CC=CC2C1C(=O)C1=C(C=CC=C1)C2=O'
q_smiles_base['9,10AQ,2-OH'] = 'OS(=O)(=O)C1=CC2C(C=C1)C(=O)C1=C(C=CC=C1)C2=O'
q_smiles_base['9,10AQ,Full-OH'] = 'OS(=O)(=O)C1=C(C(=C(C2C1C(=O)C1=C(C2=O)C(=C(C(=C1S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O'
q_smiles_mid['9,10AQ'] = 'O=C1C2C=CC=CC2C(=O)C2=C1C=CC=C2'
q_smiles_mid['9,10AQ,1-OH'] = 'OS(=O)(=O)C1=CC=CC2C1C(=O)C1=C(C=CC=C1)C2=O'
q_smiles_mid['9,10AQ,2-OH'] = 'OS(=O)(=O)C1=CC2C(C=C1)C(=O)C1=C(C=CC=C1)C2=O'
q_smiles_mid['9,10AQ,1,2-OH'] = 'OS(=O)(=O)C1=C(C2C(C=C1)C(=O)C1=C(C=CC=C1)C2=O)S(O)(=O)=O'
q_smiles_mid['9,10AQ,Full-OH'] = 'OS(=O)(=O)C1=C(C(=C(C2C1C(=O)C1=C(C2=O)C(=C(C(=C1S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O'
return q_smiles_base, q_smiles_mid
def gen_smiles_quinone( quinone = '9,10AQ', r_group = 'SO3H'):
if quinone == '1,4BQ' and r_group == 'OH':
return gen_14BQ_OH()
elif quinone == '9,10AQ' and r_group == 'SO3H':
return gen_910AQ_SO3H()
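def _gen_smiles_quinone_sketch():
    # A hedged usage sketch: fetch the base/mid SMILES dictionaries for 1,4-BQ
    # with OH substitution and print each species next to its SMILES string.
    q_base, q_mid = gen_smiles_quinone(quinone='1,4BQ', r_group='OH')
    for name in sorted(q_mid):
        print(name, '->', q_mid[name])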
class AQDS_OH():
def __init__(self, fname = 'oh_subs_csv.csv'):
self.pdr = pd.read_csv( fname)
def get_Frag6_D( self, dfr = None):
if dfr == None:
dfr = self.pdr
ri_vec = [1, 3, 4, 5, 6, 8]
R = []
HOH201 = {'H': 0, 'OH': 1}
for ri in ri_vec:
s = 'R{}'.format( ri)
rv = dfr[s].tolist()
rv_01 = [HOH201[x] for x in rv]
R.append( rv_01)
RM = np.mat( R).T
Frag6_L = []
OHSYMB = {'H': '', 'OH': '(O)'}
for ri in ri_vec:
s = 'R{}'.format( ri)
rv = dfr[s].tolist()
fr_01 = [OHSYMB[x] for x in rv]
Frag6_L.append( fr_01)
#print Frag6_L
Frag6_D = []
for ii in range( len(Frag6_L[0])):
Frag6_D.append({})
for ii, frag in enumerate(Frag6_L):
for ix, fr in enumerate(frag):
dict_key = '{B%d}' % ii
Frag6_D[ix][dict_key] = fr
return Frag6_D
def _gen_27aqds_with_oh_r1( self, Frag6_D, show = True):
"""
2,7-AQDS with OH attachment are performed
using smiles interpolation
"""
mol_smiles_list = []
for ix, mol_symb in enumerate(Frag6_D):
mol = bq14_oh2 = Chem.MolFromSmiles( 'C1(=O)c2c{B3}c{B4}c(S(=O)(=O)O)c{B5}c2C(=O)c2c{B0}c(S(=O)(=O)O)c{B1}c{B2}c21', replacements=mol_symb)
mol_smiles = Chem.MolToSmiles( mol)
mol_smiles_list.append( mol_smiles)
if show:
print(ix+1, mol_smiles)
jchem.show_mol( mol_smiles)
return mol_smiles_list
def gen_27aqds_with_R( self, Frag6_D, r_gr, show = True):
"""
2,7-AQDS with OH attachment are performed
using smiles interpolation
"""
mol_smiles_list = []
for ix, mol_symb in enumerate(Frag6_D):
# r_gr = 'S(=O)(=O)O' #[N+]([O-])=O
base_smiles = 'C1(=O)c2c{B3}c{B4}c(%s)c{B5}c2C(=O)c2c{B0}c(%s)c{B1}c{B2}c21' % (r_gr, r_gr)
mol = bq14_oh2 = Chem.MolFromSmiles( base_smiles, replacements=mol_symb)
mol_smiles = Chem.MolToSmiles( mol)
mol_smiles_list.append( mol_smiles)
if show:
print(ix+1, mol_smiles)
jchem.show_mol( mol_smiles)
return mol_smiles_list
def gen_27aqds_with_oh( self, Frag6_D, show = True):
r_gr = 'S(=O)(=O)O'
return self.gen_27aqds_with_R( Frag6_D, r_gr, show = show)
def gen_27aqds_with_no2( self, Frag6_D, show = True):
r_gr = '[N+]([O-])=O'
return self.gen_27aqds_with_R( Frag6_D, r_gr, show = show)
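def _frag6_sketch():
    # A hedged sketch of the Frag6_D structure consumed by gen_27aqds_with_R above:
    # one dict per molecule, mapping the placeholders {B0}..{B5} to '' (keep H)
    # or '(O)' (attach OH). The single entry below is illustrative only.
    return [{'{B0}': '(O)', '{B1}': '', '{B2}': '',
             '{B3}': '', '{B4}': '(O)', '{B5}': ''}]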
class HAQDS_OH():
def __init__(self, fname = 'oh_subs_csv.csv'):
self.pdr = pd.read_csv( fname)
def get_Frag6_D( self, dfr = None):
if dfr == None:
dfr = self.pdr
ri_vec = [1, 3, 4, 5, 6, 8]
R = []
HOH201 = {'H': 0, 'OH': 1}
for ri in ri_vec:
s = 'R{}'.format( ri)
rv = dfr[s].tolist()
rv_01 = [HOH201[x] for x in rv]
R.append( rv_01)
RM = np.mat( R).T
Frag6_L = []
OHSYMB = {'H': '', 'OH': '(O)'}
for ri in ri_vec:
s = 'R{}'.format( ri)
rv = dfr[s].tolist()
fr_01 = [OHSYMB[x] for x in rv]
Frag6_L.append( fr_01)
#print Frag6_L
Frag6_D = []
for ii in range( len(Frag6_L[0])):
Frag6_D.append({})
for ii, frag in enumerate(Frag6_L):
for ix, fr in enumerate(frag):
dict_key = '{B%d}' % ii
Frag6_D[ix][dict_key] = fr
return Frag6_D
def _gen_27aqds_with_oh_r1( self, Frag6_D, show = True):
"""
2,7-AQDS with OH attachment are performed
using smiles interpolation
"""
mol_smiles_list = []
for ix, mol_symb in enumerate(Frag6_D):
mol = bq14_oh2 = Chem.MolFromSmiles( 'C1(O)c2c{B3}c{B4}c(S(=O)(=O)O)c{B5}c2C(O)c2c{B0}c(S(=O)(=O)O)c{B1}c{B2}c21', replacements=mol_symb)
mol_smiles = Chem.MolToSmiles( mol)
mol_smiles_list.append( mol_smiles)
if show:
print(ix+1, mol_smiles)
jchem.show_mol( mol_smiles)
return mol_smiles_list
def gen_27aqds_with_R( self, Frag6_D, r_gr, show = True):
"""
2,7-AQDS with OH attachment are performed
using smiles interpolation
"""
mol_smiles_list = []
for ix, mol_symb in enumerate(Frag6_D):
# r_gr = 'S(=O)(=O)O' #[N+]([O-])=O
base_smiles = 'C1(O)c2c{B3}c{B4}c(%s)c{B5}c2C(O)c2c{B0}c(%s)c{B1}c{B2}c21' % (r_gr, r_gr)
mol = Chem.MolFromSmiles( base_smiles, replacements=mol_symb)
mol_smiles = Chem.MolToSmiles( mol)
mol_smiles_list.append( mol_smiles)
if show:
print(ix+1, mol_smiles)
jchem.show_mol( mol_smiles)
return mol_smiles_list
def gen_27aqds_with_oh( self, Frag6_D, show = True):
r_gr = 'S(=O)(=O)O'
return self.gen_27aqds_with_R( Frag6_D, r_gr, show = show)
def gen_27aqds_with_no2( self, Frag6_D, show = True):
r_gr = '[N+]([O-])=O'
return self.gen_27aqds_with_R( Frag6_D, r_gr, show = show)
def get_r_list( N_Rgroup = 4, so3h = '(S(O)(=O)=O)', disp = False, pdForm = True):
pdr_id, pdr_index, pdr_rgroups, pdr_no_r = [], [], [], []
N_max_bin = '0b' + '1' * N_Rgroup
for pos in range( int(N_max_bin, 2) + 1):
pos_bin = bin( pos)[2:].rjust( N_Rgroup, '0')
so_int_l = [int(x) for x in pos_bin]
so_l = [so3h if x == 1 else '' for x in so_int_l ]
no_r = sum( so_int_l)
pdr_id.append( pos + 1)
pdr_no_r.append( no_r)
pdr_index.append( so_int_l)
pdr_rgroups.append( so_l)
if disp: print(pos, no_r, so_int_l, '==>', so_l)
if pdForm:
pdr = pd.DataFrame()
pdr['ID'] = pdr_id
pdr['Rgroup'] = [so3h] * len( pdr_id)
pdr['NoOfR'] = pdr_no_r
pdr['Index'] = pdr_index
pdr['Rgroups'] = pdr_rgroups
return pdr
else:
return so_l
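def _get_r_list_sketch():
    # A hedged sketch: with two attachment positions and a single R-group there
    # are 2**2 = 4 substitution patterns, from [0, 0] (bare) to [1, 1] (fully
    # substituted). The '(O)' group used here is illustrative.
    pdr = get_r_list(N_Rgroup=2, so3h='(O)', disp=False, pdForm=True)
    print(pdr[['ID', 'NoOfR', 'Index', 'Rgroups']])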
def get_multi_r_list( N_positions = 4, r_l = ['', '(S(O)(=O)=O)', '(O)'], disp = False, pdForm = True):
"""
Multiple R-groups will be attached.
    The empty string '' should be included in the list of R-groups, since
    leaving a position unsubstituted is also one of the possible cases.
"""
pdr_id, pdr_index, pdr_rgroups, pdr_no_r = [], [], [], []
# The number of the possible elements in product operation is length of R-groups
# N_positions reprensents the number of attachment positions.
Nr = len( r_l)
so_int_l_all = itertools.product( list(range( Nr)), repeat = N_positions)
for pos, so_int_l in enumerate(so_int_l_all):
so_l = [ r_l[x] for x in so_int_l]
no_r = jutil.count( so_int_l, 0, inverse = True)
pdr_id.append( pos + 1)
pdr_no_r.append( no_r)
pdr_index.append( so_int_l)
pdr_rgroups.append( so_l)
if disp: print(pos, no_r, so_int_l, '==>', so_l)
if pdForm:
pdr = pd.DataFrame()
pdr['ID'] = pdr_id
if len( r_l) == 2 and '' in r_l:
"""
            If r_l consists of a single R-group plus the blank entry,
            that R-group is recorded as the Rgroup value.
            The blank entry may sit at index 0 or 1; this is supported for
            generality, although it is usually placed at index 0.
"""
if r_l.index( '') == 0:
pdr['Rgroup'] = [ r_l[1]] * len( pdr_id)
else:
pdr['Rgroup'] = [ r_l[0]] * len( pdr_id)
else:
pdr['Rgroup'] = ['Mix'] * len( pdr_id)
pdr['NoOfR'] = pdr_no_r
pdr['Index'] = pdr_index
pdr['Rgroups'] = pdr_rgroups
return pdr
else:
return so_l
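def _get_multi_r_list_sketch():
    # A hedged sketch: with 2 positions and 3 candidate groups (including the
    # empty one) itertools.product yields 3**2 = 9 substitution patterns.
    pdr = get_multi_r_list(N_positions=2, r_l=['', '(O)', '(C)'], disp=False, pdForm=True)
    print(len(pdr), pdr['Index'].tolist())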
def gen_r_attach( mol = 'Oc1nc(O)c2nc3c{0}c{1}c{2}c{3}c3nc2n1', so3h = '(S(O)(=O)=O)', disp = False, graph = False):
"""
generate molecules with R group fragment
"""
N_group = len( re.findall( '{[0-9]*}', mol)) # find number of R group positions
pdr = get_r_list( N_group, so3h, disp = disp, pdForm = True)
so_l = pdr['Rgroups'].tolist()
aso_l = []
for so in so_l:
aso = mol.format(*so)
aso_l.append( aso)
if disp: print(so, aso)
if graph: jchem.show_mol( aso)
pdr['SMILES'] = aso_l
pdr['BaseMol'] = [aso_l[0]] * len( aso_l)
pdr['BaseStr'] = [mol] * len( aso_l)
return pdr
# [nH]1c2[nH]c3ccccc3[nH]c2c(=O)[nH]c1=O
def gen_rl_attach( mol = 'Oc1nc(O)c2nc3c{0}c{1}c{2}c{3}c3nc2n1', r_l = ['', '(S(O)(=O)=O)'], disp = False, graph = False):
"""
generate molecules with R group fragment
    get_r_list is replaced by get_multi_r_list so that molecules with multiple R-groups attached can be generated.
"""
N_group = len( re.findall( '{[0-9]*}', mol)) # find number of R group positions
pdr = get_multi_r_list( N_group, r_l, disp = disp, pdForm = True)
so_l = pdr['Rgroups'].tolist()
aso_l = []
for so in so_l:
aso = mol.format(*so)
aso_l.append( aso)
if disp: print(so, aso)
if graph: jchem.show_mol( aso)
pdr['SMILES'] = aso_l
pdr['BaseMol'] = [aso_l[0]] * len( aso_l)
pdr['BaseStr'] = [mol] * len( aso_l)
return pdr
def gen_rl_2attach( mol, mol_nH, r_l = ['', '(S(O)(=O)=O)'], disp = False, graph = False):
"""
generate molecules with R group fragment
    get_r_list is replaced by get_multi_r_list so that molecules with multiple R-groups attached can be generated.
Reduced (or hydrated) SMILES strings will be generated as well.
"""
N_group = len( re.findall( '{[0-9]*}', mol)) # find number of R group positions
pdr = get_multi_r_list( N_group, r_l, disp = disp, pdForm = True)
so_l = pdr['Rgroups'].tolist()
aso_l = []
aso_nH_l = []
for so in so_l:
aso = mol.format(*so)
aso_l.append( aso)
aso_nH = mol_nH.format(*so)
aso_nH_l.append( aso_nH)
if disp: print(so, aso, aso_nH)
if graph:
print("Oxidated molecule:")
jchem.show_mol( aso)
print("Hydrated molecule:")
jchem.show_mol( aso_nH)
# Storing canonical smiles strings
pdr['SMILES'] = jchem.csmiles_l( aso_l)
pdr['R-SMILES'] = jchem.csmiles_l( aso_nH_l)
pdr['BaseMol'] = [aso_l[0]] * len( aso_l)
pdr['BaseStr'] = [mol] * len( aso_l)
return pdr
def gen_r_attach_Alloxazine_R123457( so3h = '(S(O)(=O)=O)', disp = False, graph = False):
"""
generate molecules with R group fragment
"""
# n1{R5}c2nc3c{R1}c{R2}c{R3}c{R4}c3nc2c(=O)n{R7}c1=O
#
N_group = 6 #R1234 5 7 -> 0123 4 5
pdr = get_r_list( N_group, so3h, disp = disp, pdForm = True)
so_l = pdr['Rgroups'].tolist()
aso_l = []
mol_l = []
for so in so_l:
if so[4] != '' and so[5] != '':
aso = 'n1{4}c2nc3c{0}c{1}c{2}c{3}c3nc2c(=O)n{5}c1=O'.format(*so)
mol_l.append('n1{4}c2nc3c{0}c{1}c{2}c{3}c3nc2c(=O)n{5}c1=O')
elif so[4] == '' and so[5] == '':
aso = '[nH]1c2nc3c{0}c{1}c{2}c{3}c3nc2c(=O)[nH]c1=O'.format(*so[:4])
mol_l.append('[nH]1c2nc3c{0}c{1}c{2}c{3}c3nc2c(=O)[nH]c1=O')
elif so[4] == '':
aso = '[nH]1c2nc3c{0}c{1}c{2}c{3}c3nc2c(=O)n{4}c1=O'.format(so[0],so[1],so[2],so[3], so[5])
mol_l.append('[nH]1c2nc3c{0}c{1}c{2}c{3}c3nc2c(=O)n{4}c1=O')
else: #so[5] == '':
aso = 'n1{4}c2nc3c{0}c{1}c{2}c{3}c3nc2c(=O)[nH]c1=O'.format(*so[:5])
mol_l.append('n1{4}c2nc3c{0}c{1}c{2}c{3}c3nc2c(=O)[nH]c1=O')
aso_l.append( aso)
if disp: print(so, aso)
if graph: jchem.show_mol( aso)
pdr['SMILES'] = aso_l
pdr['BaseMol'] = [aso_l[0]] * len( aso_l)
pdr['BaseStr'] = mol_l
return pdr
def gen_r_attach_Flavins( mol = 'n1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O', so3h = '(S(O)(=O)=O)', disp = False, graph = False):
# jchem.show_mol( 'n1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O'.format( so, so, so, so, so, so))
return gen_r_attach( mol = mol, so3h = so3h, disp = disp, graph = graph)
def r_attach_Flavins( idx = [0,0,0,0,0,0], mol = 'n1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O', Rg = '(S(O)(=O)=O)'):
"""
It attachs a functional group in a specific position.
"""
idx_R = [ Rg if x == 1 else '' for x in idx]
return mol.format( *idx_R)
def r_2attach_Flavins( idx = [0,0,0,0,0,0], mol = 'n1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O', mol_nH = '[nH]1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3[nH]c2c(=O)[nH]{0}c1=O',
Rg = '(S(O)(=O)=O)'):
"""
    It attaches a functional group at a specific position.
"""
idx_R = [ Rg if x == 1 else '' for x in idx]
return mol.format( *idx_R), mol_nH.format( *idx_R)
# 'n1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O'
def gen_rl_attach_Flavins( mol = 'n1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O',
r_l = ['', '(S(O)(=O)=O)', '(O)'], disp = False, graph = False):
# jchem.show_mol( 'n1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O'.format( so, so, so, so, so, so))
return gen_rl_attach( mol = mol, r_l = r_l, disp = disp, graph = graph)
def gen_rl_2attach_Flavins( mol = 'n1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O', mol_nH = '[nH]1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3[nH]c2c(=O)[nH]{0}c1=O',
r_l = ['', '(S(O)(=O)=O)', '(O)'], disp = False, graph = False):
# jchem.show_mol( 'n1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O'.format( so, so, so, so, so, so))
return gen_rl_2attach( mol = mol, mol_nH = mol_nH, r_l = r_l, disp = disp, graph = graph)
def gen_rl_2attach_Alloxazine_R10( mol = 'O=C1N{0}C(=O)C2=NC3=C{1}C{2}=C{3}C{4}=C3N=C2N1{5}', mol_nH = 'O=C1N{5}C2NC3=C{4}C{3}=C{2}C{1}=C3NC2C(=O)N1{0}',
r_l = ['', '(S(O)(=O)=O)', '(O)'], disp = False, graph = False):
"""
The Alloxazine is used as a frame molecules.
"""
return gen_rl_2attach( mol = mol, mol_nH = mol_nH, r_l = r_l, disp = disp, graph = graph)
def gen_rl_2attach_Alloxazine( mol = 'O=C1N{1}C(=O)C2=NC3=C{2}C{3}=C{4}C{5}=C3N=C2N1{0}', mol_nH = 'O=C1N{0}C2NC3=C{5}C{4}=C{3}C{2}=C3NC2C(=O)N1{1}',
r_l = ['', '(S(O)(=O)=O)', '(O)'], disp = False, graph = False):
"""
    The Alloxazine is used as the frame molecule.
    R1 is the NH fragment, which was previously labelled R10.
"""
return gen_rl_2attach( mol = mol, mol_nH = mol_nH, r_l = r_l, disp = disp, graph = graph)
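def _alloxazine_enumeration_sketch():
    # A hedged usage sketch: enumerate hydroxyl substitution patterns on the
    # Alloxazine frame defined above. RDKit must be installed, since the
    # generator canonicalizes both the oxidized and the reduced SMILES.
    pdr = gen_rl_2attach_Alloxazine(r_l=['', '(O)'])
    print(len(pdr))                               # 2**6 = 64 substitution patterns
    print(pdr[['NoOfR', 'SMILES', 'R-SMILES']].head())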
def get_R_group_dict():
R_group_dict = OrderedDict( [
('no group', '[H]'),
('amine', 'N'),
('hydroxyl', 'O'),
('methyl', 'C'),
('fluoro', 'F'),
('phosphonic acid', 'P(O)(O)(=O)'),
('sulfonic acid', 'S(=O)(=O)(O)'),
('carboxylic acid', 'C(O)(=O)'),
('nitro', '[N+]([O-])(=O)')])
return R_group_dict
def gen_rl_2attach_ThiophenoquinonesA(
mol_ox = "{0}C1=C({1})C2=C(S1)C(=O)C({3})=C({2})C2=O",
mol_rd = "OC1=C({3})C({2})=C(O)C2=C1SC({0})=C2{1}",
r_l = None,
disp = False, graph = False):
# jchem.show_mol( 'n1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O'.format( so, so, so, so, so, so))
if r_l is None:
"""
If Rgroup list is not specified, a default list is used.
"""
r_l = list(get_R_group_dict().values())
pdw = gen_rl_2attach( mol = mol_ox, mol_nH = mol_rd, r_l = r_l, disp = disp, graph = graph)
# generate_sfp : position * no of r_group
Index = pdw.Index.values
N_positions = len( re.findall( '{[0-9]*}', mol_ox)) # find number of R group positions
N_rgroups = len( r_l)
sfp_list = list()
for idx in Index:
sfp = list()
for np in idx:
sfp_each = [0] * N_rgroups
sfp_each[ np] = 1
sfp.extend( sfp_each)
sfp_list.append( sfp)
pdw['SimpleFingerprint'] = sfp_list
return pdw
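def _simple_fingerprint_sketch():
    # A hedged sketch of the one-hot 'SimpleFingerprint' built above: every
    # attachment position contributes a block of len(r_l) bits with a single 1
    # marking the chosen R-group. The index tuple below is illustrative.
    r_l = list(get_R_group_dict().values())       # 9 candidate groups
    idx = (0, 2, 5, 0)                            # chosen group per position
    sfp = []
    for group_index in idx:
        block = [0] * len(r_l)
        block[group_index] = 1
        sfp.extend(block)
    print(len(sfp), sfp)                          # 4 positions * 9 groups = 36 bits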
def gen_2attach_Alloxazine( fname = 'sheet/Alloxazine2_64.csv', rg_l = ['(O)', '(S(O)(=O)=O)', '(OC)', '(C)']):
pdr_l = []
for rg in rg_l:
        pdr = gen_rl_2attach_Alloxazine( r_l = ['', rg])
fname_each = fname[:-4] + '{}'.format( rg) + '.csv'
print('The results are saved to', fname)
pdr.to_csv( fname_each, index = False)
pdr_l.append( pdr)
pdr_all = pd.concat( pdr_l, ignore_index = True)
pdr_all['ID'] = list(range(1, pdr_all.shape[0] + 1))
pdr_all.to_csv( fname, index = False)
return pdr_all
def show_Alloxazine():
print('Original Alloxazine')
jchem.show_mol( 'O=C1NC(=O)C2=NC3=CC=CC=C3N=C2N1')
jchem.show_mol( 'O=C1N{0}C(=O)C2=NC3=C{1}C{2}=C{3}C{4}=C3N=C2N1{5}'.format( '(O)','(O)','(O)','(O)','(O)','(O)'))
print('Hydro Alloxazine')
jchem.show_mol( 'O=C1NC2NC3=CC=CC=C3NC2C(=O)N1')
jchem.show_mol( 'O=C1N{5}C2NC3=C{4}C{3}=C{2}C{1}=C3NC2C(=O)N1{0}'.format( '(O)','(O)','(O)','(O)','(O)','(O)'))
# [nH]1c2[nH]c3ccccc3[nH]c2c(=O)[nH]c1=O
def gen_r_attach_Flavins_nH( mol = '[nH]1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3[nH]c2c(=O)[nH]{0}c1=O', so3h = '(S(O)(=O)=O)', disp = False, graph = False):
# jchem.show_mol( 'n1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O'.format( so, so, so, so, so, so))
return gen_r_attach( mol = mol, so3h = so3h, disp = disp, graph = graph)
def gen_r_attach_Alloxazine( mol = '[nH]1{5}c2nc3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O', so3h = '(S(O)(=O)=O)', disp = False, graph = False):
# '[nH]1{5}c2nc3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O'
return gen_r_attach( mol = mol, so3h = so3h, disp = disp, graph = graph)
def gen_r_attach_lowpot_Flavins( disp = False, graph = False):
oh = '(O)'
h = ''
oc = '(OC)'
rl = []
rl.append(([h,oh, oh, oh, h, h], -0.47))
rl.append(([oh, oh, h,h,h,h], -0.47))
rl.append(([oh, oh, oh, oh, oh, h], -0.47))
rl.append(([oh, oh, oh, oh, h, h], -0.51))
rl.append(([h, oh, h, oh, h, h], -0.50))
rl.append(([h, oh, h, h, h, h], -0.45))
rl.append(([oh, oh, h, oh, oh, h], -0.50))
rl.append(([h, oh, h, oh, oh, h], -0.46))
rl.append(([oh, oh, h, oh, h, h], -0.53))
rl.append(([h, oc, oc, oc, h, h], -0.48))
rl.append(([oc, oc, oc, oc, h, h], -0.48))
rl.append(([oc, oc, h, oc, h, h], -0.47))
rl.append(([h, oc, h, oc, oc, h], -0.46))
rl.append(([oc, oc, h, oc, oc, h], -0.50))
BaseStr = 'n1c2[nH]{5}c3c{4}c{3}c{2}c{1}c3nc2c(=O)[nH]{0}c1=O'
N_group = len( re.findall( '{[0-9]*}', BaseStr))
emptyR = [''] * N_group
BaseMol = BaseStr.format( *emptyR)
smiles_l = [ BaseStr.format(*r[0]) for r in rl]
pdr = pd.DataFrame()
pdr['ID'] = list(range( 1, len( smiles_l) + 1))
R_group_l = []
Index_l = []
NoOfR_l = []
for r in rl:
# Whether it is oh or oc family is determined
r_oh_test = [ x == oh for x in r[0]]
print(r[0], '-->', r_oh_test, '-->', any(r_oh_test))
if any(r_oh_test):
r_type = oh
else:
r_type = oc
R_group_l.append( r_type)
r_groups = [ 0 if x == '' else 1 for x in r[0]]
Index_l.append( r_groups)
NoOfR_l.append( np.sum( r_groups))
pdr['Rgroup'] = R_group_l # This is newly included.
pdr['NoOfR'] = NoOfR_l
pdr['Index'] = Index_l
pdr['Rgroups'] = [ r[0] for r in rl]
pdr['SMILES'] = smiles_l
pdr['BaseMol'] = [BaseMol] * len(rl)
pdr['BaseStr'] = [BaseStr] * len(rl)
pdr['RedoxPotential'] = [ r[1] for r in rl]
for ix, s in enumerate( smiles_l):
if disp: print(ix+1, s)
if graph:
jchem.show_mol( s)
return pdr
def aq1x( x = '(S(=O)(=O)O)', disp = False):
"""
It generates new quinone molecules with mono functional group attachment.
    For anthraquinone (AQ), only two attachment positions are unique, and
    attachment at any other position generates a duplicated result.
"""
MaxAttach = 2
cs_l = []
en_mol = [''] * MaxAttach
for ix in range( MaxAttach):
en_mol[ix] = x
s = 'O=C1c2c{0}c{1}ccc2C(=O)c2ccccc21'.format( *en_mol)
cs_l.append( jchem.csmiles( s))
en_mol[ix] = ''
if disp:
print(ix)
jchem.show_mol( s)
return cs_l
def bq1x( x = '(S(=O)(=O)O)', disp = False):
"""
It generates new quinone molecules with mono functional group attachment.
    For 1,4-benzoquinone (BQ), only one attachment position is unique, and
    attachment at any other position generates a duplicated result.
"""
MaxAttach = 1
cs_l = []
en_mol = [''] * MaxAttach
for ix in range( MaxAttach):
en_mol[ix] = x
s = 'C1=CC(=O)C{0}=CC1=O'.format( *en_mol)
cs_l.append( jchem.csmiles( s))
en_mol[ix] = ''
if disp:
print(ix)
jchem.show_mol( s)
return cs_l
def mol1x( mol = 'C1=CC(=O)C{0}=CC1=O', MaxAttach = 1, x = '(S(=O)(=O)O)', disp = False):
"""
It generates new quinone molecules with mono functional group attachment.
    For the given base molecule, only the MaxAttach attachment positions are unique, and
    attachment at any other position generates a duplicated result.
"""
#MaxAttach = 1
cs_l = []
en_mol = [''] * MaxAttach
for ix in range( MaxAttach):
en_mol[ix] = x
s = mol.format( *en_mol)
cs_l.append( jchem.csmiles( s))
en_mol[ix] = ''
if disp:
print(ix)
jchem.show_mol( s)
return cs_l
def plot_coef( coef, intercept, shape = (9, 4) , loc = 2, R_g = None):
print(coef[0].shape)
W = np.reshape( coef[0], shape)
print(W.shape)
print("I. X-axis follows functional groups")
plt.figure()
for ii in range( shape[1]):
plt.plot( W[:,ii], 'o', label = "R{}".format( ii+1))
plt.plot( [0, shape[0]-1], [intercept, intercept])
plt.xlabel( "Functional groups")
plt.ylabel( "Property")
plt.legend( loc = loc)
plt.show()
print(R_g)
print("II. X-axis follows attached positions")
plt.figure()
if R_g:
assert (len(R_g) == shape[0]), 'len(R_g) is not equal to coef.shape[0]'
for ii in range( shape[0]):
color = float(ii) / shape[0]
plt.plot( list(range( shape[1])), W[ii, :], 'o', c = (1,color,1,1), ms = 10, label = R_g[ii])
#plt.plot( x, y , '.', c = (1,0.1,1,1))
else:
for ii in range( shape[0]):
plt.plot( W[ii, :], 'o', label = "F#{}".format( ii+1))
plt.plot( [0, shape[1]-1], [intercept, intercept])
plt.xlabel( "Attached positions")
plt.ylabel( "Property")
plt.legend( loc = loc)
plt.show()
def anal_group_position_ridge( xM, yV, alpha = 0.5, shape = (9,4), loc = 1, R_g = None):
"""
Show the relationship between groups & positions vs property
"""
print("1. Testing regression with the given alpha using Ridge regression.")
jutil.mlr_val_vseq_ridge_rand( xM, yV, alpha = alpha, rate = 5)
print("2. Obtained regression coefficient with the same alpha.")
coef, intercept = jutil.mlr3_coef_ridge( xM, yV, alpha = alpha)
print("3. Showing the analyzed results.")
plot_coef( coef, intercept, shape = shape, loc = loc, R_g = R_g)
return coef, intercept
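def _ridge_coef_sketch():
    # A hedged sketch of the estimator underlying the jutil ridge helpers used
    # above: the closed-form solution w = (X'X + alpha*I)^-1 X'y on toy data.
    # This illustrates the idea only; it is not the jutil implementation.
    rng = np.random.RandomState(0)
    X = rng.randn(50, 36)                 # e.g. 4 positions x 9 groups = 36 features
    w_true = rng.randn(36, 1)
    y = X.dot(w_true) + 0.1 * rng.randn(50, 1)
    alpha = 0.5
    return np.linalg.solve(X.T.dot(X) + alpha * np.eye(36), X.T.dot(y))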
def pd_anal_ridge( pdr, y_id, alpha = 0.5, shape = (9,4), loc = 1, x_id = "SimpleFingerprint", R_g = None):
"""
    Analyzes the property with respect to the functional groups and their positions.
"""
x = pdr[x_id].values
xM = np.mat( list(map( eval, x)))
yV = np.mat( pdr[y_id].values).T
if R_g is None and shape[0] == 9:
R_g = list(get_R_group_dict().keys())
print(R_g)
return anal_group_position_ridge( xM, yV, alpha = alpha, shape = shape, loc = loc, R_g = R_g)
def gen_pd_from_coef( coef0, Rgc):
df = pd.DataFrame()
r_l, p_l, c_l = [], [], []
ci = 0
for pi in range( 4):
for ri in range( 9):
r_l.append( Rgc[ri])
p_l.append( pi + 1)
c_l.append( coef0[ci])
ci += 1
df['Position'] = p_l
df['R-group'] = r_l
df['Coefficient'] = c_l
return df
| mit |
stackArmor/security_monkey | security_monkey/views/guard_duty_event.py | 1 | 18262 |
"""
.. module: security_monkey.views.GuardDutyEventMapPointsList
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Pritam D. Gautam <[email protected]> @nuagedm
"""
import datetime
from flask import jsonify, request
from security_monkey import db, rbac
from security_monkey.views import AuthenticatedService
from security_monkey.datastore import (
GuardDutyEvent,
Item,
ItemAudit,
Account,
AccountType,
Technology,
AuditorSettings,
Datastore,
ItemRevision)
# Severity Levels for GuardDuty Findings
# https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html#guardduty_findings-severity
def sev_name(val):
if 0.1 <= val <= 3.9:
return 'Low'
if 4.0 <= val <= 6.9:
return 'Medium'
if 7.0 <= val <= 8.9:
return 'High'
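def _sev_name_examples():
    # A hedged sanity sketch for the mapping above; the sample scores are arbitrary.
    assert sev_name(2.0) == 'Low'
    assert sev_name(5.5) == 'Medium'
    assert sev_name(8.0) == 'High'
    # Scores outside the documented 0.1 - 8.9 bands fall through and return None.
    assert sev_name(9.5) is None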
# Returns a list of Map Circle Marker Points List
class GuardDutyEventMapPointsList(AuthenticatedService):
decorators = [rbac.allow(['View'], ["GET"])]
def get(self):
"""
.. http:get:: /api/1/worldmapguarddutydata
Get a list of World Map Data points matching the given criteria.
**Example Request**:
.. sourcecode:: http
GET /api/1/worldmapguarddutydata HTTP/1.1
Host: example.com
Accept: application/json
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"auth": {
"authenticated": true,
"roles": [
{
"name": "Admin"
},
{
"name": "Justify"
},
{
"name": "Comment"
},
{
"name": "View"
}
],
"user": "[email protected]"
},
"items": [
{
"cityName": "Mar del Plata",
"count": 1,
"countryName": "Argentina",
"lat": -38.0,
"localPort": 22.0,
"localPortName": "SSH",
"lon": -57.55,
"remoteIpV4": "186.62.51.117",
"remoteOrg": "Telefonica de Argentina",
"remoteOrgASN": 22927.0,
"remoteOrgASNOrg": "Telefonica de Argentina",
"remoteOrgISP": "Telefonica de Argentina"
}
],
"page": 1,
"total": 197
}
:statuscode 200: no error
:statuscode 401: Authentication Error. Please Login.
"""
# Reference query as provided by Rick
# select
# g.item_id,
# g.config -> 'detail' -> 'service' -> 'action' -> 'portProbeAction' -> 'portProbeDetails' as "guarddutyjson"
# from item i
# inner join itemaudit ia on i.id = ia.item_id
# inner join guarddutyevent g ON i.id = g.item_id
# where coalesce(justified, FALSE) = FALSE
# and coalesce(fixed, FALSE) = FALSE
# and g.config -> 'detail' -> 'service' -> 'action' -> 'portProbeAction' -> 'portProbeDetails' is not NULL;
# """
self.reqparse.add_argument('accounts', type=str, default=None, location='args')
args = self.reqparse.parse_args()
for k, v in args.items():
if not v:
del args[k]
# @pritam: 25 July, 2018
# With implementation of GuardDuty Data Injection using Custom Watcher, changing the source of GuardDutyEvent
# data for this query to ItemRevision Table
# inner join itemrevision g ON i.id = g.item_id
# select g.item_id,
# g.config -> 'Service' -> 'Action' -> 'PortProbeAction' -> 'PortProbeDetails' as "guarddutyjson"
# from item i
# inner join itemaudit ia on i.id = ia.item_id
# inner join itemrevision g ON i.latest_revision_id = g.id
# where coalesce(justified, FALSE) = FALSE
# and coalesce(fixed, FALSE) = FALSE
# and g.config -> 'Service' -> 'Action' -> 'PortProbeAction' -> 'PortProbeDetails' is not NULL;
# Adding following additonal output data fields for display details modal popup of Map
# g.config -> 'Description' as "description",
# g.config -> 'Severity' as "severity",
# g.config -> 'Region' as "region",
# g.config -> 'Service' -> 'Count' as "count",
# g.config -> 'AccountId' as "accountid"
# Read more about filtering:
# https://docs.sqlalchemy.org/en/latest/orm/query.html
from sqlalchemy.sql.functions import coalesce
query = ItemRevision.query.with_entities(
ItemRevision.item_id,
ItemRevision.config[('Service', 'Action', 'PortProbeAction','PortProbeDetails')].label('portprobedetails'),
ItemRevision.config[('Description')].label('description'),
ItemRevision.config[('Severity')].label('severity'),
ItemRevision.config[('Region')].label('region'),
ItemRevision.config[('Service')].label('service'),
ItemRevision.config[('Resource')].label('resource'),
ItemRevision.config[('AccountId')].label('accountid'),
) \
.join((Item, Item.latest_revision_id == ItemRevision.id), (ItemAudit, Item.id == ItemAudit.item_id)) \
.filter((coalesce(ItemAudit.justified, False) == False), (coalesce(ItemAudit.fixed, False) == False),
(ItemRevision.config[
('Service', 'Action', 'PortProbeAction', 'PortProbeDetails')] != None))
if 'accounts' in args:
accounts = args['accounts'].split(',')
query = query.join((Account, Account.id == Item.account_id))
query = query.filter(Account.name.in_(accounts))
records = query.all()
items = []
def flatten_structure( rec):
result = dict(rec.__dict__)
if result.has_key('service'):
result.pop('service')
if result.has_key('resource'):
result.pop('resource')
if result.has_key('portprobedetails'):
result.pop('portprobedetails')
result.update(flatten_json(rec.portprobedetails[0]))
result['probe_count'] = rec.service['Count']
result['first_seen'] = rec.service['EventFirstSeen']
result['last_seen'] = rec.service['EventLastSeen']
result['resource_type'] = rec.resource['ResourceType']
result['instance_id'] = rec.resource['InstanceDetails']['InstanceId']
instance_tag_name = [k['Value'] for k in rec.resource['InstanceDetails']['Tags'] if k['Key']=='Name' ]
if instance_tag_name:
result['instance_name'] = instance_tag_name[0]
else:
result['instance_name'] = 'NA'
if result.has_key('_labels'):
result.pop('_labels')
# Convert Severity from float to Text
result['severity'] = sev_name(result['severity'])
return result
if len(records) > 0:
import pandas as pd
from ..flatten import flatten_json
flatten_records = (flatten_structure(record) for record in records)
fulldata_dataFrame = pd.DataFrame(flatten_records).rename(
columns={'RemoteIpDetails_GeoLocation_Lat': 'lat',
'RemoteIpDetails_GeoLocation_Lon': 'lon',
'LocalPortDetails_Port': 'localPort',
'LocalPortDetails_portName': 'localPortName',
'RemoteIpDetails_City_CityName': 'cityName',
'RemoteIpDetails_Country_CountryName': 'countryName',
'RemoteIpDetails_IpAddressV4': 'remoteIpV4',
'RemoteIpDetails_Organization_Asn': 'remoteOrgASN',
'RemoteIpDetails_Organization_AsnOrg': 'remoteOrgASNOrg',
'RemoteIpDetails_Organization_Isp': 'remoteOrgISP',
'RemoteIpDetails_Organization_Org': 'remoteOrg',
'counts': 'count'})
            # drop_duplicates intentionally omitted: each probe record may carry different info for the details popup
mapdata_dataframe = fulldata_dataFrame.groupby(['lat', 'lon']).size().reset_index(name='count').merge(
fulldata_dataFrame, on=['lat', 'lon'], how='left')
items = mapdata_dataframe.to_dict('records')
marshaled_dict = {
'page': 1,
'total': len(items),
'auth': self.auth_dict,
'items': items
}
return marshaled_dict, 200
# Returns a list of Top 10 Countries by number of probe events received to display in Bar Chart
class GuardDutyEventTop10Countries(AuthenticatedService):
decorators = [rbac.allow(['View'], ["GET"])]
def get(self):
"""
.. http:get:: /api/1/top10countryguarddutydata
Get a list of Top 10 Countries by number of probe events received to display in Bar Chart
**Example Request**:
.. sourcecode:: http
GET /api/1/worldmapguarddutydata HTTP/1.1
Host: example.com
Accept: application/json
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"auth": {
"authenticated": true,
"roles": [
{
"name": "Admin"
},
{
"name": "Justify"
},
{
"name": "Comment"
},
{
"name": "View"
}
],
"user": "[email protected]"
},
"items": [
{
"count": 1527,
"countryName": "China"
},
{
"count": 456,
"countryName": "United States"
},
{
"count": 116,
"countryName": "Russia"
},
],
"page": 1,
"total": 197
}
:statuscode 200: no error
:statuscode 401: Authentication Error. Please Login.
"""
self.reqparse.add_argument('accounts', type=str, default=None, location='args')
args = self.reqparse.parse_args()
for k, v in args.items():
if not v:
del args[k]
# Reference query as provided by Rick
# select
# g.item_id,
# g.config -> 'detail' -> 'service' -> 'action' -> 'portProbeAction' -> 'portProbeDetails' as "guarddutyjson"
# from item i
# inner join itemaudit ia on i.id = ia.item_id
# inner join guarddutyevent g ON i.id = g.item_id
# where coalesce(justified, FALSE) = FALSE
# and coalesce(fixed, FALSE) = FALSE
# and g.config -> 'detail' -> 'service' -> 'action' -> 'portProbeAction' -> 'portProbeDetails' is not NULL;
# """
# @pritam: 25 July, 2018
# With implementation of GuardDuty Data Injection using Custom Watcher, changing the source of GuardDutyEvent
# data for this query to ItemRevision Table
# select g.item_id,
# g.config -> 'Service' -> 'Action' -> 'PortProbeAction' -> 'PortProbeDetails' as "guarddutyjson"
# from item i
# inner join itemaudit ia on i.id = ia.item_id
# inner join itemrevision g ON i.latest_revision_id = g.id
# where coalesce(justified, FALSE) = FALSE
# and coalesce(fixed, FALSE) = FALSE
# and g.config -> 'Service' -> 'Action' -> 'PortProbeAction' -> 'PortProbeDetails' is not NULL;
# Read more about filtering:
# https://docs.sqlalchemy.org/en/latest/orm/query.html
from sqlalchemy.sql.functions import coalesce
query = ItemRevision.query.with_entities(
ItemRevision.item_id, ItemRevision.config[('Service', 'Action', 'PortProbeAction',
'PortProbeDetails')]) \
.join((Item, Item.latest_revision_id == ItemRevision.id), (ItemAudit, Item.id == ItemAudit.item_id)) \
.filter((coalesce(ItemAudit.justified, False) == False), (coalesce(ItemAudit.fixed, False) == False),
(ItemRevision.config[
('Service', 'Action', 'PortProbeAction', 'PortProbeDetails')] != None))
if 'accounts' in args:
accounts = args['accounts'].split(',')
query = query.join((Account, Account.id == Item.account_id))
query = query.filter(Account.name.in_(accounts))
records = query.all()
items = []
if len(records) > 0:
import pandas as pd
from ..flatten import flatten_json
flatten_records = (flatten_json(record[1][0]) for record in records)
fulldata_dataFrame = pd.DataFrame(flatten_records).rename(
columns={'RemoteIpDetails_GeoLocation_Lat': 'lat',
'RemoteIpDetails_GeoLocation_Lon': 'lon',
'LocalPortDetails_Port': 'localPort',
'LocalPortDetails_portName': 'localPortName',
'RemoteIpDetails_City_CityName': 'cityName',
'RemoteIpDetails_Country_CountryName': 'countryName',
'RemoteIpDetails_IpAddressV4': 'remoteIpV4',
'RemoteIpDetails_Organization_Asn': 'remoteOrgASN',
'RemoteIpDetails_Organization_AsnOrg': 'remoteOrgASNOrg',
'RemoteIpDetails_Organization_Isp': 'remoteOrgISP',
'RemoteIpDetails_Organization_Org': 'remoteOrg',
'counts': 'count'})
# Sorting and Limiting the resultset to 10
items = fulldata_dataFrame.groupby(['countryName']).size().reset_index(
name='count').sort_values(['count'], ascending=False).head(10).to_dict('records')
marshaled_dict = {
'page': 1,
'total': len(items),
'auth': self.auth_dict,
'items': items
}
return marshaled_dict, 200
class GuardDutyEventService(AuthenticatedService):
decorators = [
rbac.allow(["Admin"], ["POST"])
]
def post(self):
datastore = Datastore()
config = request.get_json(force=True)
#action_type = config['detail']['service']['action']['actionType']
action_type = 'guardduty'
gd_tech = Technology.query.filter(Technology.name == action_type).first()
if not gd_tech:
gd_tech = Technology(name=action_type)
db.session.add(gd_tech)
db.session.commit()
db.session.refresh(gd_tech)
identifier = config['detail']['accountId']
account = Account.query.filter(Account.identifier == identifier).first()
if not account:
raise Exception(
"Account with identifier [{}] not found.".format(identifier)
)
item = datastore.store(
gd_tech.name,
config['region'],
account.name,
config['detail']['type'],
True,
config
)
auditor_settings = AuditorSettings.query.filter(
AuditorSettings.auditor_class=='GuardDuty',
AuditorSettings.tech_id==gd_tech.id,
AuditorSettings.account_id==account.id
).first()
if not auditor_settings:
auditor_settings = AuditorSettings(
disabled=False,
issue_text='Guard Duty',
auditor_class='GuardDuty',
tech_id=gd_tech.id,
account_id=account.id
)
db.session.add(auditor_settings)
db.session.commit()
db.session.refresh(auditor_settings)
issue = ItemAudit(
score=int(config['detail']['severity']),
issue=config['detail']['title'],
notes=config['detail']['description'],
item_id=item.id,
auditor_setting_id=auditor_settings.id,
)
db.session.add(issue)
db.session.commit()
db.session.refresh(issue)
gd_event = GuardDutyEvent(
item_id=item.id,
config=config,
date_created=datetime.datetime.utcnow()
)
db.session.add(gd_event)
db.session.commit()
db.session.refresh(gd_event)
return {
'id': gd_event.id,
'config': gd_event.config,
}, 201
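# Illustrative sketch (not part of the original module): one way an external
# forwarder (for example, a Lambda subscribed to GuardDuty findings) might deliver
# an event to GuardDutyEventService.post() above. The URL path and cookie name are
# placeholders, not the project's actual routing or auth; the third-party
# `requests` package is assumed to be available.
def _example_forward_finding(base_url, session_cookie, finding):
    import requests
    # `finding` must carry the fields post() reads: finding['region'] and
    # finding['detail'] with 'accountId', 'type', 'severity', 'title' and
    # 'description'.
    resp = requests.post(base_url + '/guarddutyevent',  # placeholder route
                         json=finding,
                         cookies={'session': session_cookie})  # placeholder auth
    resp.raise_for_status()
    return resp.json()  # expected shape: {'id': ..., 'config': ...}, HTTP 201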
| apache-2.0 |
subutai/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/__init__.py | 69 | 2179 | from geo import AitoffAxes, HammerAxes, LambertAxes
from polar import PolarAxes
from matplotlib import axes
class ProjectionRegistry(object):
"""
Manages the set of projections available to the system.
"""
def __init__(self):
self._all_projection_types = {}
def register(self, *projections):
"""
Register a new set of projection(s).
"""
for projection in projections:
name = projection.name
self._all_projection_types[name] = projection
def get_projection_class(self, name):
"""
Get a projection class from its *name*.
"""
return self._all_projection_types[name]
def get_projection_names(self):
"""
Get a list of the names of all projections currently
registered.
"""
names = self._all_projection_types.keys()
names.sort()
return names
projection_registry = ProjectionRegistry()
projection_registry.register(
axes.Axes,
PolarAxes,
AitoffAxes,
HammerAxes,
LambertAxes)
def register_projection(cls):
projection_registry.register(cls)
def get_projection_class(projection=None):
"""
Get a projection class from its name.
If *projection* is None, a standard rectilinear projection is
returned.
"""
if projection is None:
projection = 'rectilinear'
try:
return projection_registry.get_projection_class(projection)
except KeyError:
raise ValueError("Unknown projection '%s'" % projection)
def projection_factory(projection, figure, rect, **kwargs):
"""
Get a new projection instance.
*projection* is a projection name.
*figure* is a figure to add the axes to.
*rect* is a :class:`~matplotlib.transforms.Bbox` object specifying
the location of the axes within the figure.
Any other kwargs are passed along to the specific projection
constructor being used.
"""
return get_projection_class(projection)(figure, rect, **kwargs)
def get_projection_names():
"""
Get a list of acceptable projection names.
"""
return projection_registry.get_projection_names()
| agpl-3.0 |
markneville/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/texmanager.py | 69 | 16818 | """
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \*Agg backends: dvipng
* PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51
(older versions do not work properly)
Backends:
* \*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = '\\TeX\\ is Number $\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!'
    Z = texmanager.get_rgba(s, size=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file (http://matplotlib.sf.net/matplotlibrc)
or include these two lines in your script::
from matplotlib import rc
rc('text', usetex=True)
"""
import copy, glob, os, shutil, sys, warnings
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
DEBUG = False
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
def dvipng_hack_alpha():
stdin, stdout = os.popen4('dvipng -version')
for line in stdout:
if line.startswith('dvipng '):
version = line.split()[-1]
mpl.verbose.report('Found dvipng version %s'% version,
'helpful')
version = distutils.version.LooseVersion(version)
return version < distutils.version.LooseVersion('1.6')
raise RuntimeError('Could not obtain dvipng version')
class TexManager:
"""
Convert strings to dvi files using TeX, caching the results to a
working dir
"""
oldpath = mpl.get_home()
if oldpath is None: oldpath = mpl.get_data_path()
oldcache = os.path.join(oldpath, '.tex.cache')
configdir = mpl.get_configdir()
texcache = os.path.join(configdir, 'tex.cache')
if os.path.exists(oldcache):
print >> sys.stderr, """\
WARNING: found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s"."""%(oldcache, texcache)
shutil.move(oldcache, texcache)
if not os.path.exists(texcache):
os.mkdir(texcache)
_dvipng_hack_alpha = dvipng_hack_alpha()
    # caches of rendered results, keyed by tex string and font configuration
rgba_arrayd = {}
grey_arrayd = {}
postscriptd = {}
pscnt = 0
serif = ('cmr', '')
sans_serif = ('cmss', '')
monospace = ('cmtt', '')
cursive = ('pzc', r'\usepackage{chancery}')
font_family = 'serif'
font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
font_info = {'new century schoolbook': ('pnc',
r'\renewcommand{\rmdefault}{pnc}'),
'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
'times': ('ptm', r'\usepackage{mathptmx}'),
'palatino': ('ppl', r'\usepackage{mathpazo}'),
'zapf chancery': ('pzc', r'\usepackage{chancery}'),
'cursive': ('pzc', r'\usepackage{chancery}'),
'charter': ('pch', r'\usepackage{charter}'),
'serif': ('cmr', ''),
'sans-serif': ('cmss', ''),
'helvetica': ('phv', r'\usepackage{helvet}'),
'avant garde': ('pag', r'\usepackage{avant}'),
'courier': ('pcr', r'\usepackage{courier}'),
'monospace': ('cmtt', ''),
'computer modern roman': ('cmr', ''),
'computer modern sans serif': ('cmss', ''),
'computer modern typewriter': ('cmtt', '')}
_rc_cache = None
_rc_cache_keys = ('text.latex.preamble', )\
+ tuple(['font.'+n for n in ('family', ) + font_families])
def __init__(self):
if not os.path.isdir(self.texcache):
os.mkdir(self.texcache)
ff = rcParams['font.family'].lower()
if ff in self.font_families:
self.font_family = ff
else:
mpl.verbose.report('The %s font family is not compatible with LaTeX. serif will be used by default.' % ff, 'helpful')
self.font_family = 'serif'
fontconfig = [self.font_family]
for font_family, font_family_attr in \
[(ff, ff.replace('-', '_')) for ff in self.font_families]:
for font in rcParams['font.'+font_family]:
if font.lower() in self.font_info:
found_font = self.font_info[font.lower()]
setattr(self, font_family_attr,
self.font_info[font.lower()])
if DEBUG:
print 'family: %s, font: %s, info: %s'%(font_family,
font, self.font_info[font.lower()])
break
else:
                    if DEBUG: print '%s font is not compatible with usetex' % font
else:
mpl.verbose.report('No LaTeX-compatible font found for the %s font family in rcParams. Using default.' % ff, 'helpful')
setattr(self, font_family_attr, self.font_info[font_family])
fontconfig.append(getattr(self, font_family_attr)[0])
self._fontconfig = ''.join(fontconfig)
# The following packages and commands need to be included in the latex
# file's preamble:
cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
if self.font_family == 'cursive': cmd.append(self.cursive[1])
while r'\usepackage{type1cm}' in cmd:
cmd.remove(r'\usepackage{type1cm}')
cmd = '\n'.join(cmd)
self._font_preamble = '\n'.join([r'\usepackage{type1cm}', cmd,
r'\usepackage{textcomp}'])
def get_basefile(self, tex, fontsize, dpi=None):
"""
returns a filename based on a hash of the string, fontsize, and dpi
"""
s = ''.join([tex, self.get_font_config(), '%f'%fontsize,
self.get_custom_preamble(), str(dpi or '')])
# make sure hash is consistent for all strings, regardless of encoding:
bytes = unicode(s).encode('utf-8')
return os.path.join(self.texcache, md5(bytes).hexdigest())
def get_font_config(self):
"""Reinitializes self if relevant rcParams on have changed."""
if self._rc_cache is None:
self._rc_cache = dict([(k,None) for k in self._rc_cache_keys])
changed = [par for par in self._rc_cache_keys if rcParams[par] != \
self._rc_cache[par]]
if changed:
if DEBUG: print 'DEBUG following keys changed:', changed
for k in changed:
if DEBUG:
print 'DEBUG %-20s: %-10s -> %-10s' % \
(k, self._rc_cache[k], rcParams[k])
# deepcopy may not be necessary, but feels more future-proof
self._rc_cache[k] = copy.deepcopy(rcParams[k])
if DEBUG: print 'DEBUG RE-INIT\nold fontconfig:', self._fontconfig
self.__init__()
if DEBUG: print 'DEBUG fontconfig:', self._fontconfig
return self._fontconfig
def get_font_preamble(self):
"""
returns a string containing font configuration for the tex preamble
"""
return self._font_preamble
def get_custom_preamble(self):
"""returns a string containing user additions to the tex preamble"""
return '\n'.join(rcParams['text.latex.preamble'])
def _get_shell_cmd(self, *args):
"""
On windows, changing directories can be complicated by the presence of
multiple drives. get_shell_cmd deals with this issue.
"""
if sys.platform == 'win32':
command = ['%s'% os.path.splitdrive(self.texcache)[0]]
else:
command = []
command.extend(args)
return ' && '.join(command)
def make_tex(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific font size
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex'%basefile
fh = file(texfile, 'w')
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[papersize={72in,72in}, body={70in,70in}, margin={1in,1in}]{geometry}
\pagestyle{empty}
\begin{document}
\fontsize{%f}{%f}%s
\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize*1.25, tex)
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s)
except UnicodeEncodeError, err:
mpl.verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
fh.close()
return texfile
def make_dvi(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi'% basefile
if DEBUG or not os.path.exists(dvifile):
texfile = self.make_tex(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'latex -interaction=nonstopmode %s > "%s"'\
%(os.path.split(texfile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No latex error report available.'
if exit_status:
raise RuntimeError(('LaTeX was not able to process the following \
string:\n%s\nHere is the full report generated by LaTeX: \n\n'% repr(tex)) + report)
else: mpl.verbose.report(report, 'debug')
for fname in glob.glob(basefile+'*'):
if fname.endswith('dvi'): pass
elif fname.endswith('tex'): pass
else:
try: os.remove(fname)
except OSError: pass
return dvifile
def make_png(self, tex, fontsize, dpi):
"""
generates a png file containing latex's rendering of tex string
returns the filename
"""
basefile = self.get_basefile(tex, fontsize, dpi)
pngfile = '%s.png'% basefile
# see get_rgba for a discussion of the background
if DEBUG or not os.path.exists(pngfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"' % self.texcache,
'dvipng -bg Transparent -D %s -T tight -o \
"%s" "%s" > "%s"'%(dpi, os.path.split(pngfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No dvipng error report available.'
if exit_status:
                raise RuntimeError('dvipng was not able to \
process the following file:\n%s\nHere is the full report generated by dvipng: \
\n\n' % dvifile + report)
else: mpl.verbose.report(report, 'debug')
try: os.remove(outfile)
except OSError: pass
return pngfile
def make_ps(self, tex, fontsize):
"""
generates a postscript file containing latex's rendering of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
psfile = '%s.epsf'% basefile
if DEBUG or not os.path.exists(psfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'dvips -q -E -o "%s" "%s" > "%s"'\
%(os.path.split(psfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
            raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n' % dvifile + fh.read())
else: mpl.verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
return psfile
def get_ps_bbox(self, tex, fontsize):
"""
returns a list containing the postscript bounding box for latex's
rendering of the tex string
"""
psfile = self.make_ps(tex, fontsize)
ps = file(psfile)
for line in ps:
if line.startswith('%%BoundingBox:'):
return [int(val) for val in line.split()[1:]]
raise RuntimeError('Could not parse %s'%psfile)
def get_grey(self, tex, fontsize=None, dpi=None):
"""returns the alpha channel"""
key = tex, self.get_font_config(), fontsize, dpi
alpha = self.grey_arrayd.get(key)
if alpha is None:
pngfile = self.make_png(tex, fontsize, dpi)
X = read_png(os.path.join(self.texcache, pngfile))
if rcParams['text.dvipnghack'] is not None:
hack = rcParams['text.dvipnghack']
else:
hack = self._dvipng_hack_alpha
if hack:
# hack the alpha channel
# dvipng assumed a constant background, whereas we want to
# overlay these rasters with antialiasing over arbitrary
# backgrounds that may have other figure elements under them.
# When you set dvipng -bg Transparent, it actually makes the
# alpha channel 1 and does the background compositing and
# antialiasing itself and puts the blended data in the rgb
# channels. So what we do is extract the alpha information
# from the red channel, which is a blend of the default dvipng
# background (white) and foreground (black). So the amount of
# red (or green or blue for that matter since white and black
# blend to a grayscale) is the alpha intensity. Once we
# extract the correct alpha information, we assign it to the
# alpha channel properly and let the users pick their rgb. In
# this way, we can overlay tex strings on arbitrary
# backgrounds with antialiasing
#
# red = alpha*red_foreground + (1-alpha)*red_background
#
# Since the foreground is black (0) and the background is
# white (1) this reduces to red = 1-alpha or alpha = 1-red
#alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
alpha = 1-X[:,:,0]
else:
alpha = X[:,:,-1]
self.grey_arrayd[key] = alpha
return alpha
def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0,0,0)):
"""
Returns latex's rendering of the tex string as an rgba array
"""
if not fontsize: fontsize = rcParams['font.size']
if not dpi: dpi = rcParams['savefig.dpi']
r,g,b = rgb
key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
Z = self.rgba_arrayd.get(key)
if Z is None:
alpha = self.get_grey(tex, fontsize, dpi)
Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float)
Z[:,:,0] = r
Z[:,:,1] = g
Z[:,:,2] = b
Z[:,:,3] = alpha
self.rgba_arrayd[key] = Z
return Z
| agpl-3.0 |
pascalgutjahr/Praktikum-1 | Leerlaufspannung/plt2.py | 1 | 1119 | import numpy as np
import uncertainties.unumpy as unp
from uncertainties.unumpy import (nominal_values as noms, std_devs as stds)
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.optimize import curve_fit
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 13
plt.rcParams['lines.linewidth'] = 1
csfont = {'fontname': 'Times New Roman'}
R, U, I = np.genfromtxt('data2.txt', unpack=True , skip_header=2)
I /=1000 # in Ampere
def f(I, m, n):
return m * I + n
params, covariance = curve_fit(f, I, U)
errors = np.sqrt(np.diag(covariance))
print('m =', params[0], '+-', errors[0])
print('n =', params[1], '+-', errors[1])
# m = 15.7865377557 +- 0.375264383097
# n = 0.929385148384 +- 0.0415200261762
x_plot = np.linspace(min(I), max(I))
plt.plot(x_plot, f(x_plot, *params), 'b-', label='linear fit')
plt.plot(I, U, 'rx', label='measured values')
plt.ylabel(r'$U \,/\, \mathrm{V}$')
plt.xlabel(r'$I \,/\, \mathrm{A}$')
plt.xlim(min(I), max(I))
# plt.title('Counter-voltage measurements')
plt.grid()
plt.legend(loc='best')
plt.tight_layout()
plt.savefig('bilder/gegen.pdf')
plt.show()
| mit |
runt18/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/__init__.py | 1 | 2188 | from geo import AitoffAxes, HammerAxes, LambertAxes
from polar import PolarAxes
from matplotlib import axes
class ProjectionRegistry(object):
"""
Manages the set of projections available to the system.
"""
def __init__(self):
self._all_projection_types = {}
def register(self, *projections):
"""
Register a new set of projection(s).
"""
for projection in projections:
name = projection.name
self._all_projection_types[name] = projection
def get_projection_class(self, name):
"""
Get a projection class from its *name*.
"""
return self._all_projection_types[name]
def get_projection_names(self):
"""
Get a list of the names of all projections currently
registered.
"""
names = self._all_projection_types.keys()
names.sort()
return names
projection_registry = ProjectionRegistry()
projection_registry.register(
axes.Axes,
PolarAxes,
AitoffAxes,
HammerAxes,
LambertAxes)
def register_projection(cls):
projection_registry.register(cls)
def get_projection_class(projection=None):
"""
Get a projection class from its name.
If *projection* is None, a standard rectilinear projection is
returned.
"""
if projection is None:
projection = 'rectilinear'
try:
return projection_registry.get_projection_class(projection)
except KeyError:
raise ValueError("Unknown projection '{0!s}'".format(projection))
def projection_factory(projection, figure, rect, **kwargs):
"""
Get a new projection instance.
*projection* is a projection name.
*figure* is a figure to add the axes to.
*rect* is a :class:`~matplotlib.transforms.Bbox` object specifying
the location of the axes within the figure.
Any other kwargs are passed along to the specific projection
constructor being used.
"""
return get_projection_class(projection)(figure, rect, **kwargs)
def get_projection_names():
"""
Get a list of acceptable projection names.
"""
return projection_registry.get_projection_names()
| agpl-3.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/pylab_examples/subplots_demo.py | 9 | 2184 | """Examples illustrating the use of plt.subplots().
This function creates a figure and a grid of subplots with a single call, while
providing reasonable control over how the individual plots are created. For
very refined tuning of subplot creation, you can still use add_subplot()
directly on a new figure.
"""
import matplotlib.pyplot as plt
import numpy as np
# Simple data to display in various forms
x = np.linspace(0, 2 * np.pi, 400)
y = np.sin(x ** 2)
plt.close('all')
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, the axes array is 1-d
f, axarr = plt.subplots(2, sharex=True)
axarr[0].plot(x, y)
axarr[0].set_title('Sharing X axis')
axarr[1].scatter(x, y)
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Three subplots sharing both x/y axes
f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing both axes')
ax2.scatter(x, y)
ax3.scatter(x, 2 * y ** 2 - 1, color='r')
# Fine-tune figure; make subplots close to each other and hide x ticks for
# all but bottom plot.
f.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)
# row and column sharing
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')
ax1.plot(x, y)
ax1.set_title('Sharing x per column, y per row')
ax2.scatter(x, y)
ax3.scatter(x, 2 * y ** 2 - 1, color='r')
ax4.plot(x, 2 * y ** 2 - 1, color='r')
# Four axes, returned as a 2-d array
f, axarr = plt.subplots(2, 2)
axarr[0, 0].plot(x, y)
axarr[0, 0].set_title('Axis [0,0]')
axarr[0, 1].scatter(x, y)
axarr[0, 1].set_title('Axis [0,1]')
axarr[1, 0].plot(x, y ** 2)
axarr[1, 0].set_title('Axis [1,0]')
axarr[1, 1].scatter(x, y ** 2)
axarr[1, 1].set_title('Axis [1,1]')
# Fine-tune figure; hide x ticks for top plots and y ticks for right plots
plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)
plt.setp([a.get_yticklabels() for a in axarr[:, 1]], visible=False)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
plt.show()
| mit |
dennisobrien/bokeh | bokeh/util/serialization.py | 3 | 16114 | '''
Functions for helping with serialization and deserialization of
Bokeh objects.
Certain NumPy array dtypes can be serialized to a binary format for
performance and efficiency. The list of supported dtypes is:
{binary_array_types}
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
import base64
import datetime as dt
import math
import sys
from threading import Lock
import uuid
import numpy as np
from ..settings import settings
from .string import format_docstring
from .dependencies import import_optional
pd = import_optional('pandas')
BINARY_ARRAY_TYPES = set([
np.dtype(np.float32),
np.dtype(np.float64),
np.dtype(np.uint8),
np.dtype(np.int8),
np.dtype(np.uint16),
np.dtype(np.int16),
np.dtype(np.uint32),
np.dtype(np.int32),
])
DATETIME_TYPES = set([
dt.datetime,
dt.date,
dt.time,
np.datetime64,
])
if pd:
try:
_pd_timestamp = pd.Timestamp
except AttributeError:
_pd_timestamp = pd.tslib.Timestamp
DATETIME_TYPES.add(_pd_timestamp)
DATETIME_TYPES.add(pd.Timedelta)
DATETIME_TYPES.add(pd.Period)
DATETIME_TYPES.add(type(pd.NaT))
NP_EPOCH = np.datetime64(0, 'ms')
NP_MS_DELTA = np.timedelta64(1, 'ms')
DT_EPOCH = dt.datetime.utcfromtimestamp(0)
__doc__ = format_docstring(__doc__, binary_array_types="\n".join("* ``np." + str(x) + "``" for x in BINARY_ARRAY_TYPES))
_simple_id = 999
_simple_id_lock = Lock()
_dt_tuple = tuple(DATETIME_TYPES)
def is_datetime_type(obj):
''' Whether an object is any date, time, or datetime type recognized by
Bokeh.
Arg:
obj (object) : the object to test
Returns:
bool : True if ``obj`` is a datetime type
'''
return isinstance(obj, _dt_tuple)
def is_timedelta_type(obj):
''' Whether an object is any timedelta type recognized by Bokeh.
Arg:
obj (object) : the object to test
Returns:
bool : True if ``obj`` is a timedelta type
'''
return isinstance(obj, (dt.timedelta, np.timedelta64))
def convert_timedelta_type(obj):
''' Convert any recognized timedelta value to floating point absolute
milliseconds.
Arg:
obj (object) : the object to convert
Returns:
float : milliseconds
'''
if isinstance(obj, dt.timedelta):
return obj.total_seconds() * 1000.
elif isinstance(obj, np.timedelta64):
return (obj / NP_MS_DELTA)
def convert_datetime_type(obj):
''' Convert any recognized date, time, or datetime value to floating point
milliseconds since epoch.
Arg:
obj (object) : the object to convert
Returns:
float : milliseconds
'''
# Pandas NaT
if pd and obj is pd.NaT:
return np.nan
# Pandas Period
if pd and isinstance(obj, pd.Period):
return obj.to_timestamp().value / 10**6.0
# Pandas Timestamp
if pd and isinstance(obj, _pd_timestamp): return obj.value / 10**6.0
# Pandas Timedelta
elif pd and isinstance(obj, pd.Timedelta): return obj.value / 10**6.0
# Datetime (datetime is a subclass of date)
elif isinstance(obj, dt.datetime):
diff = obj.replace(tzinfo=None) - DT_EPOCH
return diff.total_seconds() * 1000.
# Date
elif isinstance(obj, dt.date):
return (dt.datetime(*obj.timetuple()[:6]) - DT_EPOCH).total_seconds() * 1000
# NumPy datetime64
elif isinstance(obj, np.datetime64):
epoch_delta = obj - NP_EPOCH
return (epoch_delta / NP_MS_DELTA)
# Time
elif isinstance(obj, dt.time):
return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.
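# Worked example (illustrative only, not part of Bokeh's public API): a quick
# sanity check of the millisecond conventions implemented above. All inputs are
# naive / UTC-based.
def _convert_datetime_type_examples():
    assert convert_datetime_type(dt.datetime(1970, 1, 1, 0, 0, 1)) == 1000.0
    assert convert_datetime_type(dt.date(1970, 1, 2)) == 86400000.0
    assert convert_datetime_type(np.datetime64('1970-01-01T00:00:00.500')) == 500.0
    assert convert_datetime_type(dt.time(0, 0, 1, 500)) == 1000.5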
def convert_datetime_array(array):
''' Convert NumPy datetime arrays to arrays to milliseconds since epoch.
Args:
array : (obj)
A NumPy array of datetime to convert
If the value passed in is not a NumPy array, it will be returned as-is.
Returns:
array
'''
if not isinstance(array, np.ndarray):
return array
try:
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') ==
dt2001.astype('datetime64[ms]').astype('int64'))
except AttributeError as e:
if e.args == ("'module' object has no attribute 'datetime64'",):
# for compatibility with PyPy that doesn't have datetime64
if 'PyPy' in sys.version:
legacy_datetime64 = False
pass
else:
raise e
else:
raise e
# not quite correct, truncates to ms..
if array.dtype.kind == 'M':
if legacy_datetime64:
if array.dtype == np.dtype('datetime64[ns]'):
array = array.astype('int64') / 10**6.0
else:
array = array.astype('datetime64[us]').astype('int64') / 1000.
elif array.dtype.kind == 'm':
array = array.astype('timedelta64[us]').astype('int64') / 1000.
return array
def make_id():
''' Return a new unique ID for a Bokeh object.
Normally this function will return simple monotonically increasing integer
    IDs (as strings) for identifying Bokeh objects within a Document. However,
    if it is desirable to have globally unique IDs for every object, this behavior
can be overridden by setting the environment variable ``BOKEH_SIMPLE_IDS=no``.
Returns:
str
'''
global _simple_id
if settings.simple_ids(True):
with _simple_id_lock:
_simple_id += 1
return str(_simple_id)
else:
return make_globally_unique_id()
def make_globally_unique_id():
''' Return a globally unique UUID.
    Some situations, e.g. id'ing dynamically created Divs in HTML documents,
always require globally unique IDs.
Returns:
str
'''
return str(uuid.uuid4())
def array_encoding_disabled(array):
''' Determine whether an array may be binary encoded.
The NumPy array dtypes that can be encoded are:
{binary_array_types}
Args:
array (np.ndarray) : the array to check
Returns:
bool
'''
# disable binary encoding for non-supported dtypes
return array.dtype not in BINARY_ARRAY_TYPES
array_encoding_disabled.__doc__ = format_docstring(array_encoding_disabled.__doc__,
binary_array_types="\n ".join("* ``np." + str(x) + "``"
for x in BINARY_ARRAY_TYPES))
def transform_array(array, force_list=False, buffers=None):
''' Transform a NumPy arrays into serialized format
Converts un-serializable dtypes and returns JSON serializable
format
Args:
array (np.ndarray) : a NumPy array to be transformed
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
buffers (set, optional) :
If binary buffers are desired, the buffers parameter may be
provided, and any columns that may be sent as binary buffers
            will be added to the set. If None, then only base64 encoding
will be used (default: None)
If force_list is True, then this value will be ignored, and
no buffers will be generated.
**This is an "out" parameter**. The values it contains will be
modified in-place.
Returns:
JSON
'''
array = convert_datetime_array(array)
return serialize_array(array, force_list=force_list, buffers=buffers)
def transform_array_to_list(array):
''' Transforms a NumPy array into a list of values
Args:
        array (np.ndarray) : the NumPy array to transform
Returns:
list or dict
'''
if (array.dtype.kind in ('u', 'i', 'f') and (~np.isfinite(array)).any()):
transformed = array.astype('object')
transformed[np.isnan(array)] = 'NaN'
transformed[np.isposinf(array)] = 'Infinity'
transformed[np.isneginf(array)] = '-Infinity'
return transformed.tolist()
elif (array.dtype.kind == 'O' and pd and pd.isnull(array).any()):
transformed = array.astype('object')
transformed[pd.isnull(array)] = 'NaN'
return transformed.tolist()
return array.tolist()
def transform_series(series, force_list=False, buffers=None):
''' Transforms a Pandas series into serialized form
Args:
series (pd.Series) : the Pandas series to transform
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
buffers (set, optional) :
If binary buffers are desired, the buffers parameter may be
provided, and any columns that may be sent as binary buffers
            will be added to the set. If None, then only base64 encoding
will be used (default: None)
If force_list is True, then this value will be ignored, and
no buffers will be generated.
**This is an "out" parameter**. The values it contains will be
modified in-place.
Returns:
list or dict
'''
# not checking for pd here, this function should only be called if it
# is already known that series is a Pandas Series type
if isinstance(series, pd.PeriodIndex):
vals = series.to_timestamp().values
else:
vals = series.values
return transform_array(vals, force_list=force_list, buffers=buffers)
def serialize_array(array, force_list=False, buffers=None):
''' Transforms a NumPy array into serialized form.
Args:
array (np.ndarray) : the NumPy array to transform
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
buffers (set, optional) :
If binary buffers are desired, the buffers parameter may be
provided, and any columns that may be sent as binary buffers
            will be added to the set. If None, then only base64 encoding
will be used (default: None)
If force_list is True, then this value will be ignored, and
no buffers will be generated.
**This is an "out" parameter**. The values it contains will be
modified in-place.
Returns:
list or dict
'''
if isinstance(array, np.ma.MaskedArray):
array = array.filled(np.nan) # Set masked values to nan
if (array_encoding_disabled(array) or force_list):
return transform_array_to_list(array)
if not array.flags['C_CONTIGUOUS']:
array = np.ascontiguousarray(array)
if buffers is None:
return encode_base64_dict(array)
else:
return encode_binary_dict(array, buffers)
def traverse_data(obj, use_numpy=True, buffers=None):
''' Recursively traverse an object until a flat list is found.
If NumPy is available, the flat list is converted to a numpy array
and passed to transform_array() to handle ``nan``, ``inf``, and
``-inf``.
Otherwise, iterate through all items, converting non-JSON items
Args:
obj (list) : a list of values or lists
        use_numpy (bool, optional) : toggle NumPy as a dependency for testing
This argument is only useful for testing (default: True)
'''
if use_numpy and all(isinstance(el, np.ndarray) for el in obj):
return [transform_array(el, buffers=buffers) for el in obj]
obj_copy = []
for item in obj:
# Check the base/common case first for performance reasons
# Also use type(x) is float because it's faster than isinstance
if type(item) is float:
if math.isnan(item):
item = 'NaN'
elif math.isinf(item):
if item > 0:
item = 'Infinity'
else:
item = '-Infinity'
obj_copy.append(item)
elif isinstance(item, (list, tuple)): # check less common type second
obj_copy.append(traverse_data(item))
else:
obj_copy.append(item)
return obj_copy
def transform_column_source_data(data, buffers=None, cols=None):
''' Transform ColumnSourceData data to a serialized format
Args:
data (dict) : the mapping of names to data columns to transform
buffers (set, optional) :
If binary buffers are desired, the buffers parameter may be
provided, and any columns that may be sent as binary buffers
            will be added to the set. If None, then only base64 encoding
will be used (default: None)
**This is an "out" parameter**. The values it contains will be
modified in-place.
cols (list[str], optional) :
Optional list of subset of columns to transform. If None, all
columns will be transformed (default: None)
Returns:
JSON compatible dict
'''
to_transform = set(data) if cols is None else set(cols)
data_copy = {}
for key in to_transform:
if pd and isinstance(data[key], (pd.Series, pd.Index)):
data_copy[key] = transform_series(data[key], buffers=buffers)
elif isinstance(data[key], np.ndarray):
data_copy[key] = transform_array(data[key], buffers=buffers)
else:
data_copy[key] = traverse_data(data[key], buffers=buffers)
return data_copy
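# Usage sketch (illustrative, not part of Bokeh's public API): how the two
# serialization paths of transform_column_source_data differ. Note that the
# buffers argument is consumed with .append() above, so a list is passed here.
def _transform_column_source_data_example():
    data = {'x': np.array([1.0, 2.0, np.nan]), 'y': [1, 2, 3]}
    # Base64 path: ndarray columns become encoded dicts, plain lists are traversed.
    encoded = transform_column_source_data(data)
    assert set(encoded) == {'x', 'y'}
    assert '__ndarray__' in encoded['x']
    # Buffer path: collecting (header, bytes) tuples instead of base64 strings.
    buffers = []
    encoded_with_buffers = transform_column_source_data(data, buffers=buffers)
    assert '__buffer__' in encoded_with_buffers['x']
    assert len(buffers) == 1
    return encoded, encoded_with_buffers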
def encode_binary_dict(array, buffers):
''' Send a numpy array as an unencoded binary buffer
The encoded format is a dict with the following structure:
.. code:: python
{
'__buffer__' : << an ID to locate the buffer >>,
'shape' : << array shape >>,
'dtype' : << dtype name >>,
'order' : << byte order at origin (little or big)>>
}
Args:
array (np.ndarray) : an array to encode
buffers (set) :
Set to add buffers to
**This is an "out" parameter**. The values it contains will be
modified in-place.
Returns:
dict
'''
buffer_id = make_id()
buf = (dict(id=buffer_id), array.tobytes())
buffers.append(buf)
return {
'__buffer__' : buffer_id,
'shape' : array.shape,
'dtype' : array.dtype.name,
'order' : sys.byteorder
}
def encode_base64_dict(array):
''' Encode a NumPy array using base64:
The encoded format is a dict with the following structure:
.. code:: python
{
'__ndarray__' : << base64 encoded array data >>,
'shape' : << array shape >>,
'dtype' : << dtype name >>,
}
Args:
array (np.ndarray) : an array to encode
Returns:
dict
'''
return {
'__ndarray__' : base64.b64encode(array.data).decode('utf-8'),
'shape' : array.shape,
'dtype' : array.dtype.name
}
def decode_base64_dict(data):
''' Decode a base64 encoded array into a NumPy array.
Args:
data (dict) : encoded array data to decode
Data should have the format encoded by :func:`encode_base64_dict`.
Returns:
np.ndarray
'''
b64 = base64.b64decode(data['__ndarray__'])
array = np.copy(np.frombuffer(b64, dtype=data['dtype']))
if len(data['shape']) > 1:
array = array.reshape(data['shape'])
return array
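# Round-trip sketch (illustrative, not part of the module's API): for contiguous
# arrays of the dtypes listed in BINARY_ARRAY_TYPES, encode_base64_dict and
# decode_base64_dict are inverses of each other.
def _base64_round_trip_example():
    original = np.arange(6, dtype=np.float64).reshape(2, 3)
    encoded = encode_base64_dict(original)   # JSON-compatible dict
    restored = decode_base64_dict(encoded)   # back to an ndarray
    assert restored.dtype == original.dtype
    assert restored.shape == original.shape
    assert np.array_equal(restored, original)
    return encoded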
| bsd-3-clause |
pkruskal/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
AlexRobson/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
apavlo/h-store | graphs/average-throughput-tv.py | 4 | 12436 | #!/usr/bin/env python
import os
import sys
import re
import logging
import fnmatch
import string
import argparse
import pylab
import numpy as np
import matplotlib.pyplot as plot
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import MaxNLocator
from pprint import pprint,pformat
from options import *
import graphutil
import datautil
## ==============================================
## LOGGING CONFIGURATION
## ==============================================
LOG = logging.getLogger(__name__)
LOG_handler = logging.StreamHandler()
LOG_formatter = logging.Formatter(
fmt='%(asctime)s [%(funcName)s:%(lineno)03d] %(levelname)-5s: %(message)s',
datefmt='%m-%d-%Y %H:%M:%S'
)
LOG_handler.setFormatter(LOG_formatter)
LOG.addHandler(LOG_handler)
LOG.setLevel(logging.INFO)
## ==============================================
## CONFIGURATION
## ==============================================
import matplotlib.ticker as tkr
def func(x, pos): # formatter function takes tick label and tick position
s = '{:0,d}'.format(int(x))
return s
dict = {}
def computeEvictionStats(dataFile):
colMap, csvData = datautil.getCSVData(dataFile)
    rpos = dataFile.rfind("/")
    pos = dataFile.find("voter")
if pos < 0:
pos = dataFile.find("tpcc")
print dataFile
dataFile = dataFile[0:pos] + dataFile[rpos + 3:]
print dataFile
if len(csvData) == 0: return
tp = []
    if dataFile not in dict:
dict[dataFile] = []
for row in csvData:
tp.append(float(row[colMap["THROUGHPUT"]]))
dict[dataFile].append(np.mean(tp))
print " Average Throughput: %.2f ms" % np.mean(tp)
print
# DEF#
def voter_all(dict, out_path):
fig = plot.figure()
#fig.set_size_inches(8,4.8)
ax = fig.add_subplot(111)
skew = ["voter"]
res1 = []
res1_min = []
res1_max = []
res2 = []
res2_min = []
res2_max = []
res3 = []
res3_min = []
res3_max = []
for s in skew:
for tp in dict:
if tp.find(s + '-') >= 0 and tp.find("lru") >= 0 and tp.find("prime") < 0:
res1.append(np.mean(dict[tp]))
mean = np.mean(dict[tp])
res1_min.append((mean - np.min(dict[tp])))
res1_max.append(np.max(dict[tp]) - mean)
print tp
print np.mean(dict[tp])
for s in skew:
for tp in dict:
if tp.find(s + '-') >= 0 and tp.find("timestamps") >= 0 and tp.find("prime") < 0:
res2.append(np.mean(dict[tp]))
mean = np.mean(dict[tp])
res2_min.append((mean - np.min(dict[tp])))
res2_max.append(np.max(dict[tp]) - mean)
print tp
print np.mean(dict[tp])
for s in skew:
for tp in dict:
if tp.find(s + '-') >= 0 and tp.find("timestamps") >= 0 and tp.find("prime") >= 0:
res3.append(np.mean(dict[tp]))
mean = np.mean(dict[tp])
res3_min.append((mean - np.min(dict[tp])))
res3_max.append(np.max(dict[tp]) - mean)
print tp
print np.mean(dict[tp])
x = [0.5]
ax.bar( [i-0.1 for i in x] ,res1,width=0.1,label='aLRU',hatch='\\',color='#FF6600')
ax.errorbar([i-0.05 for i in x], res1, yerr = [res1_min, res1_max], fmt='o')
ax.bar( [i-0.0 for i in x],res2,width=0.1,label='timestamps',hatch='/',color='#99CC00')
ax.errorbar([i+0.05 for i in x], res2, yerr = [res2_min, res2_max], fmt='o')
ax.bar( [i+0.1 for i in x],res3,width=0.1,label='prime-timestamps',hatch='\\/',color='#CD0000')
ax.errorbar([i+0.15 for i in x], res3, yerr = [res3_min, res3_max], fmt='o')
ax.set_ylabel("Transactions per second",fontsize=16,weight='bold')
#ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),ncol=3)
ax.set_xlim([0.2,0.9])
ax.set_ylim([0,70000])
ax.set_xticklabels(["VOTER"], fontsize=16)
ax.set_xlabel("Workload",fontsize=16,weight='bold')
ax.set_xticks([0.5])
y_format = tkr.FuncFormatter(func) # make formatter
ax.yaxis.set_major_formatter(y_format) # set formatter to needed axis
plot.savefig(out_path)
def voter(dict, out_path):
fig = plot.figure()
#fig.set_size_inches(8,4.8)
ax = fig.add_subplot(111)
skew = ["voter"]
res1 = []
res1_min = []
res1_max = []
res2 = []
res2_min = []
res2_max = []
for s in skew:
for tp in dict:
if tp.find(s + '-') >= 0 and tp.find("lru") >= 0:
res1.append(np.mean(dict[tp]))
mean = np.mean(dict[tp])
res1_min.append((mean - np.min(dict[tp])))
res1_max.append(np.max(dict[tp]) - mean)
print tp
print np.mean(dict[tp])
for s in skew:
for tp in dict:
if tp.find(s + '-') >= 0 and tp.find("timestamps") >= 0:
res2.append(np.mean(dict[tp]))
mean = np.mean(dict[tp])
res2_min.append((mean - np.min(dict[tp])))
res2_max.append(np.max(dict[tp]) - mean)
print tp
print np.mean(dict[tp])
x = [0.5]
ax.bar( [i-0.1 for i in x] ,res1,width=0.1,label='aLRU',hatch='\\',color='#FF6600')
ax.errorbar([i-0.05 for i in x], res1, yerr = [res1_min, res1_max], fmt='o')
ax.bar( [i-0.0 for i in x],res2,width=0.1,label='timestamps',hatch='/',color='#99CC00')
ax.errorbar([i+0.05 for i in x], res2, yerr = [res2_min, res2_max], fmt='o')
ax.set_ylabel("Transactions per second",fontsize=16,weight='bold')
#ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),ncol=2)
ax.set_xlim([0.3,0.7])
ax.set_ylim([0,70000])
ax.set_xticklabels(["VOTER"], fontsize=16)
ax.set_xlabel("Workload",fontsize=16,weight='bold')
ax.set_xticks([0.5])
y_format = tkr.FuncFormatter(func) # make formatter
ax.yaxis.set_major_formatter(y_format) # set formatter to needed axis
plot.savefig(out_path)
def draw_throughput_graph(dict, out_path):
fig = plot.figure()
#fig.set_size_inches(8,4.8)
ax = fig.add_subplot(111)
skew = ["voter", "tpcc"]
res1 = []
res1_min = []
res1_max = []
res2 = []
res2_min = []
res2_max = []
for s in skew:
for tp in dict:
if tp.find(s + '-') >= 0 and tp.find("lru") >= 0:
res1.append(np.mean(dict[tp]))
mean = np.mean(dict[tp])
res1_min.append((mean - np.min(dict[tp])))
res1_max.append(np.max(dict[tp]) - mean)
print tp
print np.mean(dict[tp])
for s in skew:
for tp in dict:
if tp.find(s + '-') >= 0 and tp.find("timestamps") >= 0:
res2.append(np.mean(dict[tp]))
mean = np.mean(dict[tp])
res2_min.append((mean - np.min(dict[tp])))
res2_max.append(np.max(dict[tp]) - mean)
print tp
print np.mean(dict[tp])
#res1 = [2618.45, 17978.96, 30405.52]
#res2 =[6123.74, 28654.0766667, 35181.7266667]
# \#topic ($K$) & 50 & 100 & 150 \\ \hline %\hline
# TI & 0.7476 & 0.7505 & 0.7349 \\ \hline%\cline{2-4}
# WTM & \multicolumn{3}{c}{0.7705} \\ \hline%\cline{2-4}
# COLD(C=100) & 0.8283 & {\bf 0.8397} & 0.8254 \\
# \hline
x = [0.5,1]
ax.bar( [i-0.1 for i in x] ,res1,width=0.1,label='aLRU',hatch='\\',color='#FF6600')
ax.errorbar([i-0.05 for i in x], res1, yerr = [res1_min, res1_max], fmt='o')
ax.bar( [i-0.0 for i in x],res2,width=0.1,label='timestamps',hatch='/',color='#99CC00')
ax.errorbar([i+0.05 for i in x], res2, yerr = [res2_min, res2_max], fmt='o')
ax.set_ylabel("Transactions per second",fontsize=16,weight='bold')
#ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),ncol=2)
ax.set_xlim([0.2,1.2])
ax.set_ylim([0,70000])
ax.set_xticklabels(["VOTER", "TPC-C"], fontsize=16)
ax.set_xlabel("Workload",fontsize=16,weight='bold')
ax.set_xticks([0.5,1])
y_format = tkr.FuncFormatter(func) # make formatter
ax.yaxis.set_major_formatter(y_format) # set formatter to needed axis
plot.savefig(out_path)
def draw_throughput_graph_INF(dict, out_path):
fig = plot.figure()
#fig.set_size_inches(8,4.8)
ax = fig.add_subplot(111)
skew = ["voter", "tpcc"]
res1 = []
res1_min = []
res1_max = []
res2 = []
res2_min = []
res2_max = []
res3 = []
res3_min = []
res3_max = []
for s in skew:
for tp in dict:
if tp.find(s + '-') >= 0 and tp.find("lru") >= 0 and tp.find("A1000") < 0:
res1.append(np.mean(dict[tp]))
mean = np.mean(dict[tp])
res1_min.append((mean - np.min(dict[tp])))
res1_max.append(np.max(dict[tp]) - mean)
print tp
print np.mean(dict[tp])
print res1_min
print res1_max
for s in skew:
for tp in dict:
if tp.find(s + '-') >= 0 and tp.find("timestamp") >= 0:
res2.append(np.mean(dict[tp]))
mean = np.mean(dict[tp])
res2_min.append(mean - np.min(dict[tp]))
res2_max.append(np.max(dict[tp]) - mean)
print tp
print np.mean(dict[tp])
for s in skew:
for tp in dict:
if tp.find(s + '-') >= 0 and ((tp.find("timestamp") < 0 and tp.find("lru") < 0 and tp.find("tpcc") >= 0) or tp.find("none") >= 0):
res3.append(np.mean(dict[tp]))
mean = np.mean(dict[tp])
res3_min.append(mean - np.min(dict[tp]))
res3_max.append(np.max(dict[tp]) - mean)
print tp
print np.mean(dict[tp])
#res1 = [2618.45, 17978.96, 30405.52]
#res2 =[6123.74, 28654.0766667, 35181.7266667]
# \#topic ($K$) & 50 & 100 & 150 \\ \hline %\hline
# TI & 0.7476 & 0.7505 & 0.7349 \\ \hline%\cline{2-4}
# WTM & \multicolumn{3}{c}{0.7705} \\ \hline%\cline{2-4}
# COLD(C=100) & 0.8283 & {\bf 0.8397} & 0.8254 \\
# \hline
x = [0.5,1]
ax.bar( [i-0.15 for i in x] ,res1,width=0.1,label='aLRU',hatch='\\',color='#FF6600')
ax.errorbar([i-0.1 for i in x], res1, yerr = [res1_min, res1_max], fmt='o')
ax.bar( [i-0.05 for i in x],res2,width=0.1,label='timestamps',hatch='/',color='#99CC00')
ax.errorbar([i-0.0 for i in x], res2, yerr = [res2_min, res2_max], fmt='o')
ax.bar( [i+0.05 for i in x],res3,width=0.1,label='none',hatch='-',color='b')
ax.errorbar([i+0.1 for i in x], res3, yerr = [res3_min, res3_max], fmt='o')
ax.set_ylabel("Transactions per second",fontsize=16,weight='bold')
#ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),ncol=3)
ax.set_xlim([0.2,1.2])
ax.set_ylim([0,70000])
ax.set_xticklabels(["VOTER", "TPC-C"], fontsize=16)
ax.set_xlabel("Workload",fontsize=16,weight='bold')
ax.set_xticks([0.5,1])
#plt.show()
plot.savefig(out_path)
## ==============================================
## main
## ==============================================
if __name__ == '__main__':
matches = []
for root, dirnames, filenames in os.walk("./voter/voter-TINF-NoLoop"):
for filename in fnmatch.filter(filenames, '*results.csv'):
matches.append(os.path.join(root, filename))
for root, dirnames, filenames in os.walk("./tpcc/tpcc-TINF-NoLoop"):
for filename in fnmatch.filter(filenames, '*results.csv'):
matches.append(os.path.join(root, filename))
#for root, dirnames, filenames in os.walk("./prime/tpcc-NoLoop"):
# for filename in fnmatch.filter(filenames, '*E50-results.csv'):
# matches.append(os.path.join(root, filename))
map(computeEvictionStats, matches)
#for tp in dict:
# print tp
# print np.mean(dict[tp])
draw_throughput_graph_INF(dict, "tpcc-voter-INF.pdf")
#draw_throughput_graph(dict, "ycsb-T500.pdf")
#draw_throughput_graph(dict, "tpcc-voter-NoLoop-prime.pdf")
#voter(dict, "voter-NoLoop.pdf")
#voter_all(dict, "voter-NoLoop-prime-all.pdf")
## MAIN
| gpl-3.0 |
google/makani | lib/datatools/plot_aio_network_activity.py | 1 | 4416 | #!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quick-and-dirty plotting tool to display network activity.
This reads an activity file written by validate_pcap.cc
and displays both network usage and missing packets.
"""
import imp
import os
import sys
import gflags
import matplotlib.patches as patches
import matplotlib.pyplot as pyplot
gflags.DEFINE_string('file', None, 'Name of input python file.')
gflags.DEFINE_bool('sender_timestamp', False,
'Use sender AIO timestamps instead of capture time.')
gflags.RegisterValidator('file',
lambda f: f and os.path.basename(f),
'--file must have a valid file name.')
def draw(plot, data, x_key, y_key, len_key, edge_color, data_color, y_offset):
"""Draw the data to the plot.
Draws a rectangle for each dict in data.
Args:
plot: a matplotlib subplot.
data: an array of points as
{ x_key: x_coordinate, y_key: y_coordinate, len_key: length_of_rect }.
x_key: a string key into the dicts in data.
y_key: a string key into the dicts in data.
len_key: a string key into the dicts in data.
edge_color: the color used for the circumference of the rectangle drawn.
data_color: the color used for the interior of the rectangle drawn.
y_offset: an extra offset to add to the y coordinate of the rectangle.
Returns:
The bounding box of the boxes drawn.
"""
def handle_minmax(cur_min, cur_max, cur, size):
return (min(cur_min, cur), max(cur_max, cur + size))
height = 0.4
min_x = max_x = data[0][x_key]
min_y = max_y = data[0][y_key]
for p in data:
y_scale = 1
x = p[x_key]
if x < max_x:
# When we see packets together whose ts_us and length collectively
# indicate a full network pipe, we draw them at half height until the
# congestion clears.
y_scale = 0.5
x = max_x
y = p[y_key] + y_offset
length = p[len_key] * 8 / 100
min_x, max_x = handle_minmax(min_x, max_x, x, length)
min_y, max_y = handle_minmax(min_y, max_y, y, 1)
plot.add_patch(
patches.Rectangle(
(x, y),
length, height * y_scale,
facecolor=data_color, edgecolor=edge_color
)
)
plot.add_patch(
patches.Rectangle(
(x, -2 + y_offset),
length, height * y_scale,
facecolor=data_color, edgecolor=edge_color
)
)
return (min_x, max_x, min_y, max_y)
def main(argv):
flags = gflags.FLAGS
try:
argv = flags(argv)
except gflags.FlagsError, e:
print '%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], flags)
sys.exit(1)
with open(flags.file, 'r') as input_file:
data = imp.load_source('data', '', input_file)
packets = data.packets
gaps = data.gaps
if not (packets or gaps):
print 'File contains no data.'
return
fig = pyplot.figure()
plot = fig.add_subplot(111, aspect='auto')
if flags.sender_timestamp:
x_key = 'timestamp'
else:
x_key = 'ts_us'
y_key = 'message_type'
len_key = 'len'
packet_range = gap_range = None
if packets:
packet_range = draw(plot, packets, x_key, y_key, len_key, 'black',
'blue', 0)
if gaps:
gap_range = draw(plot, gaps, x_key, y_key, len_key, 'red', 'pink', 0.5)
else:
gap_range = packet_range
if not packet_range:
packet_range = gap_range
plot_xrange = (min(packet_range[0], gap_range[0]),
max(packet_range[1], gap_range[1]))
plot_yrange = (-2, max(packet_range[3], gap_range[3]))
pyplot.xlabel(x_key)
pyplot.ylabel(y_key)
pyplot.yticks(range(0, plot_yrange[1] + 1, 5))
plot.set_xbound(lower=plot_xrange[0], upper=plot_xrange[1])
plot.set_ybound(lower=plot_yrange[0], upper=plot_yrange[1])
pyplot.tight_layout()
pyplot.show()
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
trungnt13/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 72 | 13586 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and test
sets. The compressed archive is around 14 MB; once uncompressed, the train
set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple tf-idf
vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
    (for example, because they contain the string 'writes:').
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
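# Illustration (a sketch, not a doctest from the upstream source): for the post
#   "In article <1> foo writes:\n> quoted line\nactual reply"
# the first two lines match _QUOTE_RE ('^In article' and '^>') and are dropped,
# so strip_newsgroup_quoting returns "actual reply".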
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
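# Usage sketch (illustrative only, following the signature and Returns section
# above):
#
#   bunch = fetch_20newsgroups_vectorized(subset='train', remove=('headers',))
#   X, y = bunch.data, bunch.target      # sparse feature matrix and labels
#   print(X.shape, len(bunch.target_names))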
| bsd-3-clause |
0asa/scikit-learn | sklearn/linear_model/omp.py | 6 | 29556 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False, return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path
)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
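# Illustrative sketch (not part of the upstream file): build a dictionary with
# unit-norm columns, synthesize y from a known 3-sparse gamma, and ask OMP for
# 3 atoms; in this noiseless setting the recovered support should match.
#
#   rng = np.random.RandomState(0)
#   X = rng.randn(100, 30)
#   X /= np.sqrt(np.sum(X ** 2, axis=0))   # unit-norm columns, as assumed above
#   gamma = np.zeros(30)
#   gamma[[2, 11, 23]] = [1.5, -2.0, 0.7]
#   y = np.dot(X, gamma)
#   coef = orthogonal_mp(X, y, n_nonzero_coefs=3)
#   # expected: np.flatnonzero(coef) == [2, 11, 23]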
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path
)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
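# Sketch (illustrative, reusing X and y from the orthogonal_mp sketch above):
# the Gram variant should give the same coefficients when fed the precomputed
# quantities it documents.
#
#   G = np.dot(X.T, X)
#   Xy = np.dot(X.T, y)
#   coef_gram = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=3)
#   # expected: np.allclose(coef_gram, orthogonal_mp(X, y, n_nonzero_coefs=3))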
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Mathching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X = check_array(X)
y = np.asarray(y)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
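# Estimator usage sketch (illustrative, with X and y as in the orthogonal_mp
# sketch above):
#
#   omp = OrthogonalMatchingPursuit(n_nonzero_coefs=3)
#   omp.fit(X, y)
#   print(omp.coef_, omp.intercept_, omp.n_iter_)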
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
        Maximum number of iterations to perform, and therefore the maximum
        number of features to include. 100 by default.
Returns
-------
residues: array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Mathching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
        Maximum number of iterations to perform, and therefore the maximum
        number of features to include. 10% of ``n_features`` but at least 5
        if available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
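# Sketch (illustrative): the CV variant selects n_nonzero_coefs_ itself by
# cross-validating the path residues over the folds described above.
#
#   omp_cv = OrthogonalMatchingPursuitCV(cv=5, n_jobs=1)
#   omp_cv.fit(X, y)
#   print(omp_cv.n_nonzero_coefs_)   # sparsity level chosen by CV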
| bsd-3-clause |
f171a9a3497c8b/python_playground | Pythonchallenge solutions/pythonchallenge11.py | 1 | 2405 | #!/usr/bin/python3
# http://www.pythonchallenge.com/pc/return/5808.html
# If needed, use username and password from challenge 8
from os import path
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageOps
from utils import download_file
def solve11():
filepath = download_file(
url='http://www.pythonchallenge.com/pc/return/cave.jpg',
binf=True,
username='huge',
password='file'
)
filename = path.split(filepath)[1]
try:
if not path.exists(filepath):
raise IOError('File does not exist')
with Image.open(filepath, 'r') as img:
width, height = img.size
template = (
'{:<8}: {}'.format('Filename', filename),
'{:<8}: {}'.format('Format', img.format),
'{:<8}: {}'.format('Mode', img.mode),
'{:<8}: {:,d} pixels'.format('Width', width),
'{:<8}: {:,d} pixels'.format('Height', height),
'{:<8}: {:,d} pixels'.format('Size', width * height)
)
pixels = np.asarray(img, dtype=np.uint8, order='F')
except (IOError, OSError) as err:
print('Cannot open:', filepath if filepath else '[not downloaded]')
print(err.strerror if err.strerror else err)
else:
print('\n'.join(template), end='\n\n')
del template, width, height
plt.ioff()
plt.figure(num=filename, frameon=False, clear=True)
plt.imshow(pixels, interpolation=None, filternorm=1)
plt.show()
plt.ioff()
plt.figure(num=filename, frameon=False, clear=True)
with Image.fromarray(pixels[0::2, 0::2]) as img:
img.paste(ImageOps.invert(img))
img.paste(ImageOps.autocontrast(img))
part = np.asarray(img, dtype=np.uint8, order='F')
plt.subplot(221)
plt.axis('off')
plt.imshow(part)
plt.subplot(222)
plt.axis('off')
plt.imshow(pixels[1::2, 1::2])
plt.subplot(223)
plt.axis('off')
plt.imshow(pixels[0::2, 1::2])
plt.subplot(224)
plt.axis('off')
plt.imshow(pixels[1::2, 0::2])
plt.show()
print('Magic word: evil')
if __name__ == '__main__':
solve11()
| mit |
dparks1134/RefineM | refinem/plots/mpld3_plugins.py | 1 | 17139 | ###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import matplotlib
import mpld3
class LinkedBrush(mpld3.plugins.PluginBase):
JAVASCRIPT = """
mpld3.LinkedBrushPlugin = refinem_LinkedBrushPlugin;
mpld3.register_plugin("refinem_linkedbrush", refinem_LinkedBrushPlugin);
refinem_LinkedBrushPlugin.prototype = Object.create(mpld3.Plugin.prototype);
refinem_LinkedBrushPlugin.prototype.constructor = refinem_LinkedBrushPlugin;
refinem_LinkedBrushPlugin.prototype.requiredProps = [ "id" ];
refinem_LinkedBrushPlugin.prototype.defaultProps = {
button: true,
enabled: null
};
function refinem_LinkedBrushPlugin(fig, props) {
mpld3.Plugin.call(this, fig, props);
if (this.props.enabled === null) {
this.props.enabled = !this.props.button;
}
var enabled = this.props.enabled;
if (this.props.button) {
var BrushButton = mpld3.ButtonFactory({
buttonID: "refinem_linkedbrush",
sticky: true,
actions: [ "drag" ],
onActivate: this.activate.bind(this),
onDeactivate: this.deactivate.bind(this),
onDraw: function() {
this.setState(enabled);
},
icon: function() {
return mpld3.icons["brush"];
}
});
this.fig.buttons.push(BrushButton);
}
this.extentClass = "refinem_linkedbrush";
}
refinem_LinkedBrushPlugin.prototype.activate = function() {
if (this.enable) this.enable();
};
refinem_LinkedBrushPlugin.prototype.deactivate = function() {
if (this.disable) this.disable();
};
refinem_LinkedBrushPlugin.prototype.get_selected = function() {
if (this.get_selected) this.get_selected();
};
refinem_LinkedBrushPlugin.prototype.draw = function() {
var obj = mpld3.get_element(this.props.id);
if (obj === null) {
throw "LinkedBrush: no object with id='" + this.props.id + "' was found";
}
var fig = this.fig;
if (!("offsets" in obj.props)) {
throw "Plot object with id='" + this.props.id + "' is not a scatter plot";
}
var dataKey = "offsets" in obj.props ? "offsets" : "data";
mpld3.insert_css("#" + fig.figid + " rect.extent." + this.extentClass, {
fill: "#000",
"fill-opacity": .05,
stroke: "#fff"
});
mpld3.insert_css("#" + fig.figid + " path.mpld3-unselected", {
opacity: .2
});
var dataClass = "mpld3data-" + obj.props[dataKey];
var brush = fig.getBrush();
var dataByAx = [];
fig.axes.forEach(function(ax) {
var axData = [];
ax.elements.forEach(function(el) {
if (el.props[dataKey] === obj.props[dataKey]) {
el.group.classed(dataClass, true);
axData.push(el);
}
});
dataByAx.push(axData);
});
var allData = [];
var selectedData = fig.canvas.selectAll("." + dataClass);
var unselectedData = fig.canvas.selectAll("." + dataClass);
var currentAxes;
function brushstart(d) {
if (currentAxes != this) {
d3.select(currentAxes).call(brush.clear());
currentAxes = this;
brush.x(d.xdom).y(d.ydom);
}
}
function brushmove(d) {
var data = dataByAx[d.axnum];
if (data.length > 0) {
var ix = data[0].props.xindex;
var iy = data[0].props.yindex;
var e = brush.extent();
if (brush.empty()) {
selectedData.selectAll("path").classed("mpld3-selected", false);
unselectedData.selectAll("path").classed("mpld3-unselected", false);
} else {
selectedData.selectAll("path").classed("mpld3-selected", function(p) {
return !(e[0][0] > p[ix] || e[1][0] < p[ix] || e[0][1] > p[iy] || e[1][1] < p[iy]);
});
unselectedData.selectAll("path").classed("mpld3-unselected", function(p) {
return e[0][0] > p[ix] || e[1][0] < p[ix] || e[0][1] > p[iy] || e[1][1] < p[iy];
});
}
}
}
function brushend(d) {
if (brush.empty()) {
selectedData.selectAll("path").classed("mpld3-selected", false);
unselectedData.selectAll("path").classed("mpld3-selected", false);
}
}
this.enable = function() {
this.fig.showBrush(this.extentClass);
brush.on("brushstart", brushstart).on("brush", brushmove).on("brushend", brushend);
this.enabled = true;
};
this.disable = function() {
d3.select(currentAxes).call(brush.clear());
this.fig.hideBrush(this.extentClass);
this.enabled = false;
};
this.disable();
};
"""
def __init__(self, points, button=True, enabled=True):
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "refinem_linkedbrush",
"button": button,
"enabled": False,
"id": mpld3.utils.get_id(points, suffix)}
class Tooltip(mpld3.plugins.PluginBase):
"""A Plugin to enable an HTML tooltip.
This extends the PointHTMLTooltip class in mpld3. It adds
a mousedown() event which writes the label of clicked
points to an HTML element with the id 'selected_points'.
    The tooltip shows formatted HTML text that hovers over points.
Parameters
----------
points : matplotlib Collection or Line2D object
The figure element to apply the tooltip to
labels : list
The labels for each point in points, as strings of unescaped HTML.
hoffset, voffset : integer, optional
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
css : str, optional
css to be included, for styling the label html if desired
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> labels = ['<h1>{title}</h1>'.format(title=i) for i in range(10)]
>>> plugins.connect(fig, Tooltip(points[0], labels))
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("tooltip", Tooltip);
Tooltip.prototype = Object.create(mpld3.Plugin.prototype);
Tooltip.prototype.constructor = Tooltip;
Tooltip.prototype.requiredProps = ["id"];
Tooltip.prototype.defaultProps = {labels:null,
hoffset:0,
voffset:10};
function Tooltip(fig, props){
mpld3.Plugin.call(this, fig, props);
};
Tooltip.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var labels = this.props.labels;
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.elements()
.on("mousedown", function(d, i){
var div = document.getElementById("selected_points");
selected_points[selected_points.length] = labels[i]
div.innerHTML = selected_points.join('<br>');})
.on("mouseover", function(d, i){
tooltip.html(labels[i])
.style("visibility", "visible");})
.on("mousemove", function(d, i){
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");});
};
"""
def __init__(self, points, labels=None,
hoffset=0, voffset=10, css=None):
self.points = points
self.labels = labels
self.voffset = voffset
self.hoffset = hoffset
self.css_ = css or ""
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "tooltip",
"id": mpld3.utils.get_id(points, suffix),
"labels": labels,
"hoffset": hoffset,
"voffset": voffset}
# Additional script to add at global scope.
script_global = ('var selected_points = []\n'
'function clear_selection_list() {\n'
'selected_points = []\n'
'var div = document.getElementById("selected_points");\n'
'div.innerHTML = "";\n'
'};\n')
# Additional HTML to add to body
html_body = ('<hr>\n'
'<div id="selected_points"></div>\n'
'<br>\n'
'<button onclick="clear_selection_list()">Clear</button>\n')
class LinkedBrushSave(mpld3.plugins.PluginBase):
JAVASCRIPT="""
mpld3.LinkedBrushSavePlugin = mpld3_LinkedBrushSavePlugin;
mpld3.register_plugin("linkedbrushsave", mpld3_LinkedBrushSavePlugin);
mpld3_LinkedBrushSavePlugin.prototype = Object.create(mpld3.Plugin.prototype);
mpld3_LinkedBrushSavePlugin.prototype.constructor = mpld3_LinkedBrushSavePlugin;
mpld3_LinkedBrushSavePlugin.prototype.requiredProps = [ "id" ];
mpld3_LinkedBrushSavePlugin.prototype.defaultProps = {
labels:null,
button: true,
enabled: null
};
function mpld3_LinkedBrushSavePlugin(fig, props) {
mpld3.Plugin.call(this, fig, props);
if (this.props.enabled === null) {
this.props.enabled = !this.props.button;
}
var enabled = this.props.enabled;
if (this.props.button) {
var BrushButton = mpld3.ButtonFactory({
buttonID: "linkedbrushsave",
sticky: true,
actions: [ "drag" ],
onActivate: this.activate.bind(this),
onDeactivate: this.deactivate.bind(this),
onDraw: function() {
this.setState(enabled);
},
icon: function() {
return mpld3.icons["brush"];
}
});
this.fig.buttons.push(BrushButton);
var my_icon = "data:image/png;base64,longstring_that_I_redacted";
var SaveButton = mpld3.ButtonFactory({
buttonID: "save",
sticky: false,
onActivate: this.get_selected.bind(this),
icon: function(){return mpld3.icons["brush"];},
});
this.fig.buttons.push(SaveButton);
}
this.extentClass = "linkedbrushsave";
}
mpld3_LinkedBrushSavePlugin.prototype.activate = function() {
if (this.enable) this.enable();
};
mpld3_LinkedBrushSavePlugin.prototype.deactivate = function() {
if (this.disable) this.disable();
};
mpld3_LinkedBrushSavePlugin.prototype.get_selected = function() {
if (this.get_selected) this.get_selected();
};
mpld3_LinkedBrushSavePlugin.prototype.draw = function() {
var obj = mpld3.get_element(this.props.id);
var labels = this.props.labels;
if (obj === null) {
throw "LinkedBrush: no object with id='" + this.props.id + "' was found";
}
var fig = this.fig;
if (!("offsets" in obj.props)) {
throw "Plot object with id='" + this.props.id + "' is not a scatter plot";
}
var dataKey = "offsets" in obj.props ? "offsets" : "data";
mpld3.insert_css("#" + fig.figid + " rect.extent." + this.extentClass, {
fill: "#000",
"fill-opacity": .125,
stroke: "#fff"
});
mpld3.insert_css("#" + fig.figid + " path.mpld3-hidden", {
stroke: "#ccc !important",
fill: "#ccc !important"
});
var dataClass = "mpld3data-" + obj.props[dataKey];
var brush = fig.getBrush();
var dataByAx = [];
fig.axes.forEach(function(ax) {
var axData = [];
ax.elements.forEach(function(el) {
if (el.props[dataKey] === obj.props[dataKey]) {
el.group.classed(dataClass, true);
axData.push(el);
}
});
dataByAx.push(axData);
});
var allData = [];
var dataToBrush = fig.canvas.selectAll("." + dataClass);
var currentAxes;
function brushstart(d) {
if (currentAxes != this) {
d3.select(currentAxes).call(brush.clear());
currentAxes = this;
brush.x(d.xdom).y(d.ydom);
}
}
function brushmove(d) {
var data = dataByAx[d.axnum];
if (data.length > 0) {
var ix = data[0].props.xindex;
var iy = data[0].props.yindex;
var e = brush.extent();
if (brush.empty()) {
dataToBrush.selectAll("path").classed("mpld3-hidden", false);
} else {
dataToBrush.selectAll("path").classed("mpld3-hidden", function(p) {
return e[0][0] > p[ix] || e[1][0] < p[ix] || e[0][1] > p[iy] || e[1][1] < p[iy];
});
}
}
}
function brushend(d) {
if (brush.empty()) {
dataToBrush.selectAll("path").classed("mpld3-hidden", false);
}
}
this.get_selected = function(d) {
var brush = fig.getBrush();
var extent = brush.extent();
alert(extent);
}
this.enable = function() {
this.fig.showBrush(this.extentClass);
brush.on("brushstart", brushstart).on("brush", brushmove).on("brushend", brushend);
this.enabled = true;
};
this.disable = function() {
d3.select(currentAxes).call(brush.clear());
this.fig.hideBrush(this.extentClass);
this.enabled = false;
};
this.disable();
};
"""
def __init__(self, points, labels, button=True, enabled=True):
self.labels = labels
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "linkedbrushsave",
"button": button,
"labels": labels,
"enabled": False,
"id": mpld3.utils.get_id(points, suffix)}
| gpl-3.0 |
mne-tools/mne-tools.github.io | 0.13/_downloads/plot_source_power_spectrum.py | 19 | 1938 | """
=========================================================
Compute power spectral densities of the sources with dSPM
=========================================================
Returns an STC file containing the PSD (in dB) of each of the sources.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, exclude='bads')
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
label = mne.read_label(fname_label)
stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
pick_ori="normal", n_fft=n_fft, label=label)
stc.save('psd_dSPM')
###############################################################################
# View PSD of sources in label
plt.plot(1e3 * stc.times, stc.data.T)
plt.xlabel('Frequency (Hz)')
plt.ylabel('PSD (dB)')
plt.title('Source Power Spectrum (PSD)')
plt.show()
| bsd-3-clause |
stoneflyop1/py_machine_learning | ch10/machine_dt.py | 1 | 1945 | import pandas as pd
# data from https://archive.ics.uci.edu/ml/datasets/Computer+Hardware
df = pd.read_csv('../data/machine.data', header=None)
df.columns = [
'VENDOR', 'MODEL', 'MYCT', 'MMIN', 'MMAX',
'CACH', 'CHMIN', 'CHMAX', 'PRP', 'ERP'
]
import numpy as np
X = df[['PRP']].values
y = df['ERP'].values
import matplotlib.pyplot as plt
def lin_regplot(X, y, model):
plt.scatter(X, y, c='blue')
plt.plot(X, model.predict(X), color='red')
from sklearn.tree import DecisionTreeRegressor
tree = DecisionTreeRegressor(max_depth=3)
tree.fit(X, y)
sort_idx = X.flatten().argsort()
lin_regplot(X[sort_idx], y[sort_idx], tree)
plt.xlabel('[PRP]')
plt.ylabel('[ERP]')
plt.show()
# Random Forests
from sklearn.model_selection import train_test_split
X = df[['CACH', 'CHMIN', 'CHMAX', 'PRP']].values
y = df['ERP'].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4, random_state=1
)
from sklearn.ensemble import RandomForestRegressor
forest = RandomForestRegressor(
n_estimators=500, criterion="mse", random_state=1, n_jobs=2
)
forest.fit(X_train, y_train)
y_train_pred = forest.predict(X_train)
y_test_pred = forest.predict(X_test)
from sklearn.metrics import mean_squared_error
print('MSE train: %.3f, test: %.3f' % (
mean_squared_error(y_train, y_train_pred),
mean_squared_error(y_test, y_test_pred)
))
from sklearn.metrics import r2_score
print(
'R^2 train: %.3f, test: %.3f' % (
r2_score(y_train, y_train_pred),
r2_score(y_test, y_test_pred)
)
)
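# For reference, r2_score computes R^2 = 1 - SS_res / SS_tot, i.e.
# 1 - sum((y - y_pred) ** 2) / sum((y - y.mean()) ** 2); 1.0 is a perfect fit,
# and a large gap between train and test R^2 suggests overfitting.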
plt.scatter(
y_train_pred, y_train_pred - y_train, c='black',
marker='o', s=35, alpha=0.5, label='Training data'
)
plt.scatter(
y_test_pred, y_test_pred - y_test, c='lightgreen',
marker='s', s=35, alpha=0.7, label='Test data'
)
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=0, xmax=1000, lw=2, color='red')
plt.xlim([0, 1000])
plt.show() | mit |
datacratic/StarCluster | utils/scimage_13_04.py | 19 | 17696 | #!/usr/bin/env python
"""
This script is meant to be run inside of a ubuntu cloud image available at
uec-images.ubuntu.com::
$ EC2_UBUNTU_IMG_URL=http://uec-images.ubuntu.com/precise/current
$ wget $EC2_UBUNTU_IMG_URL/precise-server-cloudimg-amd64.tar.gz
or::
$ wget $EC2_UBUNTU_IMG_URL/precise-server-cloudimg-i386.tar.gz
After downloading an Ubuntu cloud image, the next step is to extract the image::
$ tar xvzf precise-server-cloudimg-amd64.tar.gz
Then resize it to 10GB::
$ e2fsck -f precise-server-cloudimg-amd64.img
$ resize2fs precise-server-cloudimg-amd64.img 10G
Next you need to mount the image::
$ mkdir /tmp/img-mount
$ mount precise-server-cloudimg-amd64.img /tmp/img-mount
$ mount -t proc none /tmp/img-mount/proc
$ mount -t sysfs none /tmp/img-mount/sys
$ mount -o bind /dev /tmp/img-mount/dev
$ mount -t devpts none /tmp/img-mount/dev/pts
$ mount -o rbind /var/run/dbus /tmp/img-mount/var/run/dbus
Copy /etc/resolv.conf and /etc/mtab to the image::
$ mkdir -p /tmp/img-mount/var/run/resolvconf
$ cp /etc/resolv.conf /tmp/img-mount/var/run/resolvconf/resolv.conf
$ grep -v rootfs /etc/mtab > /tmp/img-mount/etc/mtab
Next copy this script inside the image::
$ cp /path/to/scimage.py /tmp/img-mount/root/scimage.py
Finally chroot inside the image and run this script:
$ chroot /tmp/img-mount /bin/bash
$ cd $HOME
$ python scimage.py
"""
import os
import sys
import glob
import shutil
import fileinput
import subprocess
import multiprocessing
SRC_DIR = "/usr/local/src"
APT_SOURCES_FILE = "/etc/apt/sources.list"
BUILD_UTILS_PKGS = "build-essential devscripts debconf debconf-utils dpkg-dev "
BUILD_UTILS_PKGS += "python-dev python-setuptools python-pip python-nose rar "
BUILD_UTILS_PKGS += "python-distutils-extra gfortran unzip unace cdbs patch "
GRID_SCHEDULER_GIT = 'git://github.com/jtriley/gridscheduler.git'
CLOUDERA_ARCHIVE_KEY = 'http://archive.cloudera.com/debian/archive.key'
CLOUDERA_APT = 'http://archive.cloudera.com/debian squeeze-cdh3u5 contrib'
PPAS = ["ppa:staticfloat/julia-deps", "ppa:justin-t-riley/starcluster",
"ppa:staticfloat/julianightlies"]
STARCLUSTER_MOTD = """\
#!/bin/sh
cat<<"EOF"
_ _ _
__/\_____| |_ __ _ _ __ ___| |_ _ ___| |_ ___ _ __
\ / __| __/ _` | '__/ __| | | | / __| __/ _ \ '__|
/_ _\__ \ || (_| | | | (__| | |_| \__ \ || __/ |
\/ |___/\__\__,_|_| \___|_|\__,_|___/\__\___|_|
StarCluster Ubuntu 13.04 AMI
Software Tools for Academics and Researchers (STAR)
Homepage: http://star.mit.edu/cluster
Documentation: http://star.mit.edu/cluster/docs/latest
Code: https://github.com/jtriley/StarCluster
Mailing list: http://star.mit.edu/cluster/mailinglist.html
This AMI Contains:
* Open Grid Scheduler (OGS - formerly SGE) queuing system
* Condor workload management system
* OpenMPI compiled with Open Grid Scheduler support
* OpenBLAS - Highly optimized Basic Linear Algebra Routines
* NumPy/SciPy linked against OpenBlas
* Pandas - Data Analysis Library
* IPython 1.1.0 with parallel and notebook support
* Julia 0.3pre
* and more! (use 'dpkg -l' to show all installed packages)
Open Grid Scheduler/Condor cheat sheet:
* qstat/condor_q - show status of batch jobs
* qhost/condor_status- show status of hosts, queues, and jobs
* qsub/condor_submit - submit batch jobs (e.g. qsub -cwd ./job.sh)
* qdel/condor_rm - delete batch jobs (e.g. qdel 7)
* qconf - configure Open Grid Scheduler system
Current System Stats:
EOF
landscape-sysinfo | grep -iv 'graph this data'
"""
def run_command(cmd, ignore_failure=False, failure_callback=None,
get_output=False):
kwargs = {}
if get_output:
kwargs.update(dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE))
p = subprocess.Popen(cmd, shell=True, **kwargs)
output = []
if get_output:
line = None
while line != '':
line = p.stdout.readline()
if line != '':
output.append(line)
print line,
for line in p.stderr.readlines():
if line != '':
output.append(line)
print line,
retval = p.wait()
if retval != 0:
errmsg = "command '%s' failed with status %d" % (cmd, retval)
if failure_callback:
ignore_failure = failure_callback(retval)
if not ignore_failure:
raise Exception(errmsg)
else:
sys.stderr.write(errmsg + '\n')
if get_output:
return retval, ''.join(output)
return retval
def apt_command(cmd):
dpkg_opts = "Dpkg::Options::='--force-confnew'"
cmd = "apt-get -o %s -y --force-yes %s" % (dpkg_opts, cmd)
cmd = "DEBIAN_FRONTEND='noninteractive' " + cmd
run_command(cmd)
def apt_install(pkgs):
apt_command('install %s' % pkgs)
def chdir(directory):
opts = glob.glob(directory)
isdirlist = [o for o in opts if os.path.isdir(o)]
if len(isdirlist) > 1:
raise Exception("more than one dir matches: %s" % directory)
os.chdir(isdirlist[0])
def _fix_atlas_rules(rules_file='debian/rules'):
for line in fileinput.input(rules_file, inplace=1):
if 'ATLAS=None' not in line:
print line,
def configure_apt_sources():
srcfile = open(APT_SOURCES_FILE)
contents = srcfile.readlines()
srcfile.close()
srclines = []
for line in contents:
if not line.strip() or line.startswith('#'):
continue
parts = line.split()
if parts[0] == 'deb':
parts[0] = 'deb-src'
srclines.append(' '.join(parts).strip())
with open(APT_SOURCES_FILE, 'w') as srcfile:
srcfile.write(''.join(contents))
srcfile.write('\n'.join(srclines) + '\n')
with open('/etc/apt/sources.list.d/cloudera-hadoop.list', 'w') as srcfile:
srcfile.write('deb %s\n' % CLOUDERA_APT)
srcfile.write('deb-src %s\n' % CLOUDERA_APT)
run_command('gpg --keyserver keyserver.ubuntu.com --recv-keys 0F932C9C')
run_command('curl -s %s | sudo apt-key add -' % CLOUDERA_ARCHIVE_KEY)
apt_install('debian-archive-keyring')
for ppa in PPAS:
run_command('add-apt-repository %s -y -s' % ppa)
def upgrade_packages():
apt_command('update')
apt_command('upgrade')
def install_build_utils():
"""docstring for configure_build"""
apt_install(BUILD_UTILS_PKGS)
def install_gridscheduler():
chdir(SRC_DIR)
apt_command('build-dep gridengine')
if os.path.isfile('gridscheduler-scbuild.tar.gz'):
run_command('tar xvzf gridscheduler-scbuild.tar.gz')
run_command('mv gridscheduler /opt/sge6-fresh')
return
run_command('git clone %s' % GRID_SCHEDULER_GIT)
sts, out = run_command('readlink -f `which java`', get_output=True)
java_home = out.strip().split('/jre')[0]
chdir(os.path.join(SRC_DIR, 'gridscheduler', 'source'))
run_command('git checkout -t -b develop origin/develop')
env = 'JAVA_HOME=%s' % java_home
run_command('%s ./aimk -only-depend' % env)
run_command('%s scripts/zerodepend' % env)
run_command('%s ./aimk depend' % env)
run_command('%s ./aimk -no-secure -no-gui-inst -man' % env)
sge_root = '/opt/sge6-fresh'
os.mkdir(sge_root)
env += ' SGE_ROOT=%s' % sge_root
run_command('%s scripts/distinst -all -local -noexit -y -- man' % env)
def install_condor():
chdir(SRC_DIR)
run_command("rm -f /var/lock")
#apt_install('condor=7.7.2-1')
#run_command('echo condor hold | dpkg --set-selections')
#run_command('ln -s /etc/condor/condor_config /etc/condor_config.local')
#run_command('mkdir /var/lib/condor/log')
#run_command('mkdir /var/lib/condor/run')
#run_command('chown -R condor:condor /var/lib/condor/log')
#run_command('chown -R condor:condor /var/lib/condor/run')
apt_install('condor')
def install_pydrmaa():
chdir(SRC_DIR)
run_command('pip install drmaa')
def install_atlas():
"""docstring for install_atlas"""
chdir(SRC_DIR)
apt_command('build-dep atlas')
if glob.glob("*atlas*.deb"):
run_command('dpkg -i *atlas*.deb')
return
apt_command('source atlas')
chdir('atlas-*')
run_command('fakeroot debian/rules custom')
run_command('dpkg -i ../*atlas*.deb')
def install_openblas():
"""docstring for install_openblas"""
chdir(SRC_DIR)
apt_command('build-dep libopenblas-dev')
if glob.glob("*openblas*.deb"):
run_command('dpkg -i *openblas*.deb')
else:
apt_command('source libopenblas-dev')
chdir('openblas-*')
rule_file = open('Makefile.rule', 'a')
# NO_AFFINITY=1 is required to utilize all cores on all non
# cluster-compute/GPU instance types due to the shared virtualization
# layer not supporting processor affinity properly. However, Cluster
# Compute/GPU instance types use a near-bare-metal hypervisor which
# *does* support processor affinity. From minimal testing it appears
# that there is a ~20% increase in performance when using affinity on
# cc1/cg1 types implying NO_AFFINITY=1 should *not* be set for cluster
# compute/GPU AMIs.
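        # (Editorial sketch, not part of the original script: for a cluster
        # compute/GPU AMI one would presumably drop the affinity flag, e.g.
        #   lines = ['DYNAMIC_ARCH=1', 'NUM_THREADS=64', 'NO_LAPACK=1']
        # so that OpenBLAS can pin threads to cores on those instance types.)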
lines = ['DYNAMIC_ARCH=1', 'NUM_THREADS=64', 'NO_LAPACK=1',
'NO_AFFINITY=1']
rule_file.write('\n'.join(lines))
rule_file.close()
run_command('fakeroot debian/rules custom')
run_command('dpkg -i ../*openblas*.deb')
run_command('echo libopenblas-base hold | dpkg --set-selections')
run_command('echo libopenblas-dev hold | dpkg --set-selections')
run_command("ldconfig")
def install_python_packages():
install_pydrmaa()
install_numpy_scipy()
install_pandas()
install_ipython()
apt_command('build-dep python-imaging')
pkgs = "virtualenv pillow boto matplotlib django mpi4py ctypes Cython "
pkgs += "pudb supervisor "
run_command("pip install %s" % pkgs)
def install_numpy_scipy():
"""docstring for install_numpy"""
chdir(SRC_DIR)
apt_command('build-dep python-numpy')
apt_command('build-dep python-scipy')
run_command('pip install -d . numpy')
run_command('tar xvzf numpy*.tar.gz')
run_command("sed -i 's/return None #/pass #/' numpy*/numpy/core/setup.py")
run_command("cd numpy* && python setup.py install")
run_command('pip install scipy')
def install_pandas():
"""docstring for install_pandas"""
chdir(SRC_DIR)
apt_command('build-dep pandas')
run_command('pip install pandas')
def install_openmpi():
chdir(SRC_DIR)
apt_command('build-dep openmpi')
apt_install('blcr-util')
if glob.glob('*openmpi*.deb'):
run_command('dpkg -i *openmpi*.deb')
else:
apt_command('source openmpi')
chdir('openmpi*')
for line in fileinput.input('debian/rules', inplace=1):
print line,
if '--enable-heterogeneous' in line:
print ' --with-sge \\'
def _deb_failure_callback(retval):
if not glob.glob('../*openmpi*.deb'):
return False
return True
run_command('dch --local=\'+custom\' '
'"custom build on: `uname -s -r -v -m -p -i -o`"')
run_command('dpkg-buildpackage -rfakeroot -b',
failure_callback=_deb_failure_callback)
run_command('dpkg -i ../*openmpi*.deb')
sts, out = run_command('ompi_info | grep -i grid', get_output=True)
if 'gridengine' not in out:
raise Exception("failed to build OpenMPI with "
"Open Grid Scheduler support")
run_command('echo libopenmpi1.3 hold | dpkg --set-selections')
run_command('echo libopenmpi-dev hold | dpkg --set-selections')
run_command('echo libopenmpi-dbg hold | dpkg --set-selections')
run_command('echo openmpi-bin hold | dpkg --set-selections')
run_command('echo openmpi-checkpoint hold | dpkg --set-selections')
run_command('echo openmpi-common hold | dpkg --set-selections')
run_command('echo openmpi-doc hold | dpkg --set-selections')
run_command('ldconfig')
def install_hadoop():
chdir(SRC_DIR)
hadoop_pkgs = ['namenode', 'datanode', 'tasktracker', 'jobtracker',
'secondarynamenode']
pkgs = ['hadoop-0.20'] + ['hadoop-0.20-%s' % pkg for pkg in hadoop_pkgs]
apt_install(' '.join(pkgs))
run_command('easy_install dumbo')
def install_ipython():
chdir(SRC_DIR)
apt_install('libzmq-dev')
run_command('pip install ipython[parallel,notebook]')
# This is broken in IPy 1.1.0
#mjax_install = 'from IPython.external.mathjax import install_mathjax'
#mjax_install += '; install_mathjax()'
#run_command("python -c '%s'" % mjax_install)
def install_julia():
#chdir(SRC_DIR)
#apt_install('zlib1g-dev patchelf llvm-3.3-dev libsuitesparse-dev '
# 'libncurses5-dev libopenblas-dev liblapack-dev '
# 'libarpack2-dev libfftw3-dev libgmp-dev libpcre3-dev '
# 'libunwind8-dev libreadline-dev libdouble-conversion-dev '
# 'libopenlibm-dev librmath-dev libmpfr-dev')
#run_command('git clone git://github.com/JuliaLang/julia.git')
#buildopts = 'LLVM_CONFIG=llvm-config-3.3 VERBOSE=1 USE_BLAS64=0 '
#libs = ['LLVM', 'ZLIB', 'SUITESPARSE', 'ARPACK', 'BLAS', 'FFTW', 'LAPACK',
# 'GMP', 'MPFR', 'PCRE', 'LIBUNWIND', 'READLINE', 'GRISU',
# 'OPENLIBM', 'RMATH']
#buildopts += ' '.join(['USE_SYSTEM_%s=1' % lib for lib in libs])
#run_command('cd julia && make %s PREFIX=/usr install' % buildopts)
apt_install("julia")
def configure_motd():
for f in glob.glob('/etc/update-motd.d/*'):
os.unlink(f)
motd = open('/etc/update-motd.d/00-starcluster', 'w')
motd.write(STARCLUSTER_MOTD)
motd.close()
os.chmod(motd.name, 0755)
def configure_bash():
completion_line_found = False
for line in fileinput.input('/etc/bash.bashrc', inplace=1):
if 'bash_completion' in line and line.startswith('#'):
print line.replace('#', ''),
completion_line_found = True
elif completion_line_found:
print line.replace('#', ''),
completion_line_found = False
else:
print line,
aliasfile = open('/root/.bash_aliases', 'w')
aliasfile.write("alias ..='cd ..'\n")
aliasfile.close()
def setup_environ():
num_cpus = multiprocessing.cpu_count()
os.environ['MAKEFLAGS'] = '-j%d' % (num_cpus + 1)
os.environ['DEBIAN_FRONTEND'] = "noninteractive"
if os.path.isfile('/sbin/initctl') and not os.path.islink('/sbin/initctl'):
run_command('mv /sbin/initctl /sbin/initctl.bak')
run_command('ln -s /bin/true /sbin/initctl')
def install_nfs():
chdir(SRC_DIR)
run_command('initctl reload-configuration')
apt_install('nfs-kernel-server')
run_command('ln -s /etc/init.d/nfs-kernel-server /etc/init.d/nfs')
def install_default_packages():
    # stop mysql from interactively asking for a password
preseedf = '/tmp/mysql-preseed.txt'
mysqlpreseed = open(preseedf, 'w')
preseeds = """\
mysql-server mysql-server/root_password select
mysql-server mysql-server/root_password seen true
mysql-server mysql-server/root_password_again select
mysql-server mysql-server/root_password_again seen true
"""
mysqlpreseed.write(preseeds)
mysqlpreseed.close()
run_command('debconf-set-selections < %s' % mysqlpreseed.name)
run_command('rm %s' % mysqlpreseed.name)
pkgs = "git vim mercurial subversion cvs encfs keychain screen tmux zsh "
pkgs += "ksh csh tcsh ec2-api-tools ec2-ami-tools mysql-server "
pkgs += "mysql-client apache2 libapache2-mod-wsgi nginx sysv-rc-conf "
pkgs += "pssh emacs irssi htop vim-scripts mosh default-jdk mpich2 xvfb "
pkgs += "openmpi-bin libopenmpi-dev libopenblas-dev liblapack-dev julia"
apt_install(pkgs)
def configure_init():
scripts = ['nfs-kernel-server', 'hadoop', 'condor', 'apache', 'mysql',
'nginx']
for script in scripts:
run_command('find /etc/rc* -iname \*%s\* -delete' % script)
def cleanup():
run_command('rm -rf /run/resolvconf')
run_command('rm -f /etc/mtab')
run_command('rm -rf /root/*')
exclude = ['/root/.bashrc', '/root/.profile', '/root/.bash_aliases']
for dot in glob.glob("/root/.*"):
if dot not in exclude:
run_command('rm -rf %s' % dot)
for path in glob.glob('/usr/local/src/*'):
if os.path.isdir(path):
shutil.rmtree(path)
run_command('rm -f /var/cache/apt/archives/*.deb')
run_command('rm -f /var/cache/apt/archives/partial/*')
    for f in glob.glob('/etc/profile.d/*'):
if 'byobu' in f:
run_command('rm -f %s' % f)
if os.path.islink('/sbin/initctl') and os.path.isfile('/sbin/initctl.bak'):
run_command('mv -f /sbin/initctl.bak /sbin/initctl')
def main():
"""docstring for main"""
if os.getuid() != 0:
sys.stderr.write('you must be root to run this script\n')
return
setup_environ()
configure_motd()
configure_bash()
configure_apt_sources()
upgrade_packages()
install_build_utils()
install_nfs()
install_default_packages()
install_python_packages()
# Only use these to build the packages locally
# These should normally be installed from the PPAs
#install_openblas()
#install_openmpi()
#install_julia()
install_gridscheduler()
install_condor()
install_hadoop()
configure_init()
cleanup()
if __name__ == '__main__':
main()
| gpl-3.0 |
zrick/tlab | scripts/python/NcRegrid.py | 2 | 7598 | '''
Interpolate and concatenate up to five netCDF4 files with different vertical grids. A cubic spline interpolation is performed. All netCDF4 files need to be located in the same
directory and should follow the naming convention (i.e. first set: avg2000-4000.nc, avg1s2000-4000.nc,... and second set: avg4000-6000.nc, avg1s4000-6000.nc,...).
(Note: To perform a visual inspection of the results, uncomment lines 70-83 and adapt them to your needs.)
Bernhard Schulz, May 2019
'''
import warnings
import netCDF4 as nc
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as interp
''' define iterations to be merged '''
it_1 = '3500' # first index
it_2 = '6500' # second index
it_3 = '10100' # third index
number_scalars = 4 # maximum 5
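# Illustrative file layout (editorial note, derived from the settings above and
# not part of the original script): the script expects
#   ./avg3500-6500.nc,  ./avg1s3500-6500.nc,  ..., ./avg4s3500-6500.nc
#   ./avg6500-10100.nc, ./avg1s6500-10100.nc, ..., ./avg4s6500-10100.nc
# and writes the concatenated files
#   ./avg3500-10100.nc, ./avg1s3500-10100.nc, ..., ./avg4s3500-10100.nc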
'''-----------------------------------------------------------------------------------------------------------------------'''
'''-----------------------------------------------------------------------------------------------------------------------'''
'''-----------------------------------------------------------------------------------------------------------------------'''
''' load data; _1 for SMALL GRID; _2 for LARGE GRID; _12 CONCATENATED FILES '''
path_avg_1 = './avg' + it_1 + '-' + it_2 + '.nc'; avg_1 = nc.Dataset(path_avg_1, 'r')
path_avg_2 = './avg' + it_2 + '-' + it_3 + '.nc'; avg_2 = nc.Dataset(path_avg_2, 'r')
print( 'merge ', path_avg_1, 'with', path_avg_2 )
''' create and read scalars '''
list_scalars = ['avg1s', 'avg2s', 'avg3s', 'avg4s', 'avg5s']
avg1s_1 = 'nan'; avg2s_1 = 'nan'; avg3s_1 = 'nan'; avg4s_1 = 'nan'; avg5s_1 = 'nan' # create dummy variables
avg1s_2 = 'nan'; avg2s_2 = 'nan'; avg3s_2 = 'nan'; avg4s_2 = 'nan'; avg5s_2 = 'nan'
list_1 = [avg1s_1, avg2s_1, avg3s_1, avg4s_1, avg5s_1]
list_2 = [avg1s_2, avg2s_2, avg3s_2, avg4s_2, avg5s_2]
for idx in range(number_scalars):
path_1 = './' + list_scalars[idx] + it_1 + '-' + it_2 + '.nc'
    list_1[idx] = nc.Dataset(path_1, 'r')
path_2 = './' + list_scalars[idx] + it_2 + '-' + it_3 + '.nc'
    list_2[idx] = nc.Dataset(path_2, 'r')
print( 'interpolate and concatenate ', path_1, 'with ', path_2 )
''' names for concatenated files '''
avg_12 = 'avg' + it_1 + '-' + it_3 + '.nc'
avg1s_12 = 'nan'; avg2s_12 = 'nan'; avg3s_12 = 'nan'; avg4s_12 = 'nan'; avg5s_12 ='nan'
list_12 = [avg1s_12, avg2s_12, avg3s_12, avg4s_12, avg5s_12]
for idx in range(number_scalars):
list_12[idx] = list_scalars[idx] + it_1 + '-' + it_3 + '.nc'
#print('list of variables', avg_1.variables.keys())
'''-----------------------------------------------------------------------------------------------------------------------'''
'''-----------------------------------------------------------------------------------------------------------------------'''
'''-----------------------------------------------------------------------------------------------------------------------'''
''' define functions '''
def grid_12(y_1,A_1,y_2,A_2,varname):
    ''' perform cubic (k=3) spline interpolation '''
dim_t = len(A_1[:,0])
dim_y = len(y_2)
A_12 = np.zeros( (dim_t,dim_y) )
for idx in range(dim_t):
tck = interp.splrep(y_1, A_1[idx,:], k=3)
A_12[idx,:] = interp.splev(y_2, tck)
# ''' quick and dirty check '''
# diff = abs(abs(A_2[0,:]) - abs(A_12[-1,:]))
# relative_diff = 100.0*np.nanmax(diff)/A_2[0,np.nanargmax(diff)]
#
# if relative_diff > 0.1 and np.nanmax(diff) > 0.1 and y_2[np.nanargmax(diff)] > -19.0: # adapt this line to your needs
# print('WARNING: profiles deviate by ',np.nanmax(diff),' at height ',y_2[np.nanargmax(diff)])
# plt.figure('Profiles for ' + str(varname) + ' deviate by ' + str( round(np.nanmax(diff),2) ) + ' at height ' + str(y_2[np.nanargmax(diff)]) )
# plt.plot(A_1[idx,:],y_1,'b',label='initial data')
# plt.plot(A_12[idx,:],y_2,'r--',label='interpolated data')
# plt.xlabel(str(varname)+' at iteration'+str(idx))
# plt.ylabel('height')
# plt.legend()
# plt.show()
return A_12
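# Example usage of grid_12 (editorial sketch, the array names are placeholders):
#   A_on_y2 = grid_12(y_1, A_1, y_2, A_2, 'u')
# where A_1 has shape (n_times, len(y_1)); the result has shape
# (n_times, len(y_2)), i.e. A_1 re-gridded onto the levels of the second file.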
def interpolate(data_1,data_2,species):
    ''' interpolate data_1 onto the vertical grid of data_2 '''
dictonary = {}
for varname in data_1.variables:
print('interpolate and concatenate',str(species),str(varname))
y_1 = data_1.variables['y']
y_2 = data_2.variables['y']
var_1 = data_1.variables[str(varname)]
var_2 = data_2.variables[str(varname)]
if len(var_1.shape) == 1 and varname == 'y':
dictonary[str(varname)] = y_2
if len(var_1.shape) == 1 and varname != 'y':
if len(var_1) != len(data_1.variables['it']):
                warnings.warn('Warning: length of var_1 does not equal the number of iterations')
dictonary[str(varname)] = np.concatenate( (var_1,var_2), axis=0 )
if len(var_1.shape) == 2:
var_12 = str(varname)+'_12'
var_12 = grid_12(y_1,var_1, y_2,var_2,varname)
''' concatenate data '''
dictonary[str(varname)] = np.concatenate( (var_12,var_2), axis=0 )
#print('shape of',str(varname)+'_1:',var_1.shape, str(varname)+'_12:',var_12.shape, str(varname)+'_2:',var_2.shape, str(varname)+':',dictonary[str(varname)].shape)
return dictonary
def writeNC(dictonary,name_12):
    ''' write the dictionary to a netCDF file '''
avgnc = nc.Dataset(name_12, 'w')
''' dimension '''
ntimes = len(dictonary['t'])
jmax = len(dictonary['y'])
print("Creating netCDF file with ntimes = {} and jmax = {}".format(ntimes, jmax))
''' create dimensions in netCDF file '''
dim_y = avgnc.createDimension('y', jmax)
dim_t = avgnc.createDimension('t', ntimes)
''' create variables '''
var_t = avgnc.createVariable('t', 'f8',('t',))
var_t.units='Days since 0000-01-01 00:00'
var_y = avgnc.createVariable('y', 'f8',('y',))
var_y.long_name='Height above Surface'
var_y.positive='up'
var_y.standard_name='height'
var_y.units='level'
var_it= avgnc.createVariable('it','i4',('t',))
''' first, handle the dimensions '''
var_t[:] = dictonary['t'][:]
var_y[:] = dictonary['y'][:]
var_it[:] = [int(f) for f in dictonary['it'][:]]
''' now make a loop through all vars '''
for varname in list(dictonary):
if( not( (varname == "it") or (varname == "y") or (varname == "t") ) ):
vardata = dictonary[varname]
if(len(vardata.shape) == 2):
if( (vardata.shape[0] == ntimes) and (vardata.shape[1] == jmax) ):
#print("Storing {} in 2D (t,y) array".format(varname))
var_name = avgnc.createVariable(varname,'f8',('t','y',))
var_name[:,:] = vardata
if(len(vardata.shape) == 1):
if(vardata.shape[0] == ntimes):
#print("Storing {} in 1D (t) array".format(varname))
var_name = avgnc.createVariable(varname,'f8',('t',))
var_name[:] = vardata
avgnc.close()
return
'''-----------------------------------------------------------------------------------------------------------------------'''
'''-----------------------------------------------------------------------------------------------------------------------'''
'''-----------------------------------------------------------------------------------------------------------------------'''
''' run code '''
''' interpolate and concatenate data '''
dict_avg = interpolate(avg_1,avg_2,'avg')
writeNC(dict_avg,avg_12)
for idx in range(number_scalars):
dictonary = interpolate(list_1[idx], list_2[idx], list_scalars[idx] )
writeNC(dictonary, list_12[idx])
| gpl-3.0 |
thegooglecodearchive/healpy | healpy/visufunc.py | 2 | 22006 | #
# This file is part of Healpy.
#
# Healpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Healpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Healpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# For more information about Healpy, see http://code.google.com/p/healpy
#
import projaxes as PA
import pylab
import numpy as npy
import matplotlib
import matplotlib.colors as colors
import matplotlib.cbook as cbook
import pixelfunc
pi = npy.pi
dtor = pi/180.
def mollview(map=None,fig=None,rot=None,coord=None,unit='',
xsize=800,title='Mollweide view',nest=False,
min=None,max=None,flip='astro',
remove_dip=False,remove_mono=False,
gal_cut=0,
format='%g',format2='%g',
cbar=True,cmap=None, notext=False,
norm=None,hold=False,margins=None,sub=None):
"""Plot an healpix map (given as an array) in Mollweide projection.
Input:
- map : an ndarray containing the map
if None, use map with inf value (white map), useful for
overplotting
Parameters:
- fig: a figure number. Default: create a new figure
- rot: rotation, either 1,2 or 3 angles describing the rotation
Default: None
- coord: either one of 'G', 'E' or 'C' to describe the coordinate
system of the map, or a sequence of 2 of these to make
rotation from the first to the second coordinate system.
Default: None
- unit: a text describing the unit. Default: ''
- xsize: the size of the image. Default: 800
- title: the title of the plot. Default: 'Mollweide view'
- nest: if True, ordering scheme is NEST. Default: False (RING)
- min: the minimum range value
- max: the maximum range value
- flip: 'astro' (default, east towards left, west towards right) or 'geo'
- remove_dip: if True, remove the dipole+monopole
- remove_mono: if True, remove the monopole
- gal_cut: galactic cut for the dipole/monopole fit
- format: the format of the scale label. Default: '%g'
- format2: format of the pixel value under mouse. Default: '%g'
- cbar: display the colorbar. Default: True
- notext: if True, no text is printed around the map
- norm: color normalization, hist= histogram equalized color mapping, log=
logarithmic color mapping, default: None (linear color mapping)
- hold: if True, replace the current Axes by a MollweideAxes.
use this if you want to have multiple maps on the same
figure. Default: False
- sub: use a part of the current figure (same syntax as subplot).
Default: None
- margins: either None, or a sequence (left,bottom,right,top)
giving the margins on left,bottom,right and top
of the axes. Values are relative to figure (0-1).
Default: None
"""
# Create the figure
if not (hold or sub):
f=pylab.figure(fig,figsize=(8.5,5.4))
extent = (0.02,0.05,0.96,0.9)
elif hold:
f=pylab.gcf()
left,bottom,right,top = npy.array(f.gca().get_position()).ravel()
extent = (left,bottom,right-left,top-bottom)
f.delaxes(f.gca())
else: # using subplot syntax
f=pylab.gcf()
if hasattr(sub,'__len__'):
nrows, ncols, idx = sub
else:
nrows, ncols, idx = sub/100, (sub%100)/10, (sub%10)
if idx < 1 or idx > ncols*nrows:
raise ValueError('Wrong values for sub: %d, %d, %d'%(nrows,
ncols,
idx))
c,r = (idx-1)%ncols,(idx-1)/ncols
if not margins:
margins = (0.01,0.0,0.0,0.02)
extent = (c*1./ncols+margins[0],
1.-(r+1)*1./nrows+margins[1],
1./ncols-margins[2]-margins[0],
1./nrows-margins[3]-margins[1])
extent = (extent[0]+margins[0],
extent[1]+margins[1],
extent[2]-margins[2]-margins[0],
extent[3]-margins[3]-margins[1])
#extent = (c*1./ncols, 1.-(r+1)*1./nrows,1./ncols,1./nrows)
#f=pylab.figure(fig,figsize=(8.5,5.4))
# Starting to draw : turn interactive off
wasinteractive = pylab.isinteractive()
pylab.ioff()
try:
if map is None:
map = npy.zeros(12)+npy.inf
cbar=False
ax=PA.HpxMollweideAxes(f,extent,coord=coord,rot=rot,
format=format2,flipconv=flip)
f.add_axes(ax)
if remove_dip:
map=pixelfunc.remove_dipole(map,gal_cut=gal_cut,
nest=nest,copy=True,
verbose=True)
elif remove_mono:
map=pixelfunc.remove_monopole(map,gal_cut=gal_cut,nest=nest,
copy=True,verbose=True)
ax.projmap(map,nest=nest,xsize=xsize,coord=coord,vmin=min,vmax=max,
cmap=cmap,norm=norm)
if cbar:
im = ax.get_images()[0]
b = im.norm.inverse(npy.linspace(0,1,im.cmap.N+1))
v = npy.linspace(im.norm.vmin,im.norm.vmax,im.cmap.N)
if matplotlib.__version__ >= '0.91.0':
cb=f.colorbar(ax.get_images()[0],ax=ax,
orientation='horizontal',
shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
pad=0.05,fraction=0.1,boundaries=b,values=v,
format=format)
else:
# for older matplotlib versions, no ax kwarg
cb=f.colorbar(ax.get_images()[0],orientation='horizontal',
shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
pad=0.05,fraction=0.1,boundaries=b,values=v,
format=format)
ax.set_title(title)
if not notext:
ax.text(0.86,0.05,ax.proj.coordsysstr,fontsize=14,
fontweight='bold',transform=ax.transAxes)
if cbar:
cb.ax.text(0.5,-1.0,unit,fontsize=14,
transform=cb.ax.transAxes,ha='center',va='center')
f.sca(ax)
finally:
pylab.draw()
if wasinteractive:
pylab.ion()
#pylab.show()
def gnomview(map=None,fig=None,rot=None,coord=None,unit='',
xsize=200,ysize=None,reso=1.5,degree=False,
title='Gnomonic view',nest=False,remove_dip=False,
remove_mono=False,gal_cut=0,
min=None,max=None,flip='astro',
format='%.3g',cbar=True,
cmap=None, norm=None,
hold=False,sub=None,margins=None,notext=False):
"""Plot an healpix map (given as an array) in Gnomonic projection.
Input:
- map : an ndarray containing the map.
if None, use map with inf value (white map), useful for
overplotting
Parameters:
- fig: a figure number. Default: create a new figure
- rot: rotation, either 1,2 or 3 angles describing the rotation
Default: None
- coord: either one of 'G', 'E' or 'C' to describe the coordinate
system of the map, or a sequence of 2 of these to make
rotation from the first to the second coordinate system.
Default: None
- unit: a text describing the unit. Default: ''
- xsize: the size of the image. Default: 200
- ysize: the size of the image. Default: xsize
- reso: resolution in arcmin if degree is False. Default: 1.5 arcmin
- degree: if True, reso is in degree. Default: False
      - title: the title of the plot. Default: 'Gnomonic view'
- nest: if True, ordering scheme is NEST. Default: False (RING)
- min: the minimum range value
- max: the maximum range value
- flip: 'astro' (default, east towards left, west towards right) or 'geo'
- remove_dip: if True, remove the dipole+monopole
- remove_mono: if True, remove the monopole
- gal_cut: galactic cut for the dipole/monopole fit
- format: the format of the scale. Default: '%.3g'
      - hold: if True, replace the current Axes by a GnomonicAxes.
use this if you want to have multiple maps on the same
figure. Default: False
- sub: use a part of the current figure (same syntax as subplot).
Default: None
- margins: either None, or a sequence (left,bottom,right,top)
giving the margins on left,bottom,right and top
of the axes. Values are relative to figure (0-1).
Default: None
- notext: True: do not add resolution info text
Default=False
"""
if not (hold or sub):
f=pylab.figure(fig,figsize=(5.8,6.4))
if not margins:
margins = (0.075,0.05,0.075,0.05)
extent = (0.0,0.0,1.0,1.0)
elif hold:
f=pylab.gcf()
left,bottom,right,top = npy.array(pylab.gca().get_position()).ravel()
if not margins:
margins = (0.0,0.0,0.0,0.0)
extent = (left,bottom,right-left,top-bottom)
f.delaxes(pylab.gca())
else: # using subplot syntax
f=pylab.gcf()
if hasattr(sub,'__len__'):
nrows, ncols, idx = sub
else:
nrows, ncols, idx = sub/100, (sub%100)/10, (sub%10)
if idx < 1 or idx > ncols*nrows:
raise ValueError('Wrong values for sub: %d, %d, %d'%(nrows,
ncols,
idx))
c,r = (idx-1)%ncols,(idx-1)/ncols
if not margins:
margins = (0.01,0.0,0.0,0.02)
extent = (c*1./ncols+margins[0],
1.-(r+1)*1./nrows+margins[1],
1./ncols-margins[2]-margins[0],
1./nrows-margins[3]-margins[1])
extent = (extent[0]+margins[0],
extent[1]+margins[1],
extent[2]-margins[2]-margins[0],
extent[3]-margins[3]-margins[1])
#f=pylab.figure(fig,figsize=(5.5,6))
# Starting to draw : turn interactive off
wasinteractive = pylab.isinteractive()
pylab.ioff()
try:
if map is None:
map = npy.zeros(12)+npy.inf
cbar=False
ax=PA.HpxGnomonicAxes(f,extent,coord=coord,rot=rot,
format=format,flipconv=flip)
f.add_axes(ax)
if remove_dip:
map=pixelfunc.remove_dipole(map,gal_cut=gal_cut,nest=nest,copy=True)
elif remove_mono:
map=pixelfunc.remove_monopole(map,gal_cut=gal_cut,nest=nest,copy=True)
ax.projmap(map,nest=nest,coord=coord,vmin=min,vmax=max,
xsize=xsize,ysize=ysize,reso=reso,cmap=cmap,norm=norm)
if cbar:
if matplotlib.__version__ >= '0.91.0':
cb=f.colorbar(ax.get_images()[0],ax=ax,
orientation='horizontal',
shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
pad=0.08,fraction=0.1,format=format)
else:
cb=f.colorbar(ax.get_images()[0],orientation='horizontal',
shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
pad=0.08,fraction=0.1,format=format)
ax.set_title(title)
if not notext:
ax.text(-0.07,0.02,
"%g '/pix, %dx%d pix"%(ax.proj.arrayinfo['reso'],
ax.proj.arrayinfo['xsize'],
ax.proj.arrayinfo['ysize']),
fontsize=12,verticalalignment='bottom',
transform=ax.transAxes,rotation=90)
ax.text(-0.07,0.6,ax.proj.coordsysstr,fontsize=14,
fontweight='bold',rotation=90,transform=ax.transAxes)
lon,lat = npy.around(ax.proj.get_center(lonlat=True),ax._coordprec)
ax.text(0.5,-0.03,'(%g,%g)'%(lon,lat),
verticalalignment='center', horizontalalignment='center',
transform=ax.transAxes)
if cbar:
cb.ax.text(1.05,0.30,unit,fontsize=14,fontweight='bold',
transform=cb.ax.transAxes,ha='left',va='center')
f.sca(ax)
finally:
pylab.draw()
if wasinteractive:
pylab.ion()
#pylab.show()
def cartview(map=None,fig=None,rot=None,zat=None,coord=None,unit='',
xsize=800,ysize=None,lonra=None,latra=None,
title='Cartesian view',nest=False,remove_dip=False,
remove_mono=False,gal_cut=0,
min=None,max=None,flip='astro',
format='%.3g',cbar=True,
cmap=None, norm=None,aspect=None,
hold=False,sub=None,margins=None,notext=False):
"""Plot an healpix map (given as an array) in Cartesian projection.
Input:
- map : an ndarray containing the map.
if None, use map with inf value (white map), useful for
overplotting
Parameters:
- fig: a figure number. Default: create a new figure
- rot: rotation, either 1,2 or 3 angles describing the rotation
Default: None
- coord: either one of 'G', 'E' or 'C' to describe the coordinate
system of the map, or a sequence of 2 of these to make
rotation from the first to the second coordinate system.
Default: None
- unit: a text describing the unit. Default: ''
      - xsize: the size of the image. Default: 800
- lonra: range in longitude. Default: [-180,180]
- latra: range in latitude. Default: [-90,90]
      - title: the title of the plot. Default: 'Cartesian view'
- nest: if True, ordering scheme is NEST. Default: False (RING)
- min: the minimum range value
- max: the maximum range value
- flip: 'astro' (default, east towards left, west towards right) or 'geo'
- remove_dip: if True, remove the dipole+monopole
- remove_mono: if True, remove the monopole
- gal_cut: galactic cut for the dipole/monopole fit
- format: the format of the scale. Default: '%.3g'
      - hold: if True, replace the current Axes by a CartesianAxes.
use this if you want to have multiple maps on the same
figure. Default: False
- sub: use a part of the current figure (same syntax as subplot).
Default: None
- margins: either None, or a sequence (left,bottom,right,top)
giving the margins on left,bottom,right and top
of the axes. Values are relative to figure (0-1).
Default: None
- notext: True: do not add resolution info text
Default=False
"""
if not (hold or sub):
f=pylab.figure(fig,figsize=(8.5,5.4))
if not margins:
margins = (0.075,0.05,0.075,0.05)
extent = (0.0,0.0,1.0,1.0)
elif hold:
f=pylab.gcf()
left,bottom,right,top = npy.array(pylab.gca().get_position()).ravel()
if not margins:
margins = (0.0,0.0,0.0,0.0)
extent = (left,bottom,right-left,top-bottom)
f.delaxes(pylab.gca())
else: # using subplot syntax
f=pylab.gcf()
if hasattr(sub,'__len__'):
nrows, ncols, idx = sub
else:
nrows, ncols, idx = sub/100, (sub%100)/10, (sub%10)
if idx < 1 or idx > ncols*nrows:
raise ValueError('Wrong values for sub: %d, %d, %d'%(nrows,
ncols,
idx))
c,r = (idx-1)%ncols,(idx-1)/ncols
if not margins:
margins = (0.01,0.0,0.0,0.02)
extent = (c*1./ncols+margins[0],
1.-(r+1)*1./nrows+margins[1],
1./ncols-margins[2]-margins[0],
1./nrows-margins[3]-margins[1])
extent = (extent[0]+margins[0],
extent[1]+margins[1],
extent[2]-margins[2]-margins[0],
extent[3]-margins[3]-margins[1])
#f=pylab.figure(fig,figsize=(5.5,6))
# Starting to draw : turn interactive off
wasinteractive = pylab.isinteractive()
pylab.ioff()
try:
if map is None:
map = npy.zeros(12)+npy.inf
cbar=False
if zat and rot:
raise ValueError('Only give rot or zat, not both')
if zat:
rot = npy.array(zat,dtype=npy.float64)
rot.resize(3)
rot[1] -= 90
ax=PA.HpxCartesianAxes(f,extent,coord=coord,rot=rot,
format=format,flipconv=flip)
f.add_axes(ax)
if remove_dip:
map=pixelfunc.remove_dipole(map,gal_cut=gal_cut,nest=nest,copy=True)
elif remove_mono:
map=pixelfunc.remove_monopole(map,gal_cut=gal_cut,nest=nest,copy=True)
ax.projmap(map,nest=nest,coord=coord,vmin=min,vmax=max,
xsize=xsize,ysize=ysize,lonra=lonra,latra=latra,
cmap=cmap,norm=norm,aspect=aspect)
if cbar:
if matplotlib.__version__ >= '0.91.0':
cb=f.colorbar(ax.get_images()[0],ax=ax,
orientation='horizontal',
shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
pad=0.08,fraction=0.1,format=format)
else:
cb=f.colorbar(ax.get_images()[0],orientation='horizontal',
shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
pad=0.08,fraction=0.1,format=format)
ax.set_title(title)
if not notext:
ax.text(-0.07,0.6,ax.proj.coordsysstr,fontsize=14,
fontweight='bold',rotation=90,transform=ax.transAxes)
if cbar:
cb.ax.text(1.05,0.30,unit,fontsize=14,fontweight='bold',
transform=cb.ax.transAxes,ha='left',va='center')
f.sca(ax)
finally:
if wasinteractive:
pylab.ion()
pylab.draw()
#pylab.show()
def graticule(dpar=None,dmer=None,coord=None,local=None,**kwds):
"""Create a graticule, either on an existing mollweide map or not.
Parameters:
- dpar, dmer: interval in degrees between meridians and between parallels
- coord: the coordinate system of the graticule (make rotation if needed,
using coordinate system of the map if it is defined)
- local: True if local graticule (no rotation is performed)
Return:
None
"""
f = pylab.gcf()
wasinteractive = pylab.isinteractive()
pylab.ioff()
try:
if len(f.get_axes()) == 0:
ax=PA.HpxMollweideAxes(f,(0.02,0.05,0.96,0.9),coord=coord)
f.add_axes(ax)
ax.text(0.86,0.05,ax.proj.coordsysstr,fontsize=14,
fontweight='bold',transform=ax.transAxes)
for ax in f.get_axes():
if isinstance(ax,PA.SphericalProjAxes):
ax.graticule(dpar=dpar,dmer=dmer,coord=coord,
local=local,**kwds)
finally:
pylab.draw()
if wasinteractive:
pylab.ion()
#pylab.show()
graticule.__doc__ = PA.SphericalProjAxes.graticule.__doc__
def delgraticules():
f = pylab.gcf()
wasinteractive = pylab.isinteractive()
pylab.ioff()
try:
for ax in f.get_axes():
if isinstance(ax,PA.SphericalProjAxes):
ax.delgraticules()
finally:
pylab.draw()
if wasinteractive:
pylab.ion()
#pylab.show()
delgraticules.__doc__ = PA.SphericalProjAxes.delgraticules.__doc__
def projplot(*args,**kwds):
f = pylab.gcf()
wasinteractive = pylab.isinteractive()
pylab.ioff()
ret = None
try:
for ax in f.get_axes():
if isinstance(ax,PA.SphericalProjAxes):
ret = ax.projplot(*args,**kwds)
finally:
pylab.draw()
if wasinteractive:
pylab.ion()
#pylab.show()
return ret
projplot.__doc__ = PA.SphericalProjAxes.projplot.__doc__
def projscatter(*args,**kwds):
f = pylab.gcf()
wasinteractive = pylab.isinteractive()
pylab.ioff()
ret=None
try:
for ax in f.get_axes():
if isinstance(ax,PA.SphericalProjAxes):
ret = ax.projscatter(*args,**kwds)
finally:
pylab.draw()
if wasinteractive:
pylab.ion()
#pylab.show()
return ret
projscatter.__doc__ = PA.SphericalProjAxes.projscatter.__doc__
def projtext(*args,**kwds):
f = pylab.gcf()
wasinteractive = pylab.isinteractive()
pylab.ioff()
ret = None
try:
for ax in f.get_axes():
if isinstance(ax,PA.SphericalProjAxes):
ret = ax.projtext(*args,**kwds)
finally:
pylab.draw()
if wasinteractive:
pylab.ion()
#pylab.show()
return ret
projtext.__doc__ = PA.SphericalProjAxes.projtext.__doc__
| gpl-2.0 |
anntzer/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 9 | 5243 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
import pytest
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
@pytest.mark.parametrize('sparse', (True, False))
@pytest.mark.parametrize('dtype', (int, np.float32, np.int16))
@pytest.mark.parametrize('sort', (True, False))
@pytest.mark.parametrize('iterable', (True, False))
def test_dictvectorizer(sparse, dtype, sort, iterable):
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert sp.issparse(X) == sparse
assert X.shape == (3, 5)
assert X.sum() == 14
assert v.inverse_transform(X) == D
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert (v.feature_names_ ==
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert v.get_feature_names() == ["useful1", "useful2"]
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert X.shape == (3, 5)
D_out = v.inverse_transform(X)
assert D_out[0] == {"version=1": 1, "ham": 2}
names = v.get_feature_names()
assert "version=2" in names
assert "version" not in names
def test_iterable_value():
D_names = ['ham', 'spam', 'version=1', 'version=2', 'version=3']
X_expected = [[2.0, 0.0, 2.0, 1.0, 0.0],
[0.0, 0.3, 0.0, 1.0, 0.0],
[0.0, -1.0, 0.0, 0.0, 1.0]]
D_in = [{"version": ["1", "2", "1"], "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
X = X.toarray()
assert_array_equal(X, X_expected)
D_out = v.inverse_transform(X)
assert D_out[0] == {"version=1": 2, "version=2": 1, "ham": 2}
names = v.get_feature_names()
assert names == D_names
def test_iterable_not_string_error():
error_value = ("Unsupported type <class 'int'> in iterable value. "
"Only iterables of string are supported.")
D2 = [{'foo': '1', 'bar': '2'},
{'foo': '3', 'baz': '1'},
{'foo': [1, 'three']}]
v = DictVectorizer(sparse=False)
with pytest.raises(TypeError) as error:
v.fit(D2)
assert str(error.value) == error_value
def test_mapping_error():
error_value = ("Unsupported value type <class 'dict'> "
"for foo: {'one': 1, 'three': 3}.\n"
"Mapping objects are not supported.")
D2 = [{'foo': '1', 'bar': '2'},
{'foo': '3', 'baz': '1'},
{'foo': {'one': 1, 'three': 3}}]
v = DictVectorizer(sparse=False)
with pytest.raises(TypeError) as error:
v.fit(D2)
assert str(error.value) == error_value
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert "empty" in str(e)
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert v_1.vocabulary_ == v_2.vocabulary_
def test_n_features_in():
# For vectorizers, n_features_in_ does not make sense and does not exist.
dv = DictVectorizer()
assert not hasattr(dv, 'n_features_in_')
d = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
dv.fit(d)
assert not hasattr(dv, 'n_features_in_')
| bsd-3-clause |
eriklindernoren/Keras-GAN | infogan/infogan.py | 1 | 8640 | from __future__ import print_function, division
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, concatenate
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D, Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.utils import to_categorical
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np
class INFOGAN():
def __init__(self):
self.img_rows = 28
self.img_cols = 28
self.channels = 1
self.num_classes = 10
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = 72
optimizer = Adam(0.0002, 0.5)
losses = ['binary_crossentropy', self.mutual_info_loss]
        # Build the discriminator and the recognition network
self.discriminator, self.auxilliary = self.build_disk_and_q_net()
self.discriminator.compile(loss=['binary_crossentropy'],
optimizer=optimizer,
metrics=['accuracy'])
# Build and compile the recognition network Q
self.auxilliary.compile(loss=[self.mutual_info_loss],
optimizer=optimizer,
metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
# The generator takes noise and the target label as input
# and generates the corresponding digit of that label
gen_input = Input(shape=(self.latent_dim,))
img = self.generator(gen_input)
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The discriminator takes generated image as input and determines validity
valid = self.discriminator(img)
# The recognition network produces the label
target_label = self.auxilliary(img)
# The combined model (stacked generator and discriminator)
self.combined = Model(gen_input, [valid, target_label])
self.combined.compile(loss=losses,
optimizer=optimizer)
def build_generator(self):
model = Sequential()
model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
model.add(Reshape((7, 7, 128)))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
model.add(Activation("tanh"))
gen_input = Input(shape=(self.latent_dim,))
img = model(gen_input)
model.summary()
return Model(gen_input, img)
def build_disk_and_q_net(self):
img = Input(shape=self.img_shape)
# Shared layers between discriminator and recognition network
model = Sequential()
model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
model.add(ZeroPadding2D(padding=((0,1),(0,1))))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(256, kernel_size=3, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(512, kernel_size=3, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Flatten())
img_embedding = model(img)
# Discriminator
validity = Dense(1, activation='sigmoid')(img_embedding)
# Recognition
q_net = Dense(128, activation='relu')(img_embedding)
label = Dense(self.num_classes, activation='softmax')(q_net)
# Return discriminator and recognition network
return Model(img, validity), Model(img, label)
def mutual_info_loss(self, c, c_given_x):
"""The mutual information metric we aim to minimize"""
eps = 1e-8
conditional_entropy = K.mean(- K.sum(K.log(c_given_x + eps) * c, axis=1))
entropy = K.mean(- K.sum(K.log(c + eps) * c, axis=1))
return conditional_entropy + entropy
def sample_generator_input(self, batch_size):
# Generator inputs
sampled_noise = np.random.normal(0, 1, (batch_size, 62))
sampled_labels = np.random.randint(0, self.num_classes, batch_size).reshape(-1, 1)
sampled_labels = to_categorical(sampled_labels, num_classes=self.num_classes)
return sampled_noise, sampled_labels
def train(self, epochs, batch_size=128, sample_interval=50):
# Load the dataset
(X_train, y_train), (_, _) = mnist.load_data()
# Rescale -1 to 1
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = np.expand_dims(X_train, axis=3)
y_train = y_train.reshape(-1, 1)
# Adversarial ground truths
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
for epoch in range(epochs):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random half batch of images
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
# Sample noise and categorical labels
sampled_noise, sampled_labels = self.sample_generator_input(batch_size)
gen_input = np.concatenate((sampled_noise, sampled_labels), axis=1)
# Generate a half batch of new images
gen_imgs = self.generator.predict(gen_input)
# Train on real and generated data
d_loss_real = self.discriminator.train_on_batch(imgs, valid)
d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
# Avg. loss
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator and Q-network
# ---------------------
g_loss = self.combined.train_on_batch(gen_input, [valid, sampled_labels])
# Plot the progress
print ("%d [D loss: %.2f, acc.: %.2f%%] [Q loss: %.2f] [G loss: %.2f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[1], g_loss[2]))
# If at save interval => save generated image samples
if epoch % sample_interval == 0:
self.sample_images(epoch)
def sample_images(self, epoch):
r, c = 10, 10
fig, axs = plt.subplots(r, c)
for i in range(c):
sampled_noise, _ = self.sample_generator_input(c)
label = to_categorical(np.full(fill_value=i, shape=(r,1)), num_classes=self.num_classes)
gen_input = np.concatenate((sampled_noise, label), axis=1)
gen_imgs = self.generator.predict(gen_input)
gen_imgs = 0.5 * gen_imgs + 0.5
for j in range(r):
axs[j,i].imshow(gen_imgs[j,:,:,0], cmap='gray')
axs[j,i].axis('off')
fig.savefig("images/%d.png" % epoch)
plt.close()
def save_model(self):
def save(model, model_name):
model_path = "saved_model/%s.json" % model_name
weights_path = "saved_model/%s_weights.hdf5" % model_name
options = {"file_arch": model_path,
"file_weight": weights_path}
json_string = model.to_json()
open(options['file_arch'], 'w').write(json_string)
model.save_weights(options['file_weight'])
save(self.generator, "generator")
save(self.discriminator, "discriminator")
if __name__ == '__main__':
infogan = INFOGAN()
infogan.train(epochs=50000, batch_size=128, sample_interval=50)
| mit |
larsmans/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 26 | 2870 | import numpy as np
from scipy.sparse import csr_matrix
from .... import datasets
from ..unsupervised import silhouette_score
from ... import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
"""Tests the Silhouette Coefficient. """
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
"""Assert Silhouette Coefficient != nan when there is 1 sample in a class.
This tests for the condition that caused issue 960.
"""
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
""" Assert 2 <= n_labels <= nsample -1 """
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
"Number of labels is %d "
"but should be more than 2"
"and less than n_samples - 1" % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
"Number of labels is %d "
"but should be more than 2"
"and less than n_samples - 1" % len(np.unique(y)),
silhouette_score, X, y)
| bsd-3-clause |
r-mart/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
ronalcc/zipline | tests/test_assets.py | 2 | 29139 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
import sys
from unittest import TestCase
from datetime import datetime, timedelta
import pickle
import uuid
import warnings
import pandas as pd
from pandas.tseries.tools import normalize_date
from nose_parameterized import parameterized
from zipline.assets import Asset, Equity, Future, AssetFinder
from zipline.assets.futures import FutureChain
from zipline.errors import (
SymbolNotFound,
MultipleSymbolsFound,
SidAssignmentError,
RootSymbolNotFound,
)
def build_lookup_generic_cases():
"""
Generate test cases for AssetFinder test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
frame = pd.DataFrame.from_records(
[
{
'sid': 0,
'file_name': 'duplicated',
'company_name': 'duplicated_0',
'start_date_nano': dupe_0_start.value,
'end_date_nano': dupe_0_end.value,
'exchange': '',
},
{
'sid': 1,
'file_name': 'duplicated',
'company_name': 'duplicated_1',
'start_date_nano': dupe_1_start.value,
'end_date_nano': dupe_1_end.value,
'exchange': '',
},
{
'sid': 2,
'file_name': 'unique',
'company_name': 'unique',
'start_date_nano': unique_start.value,
'end_date_nano': unique_end.value,
'exchange': '',
},
],
)
finder = AssetFinder(metadata=frame)
dupe_0, dupe_1, unique = assets = [
finder.retrieve_asset(i)
for i in range(3)
]
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
cases = [
##
# Scalars
# Asset object
(finder, assets[0], None, assets[0]),
(finder, assets[1], None, assets[1]),
(finder, assets[2], None, assets[2]),
# int
(finder, 0, None, assets[0]),
(finder, 1, None, assets[1]),
(finder, 2, None, assets[2]),
# Duplicated symbol with resolution date
(finder, 'duplicated', dupe_0_start, dupe_0),
(finder, 'duplicated', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(finder, 'unique', unique_start, unique),
(finder, 'unique', None, unique),
##
# Iterables
# Iterables of Asset objects.
(finder, assets, None, assets),
(finder, iter(assets), None, assets),
# Iterables of ints
(finder, (0, 1), None, assets[:-1]),
(finder, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(finder, ('duplicated', 'unique'), dupe_0_start, [dupe_0, unique]),
(finder, ('duplicated', 'unique'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(finder,
('duplicated', 2, 'unique', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
]
return cases
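# Each tuple is (finder, query, as_of_date, expected) and is unpacked by
# @parameterized.expand below as the arguments of test_lookup_generic. For
# example, the case (finder, 'duplicated', dupe_0_start, dupe_0) asserts that
# looking up the duplicated symbol as of dupe_0_start resolves to sid 0.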
class AssetTestCase(TestCase):
def test_asset_object(self):
self.assertEquals({5061: 'foo'}[Asset(5061)], 'foo')
self.assertEquals(Asset(5061), 5061)
self.assertEquals(5061, Asset(5061))
self.assertEquals(Asset(5061), Asset(5061))
self.assertEquals(int(Asset(5061)), 5061)
self.assertEquals(str(Asset(5061)), 'Asset(5061)')
def test_asset_is_pickleable(self):
# Very wow
s = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
exchange='THE MOON',
)
s_unpickled = pickle.loads(pickle.dumps(s))
attrs_to_check = ['end_date',
'exchange',
'first_traded',
'asset_name',
'start_date',
'sid',
'symbol']
for attr in attrs_to_check:
self.assertEqual(getattr(s, attr), getattr(s_unpickled, attr))
def test_asset_comparisons(self):
s_23 = Asset(23)
s_24 = Asset(24)
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(Asset(3) < Asset(4))
self.assertFalse(Asset(4) < Asset(4))
self.assertFalse(Asset(5) < Asset(4))
def test_le(self):
self.assertTrue(Asset(3) <= Asset(4))
self.assertTrue(Asset(4) <= Asset(4))
self.assertFalse(Asset(5) <= Asset(4))
def test_eq(self):
self.assertFalse(Asset(3) == Asset(4))
self.assertTrue(Asset(4) == Asset(4))
self.assertFalse(Asset(5) == Asset(4))
def test_ge(self):
self.assertFalse(Asset(3) >= Asset(4))
self.assertTrue(Asset(4) >= Asset(4))
self.assertTrue(Asset(5) >= Asset(4))
def test_gt(self):
self.assertFalse(Asset(3) > Asset(4))
self.assertFalse(Asset(4) > Asset(4))
self.assertTrue(Asset(5) > Asset(4))
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(Asset(3) < 'a')
self.assertIsNotNone('a' < Asset(3))
else:
with self.assertRaises(TypeError):
Asset(3) < 'a'
with self.assertRaises(TypeError):
'a' < Asset(3)
class TestFuture(TestCase):
future = Future(
2468,
symbol='OMH15',
root_symbol='OM',
notice_date=pd.Timestamp('2014-01-20', tz='UTC'),
expiration_date=pd.Timestamp('2014-02-20', tz='UTC'),
contract_multiplier=500
)
def test_str(self):
strd = self.future.__str__()
self.assertEqual("Future(2468 [OMH15])", strd)
def test_repr(self):
reprd = self.future.__repr__()
self.assertTrue("Future" in reprd)
self.assertTrue("2468" in reprd)
self.assertTrue("OMH15" in reprd)
self.assertTrue("root_symbol='OM'" in reprd)
self.assertTrue(("notice_date=Timestamp('2014-01-20 00:00:00+0000', "
"tz='UTC')") in reprd)
self.assertTrue("expiration_date=Timestamp('2014-02-20 00:00:00+0000'"
in reprd)
self.assertTrue("contract_multiplier=500" in reprd)
def test_reduce(self):
reduced = self.future.__reduce__()
self.assertEqual(Future, reduced[0])
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
self.assertTrue('root_symbol' in dictd)
self.assertTrue('notice_date' in dictd)
self.assertTrue('expiration_date' in dictd)
self.assertTrue('contract_multiplier' in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
class AssetFinderTestCase(TestCase):
def test_lookup_symbol_fuzzy(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'TEST@%d' % i,
'company_name': "company%d" % i,
'start_date_nano': as_of.value,
'end_date_nano': as_of.value,
'exchange': uuid.uuid4().hex,
}
for i in range(3)
]
)
finder = AssetFinder(frame, fuzzy_char='@')
asset_0, asset_1, asset_2 = (
finder.retrieve_asset(i) for i in range(3)
)
for i in range(2): # we do it twice to test for caching bugs
self.assertIsNone(finder.lookup_symbol('test', as_of))
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of)
)
# Adding an unnecessary fuzzy shouldn't matter.
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of, fuzzy=True)
)
# Shouldn't find this with no fuzzy_str passed.
self.assertIsNone(finder.lookup_symbol('test1', as_of))
# Should find exact match.
self.assertEqual(
asset_1,
finder.lookup_symbol('test1', as_of, fuzzy=True),
)
def test_lookup_symbol_resolve_multiple(self):
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
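# Worked example (illustrative): freq='2D' produces start dates 2013-01-01,
# 2013-01-03, 2013-01-05, ... and each asset ends one day later, so the
# (start, end) windows 01-01/01-02, 01-03/01-04, ... never overlap.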
df = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'existing',
'company_name': 'existing',
'start_date_nano': date.value,
'end_date_nano': (date + timedelta(days=1)).value,
'exchange': 'NYSE',
}
for i, date in enumerate(dates)
]
)
finder = AssetFinder(df)
for _ in range(2): # Run checks twice to test for caching bugs.
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol_resolve_multiple('non_existing', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol_resolve_multiple('existing', None)
for i, date in enumerate(dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = finder.lookup_symbol_resolve_multiple(
'existing',
date,
)
self.assertEqual(result.symbol, 'existing')
self.assertEqual(result.sid, i)
@parameterized.expand(
build_lookup_generic_cases()
)
def test_lookup_generic(self, finder, symbols, reference_date, expected):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
results, missing = finder.lookup_generic(symbols, reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_generic_handle_missing(self):
data = pd.DataFrame.from_records(
[
# Sids that will be found when we do lookups.
{
'sid': 0,
'file_name': 'real',
'company_name': 'real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
{
'sid': 1,
'file_name': 'also_real',
'company_name': 'also_real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
# Sid whose end date is before our query date. We should
# still correctly find it.
{
'sid': 2,
'file_name': 'real_but_old',
'company_name': 'real_but_old',
'start_date_nano': pd.Timestamp('2002-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2003-1-1', tz='UTC'),
'exchange': '',
},
# Sid whose start date is after our query date. We should
# not find it; it should be returned in `missing` instead.
{
'sid': 3,
'file_name': 'real_but_in_the_future',
'company_name': 'real_but_in_the_future',
'start_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2020-1-1', tz='UTC'),
'exchange': 'THE FUTURE',
},
]
)
finder = AssetFinder(data)
results, missing = finder.lookup_generic(
['real', 1, 'fake', 'real_but_old', 'real_but_in_the_future'],
pd.Timestamp('2013-02-01', tz='UTC'),
)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].symbol, 'real')
self.assertEqual(results[0].sid, 0)
self.assertEqual(results[1].symbol, 'also_real')
self.assertEqual(results[1].sid, 1)
self.assertEqual(len(missing), 2)
self.assertEqual(missing[0], 'fake')
self.assertEqual(missing[1], 'real_but_in_the_future')
def test_insert_metadata(self):
finder = AssetFinder()
finder.insert_metadata(0,
asset_type='equity',
start_date='2014-01-01',
end_date='2015-01-01',
symbol="PLAY",
foo_data="FOO",)
# Test proper insertion
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
self.assertEqual(pd.Timestamp('2015-01-01', tz='UTC'),
equity.end_date)
# Test invalid field
self.assertFalse('foo_data' in finder.metadata_cache[0])
def test_consume_metadata(self):
# Test dict consumption
finder = AssetFinder()
dict_to_consume = {0: {'symbol': 'PLAY'},
1: {'symbol': 'MSFT'}}
finder.consume_metadata(dict_to_consume)
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
finder = AssetFinder()
# Test dataframe consumption
df = pd.DataFrame(columns=['asset_name', 'exchange'], index=[0, 1])
df['asset_name'][0] = "Dave'N'Busters"
df['exchange'][0] = "NASDAQ"
df['asset_name'][1] = "Microsoft"
df['exchange'][1] = "NYSE"
finder.consume_metadata(df)
self.assertEqual('NASDAQ', finder.metadata_cache[0]['exchange'])
self.assertEqual('Microsoft', finder.metadata_cache[1]['asset_name'])
def test_consume_asset_as_identifier(self):
# Build some end dates
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
fut_end = pd.Timestamp('2008-01-01', tz='UTC')
# Build some simple Assets
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
future_asset = Future(200, symbol="TESTFUT", end_date=fut_end)
# Consume the Assets
finder = AssetFinder()
finder.consume_identifiers([equity_asset, future_asset])
# Test equality with newly built Assets
self.assertEqual(equity_asset, finder.retrieve_asset(1))
self.assertEqual(future_asset, finder.retrieve_asset(200))
self.assertEqual(eq_end, finder.retrieve_asset(1).end_date)
self.assertEqual(fut_end, finder.retrieve_asset(200).end_date)
def test_sid_assignment(self):
# This metadata does not contain SIDs
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
today = normalize_date(pd.Timestamp('2015-07-09', tz='UTC'))
# Build a finder that is allowed to assign sids
finder = AssetFinder(metadata=metadata,
allow_sid_assignment=True)
# Verify that Assets were built and different sids were assigned
play = finder.lookup_symbol('PLAY', today)
msft = finder.lookup_symbol('MSFT', today)
self.assertEqual('PLAY', play.symbol)
self.assertIsNotNone(play.sid)
self.assertNotEqual(play.sid, msft.sid)
def test_sid_assignment_failure(self):
# This metadata does not contain SIDs
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
# Build a finder that is not allowed to assign sids, asserting failure
with self.assertRaises(SidAssignmentError):
AssetFinder(metadata=metadata, allow_sid_assignment=False)
def test_security_dates_warning(self):
# Build an asset with an end_date
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
# Catch all warnings
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered
warnings.simplefilter("always")
equity_asset.security_start_date
equity_asset.security_end_date
equity_asset.security_name
# Verify the warning
self.assertEqual(3, len(w))
for warning in w:
self.assertTrue(issubclass(warning.category,
DeprecationWarning))
def test_lookup_future_chain(self):
metadata = {
# Notice day is today, so not valid
2: {
'symbol': 'ADN15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-05-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
1: {
'symbol': 'ADV15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-08-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
# Starts trading today, so should be valid.
0: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-05-14', tz='UTC')
},
# Copy of the above future, but starts trading in August,
# so it isn't valid.
3: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-08-01', tz='UTC')
},
}
finder = AssetFinder(metadata=metadata)
dt = pd.Timestamp('2015-05-14', tz='UTC')
last_year = pd.Timestamp('2014-01-01', tz='UTC')
first_day = pd.Timestamp('2015-01-01', tz='UTC')
# Check that we get the expected number of contracts, in the
# right order
ad_contracts = finder.lookup_future_chain('AD', dt, dt)
self.assertEqual(len(ad_contracts), 2)
self.assertEqual(ad_contracts[0].sid, 1)
self.assertEqual(ad_contracts[1].sid, 0)
# Check that we get nothing if our knowledge date is last year
ad_contracts = finder.lookup_future_chain('AD', dt, last_year)
self.assertEqual(len(ad_contracts), 0)
# Check that we get things that start on the knowledge date
ad_contracts = finder.lookup_future_chain('AD', dt, first_day)
self.assertEqual(len(ad_contracts), 1)
def test_map_identifier_index_to_sids(self):
# Build an empty finder and some Assets
dt = pd.Timestamp('2014-01-01', tz='UTC')
finder = AssetFinder()
asset1 = Equity(1, symbol="AAPL")
asset2 = Equity(2, symbol="GOOG")
asset200 = Future(200, symbol="CLK15")
asset201 = Future(201, symbol="CLM15")
# Check for correct mapping and types
pre_map = [asset1, asset2, asset200, asset201]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([1, 2, 200, 201], post_map)
for sid in post_map:
self.assertIsInstance(sid, int)
# Change order and check mapping again
pre_map = [asset201, asset2, asset200, asset1]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([201, 2, 200, 1], post_map)
class TestFutureChain(TestCase):
metadata = {
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC')},
1: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC')},
2: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC')},
3: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC')}
}
asset_finder = AssetFinder(metadata=metadata)
def test_len(self):
""" Test the __len__ method of FutureChain.
"""
# None of the contracts have started yet.
cl = FutureChain(self.asset_finder, lambda: '2005-11-30', 'CL')
self.assertEqual(len(cl), 0)
# Sids 0, 1, & 2 have started, 3 has not yet started.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is still valid the day before its notice date.
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is now invalid, leaving only Sids 1 & 2 valid.
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(len(cl), 2)
# Sid 3 has started, so 1, 2, & 3 are now valid.
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(len(cl), 3)
# All contracts are no longer valid.
cl = FutureChain(self.asset_finder, lambda: '2006-09-20', 'CL')
self.assertEqual(len(cl), 0)
def test_getitem(self):
""" Test the __getitem__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl[0], 0)
self.assertEqual(cl[1], 1)
self.assertEqual(cl[2], 2)
with self.assertRaises(IndexError):
cl[3]
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(cl[0], 0)
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(cl[0], 1)
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(cl[-1], 3)
def test_root_symbols(self):
""" Test that different variations on root symbols are handled
as expected.
"""
# Make sure this successfully gets the chain for CL.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl.root_symbol, 'CL')
# These root symbols don't exist, so RootSymbolNotFound should
# be raised immediately.
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', 'CLZ')
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', '')
def test_repr(self):
""" Test the __repr__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
cl_feb = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL',
as_of_date='2006-02-01')
# The default chain should not include the as of date.
self.assertEqual(repr(cl), "FutureChain(root_symbol='CL')")
# An explicit as of date should show up in the repr.
self.assertEqual(
repr(cl_feb),
("FutureChain(root_symbol='CL', "
"as_of_date='2006-02-01 00:00:00+00:00')")
)
def test_as_of(self):
""" Test the as_of method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that the as_of_date is set correctly to the future
feb = '2006-02-01'
cl_feb = cl.as_of(feb)
self.assertEqual(
cl_feb.as_of_date,
pd.Timestamp(feb, tz='UTC')
)
# Test that the as_of_date is set correctly to the past, with
# args of str, datetime.datetime, and pd.Timestamp.
feb_prev = '2005-02-01'
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = datetime(year=2005, month=2, day=1)
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = pd.Timestamp('2005-02-01')
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
# The chain as of the current dt should always be the same as
# the default chain. Tests date as str, pd.Timestamp, and
# datetime.datetime.
self.assertEqual(cl[0], cl.as_of('2005-12-01')[0])
self.assertEqual(cl[0], cl.as_of(pd.Timestamp('2005-12-01'))[0])
self.assertEqual(
cl[0],
cl.as_of(datetime(year=2005, month=12, day=1))[0]
)
def test_offset(self):
""" Test the offset method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that an offset forward sets as_of_date as expected
self.assertEqual(
cl.offset('3 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=3)
)
# Test that an offset backward sets as_of_date as expected, with
# time delta given as str, datetime.timedelta, and pd.Timedelta.
self.assertEqual(
cl.offset('-1000 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(timedelta(days=-1000)).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(pd.Timedelta('-1000 days')).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
# An offset of zero should give the original chain.
self.assertEqual(cl[0], cl.offset(0)[0])
self.assertEqual(cl[0], cl.offset("0 days")[0])
# A string that doesn't represent a time delta should raise a
# ValueError.
with self.assertRaises(ValueError):
cl.offset("blah")
| apache-2.0 |
roshantha9/AbstractManycoreSim | src/analyse_results/AnalyseResults_HEVCWorkload_CCR.py | 1 | 22437 | import sys, os, csv, pprint, math
#sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from collections import OrderedDict
import numpy as np
import traceback
import re
#import pylab
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
#plt.style.use('bmh_rosh')
#import seaborn as sns
import seaborn.apionly as sns
import scipy.stats
import json
from matplotlib import colors
import matplotlib.cm as cm
import seaborn.apionly as sns
#sns.set_color_codes()
plt.style.use('bmh_rosh')
#from SimParams import SimParams
from util_scripts.resolution_combos import generate_resolution_combos
from libNoCModel.NoCFlow import NoCFlow
#EXP_DATADIR = "../experiment_data/workload_validation_fast/"
EXP_DATADIR = "Z:/MCASim/experiment_data/workload_validation_fast_cucc_scale_06/"
#EXP_DATADIR = "Z:/MCASim/experiment_data/workload_validation_fast_cucc_scale_06_01/"
RANDOM_SEEDS = [81665, 33749, 43894, 26358, 80505, 83660, 22817, 70263, 29917, 26044]
#RANDOM_SEEDS = [81665, 33749, 43894, 26358, 80505]
NOC_W = 8
NOC_H = 8
NOC_PERIOD = 0.00000001
NOC_ARBITRATION_COST = 7.0 * NOC_PERIOD
NUM_GOPS = 8
GOP_LEN = 31
NBMAX = 4
MOV_TYPE = "ANIM"
#MOV_TYPE = "DOC"
VID_RESOLUTION_LIST = [(3840,2160),(2560,1440),
(1920,1080),(1280,720),
(854,480),(640,360),
(512,288),
]
# VID_RESOLUTION_LIST = [(1280,720),
# ]
VID_RESOLUTION_LIST.reverse()
MOV_LIST = [ 'ACTION', 'DOC', 'SPORT', 'SPEECH', 'ANIM' ]
GOP_LEN_LIST = [16, 31]
def plot_Resolution_vs_CCR():
all_realccr_per_gop = {} # keys are resolutions
all_wc_ccr_per_vid = {} # keys are resolutions
res_str_to_pixels_mapping = {}
# get data per video res
for rix, each_vid_res in enumerate(VID_RESOLUTION_LIST):
res_key_int = each_vid_res[0] * each_vid_res[1] # num pixels
res_key_str = str(each_vid_res[0]) + "x" + str(each_vid_res[1])
res_str_to_pixels_mapping[res_key_str] = (res_key_int, rix+1)
# data structs
all_realccr_per_gop[res_key_str] = []
Icc_list = []
Pcc_list = []
Bcc_list = []
# get data per seed
for each_seed in RANDOM_SEEDS:
res_str = str(each_vid_res[0]) + "x" + str(each_vid_res[1])
fname_prefix = "__s%d_G%d_N%d_B%d_%s" % (each_seed, NUM_GOPS, GOP_LEN, NBMAX, MOV_TYPE)
fname = EXP_DATADIR + "vidworkload_data_res" + res_str + "_" + fname_prefix + ".js"
json_data=open(fname)
file_data = json.load(json_data)
# record real-CCRs per gop
real_ccrs = [g['real_ccr'] for g in file_data['gop_info']]
all_realccr_per_gop[res_key_str].extend(real_ccrs)
Icc_list.extend(file_data['fr_cc_I'])
Pcc_list.extend(file_data['fr_cc_P'])
Bcc_list.extend(file_data['fr_cc_B'])
Iwcc = np.max(Icc_list)
Pwcc = np.max(Pcc_list)
Bwcc = np.max(Bcc_list)
# best-case (minimum) frame costs are also required by _get_minmax_ccr
Ibcc = np.min(Icc_list)
Pbcc = np.min(Pcc_list)
Bbcc = np.min(Bcc_list)
max_flw_payload = res_key_int * 3
#sys.exit()
print each_vid_res
vid_min_ccr, vid_max_ccr = _get_minmax_ccr(Iwcc, Pwcc, Bwcc, Ibcc, Pbcc, Bbcc, GOP_LEN, max_flw_payload)
vid_avg_ccr = np.mean([vid_min_ccr, vid_max_ccr])
all_wc_ccr_per_vid[res_key_str] = [vid_min_ccr, vid_max_ccr, vid_avg_ccr]
#print Iwcc, Pwcc, Bwcc , max_flw_payload
#print vid_min_ccr, vid_max_ccr
#sys.exit()
## now we plot
fig = plt.figure(figsize=(11,4))
fig.canvas.set_window_title('Scatter - resolution vs. ccr (per gop)')
print VID_RESOLUTION_LIST
xaxis_ticks_str = [str(s[0]) + "x" + str(s[1]) for s in VID_RESOLUTION_LIST]
plt.xticks(range(1, len(xaxis_ticks_str)+1), xaxis_ticks_str)
plt.xlim([0.5, len(xaxis_ticks_str)+0.5])
# worst-case CCR #
y_data_min_max = []
x_data_min_max = []
y_data_avg = []
x_data_avg = []
for each_res_k, each_res_ccr_data in all_wc_ccr_per_vid.iteritems():
x_data_min_max.extend( [res_str_to_pixels_mapping[each_res_k][1]] * 2)
x_data_avg.extend([res_str_to_pixels_mapping[each_res_k][1]] * 1)
y_data_min_max.extend([each_res_ccr_data[0], each_res_ccr_data[1]])
y_data_avg.extend([each_res_ccr_data[2]])
plt.plot(x_data_min_max, y_data_min_max, marker='o', alpha=0.8, color='r', linestyle='', label='$CCR(J_i)$ (calculated lower/upper bound)')
#plt.plot(x_data_avg, y_data_avg, marker='^', alpha=0.8, color='y', linestyle='', label='CCR (avg. of lower/upper bound)', zorder=10, markersize=10)
plt.hold(True)
# real-CCR #
y_data = []
x_data = []
for each_res_k, each_res_ccr_data in all_realccr_per_gop.iteritems():
x_data.extend( [res_str_to_pixels_mapping[each_res_k][1]] * len(each_res_ccr_data))
#x_data.extend([each_res_k] * len(each_res_ccr_data))
y_data.extend(each_res_ccr_data)
#plt.scatter(x_data, y_data, marker='x', alpha=0.8, color='b')
plt.plot(x_data, y_data, marker='x', alpha=0.8, color='b', linestyle='', label='$aCCR(J_i)$ (synthetic workload)')
plt.xlabel("Video stream resolution")
plt.ylabel("Job CCR")
leg = plt.legend(numpoints=1)
leg.draggable()
plt.grid(True)
def plot_Resolution_vs_CompCost():
I_cc_dist = {}
P_cc_dist = {}
B_cc_dist = {}
res_str_to_pixels_mapping = {}
res_list_order = []
for rix, each_vid_res in enumerate(VID_RESOLUTION_LIST):
res_key_int = each_vid_res[0] * each_vid_res[1] # num pixels
res_key_str = str(each_vid_res[0]) + "x" + str(each_vid_res[1])
res_list_order.append(res_key_str)
res_str_to_pixels_mapping[res_key_str] = (res_key_int, rix+1)
I_cc_dist[res_key_str] = []
P_cc_dist[res_key_str] = []
B_cc_dist[res_key_str] = []
for each_seed in RANDOM_SEEDS:
res_str = str(each_vid_res[0]) + "x" + str(each_vid_res[1])
fname_prefix = "__s%d_G%d_N%d_B%d_%s" % (each_seed, NUM_GOPS, GOP_LEN, NBMAX, MOV_TYPE)
fname = EXP_DATADIR + "vidworkload_data_res" + res_str + "_" + fname_prefix + ".js"
json_data=open(fname)
file_data = json.load(json_data)
I_cc_dist[res_key_str].extend(file_data['fr_cc_I'])
P_cc_dist[res_key_str].extend(file_data['fr_cc_P'])
B_cc_dist[res_key_str].extend(file_data['fr_cc_B'])
## plotting - comp cost ##
fig1 = plt.figure(figsize=(11,4))
fig1.canvas.set_window_title('boxplot - resolution vs. comp.cost')
xaxis_ticks_str = [str(s[0]) + "x" + str(s[1]) for s in VID_RESOLUTION_LIST]
plt.xticks(range(1, len(xaxis_ticks_str)+1), xaxis_ticks_str)
plt.xlim([0.5, len(xaxis_ticks_str)+0.5])
bp_data = [I_cc_dist[s] for s in res_list_order]
plt.boxplot(bp_data, positions=range(1, len(xaxis_ticks_str)+1))
plt.xlabel("Video stream resolution")
plt.ylabel("I-frame computation cost (s)")
## plotting - max.payload ##
fig2 = plt.figure(figsize=(11,4))
fig2.canvas.set_window_title('boxplot - plot_Resolution_vs_CompCost')
data = [vid_res[0]*vid_res[1]*3 for vid_res in VID_RESOLUTION_LIST] # 3 bytes per pixel
xaxis_ticks_str = [str(s[0]) + "x" + str(s[1]) for s in VID_RESOLUTION_LIST]
plt.xticks(range(1, len(xaxis_ticks_str)+1), xaxis_ticks_str)
plt.xlim([0.5, len(xaxis_ticks_str)+0.5])
plt.plot(range(1, len(xaxis_ticks_str)+1), data, marker='d')
plt.xlabel("Video stream resolution")
plt.ylabel("Max. Reference frame payload")
def plot_CC_Dist_single_res():
I_cc_dist = {}
P_cc_dist = {}
B_cc_dist = {}
res_str_to_pixels_mapping = {}
res_list_order = []
res_key = None
for rix, each_vid_res in enumerate(VID_RESOLUTION_LIST):
res_key_int = each_vid_res[0] * each_vid_res[1] # num pixels
res_key_str = str(each_vid_res[0]) + "x" + str(each_vid_res[1])
res_list_order.append(res_key_str)
res_str_to_pixels_mapping[res_key_str] = (res_key_int, rix+1)
res_key = res_key_str
I_cc_dist[res_key_str] = []
P_cc_dist[res_key_str] = []
B_cc_dist[res_key_str] = []
for each_seed in RANDOM_SEEDS:
res_str = str(each_vid_res[0]) + "x" + str(each_vid_res[1])
fname_prefix = "__s%d_G%d_N%d_B%d_%s" % (each_seed, NUM_GOPS, GOP_LEN, NBMAX, MOV_TYPE)
fname = EXP_DATADIR + "vidworkload_data_res" + res_str + "_" + fname_prefix + ".js"
json_data=open(fname)
file_data = json.load(json_data)
I_cc_dist[res_key_str].extend(file_data['fr_cc_I'])
P_cc_dist[res_key_str].extend(file_data['fr_cc_P'])
B_cc_dist[res_key_str].extend(file_data['fr_cc_B'])
## plotting - comp cost ##
fig1 = plt.figure(figsize=(11,4))
fig1.canvas.set_window_title('boxplot - resolution vs. comp.cost')
bp_data_I = I_cc_dist[res_key]
bp_data_P = P_cc_dist[res_key]
bp_data_B = B_cc_dist[res_key]
#plt.boxplot(bp_data, positions=range(1, len(xaxis_ticks_str)+1))
plt.hist(bp_data_I, color='r', histtype='step', normed=1)
plt.hold(True)
plt.hist(bp_data_P, color='g', histtype='step', normed=1)
plt.hold(True)
plt.hist(bp_data_B, color='b', histtype='step', normed=1)
plt.hold(True)
plt.xlabel("comp_cost (s)")
plt.ylabel("freq")
def plot_CCR_boxplots_various_range_of_workloads():
all_realccr_per_gop = {} # keys are resolutions
all_wc_ccr_per_vid = {} # keys are resolutions
res_str_to_pixels_mapping = {}
res_list = []
all_vid_info = OrderedDict()
x_ticks_labels = []
# get data per video res
for each_vid_res in VID_RESOLUTION_LIST:
res_key_int = each_vid_res[0] * each_vid_res[1] # num pixels
res_key_str = str(each_vid_res[0]) + "x" + str(each_vid_res[1])
res_list.append(res_key_str)
all_vid_info[res_key_str] = OrderedDict()
for each_gop_len in GOP_LEN_LIST:
for each_mov_type in MOV_LIST:
vid_key = "%s_N%d" % (each_mov_type, each_gop_len)
for each_seed in RANDOM_SEEDS:
res_str_to_pixels_mapping[res_key_str] = res_key_int
fname_prefix = "__s%d_G%d_N%d_B%d_%s" % (each_seed, NUM_GOPS, each_gop_len, NBMAX, each_mov_type)
fname = EXP_DATADIR + "vidworkload_data_res" + res_key_str + "_" + fname_prefix + ".js"
print fname
json_data=open(fname)
file_data = json.load(json_data)
# specific data from file - cc's and realccrs
real_ccrs = [g['real_ccr'] for g in file_data['gop_info']]
ref_data_payloads = file_data['fr_ref_data_payloads']
runtime_ccrs = [
_get_runtime_ccr(file_data['wc_comp_cost_I'],
file_data['wc_comp_cost_P'],
file_data['wc_comp_cost_B'],
each_vid_res[0] * each_vid_res[1] * 3,
g['num_edges'],
g['gop_sequence']
)
for g in file_data['gop_info']
]
if vid_key not in all_vid_info[res_key_str]:
all_vid_info[res_key_str][vid_key] = {
'real_ccr': real_ccrs,
'runtime_ccr': runtime_ccrs,
'Icc': file_data['fr_cc_I'],
'Pcc': file_data['fr_cc_P'],
'Bcc': file_data['fr_cc_B'],
'num_edges': [g['num_edges'] for g in file_data["gop_info"]],
'numB': [g['numB'] for g in file_data["gop_info"]],
'numP': [g['numP'] for g in file_data["gop_info"]],
'lb_ccr': None,
'ub_ccr': None,
'avg_ccr' : None,
'fr_ref_data_payloads': ref_data_payloads
}
else:
all_vid_info[res_key_str][vid_key]['real_ccr'].extend(real_ccrs)
all_vid_info[res_key_str][vid_key]['runtime_ccr'].extend(runtime_ccrs)
all_vid_info[res_key_str][vid_key]['Icc'].extend(file_data['fr_cc_I'])
all_vid_info[res_key_str][vid_key]['Pcc'].extend(file_data['fr_cc_P'])
all_vid_info[res_key_str][vid_key]['Bcc'].extend(file_data['fr_cc_B'])
all_vid_info[res_key_str][vid_key]['num_edges'].extend([g['num_edges'] for g in file_data["gop_info"]])
all_vid_info[res_key_str][vid_key]['numB'].extend([g['numB'] for g in file_data["gop_info"]])
all_vid_info[res_key_str][vid_key]['numP'].extend([g['numP'] for g in file_data["gop_info"]])
all_vid_info[res_key_str][vid_key]['fr_ref_data_payloads'].extend(ref_data_payloads)
# calculate and populate min/max ccrs
#res_key_str = str(each_vid_res[0]) + "x" + str(each_vid_res[1])
#vid_key = "%s_G%d_N%d_B%d_%s" % (res_key_str, NUM_GOPS, each_gop_len, NBMAX, each_mov_type)
Iwcc = np.max(all_vid_info[res_key_str][vid_key]['Icc'])
Pwcc = np.max(all_vid_info[res_key_str][vid_key]['Pcc'])
Bwcc = np.max(all_vid_info[res_key_str][vid_key]['Bcc'])
Ibcc = np.min(all_vid_info[res_key_str][vid_key]['Icc'])
Pbcc = np.min(all_vid_info[res_key_str][vid_key]['Pcc'])
Bbcc = np.min(all_vid_info[res_key_str][vid_key]['Bcc'])
max_flw_payload = each_vid_res[0] * each_vid_res[1] * 3
vid_min_ccr, vid_max_ccr = _get_minmax_ccr(Iwcc, Pwcc, Bwcc,
Ibcc,Pbcc, Bbcc,
each_gop_len, max_flw_payload)
vid_avg_ccr = np.mean([vid_min_ccr, vid_max_ccr])
all_vid_info[res_key_str][vid_key]['lb_ccr'] = vid_min_ccr
all_vid_info[res_key_str][vid_key]['ub_ccr'] = vid_max_ccr
all_vid_info[res_key_str][vid_key]['avg_ccr'] = vid_avg_ccr
# x_ticks_labels.append(
# "%s_N%d_%s" % (res_key_str, each_gop_len, each_mov_type)
# )
## now we plot
fig, axes = plt.subplots(ncols=len(res_list), sharey=True)
fig.subplots_adjust(wspace=0)
#plt.xticks(ind, x_ticks_labels)
i=0
for ax, res in zip(axes, res_list):
all_k = [k for k in all_vid_info[res].keys() if "N16" not in k]
#all_k = [k for k in all_vid_info[res].keys()]
bp_data = [all_vid_info[res][k]['Bcc'] for k in all_k if "N16" not in k]
#bp_data = [all_vid_info[res][k]['runtime_ccr'] for k in all_k]
data_lb_ccr = [all_vid_info[res][k]['lb_ccr'] for k in all_k]
data_ub_ccr = [all_vid_info[res][k]['ub_ccr'] for k in all_k]
data_avgb_ccr = [ (all_vid_info[res][k]['ub_ccr']+all_vid_info[res][k]['lb_ccr'])/2.0 for k in all_k]
ind = range(1, len(bp_data)+1)
bps = ax.boxplot(bp_data, positions = ind, sym='x', showmeans=False, patch_artist=True)
_stylize_boxplots(bps)
#ax.set(xticklabels=all_k, xlabel=res)
ax.set_xticks(ind)
ax.set_xticklabels(all_k, fontsize=10, rotation=40, ha='right')
ax.set_xlabel(res)
#ax.tick_params(axis='both', which='major', labelsize=9)
ax.margins(0.05) # Optional
# plot min/max ccr
# ax.hold(True)
# ax.plot(ind, data_lb_ccr, color='g', marker='d', linewidth=0.5, markersize=6)
# ax.hold(True)
# ax.plot(ind, data_ub_ccr, color='g', marker='d', linewidth=0.5, markersize=6, label="$CCR(J_i)$ (calculated lower/upper bound)")
#ax.hold(True)
#ax.plot(ind, data_avgb_ccr, color='g', marker='d', linewidth=0.5, markersize=5)
ax.grid(True, axis='y')
ax.grid(False, axis='x')
if i!=0:
ax.yaxis.tick_left()
ax.yaxis.set_ticks_position('none')
else:
ax.yaxis.set_ticks_position('left')
# top axis ticks
#ax2 = ax.twin()
#ax2.set_xticks(ind)
#plt.xlabel("Synthetic video profile")
i+=1
#axes[0].set_ylabel("Job CCR")
axes[0].set_ylabel("Number of edges in a GoP")
#plt.xticks(ind, x_ticks_labels, rotation=40, ha='right', fontsize=9)
#p1 = plt.Line2D((0,1),(0,0), linestyle='-', color='y', marker='d')
#l = fig.legend(p1, "$CCR(J_i)$ (calculated lower/upper bound)", loc='top center', ncol=1, numpoints=2,labelspacing=1)
#l = plt.legend()
#l.draggable()
#l.get_frame().set_facecolor('#FFFFFF')
#l.get_frame().set_linewidth(0.0)
# v_l_locs = range(1,len(x_ticks_labels)+1, 10)[1:]
# for i in v_l_locs:
# plt.axvline(x=i, linewidth=0.5, color='k', alpha=0.6)
# plt.hold(True)
###################################
# HELPERS
###################################
def _get_runtime_ccr(Iwcc, Pwcc, Bwcc, max_payload, num_edges, gop_seq):
total_node_cost = 0.0
for ft_type in gop_seq:
if ft_type == "I": total_node_cost+= Iwcc
elif ft_type == "P": total_node_cost+= Pwcc
elif ft_type == "B": total_node_cost+= Bwcc
else:
sys.exit("Error - _get_runtime_ccr")
nhops = (NOC_H-1) + (NOC_W-1)
max_bl = NoCFlow.getCommunicationCost(max_payload, nhops, NOC_PERIOD, NOC_ARBITRATION_COST)
total_edges_cost = max_bl * float(num_edges)
runtime_ccr = float(total_edges_cost)/float(total_node_cost)
return runtime_ccr
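# Worked example (illustrative, hypothetical numbers): for gop_seq "IPBB" with
# Iwcc=0.012, Pwcc=0.006 and Bwcc=0.003 the total node cost is 0.024s; if
# num_edges=3 and the per-edge cost max_bl came out at 0.002s, then
# runtime_ccr = (3 * 0.002) / 0.024 = 0.25.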
def _get_minmax_ccr(Iwcc, Pwcc, Bwcc, Ibcc, Pbcc, Bbcc,
gop_len, max_payload):
nPmax = gop_len -1
nPmin = int(float(gop_len-1)/float(NBMAX + 1))
nBmax = (gop_len - 1) - nPmin
nBmin = 0
nhops = (NOC_H-1) + (NOC_W-1)
#nhops = (NOC_H) + (NOC_W)
max_bl = NoCFlow.getCommunicationCost(max_payload*0.45, nhops, NOC_PERIOD, NOC_ARBITRATION_COST)
# print "Iwcc, %.9f" % Iwcc
# print "Pwcc, %.9f" % Pwcc
# print "Bwcc, %.9f" % Bwcc
print "max_bl: ", max_bl
## calculate upper-bound CCR ##
# how many num P's and B's do we consider ?
nP = nPmin
nB = nBmax
# upper CCR occurs when there are as many edges as possible, i.e. when the GoP has the maximum number of B-frames
# B frames can have max 3 incoming edges
# P frames can have only 1 incoming edge
# and have to take best-case ccs
num_edges = (nP * 1) + (nB * 3)
print "w.c. num_edges:", num_edges
wc_comm_cost = num_edges * max_bl
bc_comp_cost = Ibcc + (nP * Pbcc) + (nB * Bbcc)
ub_ccr = float(wc_comm_cost)/float(bc_comp_cost)
## calculate best-case CCR ##
# how many num P's and B's do we consider ?
nP = nPmax
nB = nBmin
# lower-bound CCR occurs when there are as few edges as possible, i.e. when the GoP has the minimum number of B-frames
# B frames can have max 3 incoming edges
# P frames can have only 1 incoming edge
num_edges = (nP * 1) + (nB * 2)
print "b.c. num_edges:", num_edges
bl = max_bl
#print bl
bc_comm_cost = float(num_edges) * bl
wc_comp_cost = Iwcc + (nP * Pwcc) + (nB * Bwcc)
#print bc_comm_cost, bc_comp_cost
lb_ccr = float(bc_comm_cost)/float(wc_comp_cost)
return (lb_ccr, ub_ccr)
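# Worked example (illustrative): with gop_len=31 and NBMAX=4 the code above
# uses nPmin = int(30/5) = 6 and nBmax = 30 - 6 = 24, so the upper-bound case
# has 6*1 + 24*3 = 78 edges, while the lower-bound case (nPmax=30, nBmin=0)
# has only 30 edges; ub_ccr and lb_ccr then follow by dividing the resulting
# communication cost by the best-case and worst-case computation cost.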
def _stylize_boxplots(bp, c_boxface='#348ABD', c_boxline='k',
c_cap='k', c_wh='k', c_fly='k'):
# stylise boxplots
for box, medians in zip(bp['boxes'], bp['medians']):
plt.setp(box, color='k', linewidth=1.00)
plt.setp(box, facecolor=c_boxface, alpha=0.5)
plt.setp(medians,linewidth=1.00, color=c_boxline)
for caps in bp['caps']:
plt.setp(caps, linewidth=1.00, color=c_cap)
for whiskers in bp['whiskers']:
plt.setp(whiskers, linewidth=1.00, color=c_wh, linestyle='-')
for fliers in bp['fliers']:
plt.setp(fliers, linewidth=1.00, color=c_fly)
###################################
# MAIN
###################################
#plot_Resolution_vs_CCR()
#plot_Resolution_vs_CompCost()
plot_CCR_boxplots_various_range_of_workloads()
#plot_CC_Dist_single_res()
print "finished"
plt.show()
| gpl-3.0 |
alexandrebarachant/mne-python | mne/io/base.py | 1 | 100287 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Martin Luessi <[email protected]>
# Denis Engemann <[email protected]>
# Teon Brooks <[email protected]>
# Marijn van Vliet <[email protected]>
#
# License: BSD (3-clause)
import copy
from copy import deepcopy
import os
import os.path as op
import numpy as np
from .constants import FIFF
from .pick import pick_types, channel_type, pick_channels, pick_info
from .pick import _pick_data_channels, _pick_data_or_ica
from .meas_info import write_meas_info, anonymize_info
from .proj import setup_proj, activate_proj, _proj_equal, ProjMixin
from ..channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin)
from ..channels.montage import read_montage, _set_montage, Montage
from .compensator import set_current_comp, make_compensator
from .write import (start_file, end_file, start_block, end_block,
write_dau_pack16, write_float, write_double,
write_complex64, write_complex128, write_int,
write_id, write_string, write_name_list, _get_split_size)
from ..filter import (filter_data, notch_filter, resample, next_fast_len,
_resample_stim_channels)
from ..fixes import in1d
from ..parallel import parallel_func
from ..utils import (_check_fname, _check_pandas_installed, sizeof_fmt,
_check_pandas_index_arguments, _check_copy_dep,
check_fname, _get_stim_channel,
logger, verbose, _time_mask, warn, SizeMixin)
from ..viz import plot_raw, plot_raw_psd, plot_raw_psd_topo
from ..defaults import _handle_default
from ..externals.six import string_types
from ..event import find_events, concatenate_events
from ..annotations import _combine_annotations, _onset_to_seconds
class ToDataFrameMixin(object):
"""Class to add to_data_frame capabilities to certain classes."""
def _get_check_picks(self, picks, picks_check):
if picks is None:
picks = list(range(self.info['nchan']))
else:
if not in1d(picks, np.arange(len(picks_check))).all():
raise ValueError('At least one picked channel is not present '
'in this object instance.')
return picks
def to_data_frame(self, picks=None, index=None, scale_time=1e3,
scalings=None, copy=True, start=None, stop=None):
"""Export data in tabular structure as a pandas DataFrame.
Columns and indices will depend on the object being converted.
Generally this will include as much relevant information as
possible for the data type being converted. This makes it easy
to convert data for use in packages that utilize dataframes,
such as statsmodels or seaborn.
Parameters
----------
picks : array-like of int | None
If None only MEG and EEG channels are kept
otherwise the channels indices in picks are kept.
index : tuple of str | None
Column to be used as index for the data. Valid string options
are 'epoch', 'time' and 'condition'. If None, all three info
columns will be included in the table as categorical data.
scale_time : float
Scaling to be applied to time units.
scalings : dict | None
Scaling to be applied to the channels picked. If None, defaults to
``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)``.
copy : bool
If true, data will be copied. Else data may be modified in place.
start : int | None
If it is a Raw object, this defines a starting index for creating
the dataframe from a slice. The times will be interpolated from the
index and the sampling rate of the signal.
stop : int | None
If it is a Raw object, this defines a stop index for creating
the dataframe from a slice. The times will be interpolated from the
index and the sampling rate of the signal.
Returns
-------
df : instance of pandas.core.DataFrame
A dataframe suitable for usage with other
statistical/plotting/analysis packages. Column/Index values will
depend on the object type being converted, but should be
human-readable.
"""
from ..epochs import _BaseEpochs
from ..evoked import Evoked
from ..source_estimate import _BaseSourceEstimate
pd = _check_pandas_installed()
mindex = list()
# Treat SourceEstimates special because they don't have the same info
if isinstance(self, _BaseSourceEstimate):
if self.subject is None:
default_index = ['time']
else:
default_index = ['subject', 'time']
data = self.data.T
times = self.times
shape = data.shape
mindex.append(('subject', np.repeat(self.subject, shape[0])))
if isinstance(self.vertices, list):
# surface source estimates
col_names = [i for e in [
['{0} {1}'.format('LH' if ii < 1 else 'RH', vert)
for vert in vertno]
for ii, vertno in enumerate(self.vertices)]
for i in e]
else:
# volume source estimates
col_names = ['VOL {0}'.format(vert) for vert in self.vertices]
elif isinstance(self, (_BaseEpochs, _BaseRaw, Evoked)):
picks = self._get_check_picks(picks, self.ch_names)
if isinstance(self, _BaseEpochs):
default_index = ['condition', 'epoch', 'time']
data = self.get_data()[:, picks, :]
times = self.times
n_epochs, n_picks, n_times = data.shape
data = np.hstack(data).T # (time*epochs) x signals
# Multi-index creation
times = np.tile(times, n_epochs)
id_swapped = dict((v, k) for k, v in self.event_id.items())
names = [id_swapped[k] for k in self.events[:, 2]]
mindex.append(('condition', np.repeat(names, n_times)))
mindex.append(('epoch',
np.repeat(np.arange(n_epochs), n_times)))
col_names = [self.ch_names[k] for k in picks]
elif isinstance(self, (_BaseRaw, Evoked)):
default_index = ['time']
if isinstance(self, _BaseRaw):
data, times = self[picks, start:stop]
elif isinstance(self, Evoked):
data = self.data[picks, :]
times = self.times
n_picks, n_times = data.shape
data = data.T
col_names = [self.ch_names[k] for k in picks]
types = [channel_type(self.info, idx) for idx in picks]
n_channel_types = 0
ch_types_used = []
scalings = _handle_default('scalings', scalings)
for t in scalings.keys():
if t in types:
n_channel_types += 1
ch_types_used.append(t)
for t in ch_types_used:
scaling = scalings[t]
idx = [picks[i] for i in range(len(picks)) if types[i] == t]
if len(idx) > 0:
data[:, idx] *= scaling
else:
# In case some other object gets this mixin w/o an explicit check
raise NameError('Object must be one of Raw, Epochs, Evoked, or ' +
'SourceEstimate. This is {0}'.format(type(self)))
# Make sure that the time index is scaled correctly
times = np.round(times * scale_time)
mindex.append(('time', times))
if index is not None:
_check_pandas_index_arguments(index, default_index)
else:
index = default_index
if copy is True:
data = data.copy()
assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
df = pd.DataFrame(data, columns=col_names)
for i, (k, v) in enumerate(mindex):
df.insert(i, k, v)
if index is not None:
if 'time' in index:
logger.info('Converting time column to int64...')
df['time'] = df['time'].astype(np.int64)
df.set_index(index, inplace=True)
if all(i in default_index for i in index):
df.columns.name = 'signal'
return df
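# Usage sketch (illustrative, assuming a preloaded Raw instance named `raw`):
# >>> df = raw.to_data_frame(start=0, stop=1000)  # doctest: +SKIP
# >>> df.head()  # time index in ms, one column per picked channel  # doctest: +SKIP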
class TimeMixin(object):
"""Class to add sfreq and time_as_index capabilities to certain classes."""
def time_as_index(self, times, use_rounding=False):
"""Convert time to indices
Parameters
----------
times : list-like | float | int
List of numbers or a number representing points in time.
use_rounding : boolean
If True, use rounding (instead of truncation) when converting
times to indices. This can help avoid non-unique indices.
Returns
-------
index : ndarray
Indices corresponding to the times supplied.
"""
from ..source_estimate import _BaseSourceEstimate
if isinstance(self, _BaseSourceEstimate):
sfreq = 1. / self.tstep
else:
sfreq = self.info['sfreq']
index = (np.atleast_1d(times) - self.times[0]) * sfreq
if use_rounding:
index = np.round(index)
return index.astype(int)
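# Worked example (illustrative): for a Raw object with sfreq=1000.0 and
# times starting at 0.0, time_as_index([0.2, 0.5]) returns array([200, 500]);
# with use_rounding=False any fractional result is truncated rather than
# rounded to the nearest sample.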
def _check_fun(fun, d, *args, **kwargs):
want_shape = d.shape
d = fun(d, *args, **kwargs)
if not isinstance(d, np.ndarray):
raise TypeError('Return value must be an ndarray')
if d.shape != want_shape:
raise ValueError('Return data must have shape %s not %s'
% (want_shape, d.shape))
return d
class _BaseRaw(ProjMixin, ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin, ToDataFrameMixin,
TimeMixin, SizeMixin):
"""Base class for Raw data
Subclasses must provide the following methods:
* _read_segment_file(self, data, idx, fi, start, stop, cals, mult)
(only needed for types that support on-demand disk reads)
The `_BaseRaw._raw_extras` list can contain whatever data is necessary for
such on-demand reads. For `RawFIF` this means a list of variables formerly
known as ``_rawdirs``.
"""
@verbose
def __init__(self, info, preload=False,
first_samps=(0,), last_samps=None,
filenames=(None,), raw_extras=(None,),
orig_format='double', dtype=np.float64, verbose=None):
# wait until the end to preload data, but triage here
if isinstance(preload, np.ndarray):
# some functions (e.g., filtering) only work w/64-bit data
if preload.dtype not in (np.float64, np.complex128):
raise RuntimeError('datatype must be float64 or complex128, '
'not %s' % preload.dtype)
if preload.dtype != dtype:
raise ValueError('preload and dtype must match')
self._data = preload
self.preload = True
assert len(first_samps) == 1
last_samps = [first_samps[0] + self._data.shape[1] - 1]
load_from_disk = False
else:
if last_samps is None:
raise ValueError('last_samps must be given unless preload is '
'an ndarray')
if preload is False:
self.preload = False
load_from_disk = False
elif preload is not True and not isinstance(preload, string_types):
raise ValueError('bad preload: %s' % preload)
else:
load_from_disk = True
self._last_samps = np.array(last_samps)
self._first_samps = np.array(first_samps)
info._check_consistency() # make sure subclass did a good job
self.info = info
if info.get('buffer_size_sec', None) is None:
raise RuntimeError('Reader error, notify mne-python developers')
cals = np.empty(info['nchan'])
for k in range(info['nchan']):
cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
self.verbose = verbose
self._cals = cals
self._raw_extras = list(raw_extras)
# deal with compensation (only relevant for CTF data, either CTF
# reader or MNE-C converted CTF->FIF files)
self._read_comp_grade = self.compensation_grade # read property
if self._read_comp_grade is not None:
logger.info('Current compensation grade : %d'
% self._read_comp_grade)
self._comp = None
self._filenames = list(filenames)
self.orig_format = orig_format
self._projectors = list()
self._projector = None
self._dtype_ = dtype
self.annotations = None
# If we have True or a string, actually do the preloading
self._update_times()
if load_from_disk:
self._preload_data(preload)
@verbose
def apply_gradient_compensation(self, grade, verbose=None):
"""Apply CTF gradient compensation
.. warning:: The compensation matrices are stored with single
precision, so repeatedly switching between different
of compensation (e.g., 0->1->3->2) can increase
numerical noise, especially if data are saved to
disk in between changing grades. It is thus best to
only use a single gradient compensation level in
final analyses.
Parameters
----------
grade : int
CTF gradient compensation level.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
raw : instance of Raw
The modified Raw instance. Works in-place.
"""
grade = int(grade)
current_comp = self.compensation_grade
if current_comp != grade:
if self.proj:
raise RuntimeError('Cannot change compensation on data where '
'projectors have been applied')
# Figure out what operator to use (varies depending on preload)
from_comp = current_comp if self.preload else self._read_comp_grade
comp = make_compensator(self.info, from_comp, grade)
logger.info('Compensator constructed to change %d -> %d'
% (current_comp, grade))
set_current_comp(self.info, grade)
# We might need to apply it to our data now
if self.preload:
logger.info('Applying compensator to loaded data')
lims = np.concatenate([np.arange(0, len(self.times), 10000),
[len(self.times)]])
for start, stop in zip(lims[:-1], lims[1:]):
self._data[:, start:stop] = np.dot(
comp, self._data[:, start:stop])
else:
self._comp = comp # store it for later use
return self
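# Usage sketch (illustrative, assuming a CTF recording loaded as `raw`):
# >>> raw.apply_gradient_compensation(3)  # doctest: +SKIP
# >>> raw.compensation_grade  # doctest: +SKIP
# 3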
@property
def _dtype(self):
"""dtype for loading data (property so subclasses can override)"""
# most classes only store real data, they won't need anything special
return self._dtype_
def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None,
projector=None, verbose=None):
"""Read a chunk of raw data
Parameters
----------
start : int, (optional)
first sample to include (first is 0). If omitted, defaults to the
first sample in data.
stop : int, (optional)
First sample to not include.
If omitted, data is included to the end.
sel : array, optional
Indices of channels to select.
data_buffer : array or str, optional
numpy array to fill with data read, must have the correct shape.
If str, a np.memmap with the correct data type will be used
to store the data.
projector : array
SSP operator to apply to the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
data : array, [channels x samples]
the data matrix (channels x samples).
"""
# Initial checks
start = int(start)
stop = self.n_times if stop is None else min([int(stop), self.n_times])
if start >= stop:
raise ValueError('No data in this range')
# Initialize the data and calibration vector
n_sel_channels = self.info['nchan'] if sel is None else len(sel)
# convert sel to a slice if possible for efficiency
if sel is not None and len(sel) > 1 and np.all(np.diff(sel) == 1):
sel = slice(sel[0], sel[-1] + 1)
idx = slice(None, None, None) if sel is None else sel
data_shape = (n_sel_channels, stop - start)
dtype = self._dtype
if isinstance(data_buffer, np.ndarray):
if data_buffer.shape != data_shape:
raise ValueError('data_buffer has incorrect shape: %s != %s'
% (data_buffer.shape, data_shape))
data = data_buffer
elif isinstance(data_buffer, string_types):
# use a memmap
data = np.memmap(data_buffer, mode='w+',
dtype=dtype, shape=data_shape)
else:
data = np.zeros(data_shape, dtype=dtype)
# deal with having multiple files accessed by the raw object
cumul_lens = np.concatenate(([0], np.array(self._raw_lengths,
dtype='int')))
cumul_lens = np.cumsum(cumul_lens)
files_used = np.logical_and(np.less(start, cumul_lens[1:]),
np.greater_equal(stop - 1,
cumul_lens[:-1]))
# set up cals and mult (cals, compensation, and projector)
cals = self._cals.ravel()[np.newaxis, :]
if self._comp is not None:
if projector is not None:
mult = self._comp * cals
mult = np.dot(projector[idx], mult)
else:
mult = self._comp[idx] * cals
elif projector is not None:
mult = projector[idx] * cals
else:
mult = None
cals = cals.T[idx]
# read from necessary files
offset = 0
for fi in np.nonzero(files_used)[0]:
start_file = self._first_samps[fi]
# first iteration (only) could start in the middle somewhere
if offset == 0:
start_file += start - cumul_lens[fi]
stop_file = np.min([stop - cumul_lens[fi] + self._first_samps[fi],
self._last_samps[fi] + 1])
if start_file < self._first_samps[fi] or stop_file < start_file:
raise ValueError('Bad array indexing, could be a bug')
n_read = stop_file - start_file
this_sl = slice(offset, offset + n_read)
self._read_segment_file(data[:, this_sl], idx, fi,
int(start_file), int(stop_file),
cals, mult)
offset += n_read
return data
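# Worked example (illustrative): with two files of 1000 samples each,
# cumul_lens is [0, 1000, 2000]; a request for start=900, stop=1100 marks both
# files as used and is served as the last 100 samples of file 0 followed by
# the first 100 samples of file 1, written into `data` at offsets 0 and 100.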
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a segment of data from a file
Only needs to be implemented for readers that support
``preload=False``.
Parameters
----------
data : ndarray, shape (len(idx), stop - start + 1)
The data array. Should be modified inplace.
idx : ndarray | slice
The requested channel indices.
fi : int
The file index that must be read from.
start : int
The start sample in the given file.
stop : int
The stop sample in the given file (inclusive).
cals : ndarray, shape (len(idx), 1)
Channel calibrations (already sub-indexed).
mult : ndarray, shape (len(idx), len(info['chs'])) | None
The compensation + projection + cals matrix, if applicable.
"""
raise NotImplementedError
def _check_bad_segment(self, start, stop, picks,
reject_by_annotation=False):
"""Function for checking if data segment is bad.
If the slice is good, returns the data in desired range.
If rejected based on annotation, returns description of the
bad segment as a string.
Parameters
----------
start : int
First sample of the slice.
stop : int
End of the slice.
picks : array of int
Channel picks.
reject_by_annotation : bool
Whether to perform rejection based on annotations.
False by default.
Returns
-------
data : array | str
Data in the desired range (good segment) or description of the bad
segment.
"""
if start < 0:
return None
if reject_by_annotation and self.annotations is not None:
annot = self.annotations
sfreq = self.info['sfreq']
onset = _onset_to_seconds(self, annot.onset)
overlaps = np.where(onset < stop / sfreq)
overlaps = np.where(onset[overlaps] + annot.duration[overlaps] >
start / sfreq)
for descr in annot.description[overlaps]:
if descr.lower().startswith('bad'):
return descr
return self[picks, start:stop][0]
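# Illustrative behaviour sketch: if the raw object carries an annotation such
# as Annotations(onset=[10.], duration=[2.], description=['bad_blink']) and
# reject_by_annotation=True, a requested segment overlapping 10-12 s returns
# the string 'bad_blink' instead of the data array.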
@verbose
def load_data(self, verbose=None):
"""Load raw data
Parameters
----------
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
raw : instance of Raw
The raw object with data.
Notes
-----
This function will load raw data if it was not already preloaded.
If data were already preloaded, it will do nothing.
.. versionadded:: 0.10.0
"""
if not self.preload:
self._preload_data(True)
return self
@verbose
def _preload_data(self, preload, verbose=None):
"""This function actually preloads the data"""
data_buffer = preload if isinstance(preload, (string_types,
np.ndarray)) else None
logger.info('Reading %d ... %d = %9.3f ... %9.3f secs...' %
(0, len(self.times) - 1, 0., self.times[-1]))
self._data = self._read_segment(data_buffer=data_buffer)
assert len(self._data) == self.info['nchan']
self.preload = True
self._comp = None # no longer needed
self.close()
def _update_times(self):
"""Helper to update times"""
self._times = np.arange(self.n_times) / float(self.info['sfreq'])
# make it immutable
self._times.flags.writeable = False
@property
def first_samp(self):
return self._first_samps[0]
@property
def last_samp(self):
return self.first_samp + sum(self._raw_lengths) - 1
@property
def _raw_lengths(self):
return [l - f + 1 for f, l in zip(self._first_samps, self._last_samps)]
def __del__(self):
# remove file for memmap
if hasattr(self, '_data') and hasattr(self._data, 'filename'):
# First, close the file out; happens automatically on del
filename = self._data.filename
del self._data
# Now file can be removed
try:
os.remove(filename)
except OSError:
pass # ignore file that no longer exists
def __enter__(self):
""" Entering with block """
return self
def __exit__(self, exception_type, exception_val, trace):
""" Exiting with block """
try:
self.close()
except:
return exception_type, exception_val, trace
def _parse_get_set_params(self, item):
# make sure item is a tuple
if not isinstance(item, tuple): # only channel selection passed
item = (item, slice(None, None, None))
if len(item) != 2: # should be channels and time instants
raise RuntimeError("Unable to access raw data (need both channels "
"and time)")
if isinstance(item[0], slice):
start = item[0].start if item[0].start is not None else 0
nchan = self.info['nchan']
if start < 0:
start += nchan
if start < 0:
raise ValueError('start must be >= -%s' % nchan)
stop = item[0].stop if item[0].stop is not None else nchan
step = item[0].step if item[0].step is not None else 1
sel = list(range(start, stop, step))
else:
sel = item[0]
if isinstance(item[1], slice):
time_slice = item[1]
start, stop, step = (time_slice.start, time_slice.stop,
time_slice.step)
else:
item1 = item[1]
# Let's do automated type conversion to integer here
if np.array(item[1]).dtype.kind == 'i':
item1 = int(item1)
if isinstance(item1, (int, np.integer)):
start, stop, step = item1, item1 + 1, 1
else:
raise ValueError('Must pass int or slice to __getitem__')
if start is None:
start = 0
        if (step is not None) and (step != 1):
raise ValueError('step needs to be 1 : %d given' % step)
if isinstance(sel, (int, np.integer)):
sel = np.array([sel])
if sel is not None and len(sel) == 0:
raise ValueError("Empty channel list")
return sel, start, stop
def __getitem__(self, item):
"""Get raw data and times
Parameters
----------
item : tuple or array-like
See below for use cases.
Returns
-------
data : ndarray, shape (n_channels, n_times)
The raw data.
times : ndarray, shape (n_times,)
The times associated with the data.
Examples
--------
Generally raw data is accessed as::
>>> data, times = raw[picks, time_slice] # doctest: +SKIP
To get all data, you can thus do either of::
>>> data, times = raw[:] # doctest: +SKIP
Which will be equivalent to:
>>> data, times = raw[:, :] # doctest: +SKIP
To get only the good MEG data from 10-20 seconds, you could do::
>>> picks = mne.pick_types(raw.info, meg=True, exclude='bads') # doctest: +SKIP
>>> t_idx = raw.time_as_index([10., 20.]) # doctest: +SKIP
>>> data, times = raw[picks, t_idx[0]:t_idx[1]] # doctest: +SKIP
""" # noqa
sel, start, stop = self._parse_get_set_params(item)
if self.preload:
data = self._data[sel, start:stop]
else:
data = self._read_segment(start=start, stop=stop, sel=sel,
projector=self._projector,
verbose=self.verbose)
times = self.times[start:stop]
return data, times
def __setitem__(self, item, value):
"""setting raw data content with python slicing"""
_check_preload(self, 'Modifying data of Raw')
sel, start, stop = self._parse_get_set_params(item)
# set the data
self._data[sel, start:stop] = value
def anonymize(self):
"""Anonymize data.
This function will remove 'subject_info', 'meas_date', 'file_id',
'meas_id' if they exist in ``raw.info``.
Returns
-------
raw : instance of Raw
The raw object. Operates in place.
"""
anonymize_info(self.info)
return self
@verbose
def apply_function(self, fun, picks, dtype, n_jobs, *args, **kwargs):
""" Apply a function to a subset of channels.
The function "fun" is applied to the channels defined in "picks". The
data of the Raw object is modified inplace. If the function returns
a different data type (e.g. numpy.complex) it must be specified using
the dtype parameter, which causes the data type used for representing
the raw data to change.
The Raw object has to have the data loaded e.g. with ``preload=True``
or ``self.load_data()``.
.. note:: If n_jobs > 1, more memory is required as
``len(picks) * n_times`` additional time points need to
                  be temporarily stored in memory.
.. note:: If the data type changes (dtype != None), more memory is
required since the original and the converted data needs
to be stored in memory.
Parameters
----------
fun : function
A function to be applied to the channels. The first argument of
fun has to be a timeseries (numpy.ndarray). The function must
            return a numpy.ndarray with the same size as the input.
picks : array-like of int | None
Indices of channels to apply the function to. If None, all
M-EEG channels are used.
dtype : numpy.dtype
Data type to use for raw data after applying the function. If None
the data type is not modified.
n_jobs: int
Number of jobs to run in parallel.
*args :
Additional positional arguments to pass to fun (first pos. argument
of fun is the timeseries of a channel).
**kwargs :
Keyword arguments to pass to fun. Note that if "verbose" is passed
as a member of ``kwargs``, it will be consumed and will override
the default mne-python verbose level (see mne.verbose).
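        Examples
        --------
        A minimal sketch; assumes ``raw`` has preloaded data and that scaling
        the EEG channels by an arbitrary gain is the desired operation::
            >>> picks = mne.pick_types(raw.info, meg=False, eeg=True)  # doctest: +SKIP
            >>> raw.apply_function(lambda x: x * 1e6, picks, None, 1)  # doctest: +SKIP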
"""
_check_preload(self, 'raw.apply_function')
if picks is None:
picks = _pick_data_channels(self.info, exclude=[],
with_ref_meg=False)
if not callable(fun):
raise ValueError('fun needs to be a function')
data_in = self._data
if dtype is not None and dtype != self._data.dtype:
self._data = self._data.astype(dtype)
if n_jobs == 1:
# modify data inplace to save memory
for idx in picks:
self._data[idx, :] = _check_fun(fun, data_in[idx, :],
*args, **kwargs)
else:
# use parallel function
parallel, p_fun, _ = parallel_func(_check_fun, n_jobs)
data_picks_new = parallel(p_fun(fun, data_in[p], *args, **kwargs)
for p in picks)
for pp, p in enumerate(picks):
self._data[p, :] = data_picks_new[pp]
@verbose
def apply_hilbert(self, picks, envelope=False, n_jobs=1, n_fft='',
verbose=None):
""" Compute analytic signal or envelope for a subset of channels.
If envelope=False, the analytic signal for the channels defined in
"picks" is computed and the data of the Raw object is converted to
a complex representation (the analytic signal is complex valued).
If envelope=True, the absolute value of the analytic signal for the
channels defined in "picks" is computed, resulting in the envelope
signal.
.. warning: Do not use ``envelope=True`` if you intend to compute
an inverse solution from the raw data. If you want to
compute the envelope in source space, use
``envelope=False`` and compute the envelope after the
inverse solution has been obtained.
.. note:: If envelope=False, more memory is required since the
original raw data as well as the analytic signal have
temporarily to be stored in memory.
.. note:: If n_jobs > 1, more memory is required as
``len(picks) * n_times`` additional time points need to
                  be temporarily stored in memory.
Parameters
----------
picks : array-like of int
Indices of channels to apply the function to.
envelope : bool (default: False)
Compute the envelope signal of each channel.
n_jobs: int
Number of jobs to run in parallel.
n_fft : int | None | str
Points to use in the FFT for Hilbert transformation. The signal
will be padded with zeros before computing Hilbert, then cut back
to original length. If None, n == self.n_times. If 'auto',
            the next highest fast FFT length will be used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
The analytic signal "x_a(t)" of "x(t)" is::
x_a = F^{-1}(F(x) 2U) = x + i y
where "F" is the Fourier transform, "U" the unit step function,
and "y" the Hilbert transform of "x". One usage of the analytic
signal is the computation of the envelope signal, which is given by
"e(t) = abs(x_a(t))". Due to the linearity of Hilbert transform and the
        MNE inverse solution, the envelope in source space can be obtained
by computing the analytic signal in sensor space, applying the MNE
inverse, and computing the envelope in source space.
Also note that the n_fft parameter will allow you to pad the signal
with zeros before performing the Hilbert transform. This padding
is cut off, but it may result in a slightly different result
(particularly around the edges). Use at your own risk.
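        Examples
        --------
        Illustrative sketch; assumes ``raw`` is preloaded and that the
        gradiometer-only selection is an arbitrary choice::
            >>> picks = mne.pick_types(raw.info, meg='grad')  # doctest: +SKIP
            >>> raw.apply_hilbert(picks, envelope=True, n_fft='auto')  # doctest: +SKIP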
"""
if n_fft is None:
n_fft = len(self.times)
elif isinstance(n_fft, string_types):
if n_fft == '':
n_fft = len(self.times)
warn('n_fft is None by default in 0.13 but will change to '
'"auto" in 0.14', DeprecationWarning)
elif n_fft == 'auto':
n_fft = next_fast_len(len(self.times))
n_fft = int(n_fft)
if n_fft < self.n_times:
raise ValueError("n_fft must be greater than n_times")
if envelope is True:
self.apply_function(_my_hilbert, picks, None, n_jobs, n_fft,
envelope=envelope)
else:
self.apply_function(_my_hilbert, picks, np.complex64, n_jobs,
n_fft, envelope=envelope)
@verbose
def filter(self, l_freq, h_freq, picks=None, filter_length='',
l_trans_bandwidth=None, h_trans_bandwidth=None, n_jobs=1,
method='fir', iir_params=None, phase='', fir_window='',
verbose=None):
"""Filter a subset of channels.
Applies a zero-phase low-pass, high-pass, band-pass, or band-stop
filter to the channels selected by ``picks``. By default the data
of the Raw object is modified inplace.
The Raw object has to have the data loaded e.g. with ``preload=True``
or ``self.load_data()``.
``l_freq`` and ``h_freq`` are the frequencies below which and above
which, respectively, to filter out of the data. Thus the uses are:
* ``l_freq < h_freq``: band-pass filter
* ``l_freq > h_freq``: band-stop filter
* ``l_freq is not None and h_freq is None``: high-pass filter
* ``l_freq is None and h_freq is not None``: low-pass filter
``self.info['lowpass']`` and ``self.info['highpass']`` are only
updated with picks=None.
.. note:: If n_jobs > 1, more memory is required as
``len(picks) * n_times`` additional time points need to
                  be temporarily stored in memory.
Parameters
----------
l_freq : float | None
Low cut-off frequency in Hz. If None the data are only low-passed.
h_freq : float | None
High cut-off frequency in Hz. If None the data are only
high-passed.
picks : array-like of int | None
Indices of channels to filter. If None only the data (MEG/EEG)
channels will be filtered.
filter_length : str | int
Length of the FIR filter to use (if applicable):
* int: specified length in samples.
* 'auto' (default in 0.14): the filter length is chosen based
on the size of the transition regions (6.6 times the
reciprocal of the shortest transition band for
fir_window='hamming').
* str: (default in 0.13 is "10s") a human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be
converted to that number of samples if ``phase="zero"``, or
the shortest power-of-two length at least that duration for
``phase="zero-double"``.
l_trans_bandwidth : float | str
Width of the transition band at the low cut-off frequency in Hz
(high pass or cutoff 1 in bandpass). Can be "auto"
(default in 0.14) to use a multiple of ``l_freq``::
min(max(l_freq * 0.25, 2), l_freq)
Only used for ``method='fir'``.
h_trans_bandwidth : float | str
Width of the transition band at the high cut-off frequency in Hz
(low pass or cutoff 2 in bandpass). Can be "auto"
(default in 0.14) to use a multiple of ``h_freq``::
min(max(h_freq * 0.25, 2.), info['sfreq'] / 2. - h_freq)
Only used for ``method='fir'``.
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly, CUDA is initialized, and method='fir'.
method : str
'fir' will use overlap-add FIR filtering, 'iir' will use IIR
forward-backward filtering (via filtfilt).
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
phase : str
Phase of the filter, only used if ``method='fir'``.
By default, a symmetric linear-phase FIR filter is constructed.
If ``phase='zero'`` (default in 0.14), the delay of this filter
is compensated for. If ``phase=='zero-double'`` (default in 0.13
and before), then this filter is applied twice, once forward, and
once backward.
.. versionadded:: 0.13
fir_window : str
The window to use in FIR design, can be "hamming" (default in
0.14), "hann" (default in 0.13), or "blackman".
.. versionadded:: 0.13
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
raw : instance of Raw
The raw instance with filtered data.
See Also
--------
mne.Epochs.savgol_filter
mne.io.Raw.notch_filter
mne.io.Raw.resample
mne.filter.filter_data
Notes
-----
For more information, see the tutorials :ref:`tut_background_filtering`
and :ref:`tut_artifacts_filter`.
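        Examples
        --------
        Illustrative calls; assumes ``raw`` is preloaded and the cut-off
        values are arbitrary (first a 1-40 Hz band-pass, then a 40 Hz
        low-pass)::
            >>> raw.filter(1., 40.)  # doctest: +SKIP
            >>> raw.filter(None, 40.)  # doctest: +SKIP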
"""
_check_preload(self, 'raw.filter')
data_picks = _pick_data_or_ica(self.info)
update_info = False
if picks is None:
picks = data_picks
update_info = True
# let's be safe.
if len(picks) == 0:
raise RuntimeError('Could not find any valid channels for '
'your Raw object. Please contact the '
'MNE-Python developers.')
elif h_freq is not None or l_freq is not None:
if in1d(data_picks, picks).all():
update_info = True
else:
logger.info('Filtering a subset of channels. The highpass and '
'lowpass values in the measurement info will not '
'be updated.')
filter_data(self._data, self.info['sfreq'], l_freq, h_freq, picks,
filter_length, l_trans_bandwidth, h_trans_bandwidth,
n_jobs, method, iir_params, copy=False, phase=phase,
fir_window=fir_window)
# update info if filter is applied to all data channels,
# and it's not a band-stop filter
if update_info:
if h_freq is not None and (l_freq is None or l_freq < h_freq) and \
(self.info["lowpass"] is None or
h_freq < self.info['lowpass']):
self.info['lowpass'] = float(h_freq)
if l_freq is not None and (h_freq is None or l_freq < h_freq) and \
(self.info["highpass"] is None or
l_freq > self.info['highpass']):
self.info['highpass'] = float(l_freq)
return self
@verbose
def notch_filter(self, freqs, picks=None, filter_length='',
notch_widths=None, trans_bandwidth=1.0, n_jobs=1,
method='fft', iir_params=None, mt_bandwidth=None,
p_value=0.05, phase='', fir_window='', verbose=None):
"""Notch filter a subset of channels.
Applies a zero-phase notch filter to the channels selected by
"picks". By default the data of the Raw object is modified inplace.
The Raw object has to have the data loaded e.g. with ``preload=True``
or ``self.load_data()``.
.. note:: If n_jobs > 1, more memory is required as
``len(picks) * n_times`` additional time points need to
                  be temporarily stored in memory.
Parameters
----------
freqs : float | array of float | None
Specific frequencies to filter out from data, e.g.,
np.arange(60, 241, 60) in the US or np.arange(50, 251, 50) in
Europe. None can only be used with the mode 'spectrum_fit',
where an F test is used to find sinusoidal components.
picks : array-like of int | None
Indices of channels to filter. If None only the data (MEG/EEG)
channels will be filtered.
filter_length : str | int
Length of the FIR filter to use (if applicable):
* int: specified length in samples.
* 'auto' (default in 0.14): the filter length is chosen based
on the size of the transition regions (6.6 times the
reciprocal of the shortest transition band for
fir_window='hamming').
* str: (default in 0.13 is "10s") a human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be
converted to that number of samples if ``phase="zero"``, or
the shortest power-of-two length at least that duration for
``phase="zero-double"``.
notch_widths : float | array of float | None
Width of each stop band (centred at each freq in freqs) in Hz.
If None, freqs / 200 is used.
trans_bandwidth : float
Width of the transition band in Hz.
Only used for ``method='fir'``.
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly, CUDA is initialized, and method='fir'.
method : str
'fir' will use overlap-add FIR filtering, 'iir' will use IIR
forward-backward filtering (via filtfilt). 'spectrum_fit' will
use multi-taper estimation of sinusoidal components.
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
mt_bandwidth : float | None
The bandwidth of the multitaper windowing function in Hz.
Only used in 'spectrum_fit' mode.
p_value : float
p-value to use in F-test thresholding to determine significant
sinusoidal components to remove when method='spectrum_fit' and
freqs=None. Note that this will be Bonferroni corrected for the
number of frequencies, so large p-values may be justified.
phase : str
Phase of the filter, only used if ``method='fir'``.
By default, a symmetric linear-phase FIR filter is constructed.
If ``phase='zero'`` (default in 0.14), the delay of this filter
is compensated for. If ``phase=='zero-double'`` (default in 0.13
and before), then this filter is applied twice, once forward, and
once backward.
.. versionadded:: 0.13
fir_window : str
The window to use in FIR design, can be "hamming" (default in
0.14), "hann" (default in 0.13), or "blackman".
.. versionadded:: 0.13
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
raw : instance of Raw
The raw instance with filtered data.
See Also
--------
mne.io.Raw.filter
Notes
-----
For details, see :func:`mne.filter.notch_filter`.
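        Examples
        --------
        Illustrative call removing 60 Hz line noise and its harmonics;
        assumes ``raw`` is preloaded and a 60 Hz mains frequency::
            >>> import numpy as np  # doctest: +SKIP
            >>> raw.notch_filter(np.arange(60, 241, 60))  # doctest: +SKIP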
"""
fs = float(self.info['sfreq'])
if picks is None:
picks = _pick_data_or_ica(self.info)
# let's be safe.
if len(picks) < 1:
raise RuntimeError('Could not find any valid channels for '
'your Raw object. Please contact the '
'MNE-Python developers.')
_check_preload(self, 'raw.notch_filter')
self._data = notch_filter(
self._data, fs, freqs, filter_length=filter_length,
notch_widths=notch_widths, trans_bandwidth=trans_bandwidth,
method=method, iir_params=iir_params, mt_bandwidth=mt_bandwidth,
p_value=p_value, picks=picks, n_jobs=n_jobs, copy=False,
phase=phase, fir_window=fir_window)
return self
@verbose
def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None,
n_jobs=1, events=None, copy=None, verbose=None):
"""Resample all channels.
The Raw object has to have the data loaded e.g. with ``preload=True``
or ``self.load_data()``.
.. warning:: The intended purpose of this function is primarily to
speed up computations (e.g., projection calculation) when
precise timing of events is not required, as downsampling
raw data effectively jitters trigger timings. It is
generally recommended not to epoch downsampled data,
but instead epoch and then downsample, as epoching
downsampled data jitters triggers.
For more, see
`this illustrative gist <https://gist.github.com/Eric89GXL/01642cb3789992fbca59>`_.
If resampling the continuous data is desired, it is
recommended to construct events using the original data.
The event onsets can be jointly resampled with the raw
data using the 'events' parameter.
Parameters
----------
sfreq : float
New sample rate to use.
npad : int | str
Amount to pad the start and end of the data.
Can also be "auto" to use a padding that will result in
a power-of-two size (can be much faster).
window : string or tuple
Frequency-domain window to use in resampling.
See :func:`scipy.signal.resample`.
stim_picks : array of int | None
Stim channels. These channels are simply subsampled or
supersampled (without applying any filtering). This reduces
resampling artifacts in stim channels, but may lead to missing
triggers. If None, stim channels are automatically chosen using
:func:`mne.pick_types`.
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly and CUDA is initialized.
events : 2D array, shape (n_events, 3) | None
An optional event matrix. When specified, the onsets of the events
are resampled jointly with the data.
copy : bool
Whether to operate on a copy of the data (True) or modify data
in-place (False). Defaults to False.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
raw : instance of Raw
The resampled version of the raw object.
See Also
--------
mne.io.Raw.filter
mne.Epochs.resample
Notes
-----
For some data, it may be more accurate to use ``npad=0`` to reduce
artifacts. This is dataset dependent -- check your data!
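        Examples
        --------
        Illustrative call; assumes ``raw`` is preloaded, ``events`` was found
        on the original data, and 100 Hz is an arbitrary target rate::
            >>> raw, events = raw.resample(100., events=events)  # doctest: +SKIP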
""" # noqa
_check_preload(self, 'raw.resample')
inst = _check_copy_dep(self, copy)
# When no event object is supplied, some basic detection of dropped
# events is performed to generate a warning. Finding events can fail
# for a variety of reasons, e.g. if no stim channel is present or it is
# corrupted. This should not stop the resampling from working. The
# warning should simply not be generated in this case.
if events is None:
try:
original_events = find_events(inst)
except:
pass
sfreq = float(sfreq)
o_sfreq = float(inst.info['sfreq'])
offsets = np.concatenate(([0], np.cumsum(inst._raw_lengths)))
new_data = list()
ratio = sfreq / o_sfreq
# set up stim channel processing
if stim_picks is None:
stim_picks = pick_types(inst.info, meg=False, ref_meg=False,
stim=True, exclude=[])
stim_picks = np.asanyarray(stim_picks)
for ri in range(len(inst._raw_lengths)):
data_chunk = inst._data[:, offsets[ri]:offsets[ri + 1]]
new_data.append(resample(data_chunk, sfreq, o_sfreq, npad,
window=window, n_jobs=n_jobs))
new_ntimes = new_data[ri].shape[1]
# In empirical testing, it was faster to resample all channels
# (above) and then replace the stim channels than it was to only
# resample the proper subset of channels and then use np.insert()
# to restore the stims.
if len(stim_picks) > 0:
stim_resampled = _resample_stim_channels(
data_chunk[stim_picks], new_data[ri].shape[1],
data_chunk.shape[1])
new_data[ri][stim_picks] = stim_resampled
inst._first_samps[ri] = int(inst._first_samps[ri] * ratio)
inst._last_samps[ri] = inst._first_samps[ri] + new_ntimes - 1
inst._raw_lengths[ri] = new_ntimes
inst._data = np.concatenate(new_data, axis=1)
inst.info['sfreq'] = sfreq
if inst.info.get('lowpass') is not None:
inst.info['lowpass'] = min(inst.info['lowpass'], sfreq / 2.)
inst._update_times()
# See the comment above why we ignore all errors here.
if events is None:
try:
                # Did we lose events?
resampled_events = find_events(inst)
if len(resampled_events) != len(original_events):
warn('Resampling of the stim channels caused event '
'information to become unreliable. Consider finding '
'events on the original data and passing the event '
'matrix as a parameter.')
except:
pass
return inst
else:
if copy:
events = events.copy()
events[:, 0] = np.minimum(
np.round(events[:, 0] * ratio).astype(int),
inst._data.shape[1]
)
return inst, events
def crop(self, tmin=0.0, tmax=None, copy=None):
"""Crop raw data file.
Limit the data from the raw file to go between specific times. Note
that the new tmin is assumed to be t=0 for all subsequently called
functions (e.g., time_as_index, or Epochs). New first_samp and
last_samp are set accordingly.
Parameters
----------
tmin : float
New start time in seconds (must be >= 0).
tmax : float | None
New end time in seconds of the data (cannot exceed data duration).
copy : bool
This parameter has been deprecated and will be removed in 0.14.
Use inst.copy() instead.
Whether to return a new instance or modify in place.
Returns
-------
raw : instance of Raw
The cropped raw object.
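        Examples
        --------
        Illustrative call keeping only the first 60 seconds (the duration is
        arbitrary)::
            >>> raw.crop(tmin=0., tmax=60.)  # doctest: +SKIP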
"""
raw = _check_copy_dep(self, copy)
max_time = (raw.n_times - 1) / raw.info['sfreq']
if tmax is None:
tmax = max_time
if tmin > tmax:
raise ValueError('tmin must be less than tmax')
if tmin < 0.0:
raise ValueError('tmin must be >= 0')
elif tmax > max_time:
raise ValueError('tmax must be less than or equal to the max raw '
'time (%0.4f sec)' % max_time)
smin, smax = np.where(_time_mask(self.times, tmin, tmax,
sfreq=self.info['sfreq']))[0][[0, -1]]
cumul_lens = np.concatenate(([0], np.array(raw._raw_lengths,
dtype='int')))
cumul_lens = np.cumsum(cumul_lens)
keepers = np.logical_and(np.less(smin, cumul_lens[1:]),
np.greater_equal(smax, cumul_lens[:-1]))
keepers = np.where(keepers)[0]
raw._first_samps = np.atleast_1d(raw._first_samps[keepers])
# Adjust first_samp of first used file!
raw._first_samps[0] += smin - cumul_lens[keepers[0]]
raw._last_samps = np.atleast_1d(raw._last_samps[keepers])
raw._last_samps[-1] -= cumul_lens[keepers[-1] + 1] - 1 - smax
raw._raw_extras = [r for ri, r in enumerate(raw._raw_extras)
if ri in keepers]
raw._filenames = [r for ri, r in enumerate(raw._filenames)
if ri in keepers]
if raw.preload:
# slice and copy to avoid the reference to large array
raw._data = raw._data[:, smin:smax + 1].copy()
raw._update_times()
return raw
@verbose
def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=10,
drop_small_buffer=False, proj=False, fmt='single',
overwrite=False, split_size='2GB', verbose=None):
"""Save raw data to file
Parameters
----------
fname : string
File name of the new dataset. This has to be a new filename
unless data have been preloaded. Filenames should end with
raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif
or raw_tsss.fif.gz.
picks : array-like of int | None
Indices of channels to include. If None all channels are kept.
tmin : float | None
Time in seconds of first sample to save. If None first sample
is used.
tmax : float | None
Time in seconds of last sample to save. If None last sample
is used.
buffer_size_sec : float | None
Size of data chunks in seconds. If None, the buffer size of
the original file is used.
drop_small_buffer : bool
            Whether to drop the last (possibly shorter) buffer. This is
            required by maxfilter (SSS), which only accepts raw files with
            buffers of the same size.
proj : bool
If True the data is saved with the projections applied (active).
.. note:: If ``apply_proj()`` was used to apply the projections,
                      the projections will be active even if ``proj`` is False.
fmt : str
Format to use to save raw data. Valid options are 'double',
'single', 'int', and 'short' for 64- or 32-bit float, or 32- or
16-bit integers, respectively. It is **strongly** recommended to
use 'single', as this is backward-compatible, and is standard for
maintaining precision. Note that using 'short' or 'int' may result
in loss of precision, complex data cannot be saved as 'short',
and neither complex data types nor real data stored as 'double'
can be loaded with the MNE command-line tools. See raw.orig_format
to determine the format the original data were stored in.
overwrite : bool
If True, the destination file (if it exists) will be overwritten.
If False (default), an error will be raised if the file exists.
split_size : string | int
Large raw files are automatically split into multiple pieces. This
parameter specifies the maximum size of each piece. If the
parameter is an integer, it specifies the size in Bytes. It is
also possible to pass a human-readable string, e.g., 100MB.
.. note:: Due to FIFF file limitations, the maximum split
size is 2GB.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
If Raw is a concatenation of several raw files, **be warned** that
only the measurement information from the first raw file is stored.
This likely means that certain operations with external tools may not
work properly on a saved concatenated file (e.g., probably some
or all forms of SSS). It is recommended not to concatenate and
then save raw files for this reason.
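        Examples
        --------
        Illustrative call; ``'sample_raw.fif'`` is a hypothetical file name
        that follows the required naming convention::
            >>> raw.save('sample_raw.fif', overwrite=True)  # doctest: +SKIP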
"""
check_fname(fname, 'raw', ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif',
'raw.fif.gz', 'raw_sss.fif.gz',
'raw_tsss.fif.gz'))
split_size = _get_split_size(split_size)
fname = op.realpath(fname)
if not self.preload and fname in self._filenames:
raise ValueError('You cannot save data to the same file.'
' Please use a different filename.')
if self.preload:
if np.iscomplexobj(self._data):
warn('Saving raw file with complex data. Loading with '
'command-line MNE tools will not work.')
type_dict = dict(short=FIFF.FIFFT_DAU_PACK16,
int=FIFF.FIFFT_INT,
single=FIFF.FIFFT_FLOAT,
double=FIFF.FIFFT_DOUBLE)
if fmt not in type_dict.keys():
raise ValueError('fmt must be "short", "int", "single", '
'or "double"')
reset_dict = dict(short=False, int=False, single=True, double=True)
reset_range = reset_dict[fmt]
data_type = type_dict[fmt]
data_test = self[0, 0][0]
if fmt == 'short' and np.iscomplexobj(data_test):
raise ValueError('Complex data must be saved as "single" or '
'"double", not "short"')
# check for file existence
_check_fname(fname, overwrite)
if proj:
info = copy.deepcopy(self.info)
projector, info = setup_proj(info)
activate_proj(info['projs'], copy=False)
else:
info = self.info
projector = None
#
# Set up the reading parameters
#
# Convert to samples
start = int(np.floor(tmin * self.info['sfreq']))
# "stop" is the first sample *not* to save, so we need +1's here
if tmax is None:
stop = np.inf
else:
stop = self.time_as_index(float(tmax), use_rounding=True)[0] + 1
stop = min(stop, self.last_samp - self.first_samp + 1)
buffer_size = self._get_buffer_size(buffer_size_sec)
# write the raw file
_write_raw(fname, self, info, picks, fmt, data_type, reset_range,
start, stop, buffer_size, projector, drop_small_buffer,
split_size, 0, None)
def plot(self, events=None, duration=10.0, start=0.0, n_channels=20,
bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
event_color='cyan', scalings=None, remove_dc=True, order='type',
show_options=False, title=None, show=True, block=False,
highpass=None, lowpass=None, filtorder=4, clipping=None):
"""Plot raw data
Parameters
----------
events : array | None
Events to show with vertical bars.
duration : float
Time window (sec) to plot in a given time.
start : float
Initial time to show (can be changed dynamically once plotted).
n_channels : int
Number of channels to plot at once. Defaults to 20. Has no effect
if ``order`` is 'position' or 'selection'.
bgcolor : color object
Color of the background.
color : dict | color object | None
Color for the data traces. If None, defaults to::
dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',
emg='k', ref_meg='steelblue', misc='k', stim='k',
resp='k', chpi='k')
bad_color : color object
Color to make bad channels.
event_color : color object
Color to use for events.
scalings : dict | None
Scaling factors for the traces. If any fields in scalings are
'auto', the scaling factor is set to match the 99.5th percentile of
a subset of the corresponding data. If scalings == 'auto', all
scalings fields are set to 'auto'. If any fields are 'auto' and
            data are not preloaded, a subset of times up to 100 MB will be
loaded. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1,
resp=1, chpi=1e-4)
remove_dc : bool
If True remove DC component when plotting data.
order : str | array of int
Order in which to plot data. 'type' groups by channel type,
'original' plots in the order of ch_names, 'selection' uses
Elekta's channel groupings (only works for Neuromag data),
'position' groups the channels by the positions of the sensors.
            'selection' and 'position' modes allow custom selections by using
            the lasso selector on the topomap. Pressing the ``ctrl`` key while
selecting allows appending to the current selection. If array, only
the channels in the array are plotted in the given order. Defaults
to 'type'.
show_options : bool
If True, a dialog for options related to projection is shown.
title : str | None
            The title of the window. If None, either the filename of the
            raw object or '<unknown>' will be displayed as the title.
show : bool
Show figures if True
block : bool
Whether to halt program execution until the figure is closed.
Useful for setting bad channels on the fly (click on line).
May not work on all systems / platforms.
highpass : float | None
Highpass to apply when displaying data.
lowpass : float | None
Lowpass to apply when displaying data.
filtorder : int
Filtering order. Note that for efficiency and simplicity,
filtering during plotting uses forward-backward IIR filtering,
so the effective filter order will be twice ``filtorder``.
Filtering the lines for display may also produce some edge
artifacts (at the left and right edges) of the signals
during display. Filtering requires scipy >= 0.10.
clipping : str | None
If None, channels are allowed to exceed their designated bounds in
the plot. If "clamp", then values are clamped to the appropriate
range for display, creating step-like artifacts. If "transparent",
then excessive values are not shown, creating gaps in the traces.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Raw traces.
Notes
-----
The arrow keys (up/down/left/right) can typically be used to navigate
between channels and time ranges, but this depends on the backend
matplotlib is configured to use (e.g., mpl.use('TkAgg') should work).
The scaling can be adjusted with - and + (or =) keys. The viewport
dimensions can be adjusted with page up/page down and home/end keys.
        Full screen mode can be toggled with the f11 key. To mark or un-mark a
channel as bad, click on the rather flat segments of a channel's time
series. The changes will be reflected immediately in the raw object's
``raw.info['bads']`` entry.
"""
return plot_raw(self, events, duration, start, n_channels, bgcolor,
color, bad_color, event_color, scalings, remove_dc,
order, show_options, title, show, block, highpass,
lowpass, filtorder, clipping)
@verbose
def plot_psd(self, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,
proj=False, n_fft=2048, picks=None, ax=None,
color='black', area_mode='std', area_alpha=0.33,
n_overlap=0, dB=True, show=True, n_jobs=1, verbose=None):
"""Plot the power spectral density across channels
Parameters
----------
tmin : float
Start time for calculations.
tmax : float
End time for calculations.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
proj : bool
Apply projection.
n_fft : int
Number of points to use in Welch FFT calculations.
picks : array-like of int | None
List of channels to use. Cannot be None if `ax` is supplied. If
both `picks` and `ax` are None, separate subplots will be created
for each standard channel type (`mag`, `grad`, and `eeg`).
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use.
area_mode : str | None
How to plot area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels)
will be plotted. Bad channels will be excluded from these
calculations. If None, no area will be plotted.
area_alpha : float
Alpha for the area.
n_overlap : int
The number of points of overlap between blocks. The default value
is 0 (no overlap).
dB : bool
If True, transform data to decibels.
show : bool
Call pyplot.show() at the end.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure with frequency spectra of the data channels.
"""
return plot_raw_psd(self, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
proj=proj, n_fft=n_fft, picks=picks, ax=ax,
color=color, area_mode=area_mode,
area_alpha=area_alpha, n_overlap=n_overlap,
dB=dB, show=show, n_jobs=n_jobs)
def plot_psd_topo(self, tmin=0., tmax=None, fmin=0, fmax=100, proj=False,
n_fft=2048, n_overlap=0, layout=None, color='w',
fig_facecolor='k', axis_facecolor='k', dB=True,
show=True, n_jobs=1, verbose=None):
"""Function for plotting channel wise frequency spectra as topography.
Parameters
----------
tmin : float
Start time for calculations. Defaults to zero.
tmax : float | None
End time for calculations. If None (default), the end of data is
used.
fmin : float
Start frequency to consider. Defaults to zero.
fmax : float
End frequency to consider. Defaults to 100.
proj : bool
Apply projection. Defaults to False.
n_fft : int
Number of points to use in Welch FFT calculations. Defaults to
2048.
n_overlap : int
The number of points of overlap between blocks. Defaults to 0
(no overlap).
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If None (default), the correct
layout is inferred from the data.
color : str | tuple
A matplotlib-compatible color to use for the curves. Defaults to
white.
fig_facecolor : str | tuple
A matplotlib-compatible color to use for the figure background.
Defaults to black.
axis_facecolor : str | tuple
A matplotlib-compatible color to use for the axis background.
Defaults to black.
dB : bool
If True, transform data to decibels. Defaults to True.
show : bool
Show figure if True. Defaults to True.
n_jobs : int
Number of jobs to run in parallel. Defaults to 1.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
return plot_raw_psd_topo(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, proj=proj, n_fft=n_fft,
n_overlap=n_overlap, layout=layout,
color=color, fig_facecolor=fig_facecolor,
axis_facecolor=axis_facecolor, dB=dB,
show=show, n_jobs=n_jobs, verbose=verbose)
def estimate_rank(self, tstart=0.0, tstop=30.0, tol=1e-4,
return_singular=False, picks=None, scalings='norm'):
"""Estimate rank of the raw data
This function is meant to provide a reasonable estimate of the rank.
The true rank of the data depends on many factors, so use at your
own risk.
Parameters
----------
tstart : float
Start time to use for rank estimation. Default is 0.0.
tstop : float | None
End time to use for rank estimation. Default is 30.0.
If None, the end time of the raw file is used.
tol : float
Tolerance for singular values to consider non-zero in
calculating the rank. The singular values are calculated
in this method such that independent data are expected to
have singular value around one.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
picks : array_like of int, shape (n_selected_channels,)
The channels to be considered for rank estimation.
If None (default) meg and eeg channels are included.
scalings : dict | 'norm'
To achieve reliable rank estimation on multiple sensors,
sensors have to be rescaled. This parameter controls the
rescaling. If dict, it will update the
following dict of defaults:
dict(mag=1e11, grad=1e9, eeg=1e5)
If 'norm' data will be scaled by internally computed
channel-wise norms.
Defaults to 'norm'.
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
Notes
-----
If data are not pre-loaded, the appropriate data will be loaded
by this function (can be memory intensive).
Projectors are not taken into account unless they have been applied
to the data using apply_proj(), since it is not always possible
to tell whether or not projectors have been applied previously.
Bad channels will be excluded from calculations.
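        Examples
        --------
        Illustrative call using the default 30-second window::
            >>> rank = raw.estimate_rank()  # doctest: +SKIP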
"""
from ..cov import _estimate_rank_meeg_signals
start = max(0, self.time_as_index(tstart)[0])
if tstop is None:
stop = self.n_times - 1
else:
stop = min(self.n_times - 1, self.time_as_index(tstop)[0])
tslice = slice(start, stop + 1)
if picks is None:
picks = _pick_data_channels(self.info, exclude='bads',
with_ref_meg=False)
# ensure we don't get a view of data
if len(picks) == 1:
return 1.0, 1.0
# this should already be a copy, so we can overwrite it
data = self[picks, tslice][0]
out = _estimate_rank_meeg_signals(
data, pick_info(self.info, picks),
scalings=scalings, tol=tol, return_singular=return_singular)
return out
@property
def ch_names(self):
"""Channel names"""
return self.info['ch_names']
@property
def times(self):
"""Time points"""
return self._times
@property
def n_times(self):
"""Number of time points"""
return self.last_samp - self.first_samp + 1
def __len__(self):
"""The number of time points
Returns
-------
len : int
The number of time points.
Examples
--------
This can be used as::
>>> len(raw) # doctest: +SKIP
1000
"""
return self.n_times
def load_bad_channels(self, bad_file=None, force=False):
"""
Mark channels as bad from a text file
This function operates mostly in the style of the C function
``mne_mark_bad_channels``.
Parameters
----------
bad_file : string
File name of the text file containing bad channels
If bad_file = None, bad channels are cleared, but this
is more easily done directly as raw.info['bads'] = [].
force : boolean
Whether or not to force bad channel marking (of those
that exist) if channels are not found, instead of
raising an error.
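        Examples
        --------
        Illustrative call; ``'bads.txt'`` is a hypothetical text file with
        one channel name per line::
            >>> raw.load_bad_channels('bads.txt')  # doctest: +SKIP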
"""
if bad_file is not None:
# Check to make sure bad channels are there
names = frozenset(self.info['ch_names'])
with open(bad_file) as fid:
bad_names = [l for l in fid.read().splitlines() if l]
names_there = [ci for ci in bad_names if ci in names]
count_diff = len(bad_names) - len(names_there)
if count_diff > 0:
if not force:
raise ValueError('Bad channels from:\n%s\n not found '
'in:\n%s' % (bad_file,
self._filenames[0]))
else:
warn('%d bad channels from:\n%s\nnot found in:\n%s'
% (count_diff, bad_file, self._filenames[0]))
self.info['bads'] = names_there
else:
self.info['bads'] = []
def append(self, raws, preload=None):
"""Concatenate raw instances as if they were continuous
Parameters
----------
raws : list, or Raw instance
list of Raw instances to concatenate to the current instance
(in order), or a single raw instance to concatenate.
preload : bool, str, or None (default None)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory). If preload is
None, preload=True or False is inferred using the preload status
of the raw files passed in.
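        Examples
        --------
        Illustrative call; assumes ``raw`` and ``raw2`` are compatible Raw
        instances (same channels, sampling rate, and bads)::
            >>> raw.append(raw2)  # doctest: +SKIP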
"""
if not isinstance(raws, list):
raws = [raws]
# make sure the raws are compatible
all_raws = [self]
all_raws += raws
_check_raw_compatibility(all_raws)
# deal with preloading data first (while files are separate)
all_preloaded = self.preload and all(r.preload for r in raws)
if preload is None:
if all_preloaded:
preload = True
else:
preload = False
if preload is False:
if self.preload:
self._data = None
self.preload = False
else:
# do the concatenation ourselves since preload might be a string
nchan = self.info['nchan']
c_ns = np.cumsum([rr.n_times for rr in ([self] + raws)])
nsamp = c_ns[-1]
if not self.preload:
this_data = self._read_segment()
else:
this_data = self._data
# allocate the buffer
if isinstance(preload, string_types):
_data = np.memmap(preload, mode='w+', dtype=this_data.dtype,
shape=(nchan, nsamp))
else:
_data = np.empty((nchan, nsamp), dtype=this_data.dtype)
_data[:, 0:c_ns[0]] = this_data
for ri in range(len(raws)):
if not raws[ri].preload:
# read the data directly into the buffer
data_buffer = _data[:, c_ns[ri]:c_ns[ri + 1]]
raws[ri]._read_segment(data_buffer=data_buffer)
else:
_data[:, c_ns[ri]:c_ns[ri + 1]] = raws[ri]._data
self._data = _data
self.preload = True
# now combine information from each raw file to construct new self
for r in raws:
self._first_samps = np.r_[self._first_samps, r._first_samps]
self._last_samps = np.r_[self._last_samps, r._last_samps]
self._raw_extras += r._raw_extras
self._filenames += r._filenames
self.annotations = _combine_annotations((self.annotations,
r.annotations),
self._last_samps,
self._first_samps,
self.info['sfreq'])
self._update_times()
if not (len(self._first_samps) == len(self._last_samps) ==
len(self._raw_extras) == len(self._filenames)):
raise RuntimeError('Append error') # should never happen
def close(self):
"""Clean up the object.
Does nothing for objects that close their file descriptors.
Things like RawFIF will override this method.
"""
pass
def copy(self):
""" Return copy of Raw instance
"""
return deepcopy(self)
def __repr__(self):
name = self._filenames[0]
name = 'None' if name is None else op.basename(name)
size_str = str(sizeof_fmt(self._size)) # str in case it fails -> None
size_str += ', data%s loaded' % ('' if self.preload else ' not')
s = ('%s, n_channels x n_times : %s x %s (%0.1f sec), ~%s'
% (name, len(self.ch_names), self.n_times, self.times[-1],
size_str))
return "<%s | %s>" % (self.__class__.__name__, s)
def add_events(self, events, stim_channel=None):
"""Add events to stim channel
Parameters
----------
events : ndarray, shape (n_events, 3)
Events to add. The first column specifies the sample number of
each event, the second column is ignored, and the third column
provides the event value. If events already exist in the Raw
instance at the given sample numbers, the event values will be
added together.
stim_channel : str | None
Name of the stim channel to add to. If None, the config variable
'MNE_STIM_CHANNEL' is used. If this is not found, it will default
to 'STI 014'.
Notes
-----
Data must be preloaded in order to add events.
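        Examples
        --------
        Illustrative call; assumes preloaded data, and the sample number and
        event value are arbitrary (they must lie within the recording)::
            >>> import numpy as np  # doctest: +SKIP
            >>> events = np.array([[raw.first_samp + 100, 0, 1]])  # doctest: +SKIP
            >>> raw.add_events(events, stim_channel='STI 014')  # doctest: +SKIP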
"""
if not self.preload:
raise RuntimeError('cannot add events unless data are preloaded')
events = np.asarray(events)
if events.ndim != 2 or events.shape[1] != 3:
raise ValueError('events must be shape (n_events, 3)')
stim_channel = _get_stim_channel(stim_channel, self.info)
pick = pick_channels(self.ch_names, stim_channel)
if len(pick) == 0:
raise ValueError('Channel %s not found' % stim_channel)
pick = pick[0]
idx = events[:, 0].astype(int)
if np.any(idx < self.first_samp) or np.any(idx > self.last_samp):
raise ValueError('event sample numbers must be between %s and %s'
% (self.first_samp, self.last_samp))
if not all(idx == events[:, 0]):
raise ValueError('event sample numbers must be integers')
self._data[pick, idx - self.first_samp] += events[:, 2]
def _get_buffer_size(self, buffer_size_sec=None):
"""Helper to get the buffer size"""
if buffer_size_sec is None:
if 'buffer_size_sec' in self.info:
buffer_size_sec = self.info['buffer_size_sec']
else:
buffer_size_sec = 10.0
return int(np.ceil(buffer_size_sec * self.info['sfreq']))
def _check_preload(raw, msg):
"""Helper to ensure data are preloaded"""
if not raw.preload:
raise RuntimeError(msg + ' requires raw data to be loaded. Use '
'preload=True (or string) in the constructor or '
'raw.load_data().')
def _allocate_data(data, data_buffer, data_shape, dtype):
"""Helper to data in memory or in memmap for preloading"""
if data is None:
# if not already done, allocate array with right type
if isinstance(data_buffer, string_types):
# use a memmap
data = np.memmap(data_buffer, mode='w+',
dtype=dtype, shape=data_shape)
else:
data = np.zeros(data_shape, dtype=dtype)
return data
def _index_as_time(index, sfreq, first_samp=0, use_first_samp=False):
"""Convert indices to time
Parameters
----------
index : list-like | int
List of ints or int representing points in time.
use_first_samp : boolean
If True, the time returned is relative to the session onset, else
relative to the recording onset.
Returns
-------
times : ndarray
Times corresponding to the index supplied.
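    Examples
    --------
    Illustrative only; assumes a hypothetical 1000 Hz sampling rate, so
    samples 0 and 500 map to 0.0 s and 0.5 s::
        >>> _index_as_time([0, 500], sfreq=1000.)  # doctest: +SKIP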
"""
times = np.atleast_1d(index) + (first_samp if use_first_samp else 0)
return times / sfreq
class _RawShell():
"""Used for creating a temporary raw object"""
def __init__(self):
self.first_samp = None
self.last_samp = None
self._cals = None
self._rawdir = None
self._projector = None
@property
def n_times(self):
return self.last_samp - self.first_samp + 1
###############################################################################
# Writing
def _write_raw(fname, raw, info, picks, fmt, data_type, reset_range, start,
stop, buffer_size, projector, drop_small_buffer,
split_size, part_idx, prev_fname):
"""Write raw file with splitting
"""
# we've done something wrong if we hit this
n_times_max = len(raw.times)
if start >= stop or stop > n_times_max:
raise RuntimeError('Cannot write raw file with no data: %s -> %s '
'(max: %s) requested' % (start, stop, n_times_max))
if part_idx > 0:
# insert index in filename
base, ext = op.splitext(fname)
use_fname = '%s-%d%s' % (base, part_idx, ext)
else:
use_fname = fname
logger.info('Writing %s' % use_fname)
fid, cals = _start_writing_raw(use_fname, info, picks, data_type,
reset_range, raw.annotations)
use_picks = slice(None) if picks is None else picks
first_samp = raw.first_samp + start
if first_samp != 0:
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first_samp)
# previous file name and id
if part_idx > 0 and prev_fname is not None:
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_PREV_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, prev_fname)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, info['meas_id'])
write_int(fid, FIFF.FIFF_REF_FILE_NUM, part_idx - 1)
end_block(fid, FIFF.FIFFB_REF)
pos_prev = fid.tell()
if pos_prev > split_size:
raise ValueError('file is larger than "split_size" after writing '
'measurement information, you must use a larger '
'value for split size: %s plus enough bytes for '
'the chosen buffer_size' % pos_prev)
next_file_buffer = 2 ** 20 # extra cushion for last few post-data tags
for first in range(start, stop, buffer_size):
# Write blocks <= buffer_size in size
last = min(first + buffer_size, stop)
data, times = raw[use_picks, first:last]
assert len(times) == last - first
if projector is not None:
data = np.dot(projector, data)
if ((drop_small_buffer and (first > start) and
(len(times) < buffer_size))):
logger.info('Skipping data chunk due to small buffer ... '
'[done]')
break
logger.info('Writing ...')
_write_raw_buffer(fid, data, cals, fmt)
pos = fid.tell()
this_buff_size_bytes = pos - pos_prev
overage = pos - split_size + next_file_buffer
if overage > 0:
# This should occur on the first buffer write of the file, so
# we should mention the space required for the meas info
raise ValueError(
'buffer size (%s) is too large for the given split size (%s) '
'by %s bytes after writing info (%s) and leaving enough space '
'for end tags (%s): decrease "buffer_size_sec" or increase '
'"split_size".' % (this_buff_size_bytes, split_size, overage,
pos_prev, next_file_buffer))
# Split files if necessary, leave some space for next file info
# make sure we check to make sure we actually *need* another buffer
# with the "and" check
if pos >= split_size - this_buff_size_bytes - next_file_buffer and \
first + buffer_size < stop:
next_fname, next_idx = _write_raw(
fname, raw, info, picks, fmt,
data_type, reset_range, first + buffer_size, stop, buffer_size,
projector, drop_small_buffer, split_size,
part_idx + 1, use_fname)
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, info['meas_id'])
write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
end_block(fid, FIFF.FIFFB_REF)
break
pos_prev = pos
logger.info('Closing %s [done]' % use_fname)
if info.get('maxshield', False):
end_block(fid, FIFF.FIFFB_SMSH_RAW_DATA)
else:
end_block(fid, FIFF.FIFFB_RAW_DATA)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
return use_fname, part_idx
def _start_writing_raw(name, info, sel=None, data_type=FIFF.FIFFT_FLOAT,
reset_range=True, annotations=None):
"""Start write raw data in file
Data will be written in float
Parameters
----------
name : string
Name of the file to create.
info : dict
Measurement info.
sel : array of int, optional
Indices of channels to include. By default all channels are included.
data_type : int
The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
5 (FIFFT_DOUBLE), 16 (FIFFT_DAU_PACK16), or 3 (FIFFT_INT) for raw data.
reset_range : bool
If True, the info['chs'][k]['range'] parameter will be set to unity.
annotations : instance of Annotations or None
The annotations to write.
Returns
-------
fid : file
The file descriptor.
cals : list
calibration factors.
"""
#
# Measurement info
#
info = pick_info(info, sel)
#
# Create the file and save the essentials
#
fid = start_file(name)
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
cals = []
for k in range(info['nchan']):
#
# Scan numbers may have been messed up
#
info['chs'][k]['scanno'] = k + 1 # scanno starts at 1 in FIF format
if reset_range is True:
info['chs'][k]['range'] = 1.0
cals.append(info['chs'][k]['cal'] * info['chs'][k]['range'])
write_meas_info(fid, info, data_type=data_type, reset_range=reset_range)
#
# Annotations
#
if annotations is not None:
start_block(fid, FIFF.FIFFB_MNE_ANNOTATIONS)
write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, annotations.onset)
write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX,
annotations.duration + annotations.onset)
# To allow : in description, they need to be replaced for serialization
write_name_list(fid, FIFF.FIFF_COMMENT, [d.replace(':', ';') for d in
annotations.description])
if annotations.orig_time is not None:
write_double(fid, FIFF.FIFF_MEAS_DATE, annotations.orig_time)
end_block(fid, FIFF.FIFFB_MNE_ANNOTATIONS)
#
# Start the raw data
#
if info.get('maxshield', False):
start_block(fid, FIFF.FIFFB_SMSH_RAW_DATA)
else:
start_block(fid, FIFF.FIFFB_RAW_DATA)
return fid, cals
def _write_raw_buffer(fid, buf, cals, fmt):
"""Write raw buffer
Parameters
----------
fid : file descriptor
an open raw data file.
buf : array
The buffer to write.
cals : array
Calibration factors.
fmt : str
'short', 'int', 'single', or 'double' for 16/32 bit int or 32/64 bit
float for each item. This will be doubled for complex datatypes. Note
that short and int formats cannot be used for complex data.
"""
if buf.shape[0] != len(cals):
raise ValueError('buffer and calibration sizes do not match')
if fmt not in ['short', 'int', 'single', 'double']:
raise ValueError('fmt must be "short", "single", or "double"')
if np.isrealobj(buf):
if fmt == 'short':
write_function = write_dau_pack16
elif fmt == 'int':
write_function = write_int
elif fmt == 'single':
write_function = write_float
else:
write_function = write_double
else:
if fmt == 'single':
write_function = write_complex64
elif fmt == 'double':
write_function = write_complex128
else:
raise ValueError('only "single" and "double" supported for '
'writing complex data')
buf = buf / np.ravel(cals)[:, None]
write_function(fid, FIFF.FIFF_DATA_BUFFER, buf)
def _my_hilbert(x, n_fft=None, envelope=False):
""" Compute Hilbert transform of signals w/ zero padding.
Parameters
----------
x : array, shape (n_times)
The signal to convert
n_fft : int
Size of the FFT to perform, must be at least ``len(x)``.
The signal will be cut back to original length.
envelope : bool
Whether to compute amplitude of the hilbert transform in order
to return the signal envelope.
Returns
-------
out : array, shape (n_times)
The hilbert transform of the signal, or the envelope.
"""
from scipy.signal import hilbert
n_x = x.shape[-1]
out = hilbert(x, N=n_fft)[:n_x]
if envelope is True:
out = np.abs(out)
return out
def _check_raw_compatibility(raw):
"""Check to make sure all instances of Raw
in the input list raw have compatible parameters"""
for ri in range(1, len(raw)):
if not isinstance(raw[ri], type(raw[0])):
raise ValueError('raw[%d] type must match' % ri)
if not raw[ri].info['nchan'] == raw[0].info['nchan']:
raise ValueError('raw[%d][\'info\'][\'nchan\'] must match' % ri)
if not raw[ri].info['bads'] == raw[0].info['bads']:
raise ValueError('raw[%d][\'info\'][\'bads\'] must match' % ri)
if not raw[ri].info['sfreq'] == raw[0].info['sfreq']:
raise ValueError('raw[%d][\'info\'][\'sfreq\'] must match' % ri)
if not set(raw[ri].info['ch_names']) == set(raw[0].info['ch_names']):
raise ValueError('raw[%d][\'info\'][\'ch_names\'] must match' % ri)
if not all(raw[ri]._cals == raw[0]._cals):
raise ValueError('raw[%d]._cals must match' % ri)
if len(raw[0].info['projs']) != len(raw[ri].info['projs']):
raise ValueError('SSP projectors in raw files must be the same')
if not all(_proj_equal(p1, p2) for p1, p2 in
zip(raw[0].info['projs'], raw[ri].info['projs'])):
raise ValueError('SSP projectors in raw files must be the same')
if not all(r.orig_format == raw[0].orig_format for r in raw):
warn('raw files do not all have the same data format, could result in '
'precision mismatch. Setting raw.orig_format="unknown"')
raw[0].orig_format = 'unknown'
def concatenate_raws(raws, preload=None, events_list=None):
"""Concatenate raw instances as if they were continuous. Note that raws[0]
is modified in-place to achieve the concatenation.
Parameters
----------
raws : list
list of Raw instances to concatenate (in order).
preload : bool, or None
If None, preload status is inferred using the preload status of the
raw files passed in. True or False sets the resulting raw file to
have or not have data preloaded.
events_list : None | list
The events to concatenate. Defaults to None.
Returns
-------
raw : instance of Raw
The result of the concatenation (first Raw instance passed in).
    events : ndarray of int, shape (n_events, 3)
        The events. Only returned if ``events_list`` is not None.
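    Examples
    --------
    Illustrative call; assumes ``raw1`` and ``raw2`` are compatible Raw
    instances (``raw1`` is modified in place)::
        >>> raw = concatenate_raws([raw1, raw2])  # doctest: +SKIP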
"""
if events_list is not None:
if len(events_list) != len(raws):
            raise ValueError('`raws` and `events_list` are required '
                             'to be of the same length')
first, last = zip(*[(r.first_samp, r.last_samp) for r in raws])
events = concatenate_events(events_list, first, last)
raws[0].append(raws[1:], preload)
if events_list is None:
return raws[0]
else:
return raws[0], events
def _check_update_montage(info, montage, path=None, update_ch_names=False):
""" Helper function for eeg readers to add montage"""
if montage is not None:
if not isinstance(montage, (string_types, Montage)):
err = ("Montage must be str, None, or instance of Montage. "
"%s was provided" % type(montage))
raise TypeError(err)
if montage is not None:
if isinstance(montage, string_types):
montage = read_montage(montage, path=path)
_set_montage(info, montage, update_ch_names=update_ch_names)
missing_positions = []
exclude = (FIFF.FIFFV_EOG_CH, FIFF.FIFFV_MISC_CH,
FIFF.FIFFV_STIM_CH)
for ch in info['chs']:
if not ch['kind'] in exclude:
if np.unique(ch['loc']).size == 1:
missing_positions.append(ch['ch_name'])
# raise error if positions are missing
if missing_positions:
raise KeyError(
"The following positions are missing from the montage "
"definitions: %s. If those channels lack positions "
"because they are EOG channels use the eog parameter."
% str(missing_positions))
| bsd-3-clause |
dangra/scrapy-sci | wallpaper_demo/wallpaper/classifier_pipelines.py | 2 | 1983 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
from scrapy.contrib.exporter import JsonItemExporter
from scrapy.exceptions import DropItem
from sklearn.linear_model import LogisticRegression
from scrapy_sci.status import Status, Reader
from scrapy_sci.classifier import ClassifierFactory
class ClassifiersPipeline(object):
def __init__(self):
self.status = Status()
self.classifiers = []
self.exporters = {}
for classifier in self.status.classifiers.keys():
CF = ClassifierFactory(self.status.classifiers[classifier])
CF.create_data_set("both")
            lc = CF.create_classifier(LogisticRegression(C=1e5), self.status.classifiers[classifier]['features']())
lc.fit()
self.classifiers.append((classifier, lc))
self.classifiers = sorted(self.classifiers, key = lambda a: a[1].estimate_accuracy(5, verbose=True))
print "Classifier {0} needs the most improvement; selected for export".format(self.classifiers[0][0])
for classification in self.status.classifiers[self.classifiers[0][0]]['classifications']:
f = file("{0}.json".format(classification), "wb")
self.exporters[classification] = JsonItemExporter(f)
    def process_item(self, item, spider):
        export_classification = None
        for i, (name, classifier) in enumerate(self.classifiers):
            item_classification = classifier.classify(item)
            if i == 0:
                export_classification = item_classification
            if not self.status.classifiers[name]['classifications'][item_classification]:
                raise DropItem("Item removed by classifier: {0}".format(name))
        # Item survived every classifier: export it under the classification
        # assigned by the classifier selected for export (index 0).
        if export_classification is not None:
            self.exporters[export_classification].export_item(item)
        return item
| bsd-3-clause |
elkingtonmcb/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
Bleyddyn/malpi | exp/ac-plot.py | 1 | 2120 | import os
from optparse import OptionParser
def getOptions():
usage = "python pg-plot.py <data>"
parser = OptionParser( usage=usage )
(options, args) = parser.parse_args()
if len(args) != 1:
print usage
exit()
return (options, args)
#options, args = getOptions()
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy import stats
from collections import deque
import ast
plt.figure(1,figsize=(16, 18), dpi=80)
ax1=plt.subplot(211)
plt.xlabel('Episodes')
plt.ylabel('Best Reward')
#cat current_run.txt
#tail -50 CartPole-v0_ac_won.txt
count = 100
if True:
if os.path.exists('current_run.txt'):
with open('current_run.txt', 'r') as f:
runtxt = f.read()
try:
cnt = int(runtxt[runtxt.find('Iteration')+len('Iteration'):])
if cnt > 0 and cnt < 10000:
count = cnt + 1
except:
print "Nothing in current_run.txt, defaulting to 100: " + runtxt
#score_ind = 13 # for pg-pole.py
#abbr = "pg"
score_ind = 5 # for ac-pole.py
abbr = "ac"
#[0.4870887644984899, 0.01731657794205047, 0.06378070828897703, 0.9948356417679789, 0.000766760240096467, 24.75, 5000.0]
with open('CartPole-v0_'+abbr+'_won.txt', 'r') as f:
lines = deque(f, maxlen=count)
y = []
sorted_lines = []
for line in lines:
line = line.rstrip()
resd = ast.literal_eval(line)
if isinstance(resd,list):
sorted_lines.append(resd)
best = resd[score_ind]
y.append(best)
sorted_lines = sorted( sorted_lines, key=lambda a_entry: a_entry[score_ind] )
for line in sorted_lines:
print line
print "# of runs: %d" % (len(y),)
print "Min/Max: %f/%f" % (np.min(y),np.max(y))
print "Mean/stdev: %f/%f" % (np.mean(y),np.std(y))
slope, intercept, r_value, p_value, std_err = stats.linregress(range(len(y)), y)
print "Slope/intercept: %f/%f" % (slope, intercept)
#plt.plot(data[-500:,0],data[-500:,1])
ax1.plot(y)
#plt.figure(2)
plt.show()
# plt.pause(10)
| mit |
joernhees/scikit-learn | sklearn/preprocessing/__init__.py | 268 | 1319 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
| bsd-3-clause |
jenfly/monsoon-onset | scripts/save-dailyrel-precip.py | 1 | 2510 | import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
sys.path.append('/home/jwalker/dynamics/python/monsoon-onset')
import xarray as xray
import numpy as np
import collections
import pandas as pd
import scipy
import atmos as atm
import indices
import utils
import precipdat
# ----------------------------------------------------------------------
version, yearstr = 'merra2', '1980-2015'
datadir = atm.homedir() + 'datastore/%s/analysis/' % version
onset_nm = 'CHP_MFC'
indfile = atm.homedir() + ('datastore/%s/analysis/%s_index_%s_%s.nc' %
(version, version, onset_nm, yearstr))
ind_nm, npre, npost = 'onset', 140, 230
#ind_nm, npre, npost = 'retreat', 270, 100
pcp_nm = 'gpcp'
years = np.arange(1997, 2016)
#years = np.arange(1980, 2015) # CMAP last full year is 2014
#pcpfiles = atm.homedir() + 'datastore/cmap/cmap.enhanced.precip.pentad.mean.nc'
pcpfiles = [atm.homedir() + 'datastore/gpcp/gpcp_daily_%d.nc' % yr
for yr in years]
savefile = datadir + '%s_dailyrel_%s_%s-%d.nc' % (pcp_nm, onset_nm, min(years),
max(years))
if ind_nm == 'retreat':
savefile = savefile.replace('dailyrel', 'dailyrel_retreat')
subset_dict = {'lon' : (40, 120)}
# ----------------------------------------------------------------------
# Data and calcs
# Onset index
with xray.open_dataset(indfile) as index:
index.load()
index = index.sel(year=years)
d0 = index[ind_nm].values
# Precip data
if pcp_nm == 'cmap':
pcp = precipdat.read_cmap(pcpfiles, yearmin=min(years), yearmax=max(years))
# Interpolate to daily resolution
name, attrs, coords, dimnames = atm.meta(pcp)
days = np.arange(3, 364)
interp_func = scipy.interpolate.interp1d(pcp['day'], pcp, axis=1)
vals = interp_func(days)
coords['day'] = xray.DataArray(days, coords={'day' : days})
pcp = xray.DataArray(vals, dims=dimnames, coords=coords, name=name,
attrs=attrs)
else:
pcp = atm.combine_daily_years(None, pcpfiles, years, yearname='year',
subset_dict=subset_dict)
# Wrap from following year to get extended daily range
daymin = min(d0) - npre
daymax = max(d0) + npost
pcp = utils.wrapyear_all(pcp, daymin=daymin, daymax=daymax)
# Daily relative to onset/withdrawal
pcp_rel = utils.daily_rel2onset(pcp, d0, npre, npost)
print('Saving to ' + savefile)
atm.save_nc(savefile, pcp_rel)
| mit |
tedmeeds/tcga_encoder | tcga_encoder/models/mutation_by_position/main.py | 1 | 4021 | import sys, os
#print sys.path
sys.path.insert(0, os.getcwd())
#print sys.path
from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
from tcga_encoder.definitions.tcga import *
from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
#from tcga_encoder.algorithms import *
from tcga_encoder.data.positions.process_gene_mutation_sequence import main as position_view
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("talk")
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
if __name__ == "__main__":
print sys.path
gene = sys.argv[1]
assembly = int(sys.argv[2])
if len(sys.argv)>3:
tissue = sys.argv[3]
if tissue == "none":
tissue = None
#print "*** Filtering tissue %s"%(tissue)
else:
tissue = None
if len(sys.argv)>4:
target = sys.argv[4]
#print "*** Filtering tissue %s"%(tissue)
else:
target = "ADAM6"
data_location = "data/broad_firehose/stddata__2016_01_28_processed_new/20160128/DNA_by_gene_small"
#data_location = "data/broad_firehose/stddata__2016_01_28_processed_new/20160128/DNA_by_gene"
save_location = os.path.join( HOME_DIR, "results/tcga_position_mutations" )
check_and_mkdir(save_location)
groups = [['Silent'],['Missense_Mutation'],['Nonsense_Mutation','Nonstop_Mutation'],['In_Frame_Del','In_Frame_Ins'],['Frame_Shift_Del','Frame_Shift_Ins'],['Splice_Site','RNA']]
#rna_data_location = os.path.join( HOME_DIR, "data/broad_processed_post_recomb/20160128/pan_tiny_multi_set" )
rna_data_location = os.path.join( HOME_DIR, "data/broad_processed_post_recomb/20160128/pan_small_multi_set" )
rna = pd.HDFStore( rna_data_location + "/data.h5" )
a,b,d,s,ms,f,seq,exons,x_ticks, fig = position_view( gene, assembly, tissue, save_location = save_location, data_location = data_location, groups=groups )
disease_barcodes = []
#d = d.loc[b]
d_bc = d[ ["admin.disease_code","patient.bcr_patient_barcode"] ].values
for x in d_bc:
disease_barcodes.append( "%s_%s"%(x[0],x[1]))
RSEM = rna["/RNA/RSEM"].loc[ disease_barcodes ].fillna(0)
RSEM_T = RSEM[target].fillna(0)
R = RSEM_T.values
all_locs = []
all_r = []
for i in range( len(ms) ):
rna_val = RSEM_T.loc[ "%s_%s"%(a[i],b[i])]
if rna_val.__class__ == pd.core.series.Series:
rna_val = rna_val.values[0]
print a[i],b[i],rna_val
#rna_val = R[i]
locs = pp.find(ms[i].sum(0))
for loc in locs:
all_locs.append( loc )
all_r.append( rna_val )
all_locs = np.array(all_locs)
all_r = np.array(all_r)
fig2 = pp.figure( figsize=(14,5))
ax2 = fig2.add_subplot(211)
ax2.semilogy( all_locs, all_r, "bo", mec='k',mew=1, ms=5, alpha=0.5)
ax2.set_xticks(x_ticks, minor=False)
ax2.set_yticks(ax2.get_yticks(), minor=False)
ax2.set_xticklabels( x_ticks, fontsize=8, rotation='vertical' )
pp.xlim(0,len(s[0]))
pp.title( "Mutations = %s Target = %s"%(gene,target))
ax2 = fig2.add_subplot(212)
ax2.plot( all_locs, all_r, "bo", mec='k',mew=1, ms=5, alpha=0.5)
ax2.set_xticks(x_ticks, minor=False)
ax2.set_yticks(ax2.get_yticks(), minor=False)
ax2.set_xticklabels( x_ticks, fontsize=8, rotation='vertical' )
pp.xlim(0,len(s[0]))
#pp.title( "Mutations = %s Target = %s"%(gene,target))
if save_location is not None:
if tissue is None:
pp.savefig( save_location + "/%s_mutations_target_%s.png"%(gene,target), fmt="png" )
else:
pp.savefig( save_location + "/%s_%s_mutations_target_%s.png"%(gene,tissue,target), fmt="png" )
pp.show()
#a,b,d,s,ms,f,seq,exons,x_ticks, fig = position_view( gene, assembly, tissue, save_location = save_location, data_location = data_location, groups=groups, R = RSEM_T )
#fig = plot_positions( f, ms, x_ticks, gene, seq, s, plot_all = plot_all, colors=colors, tissue = tissue, groups = groups, save_location = save_location, R = RSEM_T ) | mit |
ARM-software/lisa | lisa/analysis/notebook.py | 2 | 3974 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2019, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Notebook Analysis Module """
import pandas as pd
import functools
import __main__ as main
from lisa.analysis.base import TraceAnalysisBase
from lisa.trace import requires_events
from lisa.datautils import df_refit_index, df_filter, SignalDesc
class NotebookAnalysis(TraceAnalysisBase):
"""
Support for custom Notebook-defined plots
Attribute lookup will be resolved in ``__main__`` module, which contains
all names created in cells of Jupyter notebooks.
Functions named ``plot_*`` have a special behavior: they are expected to
    take a :class:`lisa.trace.Trace` as their first parameter and a named
    ``axis`` parameter (:class:`matplotlib.axes.Axes`) to plot on.
example::
from lisa.trace import Trace
trace = Trace('trace.dat', events=['sched_switch'])
# Define a plot method in any cell
def plot_foo(trace, y, axis):
print(f'Plotting horizontal line at level: {y}')
axis.axhline(y=y)
# Just lookup the plot function
trace.analysis.notebook.plot_foo(3)
"""
name = 'notebook'
def __getattr__(self, attr):
val = getattr(main, attr)
if attr.startswith('plot_'):
f = val
# swallow "local_fig" as it is usually not needed for the notebook
# usage and pass the trace directly instead of the analysis
@TraceAnalysisBase.plot_method(return_axis=False)
@functools.wraps(f)
def wrapper(self, *args, local_fig, **kwargs):
return f(self.trace, *args, **kwargs)
val = wrapper
if callable(val):
# bind the function to the analysis instance to give a bound method
return val.__get__(self, type(self))
else:
return val
@TraceAnalysisBase.plot_method(return_axis=False)
def plot_event_field(self, event: str, field: str, axis, local_fig, filter_columns=None, filter_f=None):
"""
Plot a signal represented by the filtered values of a field of an event.
:param event: FTrace event name of interest.
:type event: str
:param field: Name of the field of ``event``.
:type field: str
:param filter_columns: Pre-filter the dataframe using
:func:`lisa.datautils.df_filter`. Also, a signal will be inferred
from the column names being used and will be passed to
:meth:`lisa.trace.Trace.df_event`.
:type filter_columns: dict or None
:param filter_f: Function used to filter the dataframe of the event.
The function must take a dataframe as only parameter and return
a filtered dataframe. It is applied after ``filter_columns`` filter.
:type filter_f: collections.abc.Callable
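        A hedged usage sketch (the event and field names below are common
        ftrace ones but may not be present in every trace, and ``'myapp'``
        is a made-up task name)::
            trace.analysis.notebook.plot_event_field(
                'sched_switch', 'next_prio',
                filter_columns={'next_comm': 'myapp'})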
"""
trace = self.trace
if filter_columns:
signals = [SignalDesc(event, sorted(filter_columns.keys()))]
else:
signals = None
df = trace.df_event(event, signals=signals)
if filter_columns:
df = df_filter(df, filter_columns)
if filter_f:
df = filter_f(df)
df = df_refit_index(df, window=trace.window)
df[[field]].plot(ax=axis, drawstyle='steps-post')
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
| apache-2.0 |
jayflo/scikit-learn | sklearn/metrics/__init__.py | 214 | 3440 | """
The :mod:`sklearn.metrics` module includes score functions, performance
metrics, pairwise metrics, and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
mtrbean/scipy | scipy/signal/wavelets.py | 23 | 10483 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.special import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
        The coefficients of the FIR low-pass filter.
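    Examples
    --------
    A small sanity check (added as an illustrative sketch): the returned
    low-pass filter has ``2*p`` taps and its coefficients sum to ``sqrt(2)``.
    >>> import numpy as np
    >>> from scipy.signal import daub
    >>> h = daub(2)
    >>> h.shape
    (4,)
    >>> np.allclose(np.sum(h), np.sqrt(2))
    True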
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
    hk : array_like
        Coefficients of the low-pass filter.
    Returns
    -------
    ndarray
        Coefficients of the corresponding high-pass filter, obtained by
        reversing `hk` and alternating the signs of its entries.
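    Examples
    --------
    A small sketch (added for illustration): the low- and high-pass filters
    of an orthogonal wavelet are orthogonal to each other.
    >>> import numpy as np
    >>> from scipy.signal import daub, qmf
    >>> h = daub(2)
    >>> g = qmf(h)
    >>> np.allclose(np.dot(h, g), 0.0)
    True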
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
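    Examples
    --------
    A shape-only sketch (added for illustration) using the Daubechies-2
    filter at a modest refinement level:
    >>> from scipy.signal import cascade, daub
    >>> x, phi, psi = cascade(daub(2), J=5)
    >>> x.shape == phi.shape == psi.shape
    True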
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=np.float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {}
bitdic['0'] = v / sm
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of w.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
The complete version of the Morlet wavelet, with a correction
term to improve admissibility. For w greater than 5, the
correction term is negligible.
Note that the energy of the return wavelet is not normalised
according to s.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where r is the sampling rate.
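    Examples
    --------
    A minimal sketch (parameters are illustrative only): sample the complete
    wavelet and check that a complex array of the requested length comes back.
    >>> import numpy as np
    >>> from scipy.signal import morlet
    >>> wav = morlet(100, w=5.0, s=1.0, complete=True)
    >>> wav.shape
    (100,)
    >>> np.iscomplexobj(wav)
    True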
"""
x = linspace(-s * 2 * pi, s * 2 * pi, M)
output = exp(1j * w * x)
if complete:
output -= exp(-0.5 * (w**2))
output *= exp(-0.5 * (x**2)) * pi**(-0.25)
return output
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A (1 - x^2/a^2) exp(-x^2/2 a^2)``,
where ``A = 2/sqrt(3a)pi^1/4``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(width,length)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
    >>> length = min(10 * widths[ii], len(data))
    >>> cwt[ii,:] = scipy.signal.convolve(data, wavelet(length,
    ... widths[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
output = np.zeros([len(widths), len(data)])
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output
| bsd-3-clause |
Crompulence/cpl-library | utils/XKCD_plot_generator.py | 2 | 8219 | """
XKCD plot generator
-------------------
Author: Jake Vanderplas
This is a script that will take any matplotlib line diagram, and convert it
to an XKCD-style plot. It will work for plots with line & text elements,
including axes labels and titles (but not axes tick labels).
The idea for this comes from work by Damon McDougall
http://www.mail-archive.com/[email protected]/msg25499.html
"""
import numpy as np
import pylab as pl
from scipy import interpolate, signal
import matplotlib.font_manager as fm
# We need a special font for the code below. It can be downloaded this way:
import os
import urllib2
if not os.path.exists('Humor-Sans.ttf'):
fhandle = urllib2.urlopen('http://antiyawn.com/uploads/Humor-Sans-1.0.ttf')
open('Humor-Sans.ttf', 'wb').write(fhandle.read())
def xkcd_line(x, y, xlim=None, ylim=None,
mag=1.0, f1=30, f2=0.05, f3=15):
"""
Mimic a hand-drawn line from (x, y) data
Parameters
----------
x, y : array_like
arrays to be modified
xlim, ylim : data range
the assumed plot range for the modification. If not specified,
they will be guessed from the data
mag : float
magnitude of distortions
f1, f2, f3 : int, float, int
filtering parameters. f1 gives the size of the window, f2 gives
the high-frequency cutoff, f3 gives the size of the filter
Returns
-------
x, y : ndarrays
The modified lines
"""
x = np.asarray(x)
y = np.asarray(y)
# get limits for rescaling
if xlim is None:
xlim = (x.min(), x.max())
if ylim is None:
ylim = (y.min(), y.max())
if xlim[1] == xlim[0]:
xlim = ylim
if ylim[1] == ylim[0]:
ylim = xlim
# scale the data
x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])
# compute the total distance along the path
dx = x_scaled[1:] - x_scaled[:-1]
dy = y_scaled[1:] - y_scaled[:-1]
dist_tot = np.sum(np.sqrt(dx * dx + dy * dy))
# number of interpolated points is proportional to the distance
Nu = int(200 * dist_tot)
u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)
# interpolate curve at sampled points
k = min(3, len(x) - 1)
res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
x_int, y_int = interpolate.splev(u, res[0])
# we'll perturb perpendicular to the drawn line
dx = x_int[2:] - x_int[:-2]
dy = y_int[2:] - y_int[:-2]
dist = np.sqrt(dx * dx + dy * dy)
# create a filtered perturbation
coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3))
response = signal.lfilter(b, 1, coeffs)
x_int[1:-1] += response * dy / dist
y_int[1:-1] += response * dx / dist
# un-scale data
x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]
return x_int, y_int
def XKCDify(ax, mag=1.0,
f1=50, f2=0.01, f3=15,
bgcolor='w',
xaxis_loc=None,
yaxis_loc=None,
xaxis_arrow='+',
yaxis_arrow='+',
ax_extend=0.1,
expand_axes=False):
"""Make axis look hand-drawn
This adjusts all lines, text, legends, and axes in the figure to look
like xkcd plots. Other plot elements are not modified.
Parameters
----------
ax : Axes instance
the axes to be modified.
mag : float
the magnitude of the distortion
f1, f2, f3 : int, float, int
filtering parameters. f1 gives the size of the window, f2 gives
the high-frequency cutoff, f3 gives the size of the filter
xaxis_loc, yaxis_log : float
The locations to draw the x and y axes. If not specified, they
will be drawn from the bottom left of the plot
xaxis_arrow, yaxis_arrow : str
where to draw arrows on the x/y axes. Options are '+', '-', '+-', or ''
ax_extend : float
How far (fractionally) to extend the drawn axes beyond the original
axes limits
expand_axes : bool
if True, then expand axes to fill the figure (useful if there is only
a single axes in the figure)
"""
# Get axes aspect
ext = ax.get_window_extent().extents
aspect = (ext[3] - ext[1]) / (ext[2] - ext[0])
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xspan = xlim[1] - xlim[0]
    yspan = ylim[1] - ylim[0]
xax_lim = (xlim[0] - ax_extend * xspan,
xlim[1] + ax_extend * xspan)
yax_lim = (ylim[0] - ax_extend * yspan,
ylim[1] + ax_extend * yspan)
if xaxis_loc is None:
xaxis_loc = ylim[0]
if yaxis_loc is None:
yaxis_loc = xlim[0]
# Draw axes
xaxis = pl.Line2D([xax_lim[0], xax_lim[1]], [xaxis_loc, xaxis_loc],
linestyle='-', color='k')
yaxis = pl.Line2D([yaxis_loc, yaxis_loc], [yax_lim[0], yax_lim[1]],
linestyle='-', color='k')
    # Label axes
ax.text(xax_lim[1], xaxis_loc - 0.02 * yspan, ax.get_xlabel(),
fontsize=14, ha='right', va='top', rotation=12)
ax.text(yaxis_loc - 0.02 * xspan, yax_lim[1], ax.get_ylabel(),
fontsize=14, ha='right', va='top', rotation=78)
ax.set_xlabel('')
ax.set_ylabel('')
# Add title
ax.text(0.5 * (xax_lim[1] + xax_lim[0]), yax_lim[1],
ax.get_title(),
ha='center', va='bottom', fontsize=16)
ax.set_title('')
Nlines = len(ax.lines)
lines = [xaxis, yaxis] + [ax.lines.pop(0) for i in range(Nlines)]
for line in lines:
x, y = line.get_data()
x_int, y_int = xkcd_line(x, y, xlim, ylim,
mag, f1, f2, f3)
# create foreground and background line
lw = line.get_linewidth()
line.set_linewidth(2 * lw)
line.set_data(x_int, y_int)
# don't add background line for axes
if (line is not xaxis) and (line is not yaxis):
line_bg = pl.Line2D(x_int, y_int, color=bgcolor,
linewidth=8 * lw)
ax.add_line(line_bg)
ax.add_line(line)
# Draw arrow-heads at the end of axes lines
arr1 = 0.03 * np.array([-1, 0, -1])
arr2 = 0.02 * np.array([-1, 0, 1])
arr1[::2] += np.random.normal(0, 0.005, 2)
arr2[::2] += np.random.normal(0, 0.005, 2)
x, y = xaxis.get_data()
if '+' in str(xaxis_arrow):
ax.plot(x[-1] + arr1 * xspan * aspect,
y[-1] + arr2 * yspan,
color='k', lw=2)
if '-' in str(xaxis_arrow):
ax.plot(x[0] - arr1 * xspan * aspect,
y[0] - arr2 * yspan,
color='k', lw=2)
x, y = yaxis.get_data()
if '+' in str(yaxis_arrow):
ax.plot(x[-1] + arr2 * xspan * aspect,
y[-1] + arr1 * yspan,
color='k', lw=2)
if '-' in str(yaxis_arrow):
ax.plot(x[0] - arr2 * xspan * aspect,
y[0] - arr1 * yspan,
color='k', lw=2)
# Change all the fonts to humor-sans.
prop = fm.FontProperties(fname='Humor-Sans.ttf', size=16)
for text in ax.texts:
text.set_fontproperties(prop)
# modify legend
leg = ax.get_legend()
if leg is not None:
leg.set_frame_on(False)
for child in leg.get_children():
if isinstance(child, pl.Line2D):
x, y = child.get_data()
child.set_data(xkcd_line(x, y, mag=10, f1=100, f2=0.001))
child.set_linewidth(2 * child.get_linewidth())
if isinstance(child, pl.Text):
child.set_fontproperties(prop)
# Set the axis limits
ax.set_xlim(xax_lim[0] - 0.1 * xspan,
xax_lim[1] + 0.1 * xspan)
ax.set_ylim(yax_lim[0] - 0.1 * yspan,
yax_lim[1] + 0.1 * yspan)
# adjust the axes
ax.set_xticks([])
ax.set_yticks([])
if expand_axes:
ax.figure.set_facecolor(bgcolor)
ax.set_axis_off()
ax.set_position([0, 0, 1, 1])
return ax
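if __name__ == '__main__':
    # A small self-contained demo added for illustration (not part of the
    # original script): draw a damped sine wave, then hand the axes to
    # XKCDify. The data below are made up purely for display purposes.
    np.random.seed(0)
    ax = pl.axes()
    x = np.linspace(0, 10, 100)
    ax.plot(x, np.sin(x) * np.exp(-0.1 * x), 'b', lw=1, label='damped sine')
    ax.plot(x, -np.sin(x) * np.exp(-0.1 * x), 'r', lw=1, label='mirror')
    ax.set_title('An XKCD-style plot')
    ax.set_xlabel('time')
    ax.set_ylabel('response')
    ax.legend(loc='lower right')
    ax.set_xlim(0, 10)
    ax.set_ylim(-1.1, 1.1)
    XKCDify(ax, xaxis_loc=0.0, yaxis_loc=0.0,
            xaxis_arrow='+-', yaxis_arrow='+-',
            expand_axes=True)
    pl.show()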
| gpl-3.0 |
kylerbrown/scikit-learn | sklearn/linear_model/tests/test_base.py | 120 | 10082 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
| bsd-3-clause |
iismd17/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. The plot illustrates that although feature 2
has a strong coefficient in the full model, it tells us
little about `y` compared to feature 1 alone.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
feststelltaste/software-analytics | demos/20180709_WeAreDevelopers_Meetup_Vienna/ausi/portfolio.py | 5 | 1395 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import matplotlib.pyplot as plt
def plot_diagram(plot_data, x, y, size='Size'):
fig, ax = plt.subplots()
ax = plot_data.plot.scatter(
x,
y,
s=plot_data[size] * 30,
alpha=0.7,
title="SWOT analysis",
figsize=[10,7],
fontsize=14,
ax=ax
)
ax.title.set_size(24)
plt.xlabel(x, fontsize=18)
plt.ylabel(y, fontsize=18)
# plot vertical axis
ax.plot(
[plot_data[x].max()/2, plot_data[x].max()/2],
[0, plot_data[y].max()], color='k', linestyle='--', linewidth=0.6)
# plot horizonzal axis
ax.plot(
[0, plot_data[x].max()],
[plot_data[y].max()/2,plot_data[y].max()/2], color='k', linestyle='--', linewidth=0.6)
# plot fields' key words
ax.text(plot_data[x].max()*1/4, plot_data[y].max()*3/4, "Strengths", ha="center", fontsize=24)
ax.text(plot_data[x].max()*3/4, plot_data[y].max()*3/4, "Weaknesses", ha="center", fontsize=24)
ax.text(plot_data[x].max()*1/4, plot_data[y].max()*1/4, "Opportunities", ha="center", fontsize=24)
ax.text(plot_data[x].max()*3/4, plot_data[y].max()*1/4, "Threats", ha="center", fontsize=24)
for k, v in plot_data[[x, y]].iterrows():
ax.annotate(k, v, horizontalalignment='center', verticalalignment='middle', fontsize=14)
return ax | gpl-3.0 |
ycaihua/scikit-learn | sklearn/tree/tests/test_tree.py | 9 | 46546 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.random import sample_without_replacement
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, return_indicator=True, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
"""Check classification on a toy dataset."""
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
"""Check classification on a weighted toy dataset."""
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
"""Check regression on a toy dataset."""
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
"""Check on a XOR problem"""
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
"""Check consistency on dataset iris."""
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
"""Check consistency on dataset boston house prices."""
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
"""Predict probabilities using DecisionTreeClassifier."""
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
"""Check the array representation."""
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
"""Check when y is pure."""
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
"""Check numerical stability."""
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
"""Check if variable importance before fit raises ValueError. """
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
"""Check that gini is equivalent to mse for binary output variable"""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
# high tree depth, we restrict this maximal depth.
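    # Added note (not part of the original test): for a binary 0/1 target
    # with positive-class fraction p, the gini impurity of a node is
    #     1 - p**2 - (1 - p)**2 = 2 * p * (1 - p),
    # while the MSE criterion uses the node variance p * (1 - p).  The two
    # differ only by a constant factor of 2, so both criteria rank candidate
    # splits identically and yield the same tree structure.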
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
"""Check max_features."""
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
"""Test that it gives proper exception on deficient input."""
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(Exception, est.predict_proba, X)
est.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(Exception, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
def test_min_samples_leaf():
"""Test if leaves contain more than leaf_count training examples"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
"""Check that tree estimator are pickable """
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
"""Check estimators on multi-output problems."""
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
"""Test that n_classes_ and classes_ have proper shape."""
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
"""Check class rebalancing."""
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
"""Check that it works no matter the memory layout"""
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
"""Check sample weighting."""
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
"""Check sample weighting raises errors."""
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'auto' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='auto', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='auto', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight**2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
"""Test if class_weight raises errors and warnings when expected."""
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
"""Test greedy trees with max_depth + 1 leafs. """
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
"""Test preceedence of max_leaf_nodes over max_depth. """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
"""Ensure property arrays' memory stays alive when tree disappears
non-regression for #2726
"""
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
"""Test if the warning for too large inputs is appropriate."""
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._tree import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, X)
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
| bsd-3-clause |
ianabc/vitale | vitale-numpy.py | 1 | 1476 | import sys
try:
import numpy as np
except ImportError:
print("This version REQUIRES numpy")
sys.exit(1)
#
# The "Vitale" property: The first two digits are divisible by 2, the
# first 3 digits are divisible by 4 and so on.
#
# Numpy version: If numpy has 128bit uInts (or Ints) then this would
# work, with 64 bit ints, it starts failing in the teens
#
# http://www.blog.republicofmath.com/the-number-3608528850368400786036725/
#
# Start with the even two digit numbers less than 100
#
try:
vitale_dtype = np.int128
except AttributeError:
vitale_dtype = np.int64
pass
def vitaleproperty(n):
if n == 2:
return np.arange(10, 99, 2, dtype=vitale_dtype)
else:
res = vitaleproperty(n - 1)
res = 10 * res[:, np.newaxis] + np.arange(10)
assert np.all(res > 0)
return res[np.where(res % n == 0)]
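# --- Illustrative sketch (added; not part of the original script) ----------
# A plain-Python check of the property for a single candidate number; the
# helper name below is an assumption introduced only for demonstration.
def has_vitale_property(num):
    """Return True if the first k digits of ``num`` are divisible by k
    for every k from 2 up to the number of digits."""
    digits = str(num)
    return all(int(digits[:k]) % k == 0 for k in range(2, len(digits) + 1))
# For example, has_vitale_property(3608528850368400786036725) is True,
# while has_vitale_property(123456) is False because 1234 % 4 != 0.
# ----------------------------------------------------------------------------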
if __name__ == "__main__":
n = 2
nvnums = []
while True:
try:
vitale_n = vitaleproperty(n)
except AssertionError:
print("Overflow likely for n=%d, INCOMPLETE!!!" % n)
break
pass
if (vitale_n.size != 0):
nvnums.append(len(vitale_n))
n = n + 1
else:
break
try:
import matplotlib.pyplot as plt
plt.plot(nvnums, 'ro', markersize=5)
plt.ylim([0, max(nvnums) * 1.1])
plt.xlim([0, n+3])
plt.show()
except ImportError:
print(nvnums)
| gpl-2.0 |
wlamond/scikit-learn | setup.py | 9 | 10275 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
import traceback
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages are based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
SCIPY_MIN_VERSION = '0.13.3'
NUMPY_MIN_VERSION = '1.8.2'
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
        zip_safe=False,  # the package cannot run from a zipped .egg file
include_package_data=True,
extras_require={
'alldeps': (
'numpy >= {0}'.format(NUMPY_MIN_VERSION),
'scipy >= {0}'.format(SCIPY_MIN_VERSION),
),
},
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
print('Will remove generated .c files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(SCIPY_MIN_VERSION)
scipy_status['version'] = scipy_version
except ImportError:
traceback.print_exc()
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(NUMPY_MIN_VERSION)
numpy_status['version'] = numpy_version
except ImportError:
traceback.print_exc()
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
# For these actions, NumPy is not required
#
        # They are required to succeed without NumPy, for example when
        # pip is used to install scikit-learn while NumPy is not yet
        # present on the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
NUMPY_MIN_VERSION)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
SCIPY_MIN_VERSION)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/cluster/mean_shift_.py | 19 | 15157 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to subsample by setting the n_samples
    parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
        Should be in the range [0, 1];
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
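# Illustrative usage sketch (added; not part of the original module).  The
# toy data and quantile value are assumptions chosen only to show the call
# signature and that a strictly positive bandwidth is returned:
#
#     >>> import numpy as np
#     >>> from sklearn.cluster import estimate_bandwidth
#     >>> X = np.array([[1., 1.], [1.5, 1.], [10., 10.], [10.5, 10.]])
#     >>> estimate_bandwidth(X, quantile=0.5) > 0
#     True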
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
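# Added note (not part of the original module): each iteration above replaces
# the current mean m by the average of the samples inside the flat kernel,
#     m  <-  mean({x_i : ||x_i - m|| <= bandwidth}),
# and stops once the shift ||m_new - m_old|| drops below 1e-3 * bandwidth or
# max_iter iterations have been performed.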
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
.. versionadded:: 0.17
Parallel Execution using *n_jobs*.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
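# Illustrative usage sketch (added; not part of the original module).  The
# toy data and bandwidth are assumptions for demonstration only:
#
#     >>> import numpy as np
#     >>> from sklearn.cluster import mean_shift
#     >>> X = np.array([[1., 1.], [2., 1.], [1., 0.],
#     ...               [4., 7.], [3., 5.], [3., 6.]])
#     >>> cluster_centers, labels = mean_shift(X, bandwidth=2)
#     >>> len(cluster_centers), len(labels)
#     (2, 6)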
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
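# Illustrative sketch (added; not part of the original module).  With
# bin_size=1.0 the four toy points below (an assumption for demonstration)
# fall into two grid cells, so two seeds are returned:
#
#     >>> import numpy as np
#     >>> X = np.array([[0.1, 0.1], [0.2, 0.2], [5.0, 5.1], [5.1, 5.0]])
#     >>> len(get_bin_seeds(X, bin_size=1.0))
#     2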
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
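# Illustrative usage sketch (added; not part of the original module).  The
# toy data and bandwidth are assumptions for demonstration; exact label
# numbering may depend on tie-breaking between clusters of equal size:
#
#     >>> import numpy as np
#     >>> from sklearn.cluster import MeanShift
#     >>> X = np.array([[1., 1.], [2., 1.], [1., 0.],
#     ...               [4., 7.], [3., 5.], [3., 6.]])
#     >>> ms = MeanShift(bandwidth=2).fit(X)
#     >>> len(ms.cluster_centers_)
#     2
#     >>> len(ms.predict([[0., 0.], [5., 5.]]))
#     2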
| bsd-3-clause |
CVML/scikit-learn | sklearn/feature_selection/rfe.py | 137 | 17066 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative
    features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
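            # Added note (not part of the original module): e.g. with 10
            # features, n_features_to_select=5 and step=3 the loop removes
            # 3 and then 2 features; the min() above caps the last step so
            # the selection never drops below the requested size.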
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
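    For example, with 10 features and ``step=1``, ``grid_scores_`` holds
    ceil(9 / 1) + 1 = 10 scores, one per candidate subset size.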
Examples
--------
    The following example shows how to retrieve the 5 informative features,
    not known a priori, in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
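        # Each fold runs RFE all the way down to a single feature so that a
        # step score is recorded for every candidate subset size.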
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
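        # After the per-fold [::-1] reversal, scores[0] corresponds to the
        # smallest candidate subset and scores[-1] to the full feature set;
        # entries are summed over the CV folds.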
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
        # n from the enumerate loop equals len(cv) - 1 here; normalize the
        # summed scores by len(cv) to obtain mean CV scores per subset size.
self.grid_scores_ = scores / len(cv)
return self
| bsd-3-clause |
f3r/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 30 | 17391 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import (assert_array_almost_equal,
assert_array_equal,
assert_equal)
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
incr_mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_incr_mean_variance_axis():
for axis in [0, 1]:
rng = np.random.RandomState(0)
n_features = 50
n_samples = 10
data_chunks = [rng.random_integers(0, 1, size=n_features)
for i in range(n_samples)]
# default params for incr_mean_variance
last_mean = np.zeros(n_features)
last_var = np.zeros_like(last_mean)
last_n = 0
# Test errors
X = np.array(data_chunks[0])
X = np.atleast_2d(X)
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
assert_raises(TypeError, incr_mean_variance_axis, axis,
last_mean, last_var, last_n)
assert_raises(TypeError, incr_mean_variance_axis, axis,
last_mean, last_var, last_n)
assert_raises(TypeError, incr_mean_variance_axis, X_lil, axis,
last_mean, last_var, last_n)
# Test _incr_mean_and_var with a 1 row input
X_means, X_vars = mean_variance_axis(X_csr, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_csr, axis, last_mean, last_var, last_n)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr) # X.shape[axis] picks # samples
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
        # Test _incremental_mean_and_var with whole data
X = np.vstack(data_chunks)
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_csr, axis, last_mean, last_var, last_n)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
# All data but as float
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_csr, axis, last_mean, last_var, last_n)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
        X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csc, axis)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-3,
last_mean=None, last_var=None, last_n=None)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=2,
last_mean=None, last_var=None, last_n=None)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-1,
last_mean=None, last_var=None, last_n=None)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=np.float64)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
bchappet/dnfpy | src/experiments/plotMapsSequence.py | 1 | 7250 | import sys
import dnfpy.view.staticViewMatplotlib as view
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import os,glob
import re
import numpy as np
from dnfpy.core.utils import cosTraj
color = 'black'
showBar = True
showArrows = True
showCross = True
#if show arrow
radius = 0.3
center = 0.
period = 36
dt = 0.1
path = sys.argv[1] #name of the save folder
mapNames = eval(sys.argv[2]) #list of names "['a','b',...]"
#timeList = eval(sys.argv[3]) #list of time "[1,2]"
timeList = []
if len(sys.argv) > 3:
traceNames = eval(sys.argv[3]) #list of the traces "['ta','tb',...]" if no trace, only the time will be displayed
else:
traceNames = []
def getArray(name,time):
tStr = str(time)
timeStr = "_".join(tStr.split("."))
return np.loadtxt(path + name + "_" + timeStr + ".csv",delimiter=",")
def getTrackCenter(index,time,sizeArray):
phase = index/2.
radius_ = radius * sizeArray
center_ = center * sizeArray + (sizeArray-1)/2
print(cosTraj(26.4,50,36,30.3,0))
print(cosTraj(26.4,50,36,30.3,0.25))
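    # x and y follow the same cosine trajectory a quarter period apart
    # (phase vs. phase + 0.25), tracing a roughly circular track of radius
    # radius_ around center_.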
x = cosTraj(time,center_,period,radius_,phase)
y = cosTraj(time,center_,period,radius_,phase+0.25)
return x,y
for filename in glob.glob(os.path.join(path, '*.csv')):
split1 = filename.split('_')
if len(split1) == 3:
mapOfFile = (split1[-3]).split('/')[-1]
if mapNames[0] == mapOfFile:
time = eval(".".join(re.compile("[_\.]").split(filename)[-3:-1]))
timeList.append(time)
timeList.sort()
print(mapNames,timeList)
#get size array
shape = getArray(mapNames[0],timeList[0]).shape
nbDim = len(shape)
sizeArray = shape[0]
#We prepare the grid
grid = gridspec.GridSpec(len(mapNames)+len(traceNames), len(timeList))
#we need to adjust the size of the grid to be sure that every map fit
size = 2
#fig = plt.figure(figsize=(size*len(timeList),size*(len(mapNames)+len(traceNames))))
fig = plt.figure(figsize = (9,size*(len(mapNames)+len(traceNames))))
main_ax = plt.gca()
for i in range(len(mapNames)):
for j in range(len(timeList)):
axes = plt.subplot(grid[i,j])
if j == 0: #plot name
plt.text(-0.15,0.5,mapNames[i],transform=axes.transAxes,va='center',ha='left',zorder=100,fontsize=12,rotation=90)
if mapNames[i] == "Inputs" and (showArrows or showCross):
assert(nbDim == 2) # adapt for dim = 1
time = timeList[j]
for indexStim in [0,1]:
#(x,y) = getTrackCenter(indexStim,time,sizeArray)
x = getArray("Inputs_track"+str(indexStim)+"_c0",time)
y = getArray("Inputs_track"+str(indexStim)+"_c1",time)
#(xt,yt) = getTrackCenter(indexStim,time+5,sizeArray)
#[xFig,yFig] = axis.transData.transform([x,y])
if j +1 < len(timeList) and showArrows:
xt = getArray("Inputs_track"+str(indexStim)+"_c0",timeList[j+1])
yt = getArray("Inputs_track"+str(indexStim)+"_c1",timeList[j+1])
print("arrow",x,y,xt,yt)
axes.annotate("",
xy=(xt, yt), xycoords='data',
xytext=(x, y), textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=-0.3",
),
)
if showCross:
marker = axes.scatter([x], [y], marker='o',color=color)
plt.xticks([]), plt.yticks([])
array = getArray(mapNames[i],timeList[j])
if nbDim == 2:
img = view.plotArray(array,showBar=False)
elif nbDim == 1:
img = view.plot(array)
else:
            raise Exception("nbDim > 2 not handled")
if i == len(mapNames) -1:
plt.text(0.5,-0.1,timeList[j],va='center',ha='center',transform=axes.transAxes)
if showBar:
#plot colorbar
axisbg='w'
rect = [0.,0.,1,1]
box = axes.get_position()
width = box.width
height = box.height
inax_position = axes.transAxes.transform(rect[0:2])
transFigure = fig.transFigure.inverted()
infig_position = transFigure.transform(inax_position)
x = infig_position[0]+0.03
y = infig_position[1]
width *= rect[2]
height *= rect[3]
print(x,y,width,height)
subax = fig.add_axes([x,y,width,height],axisbg=axisbg)
egal = view.getEgal(array)
print('egal',egal)
a = np.array([[-egal,egal]])
img = view.plotArray(a,showBar=False,egal=egal)
bar = plt.colorbar(shrink=.9)
plt.gca().set_visible(False)
#axes.set_visible(False)
view.egaliseColorBar(egal,bar)
if len(traceNames) > 0:
lwTrace= 2
lineColor = 'black'
lineWidth = 2
traceName = traceNames[0]
trace = np.load(path + traceName + ".csv.npy")
x = np.linspace(0,timeList[-1],len(trace))
axis = plt.subplot(grid[-1,:])
maxLenTrace = np.max([len(t) for t in trace])
balancedTrace = np.ones((len(trace),maxLenTrace))*np.nan
for i in range(len(trace)):
for j in range(len(trace[i])):
balancedTrace[i,j] = trace[i][j]
for i in range(maxLenTrace):
plt.plot(x,balancedTrace[:,i],lw=1,color=color)
# Hide the right and top spines
axis.spines['right'].set_visible(False)
axis.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
axis.yaxis.set_ticks_position('left')
axis.xaxis.set_ticks_position('bottom')
plt.xlabel("Time (s)")
plt.ylabel("Error distance",multialignment='center')
ax = plt.gca()
ax.tick_params(axis='both', which='major', labelsize=10)
formatter=ticker.FormatStrFormatter("%1.2f")
ax.yaxis.set_major_formatter(formatter)
plt.xlim(0,timeList[-1])
plt.ylim(0,0.1)
ylim = ax.get_ylim()
plt.ylim(ylim)
#Add lines on trace
    for it in timeList:
plt.plot([it,it],ylim,color=lineColor,lw=lineWidth)
newax = fig.add_axes(main_ax.get_position(), frameon=False)
#plt.subplot2grid(grid,(gridY-2,0),colspan=gridX,rowspan=2)
size =100
plt.xlim([0,size])
plt.ylim([0,size])
rowSize = size/float(len(mapNames)+1)
yto = 0 + rowSize + rowSize/10.0 -rowSize/15.0 -3
yfrom =0 + rowSize - rowSize/10.0 -rowSize/50.0
#xfrom = np.linspace(0,size,len(trace),endpoint=False)
xfrom = [x / timeList[-1] * size for x in timeList]
nbIt = len(timeList)
xto = np.linspace(nbIt,size,nbIt,endpoint=True)
print("xfrom : " + str(xfrom))
xmargin = size /float( nbIt *5)
print("xmargin : " + str(xmargin))
image = (size - (nbIt-1)*xmargin)/float(nbIt)
print("image : " + str(image))
xto=np.arange(image/2,size,image+xmargin)
print("xto : " + str(xto))
print("xfrom lenght : " + str(len(xfrom)))
for i in range(len(timeList)):
plt.plot([xfrom[i],xto[i]],[yfrom,yto],color=lineColor,lw=lineWidth)
plt.xticks([])
plt.yticks([])
plt.show()
| gpl-2.0 |
Alexx-G/openface | util/tsne.py | 11 | 1261 | #!/usr/bin/env python2
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.style.use('bmh')
import argparse
print("""
Note: This example assumes that `name i` corresponds to `label i`
in `labels.csv`.
""")
parser = argparse.ArgumentParser()
parser.add_argument('workDir', type=str)
parser.add_argument('--names', type=str, nargs='+', required=True)
args = parser.parse_args()
y = pd.read_csv("{}/labels.csv".format(args.workDir)).as_matrix()[:, 0]
X = pd.read_csv("{}/reps.csv".format(args.workDir)).as_matrix()
target_names = np.array(args.names)
colors = cm.Dark2(np.linspace(0, 1, len(target_names)))
X_pca = PCA(n_components=50).fit_transform(X, X)
tsne = TSNE(n_components=2, init='random', random_state=0)
X_r = tsne.fit_transform(X_pca)
for c, i, target_name in zip(colors,
list(range(1, len(target_names) + 1)),
target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1],
c=c, label=target_name)
plt.legend()
out = "{}/tsne.pdf".format(args.workDir)
plt.savefig(out)
print("Saved to: {}".format(out))
| apache-2.0 |
rajul/mne-python | tutorials/plot_raw_objects.py | 15 | 5335 | """
.. _tut_raw_objects:
The :class:`Raw <mne.io.RawFIF>` data structure: continuous data
=================================================================
"""
from __future__ import print_function
import mne
import os.path as op
from matplotlib import pyplot as plt
###############################################################################
# Continuous data is stored in objects of type :class:`Raw <mne.io.RawFIF>`.
# The core data structure is simply a 2D numpy array (channels × samples,
# `._data`) combined with an :class:`Info <mne.io.meas_info.Info>` object
# (`.info`) (:ref:`tut_info_objects`).
#
# The most common way to load continuous data is from a .fif file. For more
# information on :ref:`loading data from other formats <ch_raw>`, or creating
# it :ref:`from scratch <tut_creating_data_structures>`.
###############################################################################
# Loading continuous data
# -----------------------
# Load an example dataset, the preload flag loads the data into memory now
data_path = op.join(mne.datasets.sample.data_path(), 'MEG',
'sample', 'sample_audvis_raw.fif')
raw = mne.io.RawFIF(data_path, preload=True, verbose=False)
# Give the sample rate
print('sample rate:', raw.info['sfreq'], 'Hz')
# Give the size of the data matrix
print('channels x samples:', raw._data.shape)
###############################################################################
# Information about the channels contained in the :class:`Raw <mne.io.RawFIF>`
# object is contained in the :class:`Info <mne.io.meas_info.Info>` attribute.
# This is essentially a dictionary with a number of relevant fields (see
# :ref:`tut_info_objects`).
###############################################################################
# Indexing data
# -------------
#
# There are two ways to access the data stored within :class:`Raw
# <mne.io.RawFIF>` objects. One is by accessing the underlying data array, and
# the other is to index the :class:`Raw <mne.io.RawFIF>` object directly.
#
# To access the data array of :class:`Raw <mne.io.Raw>` objects, use the
# `_data` attribute. Note that this is only present if `preload==True`.
print('Shape of data array:', raw._data.shape)
array_data = raw._data[0, :1000]
_ = plt.plot(array_data)
###############################################################################
# You can also pass an index directly to the :class:`Raw <mne.io.RawFIF>`
# object. This will return an array of times, as well as the data representing
# those timepoints. This may be used even if the data is not preloaded:
# Extract data from the first 5 channels, from 1 s to 3 s.
sfreq = raw.info['sfreq']
data, times = raw[:5, int(sfreq * 1):int(sfreq * 3)]
_ = plt.plot(times, data.T)
_ = plt.title('Sample channels')
###############################################################################
# -----------------------------------------
# Selecting subsets of channels and samples
# -----------------------------------------
#
# It is possible to use more intelligent indexing to extract data, using
# channel names, types or time ranges.
# Pull all MEG gradiometer channels:
# Make sure to use copy==True or it will overwrite the data
meg_only = raw.pick_types(meg=True, copy=True)
eeg_only = raw.pick_types(meg=False, eeg=True, copy=True)
# The MEG flag in particular lets you specify a string for more specificity
grad_only = raw.pick_types(meg='grad', copy=True)
# Or you can use custom channel names
pick_chans = ['MEG 0112', 'MEG 0111', 'MEG 0122', 'MEG 0123']
specific_chans = raw.pick_channels(pick_chans, copy=True)
print(meg_only, eeg_only, grad_only, specific_chans, sep='\n')
###############################################################################
# Notice the different scalings of these types
f, (a1, a2) = plt.subplots(2, 1)
eeg, times = eeg_only[0, :int(sfreq * 2)]
meg, times = meg_only[0, :int(sfreq * 2)]
a1.plot(times, meg[0])
a2.plot(times, eeg[0])
###############################################################################
# You can restrict the data to a specific time range
restricted = raw.crop(5, 7) # in seconds
print('New time range from', restricted.times.min(), 's to',
restricted.times.max(), 's')
###############################################################################
# And drop channels by name
restricted = restricted.drop_channels(['MEG 0241', 'EEG 001'])
print('Number of channels reduced from', raw.info['nchan'], 'to',
restricted.info['nchan'])
###############################################################################
# --------------------------------------------------
# Concatenating :class:`Raw <mne.io.RawFIF>` objects
# --------------------------------------------------
#
# :class:`Raw <mne.io.RawFIF>` objects can be concatenated in time by using the
# :func:`append <mne.io.RawFIF.append>` function. For this to work, they must
# have the same number of channels and their :class:`Info
# <mne.io.meas_info.Info>` structures should be compatible.
# Create multiple :class:`Raw <mne.io.RawFIF>` objects
raw1 = raw.copy().crop(0, 10)
raw2 = raw.copy().crop(10, 20)
raw3 = raw.copy().crop(20, 100)
# Concatenate in time (also works without preloading)
raw1.append([raw2, raw3])
print('Time extends from', raw1.times.min(), 's to', raw1.times.max(), 's')
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/examples/api/legend_demo.py | 6 | 1083 | import numpy as np
import matplotlib.pyplot as plt
a = np.arange(0,3,.02)
b = np.arange(0,3,.02)
c = np.exp(a)
d = c[::-1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(a,c,'k--',a,d,'k:',a,c+d,'k')
leg = ax.legend(('Model length', 'Data length', 'Total message length'),
'upper center', shadow=True)
ax.set_ylim([-1,20])
ax.grid(False)
ax.set_xlabel('Model complexity --->')
ax.set_ylabel('Message length --->')
ax.set_title('Minimum Message Length')
ax.set_yticklabels([])
ax.set_xticklabels([])
# set some legend properties. All the code below is optional. The
# defaults are usually sensible but if you need more control, this
# shows you how
# the matplotlib.patches.Rectangle instance surrounding the legend
frame = leg.get_frame()
frame.set_facecolor('0.80') # set the frame face color to light gray
# matplotlib.text.Text instances
for t in leg.get_texts():
t.set_fontsize('small') # the legend text fontsize
# matplotlib.lines.Line2D instances
for l in leg.get_lines():
l.set_linewidth(1.5) # the legend line width
plt.show()
| mit |
xumiao/pymonk | monk/core/turtle.py | 1 | 17918 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 08 19:52:01 2013
The complex problem solver that manages a team of pandas.
@author: xm
"""
import base
import constants as cons
import crane
from relation import MatchingRelation
from monk.utils.utils import binary2decimal, translate, monitor_accuracy
from monk.math.cmath import sign0
#from itertools import izip
import logging
import nltk
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
logger = logging.getLogger("monk.turtle")
metricLog = logging.getLogger("metric")
stopwords_english = set(stopwords.words('english'))
symbols = {'\'', '\"', '[', ']','{','}','(',')','.','$', '#'}
class Turtle(base.MONKObject):
FPANDAS = 'pandas'
FTIGRESS = 'tigress'
FMAPPING = 'mapping'
FDESCRIPTION = 'description'
FPENALTY = 'pPenalty'
FEPS = 'pEPS'
FMAXPATHLENGTH = 'pMaxPathLength'
FMAXINFERENCESTEPS = 'pMaxInferenceSteps'
FPARTIALBARRIER = 'pPartialBarrier'
FMERGECLOCK = 'pMergeClock'
FTRAINCLOCK = 'pTrainClock'
FENTITYCOLLECTIONNAME = 'entityCollectionName'
FREQUIRES = 'requires'
FREQUIRES_UIDS = 'uids'
FREQUIRES_TURTLES = 'turtles'
FFOLLOWERS = 'followers'
FLEADER = 'leader'
store = crane.turtleStore
def __default__(self):
super(Turtle, self).__default__()
self.pandas = []
self.tigress = None
self.mapping = {}
self.description = cons.DEFAULT_EMPTY
self.pPenalty = 1.0
self.pEPS = 1e-6
self.pMaxPathLength = 1
self.pMaxInferenceSteps = 1000
self.entityCollectionName = cons.DEFAULT_EMPTY
self.requires = dict()
self.followers = []
self.leader = None
self.mergeQueue = set()
self.pPartialBarrier = 50
self.pMergeClock = 0
self.pTrainClock = 0
def __restore__(self):
super(Turtle, self).__restore__()
try:
[panda.setdefault(self.CREATOR, self.creator) for panda in self.pandas]
[panda.setdefault(self.NAME, self.name) for panda in self.pandas]
self.tigress.setdefault(self.CREATOR, self.creator)
self.tigress.setdefault(self.NAME, self.name)
except:
pass
self.pandas = crane.pandaStore.load_or_create_all(self.pandas)
self.tigress = crane.tigressStore.load_or_create(self.tigress)
self.pandaUids = set((p.uid for p in self.pandas))
self.invertedMapping = {tuple(v): k for k, v in self.mapping.iteritems()}
self.followers = set(self.followers)
if self.FREQUIRES_UIDS in self.requires:
uids = self.requires[self.FREQUIRES_UIDS]
if isinstance(uids, basestring):
uids = eval(uids)
[panda.add_features(uids) for panda in self.pandas]
elif self.FREQUIRES_TURTLES in self.requires:
turtles = self.store.load_or_create_all(
[{'name':t, 'creator':self.creator}
for t in self.requires[self.FREQUIRES_TURTLES]])
[panda.add_features(turtle.get_panda_uids()) for turtle in turtles for panda in self.pandas]
elif self.requires:
logger.error('dependent features are either in {0} or {1}, but not in {2}'.format(
self.FREQUIRES_UIDS,
self.FREQUIRES_TURTLES,
self.requires))
def generic(self):
result = super(Turtle, self).generic()
if self.tigress:
result[self.FTIGRESS] = self.tigress.signature()
result[self.FPANDAS] = [panda.signature() for panda in self.pandas]
result[self.FFOLLOWERS] = list(self.followers)
# invertedMapping is created from mapping
del result['invertedMapping']
del result['pandaUids']
del result['mergeQueue']
return result
def clone(self, userName):
obj = super(Turtle, self).clone(userName)
obj.pandaUids = set(self.pandaUids)
obj.tigress = self.tigress.clone(userName)
obj.pandas = [p.clone(userName) for p in self.pandas]
obj.requires = dict(self.requires)
return obj
def save(self):
super(Turtle, self).save()
if self.tigress:
self.tigress.save()
[pa.save() for pa in self.pandas]
def delete(self, deep=False):
result = super(Turtle, self).delete()
if self.tigress:
result = result and self.tigress.delete()
if deep:
result = result and [pa.delete() for pa in self.pandas]
return result
def add_follower(self, follower):
if follower not in self.followers:
self.followers.add(follower)
self.store.push_one_in_fields(self, {'followers':follower})
[pa.increment() for pa in self.pandas]
return True
else:
logger.info('user {} is already a follower of {}'.format(follower, self.creator))
return False
def add_leader(self, leader):
if self.leader != leader:
self.leader = leader
self.store.update_one_in_fields(self, {'leader': leader})
return True
else:
logger.info('user {} is already the leader of {}'.format(leader, self.creator))
return False
def remove_leader(self, leader):
if self.leader and leader == self.leader:
self.leader = None
self.store.update_one_in_fields(self, {'leader':None})
return True
else:
logger.info('user {} is not the leader of {}@{}'.format(leader, self.name, self.creator))
return False
def remove_follower(self, follower):
if follower in self.followers:
self.followers.remove(follower)
self.store.pull_one_in_fields(self, {'followers':follower})
[pa.decrease() for pa in self.pandas]
return True
else:
            logger.info('user {} is not a follower of {}@{}'.format(follower, self.name, self.creator))
return False
def require_panda(self, panda):
if self.has_panda(panda):
logger.error('turtle can not depends on itself {0}'.format(panda._id))
return
[pa.add_features(panda.uid) for pa in self.pandas]
def require(self, turtleName):
if self.name == turtleName:
logger.error('turle can not depend on itself {0}'.format(turtleName))
return
turtle = self.store.load_or_create({'name':turtleName, 'creator':self.creator})
[panda.add_features(turtle.get_panda_uids()) for panda in self.pandas]
def get_panda_uids(self):
return [pa.uid for pa in self.pandas]
def has_panda(self, panda):
return panda.uid in self.pandaUids
def add_panda(self, panda):
if not self.has_panda(panda):
self.pandas.append(panda)
self.pandaUids.add(panda.uid)
crane.pandaStore.push_one_in_fields(self, {'pandas':panda._id})
return True
else:
logger.info('panda {0} is already in the turtle {1}'.format(panda.name, self.name))
return False
def delete_panda(self, panda):
if self.has_panda(panda):
self.pandas.remove(panda)
self.pandaUids.remove(panda.uid)
crane.pandaStore.pull_one_in_fields(self, {'pandas':panda._id})
return True
else:
logger.info('panda {0} is not in the turtle {1}'.format(panda.name, self.name))
return False
def predict(self, entity, fields=None):
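        # Each panda scores the entity; the tuple of score signs indexes
        # invertedMapping to recover the predicted label, which is then
        # reported to the tigress for measurement.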
scores = [panda.predict(entity) for panda in self.pandas]
predicted = self.invertedMapping[tuple(map(sign0, scores))]
self.tigress.measure(entity, predicted)
return predicted
def test_data(self, entity):
test = [panda.predict(entity) for panda in self.pandas]
logger.info('turtle {0} value is {1}'.format(self.creator, test[0]))
return test
def add_data(self, entity):
return self.tigress.supervise(self, entity)
def active_train(self):
try:
self.tigress.supervise(self)
except Exception as e:
logger.info(e.message)
logger.info("turtle {0} does not have active superviser".format(self.name))
def train(self):
[panda.train(self.leader) for panda in self.pandas]
self.pTrainClock += 1
self.update_fields({self.FTRAINCLOCK:self.pTrainClock})
logger.debug('training clock {0}'.format(self.pTrainClock))
def checkout(self):
[panda.checkout(self.leader) for panda in self.pandas]
def commit(self):
[panda.commit() for panda in self.pandas]
def merge(self, follower):
if follower not in self.followers and follower != self.creator:
logger.error('user {0} is not a follower of {1}@{2}'.format(follower, self.creator, self.name))
return False
self.mergeQueue.add(follower)
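        # Partial barrier: merge only once all followers, or at most
        # pPartialBarrier of them, have checked in; then fold each queued
        # follower's model into the consensus and bump the merge clock.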
if len(self.mergeQueue) >= min(len(self.followers), self.pPartialBarrier):
for follower in self.mergeQueue:
[panda.merge(follower) for panda in self.pandas]
self.mergeQueue.clear()
[panda.update_fields({panda.FCONSENSUS:panda.z.generic()}) for panda in self.pandas]
self.pMergeClock += 1
self.update_fields({self.FMERGECLOCK:self.pMergeClock})
logger.debug('merge clock {0}'.format(self.pMergeClock))
return True
return False
def reset(self):
[panda.reset() for panda in self.pandas]
self.pTrainClock = 0
self.pMergeClock = 0
self.update_fields({self.FTRAINCLOCK:self.pTrainClock, self.FMERGECLOCK:self.pMergeClock})
def reset_data(self):
[panda.reset_data() for panda in self.pandas]
def set_mantis_parameter(self, para, value):
[panda.set_mantis_parameter(para, value) for panda in self.pandas]
class SingleTurtle(Turtle):
def predict(self, entity, fields=None):
panda = self.pandas[0]
score = panda.predict(entity)
if sign0(score) > 0:
self.tigress.measure(entity, panda.name)
monitor_accuracy(panda.name, self.creator, score, 'True')
return panda.name
else:
self.tigress.measure(entity, cons.DEFAULT_NONE)
monitor_accuracy(panda.name, self.creator, score, 'False')
return cons.DEFAULT_NONE
class MultiLabelTurtle(Turtle):
def predict(self, entity, fields=None):
predicted = [panda.name for panda in self.pandas if panda.predict(entity) > 0]
self.tigress.measure(entity, predicted)
return predicted
class RankingTurtle(Turtle):
FTARGET_CONNECTION_STRING = 'targetConnectionString'
FTARGET_DATABASE_NAME = 'targetDatabaseName'
FTARGET_COLLECTION_NAME = 'targetCollectionName'
FTARGET_NUM_LEVELS = 'numLevels'
FBEAM_SIZE = 'beamSize'
FWINDOW_SIZE = 'windowSize'
FQUERY = 'queryFunc'
FTARGET_STORE = 'targetStore'
RAW_TARGETID = '_targetId'
RAW_RELEVANCE = '_relevance'
def __default__(self):
super(RankingTurtle, self).__default__()
self.targetConnectionString = cons.DEFAULT_EMPTY
self.targetDatabaseName = cons.DEFAULT_EMPTY
self.targetCollectionName = cons.DEFAULT_EMPTY
self.targetStore = crane.Crane()
self.numLevels = 1
self.beamSize = 10
self.windowSize = 2 * self.beamSize
self.queryFunc = cons.DEFAULT_FUNC
def __restore__(self):
super(RankingTurtle, self).__restore__()
self.targetStore = crane.Crane(self.targetConnectionString,
self.targetDatabaseName,
self.targetCollectionName)
def generic(self):
result = super(RankingTurtle, self).generic()
del result[self.FTARGET_STORE]
return result
def clone(self, userName):
obj = super(RankingTurtle, self).clone(userName)
return obj
def predict(self, entity, fields=None):
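        # Build the query from the entity, pull up to windowSize candidate
        # targets, score each (entity, target) matching relation with the
        # pandas, and keep the top beamSize ranked results.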
query = eval(self.queryFunc)(entity)
targetIds = self.targetStore.load_all_in_ids(query, 0, self.windowSize)
targets = self.targetStore.load_all_by_ids(targetIds)
relation = MatchingRelation()
relation.set_argument(0, entity)
results = []
for target in targets:
relation.set_argument(1, target)
relation.compute()
            rank = self.invertedMapping[tuple(sign0(panda.predict(relation)) for panda in self.pandas)]
results.append((rank, target))
results.sort(reverse=True)
return results[:self.beamSize]
def add_data(self, entity):
targetId = entity.get_raw(self.RAW_TARGETID, None)
relevance = entity.get_raw(self.RAW_RELEVANCE, 0)
if targetId:
target = self.targetStore.load_one_by_id(targetId)
relation = MatchingRelation()
relation.set_argument(0, entity)
relation.set_argument(1, target)
relation.compute()
relation.set_raw(self.RAW_RELEVANCE, relevance)
self.tigress.supervise(self, relation)
class SPNTurtle(Turtle):
pass
class DictionaryTurtle(Turtle):
def __default__(self):
super(DictionaryTurtle, self).__default__()
self.dictionary = dict()
def __restore__(self):
super(DictionaryTurtle, self).__restore__()
self.dictionary = {p.name:p.uid for p in self.pandas}
def generic(self):
result = super(DictionaryTurtle, self).generic()
try:
del result['tigress']
del result['dictionary']
except:
pass
return result
def clone(self, userName):
obj = super(DictionaryTurtle, self).clone(userName)
obj.dictionary = dict(self.dictionary)
return obj
def _process(self, field):
return {}
def _get_or_new_panda(self, name):
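        # Lazily create one ExistPanda per distinct token, caching its uid in
        # the dictionary; the returned (uid, 1.0) pair is used as a
        # unit-weight feature.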
if name not in self.dictionary:
panda = {self.MONK_TYPE: 'ExistPanda',
self.NAME: name,
self.CREATOR: self.creator}
panda = crane.pandaStore.load_or_create(panda, tosave=True)
self.add_panda(panda)
self.dictionary[name] = panda.uid
uid = panda.uid
else:
uid = self.dictionary[name]
return (uid, 1.0)
def is_stop(self, w):
if w in stopwords_english:
return True
else:
return False
def is_symbol(self, w):
if w in symbols:
return True
elif w.find('.') >= 0:
return True
else:
return False
def is_single(self, w):
if len(w) <= 1:
return True
else:
return False
def predict(self, entity, fields):
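        # Tokenize each requested field, register unseen tokens as pandas,
        # and attach them to the entity as raw tokens and unit-weight
        # features; returns the total token count.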
total = 0
for field in fields:
value = translate(getattr(entity, field, ''))
if not value:
continue
allTokens = self._process(value)
entity._raws.update(allTokens)
entity._features.update([self._get_or_new_panda(t) for t in allTokens])
total += len(allTokens)
return total
def merge(self):
# TODO should be taking care of dedups in models and data
pass
class UniGramTurtle(DictionaryTurtle):
def _process(self, field):
allTokens = {}
sents = nltk.tokenize.sent_tokenize(field)
for sent in sents:
tokens = nltk.tokenize.word_tokenize(sent.lower())
allTokens.update(((t,1) for t in tokens if not self.is_stop(t) and
not self.is_symbol(t) and
not self.is_single(t)))
return allTokens
class POSTurtle(DictionaryTurtle):
def _process(self, field):
allTokens = {}
sents = nltk.tokenize.sent_tokenize(field)
for sent in sents:
tokens = nltk.tokenize.word_tokenize(sent.lower())
tagged = nltk.pos_tag([t for t in tokens if not self.is_stop(t) and
not self.is_symbol(t) and
not self.is_single(t)])
allTokens.update((('_'.join(t),1) for t in tagged))
return allTokens
class StemTurtle(DictionaryTurtle):
def _process(self, field):
allTokens = {}
sents = nltk.tokenize.sent_tokenize(field)
logger.debug(sents)
port = PorterStemmer()
for sent in sents:
tokens = nltk.tokenize.word_tokenize(sent.lower())
stems = [port.stem(t) for t in tokens if not self.is_stop(t) and not self.is_symbol(t)]
allTokens.update(((t,1) for t in stems))
logger.debug(' '.join(allTokens))
return allTokens
base.register(Turtle)
base.register(SingleTurtle)
base.register(MultiLabelTurtle)
base.register(RankingTurtle)
base.register(DictionaryTurtle)
base.register(SPNTurtle)
base.register(UniGramTurtle)
base.register(POSTurtle)
base.register(StemTurtle)
| mit |
chiffa/numpy | numpy/core/function_base.py | 23 | 6891 | from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
start = start * 1.
stop = stop * 1.
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
y = _nx.arange(0, num, dtype=dt)
delta = stop - start
if num > 1:
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
y = y * delta
else:
# One might be tempted to use faster, in-place multiplication here,
# but this prevents step from overriding what class is produced,
# and thus prevents, e.g., use of Quantities; see gh-7142.
y = y * step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
# Multiply with delta to allow possible override of output class.
y = y * delta
y += start
if endpoint and num > 1:
y[-1] = stop
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
| bsd-3-clause |
legacysurvey/pipeline | py/legacyanalysis/nexp-map.py | 2 | 5740 | from __future__ import print_function
import os
import fitsio
import numpy as np
from glob import glob
from collections import Counter
import matplotlib
#matplotlib.use('Agg')
#matplotlib.rc('text', usetex=True)
#matplotlib.rc('font', family='serif')
import pylab as plt
from astrometry.util.fits import fits_table
from astrometry.libkd.spherematch import match_radec
from astrometry.util.miscutils import clip_polygon
from astrometry.util.multiproc import multiproc
from tractor import NanoMaggies
from legacypipe.survey import LegacySurveyData, wcs_for_brick
def one_brick(X):
(ibrick, brick) = X
bands = ['g','r','z']
print('Brick', brick.brickname)
wcs = wcs_for_brick(brick, W=94, H=94, pixscale=10.)
BH,BW = wcs.shape
targetrd = np.array([wcs.pixelxy2radec(x,y) for x,y in
[(1,1),(BW,1),(BW,BH),(1,BH),(1,1)]])
survey = LegacySurveyData()
C = survey.ccds_touching_wcs(wcs)
if C is None:
print('No CCDs touching brick')
return None
I = np.flatnonzero(C.ccd_cuts == 0)
if len(I) == 0:
print('No good CCDs touching brick')
return None
C.cut(I)
print(len(C), 'CCDs touching brick')
depths = {}
for band in bands:
d = np.zeros((BH,BW), np.float32)
depths[band] = d
npix = dict([(band,0) for band in bands])
nexps = dict([(band,0) for band in bands])
# survey.get_approx_wcs(ccd)
for ccd in C:
#im = survey.get_image_object(ccd)
awcs = survey.get_approx_wcs(ccd)
imh,imw = ccd.height,ccd.width
x0,y0 = 0,0
x1 = x0 + imw
y1 = y0 + imh
imgpoly = [(1,1),(1,imh),(imw,imh),(imw,1)]
ok,tx,ty = awcs.radec2pixelxy(targetrd[:-1,0], targetrd[:-1,1])
tpoly = list(zip(tx,ty))
clip = clip_polygon(imgpoly, tpoly)
clip = np.array(clip)
if len(clip) == 0:
continue
x0,y0 = np.floor(clip.min(axis=0)).astype(int)
x1,y1 = np.ceil (clip.max(axis=0)).astype(int)
#slc = slice(y0,y1+1), slice(x0,x1+1)
awcs = awcs.get_subimage(x0, y0, x1-x0, y1-y0)
ah,aw = awcs.shape
#print('Image', ccd.expnum, ccd.ccdname, ccd.filter, 'overlap', x0,x1, y0,y1, '->', (1+x1-x0),'x',(1+y1-y0))
# Find bbox in brick space
r,d = awcs.pixelxy2radec([1,1,aw,aw], [1,ah,ah,1])
ok,bx,by = wcs.radec2pixelxy(r,d)
bx0 = np.clip(np.round(bx.min()).astype(int) -1, 0, BW-1)
bx1 = np.clip(np.round(bx.max()).astype(int) -1, 0, BW-1)
by0 = np.clip(np.round(by.min()).astype(int) -1, 0, BH-1)
by1 = np.clip(np.round(by.max()).astype(int) -1, 0, BH-1)
#print('Brick', bx0,bx1,by0,by1)
band = ccd.filter[0]
assert(band in bands)
        # zeropoint scaled to the full exposure time
        ccdzpt = ccd.ccdzpt + 2.5 * np.log10(ccd.exptime)
        # Gaussian PSF width and norm (sqrt of the integral of PSF^2)
        psf_sigma = ccd.fwhm / 2.35
        psfnorm = 1./(2. * np.sqrt(np.pi) * psf_sigma)
        # per-pixel noise in nanomaggies, then point-source detection noise
        orig_zpscale = zpscale = NanoMaggies.zeropointToScale(ccdzpt)
        sig1 = ccd.sig1 / orig_zpscale
        detsig1 = sig1 / psfnorm
# print('Image', ccd.expnum, ccd.ccdname, ccd.filter,
# 'PSF depth', -2.5 * (np.log10(5.*detsig1) - 9), 'exptime', ccd.exptime,
# 'sig1', ccd.sig1, 'zpt', ccd.ccdzpt, 'fwhm', ccd.fwhm,
# 'filename', ccd.image_filename.strip())
depths[band][by0:by1+1, bx0:bx1+1] += (1. / detsig1**2)
npix[band] += (y1+1-y0)*(x1+1-x0)
nexps[band] += 1
for band in bands:
det = np.median(depths[band])
# compute stats for 5-sigma detection
with np.errstate(divide='ignore'):
depth = 5. / np.sqrt(det)
# that's flux in nanomaggies -- convert to mag
depth = -2.5 * (np.log10(depth) - 9)
if not np.isfinite(depth):
depth = 0.
depths[band] = depth
#bricks.get('psfdepth_' + band)[ibrick] = depth
print(brick.brickname, 'median PSF depth', band, ':', depth,
'npix', npix[band],
'nexp', nexps[band])
#'npix', bricks.get('npix_'+band)[ibrick],
#'nexp', bricks.get('nexp_'+band)[ibrick])
return (npix, nexps, depths)
def main():
survey = LegacySurveyData()
ccds = survey.get_ccds_readonly()
print(len(ccds), 'CCDs')
ccds = ccds[ccds.ccd_cuts == 0]
print(len(ccds), 'good CCDs')
# Find bricks touched by >=1 CCD
bricks = survey.get_bricks_readonly()
bricks = bricks[(bricks.dec > -20) * (bricks.dec < 35.)]
print(len(bricks), 'bricks in Dec range')
I,J,d = match_radec(bricks.ra, bricks.dec, ccds.ra, ccds.dec, 0.5, nearest=True)
bricks = bricks[I]
print(len(bricks), 'bricks')
bands = ['g','r','z']
nexps = {}
for b in bands:
ne = np.zeros(len(bricks), np.int16)
nexps[b] = ne
bricks.set('nexp_'+b, ne)
npix = {}
for b in bands:
n = np.zeros(len(bricks), np.int64)
npix[b] = n
bricks.set('npix_'+b, n)
for b in bands:
n = np.zeros(len(bricks), np.float32)
bricks.set('psfdepth_'+b, n)
args = enumerate(bricks)
mp = multiproc(8)
R = mp.map(one_brick, args)
for ibrick,res in enumerate(R):
if res is None:
continue
(npix, nexps, depths) = res
for band in bands:
bricks.get('npix_' + band)[ibrick] = npix[band]
bricks.get('nexp_' + band)[ibrick] = nexps[band]
bricks.get('psfdepth_' + band)[ibrick] = depths[band]
bricks.cut((bricks.nexp_g + bricks.nexp_r + bricks.nexp_z) > 0)
bricks.writeto('/global/cscratch1/sd/dstn/bricks-nexp.fits')
if __name__ == '__main__':
main()
| gpl-2.0 |
Windy-Ground/scikit-learn | sklearn/tests/test_multiclass.py | 136 | 23649 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote levels
        # because there are only 3 distinct class pairs, thus 3 distinct
        # binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
parthlathiya/Stock-Market-Price-Prediction | SVC/Preprocessing_of_data.py | 2 | 4530 | import numpy as np
import pandas as pd
from numpy import genfromtxt
Days=10
def minn(X):
min=X[0];
for i in range(1,len(X)):
if min>X[i]:
min=X[i]
return min
def maxx(X):
max=X[0];
for i in range(1,len(X)):
if max<X[i]:
max=X[i]
return max
X = genfromtxt('nik225.csv', delimiter=',',usecols=range(1,5),skip_header=True)
y = X[:,3]
SMAA=np.zeros((len(X)-(Days-1),1))
for i in range((Days-1),len(X)):
SMAA[i-(Days-1)]=np.average(X[i-(Days-1):i+1,3])
#print(SMAA) #1
WMA=np.zeros((len(X)-(Days-1),1))
for i in range((Days-1),len(X)):
sum1=0.0
for j in range(i-(Days-1),i+1):
sum1+=(Days-i+j)*X[j][3]
sum1/=(Days*(Days+1))/2
WMA[i-(Days-1)]=sum1
#print(WMA) #2
Momentum=np.zeros((len(X)-(Days-1),1))
for i in range((Days-1),len(X)):
Momentum[i-(Days-1)]=X[i][3]-X[i-(Days-1)][3]
#print(Momentum) #3
StochasticK=np.zeros((len(X)-(Days-1),1))
for i in range((Days-1),len(X)):
StochasticK[i-(Days-1)]=(X[i][3]-minn(X[i-(Days-1):i+1,2]))*100.0/(maxx(X[i-(Days-1):i+1,1])-minn(X[i-(Days-1):i+1,2]))
#print(StochasticK) #4
StochasticD=np.zeros((len(StochasticK)-(Days-1),1))
for i in range((Days-1),len(StochasticK)):
sum1=0.0
for j in range(i-(Days-1),i+1):
sum1+=StochasticK[j]
sum1/=Days
StochasticD[i-(Days-1)]=sum1
#print(StochasticD) #5
UP=np.zeros((len(X)-1,1))
DW=np.zeros((len(X)-1,1))
for i in range(1,len(X)):
temp=X[i][3]-X[i-1][3]
if temp<0:
UP[i-1]=0
DW[i-1]=-temp
else:
UP[i-1]=temp
DW[i-1]=0
#print(UP)
#print(DW)
RSI=np.zeros((len(UP)-(Days-1),1))
for i in range((Days-1),len(UP)):
RSI[i-(Days-1)]=100-(100/(1+(np.average(UP[i-(Days-1):i+1])/np.average(DW[i-(Days-1):i+1]))))
#print(RSI) #6
EMA12=np.zeros((len(X),1))
EMA12[0]=X[0][3]
for i in range(1,len(EMA12)):
EMA12[i]=EMA12[i-1]+( (2/(1+12)) * (X[i][3]-EMA12[i-1]) )
EMA26=np.zeros((len(X),1))
EMA26[0]=X[0][3]
for i in range(1,len(EMA26)):
EMA26[i]=EMA26[i-1]+( (2/(1+26)) * (X[i][3]-EMA26[i-1]) )
DIFF=EMA12-EMA26
MACD=np.zeros((len(X),1))
MACD[0]=DIFF[0]
for i in range(1,len(EMA26)):
MACD[i]=MACD[i-1]+( (2/(len(MACD)+1)) * (DIFF[i]-MACD[i-1]) )
#print(MACD) #7
LWR=np.zeros((len(X),1))
for i in range(len(X)):
LWR[i]=(X[i][1]-X[i][3])/(X[i][1]-X[i][2]) if (X[i][1]-X[i][2])!=0 else 0.00000000000001
#print(LWR) #8
ADO=np.zeros((len(X)-1,1))
for i in range(1,len(X)):
ADO[i-1]=(X[i][1]-X[i-1][3])/(X[i][1]-X[i][2]) if (X[i][1]-X[i][2])!=0 else 0.00000000000001
#print(ADO) #9
M=np.zeros((len(X),1))
for i in range(len(X)):
M[i]=(X[i][1]+X[i][2]+X[i][3])/3.0
#print(M)
SM=np.zeros((len(M)-(Days-1),1))
for i in range((Days-1),len(M)):
SM[i-(Days-1)]=np.average(M[i-(Days-1):i+1])
#print(SM)
D=np.zeros((len(M)-(Days-1),1))
for i in range((Days-1),len(M)):
D[i-(Days-1)]=np.average(np.abs(M[i-(Days-1):i+1]-SM[i-(Days-1)]))
#print(D)
CCI=np.zeros((len(SM),1))
for i in range(len(SM)):
	CCI[i]=(M[i+(Days-1)]-SM[i])/(0.015*D[i])  # index CCI directly; i-(Days-1) wrapped to negative indices
#print(CCI) #10
result = np.zeros((len(StochasticD),1))
SMAA = SMAA[Days-1:,:]
result = SMAA
WMA = WMA[Days-1:,:]
result = np.append(result,WMA,axis=1)
Momentum = Momentum[Days-1:,:]
result = np.append(result,Momentum,axis=1)
StochasticK = StochasticK[Days-1:,:]
result = np.append(result,StochasticK,axis=1)
result = np.append(result,StochasticD,axis=1)
RSI = RSI[Days-2:,:] #one extra cut because of Up and down
result = np.append(result,RSI,axis=1)
MACD = MACD[2*(Days-1):,:] #one extra cut because of Up and down
result = np.append(result,MACD,axis=1)
LWR = LWR[2*(Days-1):,:]
result = np.append(result,LWR,axis=1)
ADO = ADO[2*Days-3:,:]
result = np.append(result,ADO,axis=1)
CCI = CCI[Days-1:,:]
result = np.append(result,CCI,axis=1)  # append CCI (the original appended ADO a second time)
y = y.reshape((len(X),1))
y = y[2*(Days-1):,:]
result = np.append(result,y,axis=1)
new_data = np.zeros((len(result)-1,12))
for i in range (0,len(result)-1) :
for j in range(0,len(result[0])) :
if j == 10 :
if result[i][j] > result[i+1][j] :
new_data[i][j] = 1
new_data[i][j+1] = 0
else :
new_data[i][j] = 0
new_data[i][j+1] = 1
else :
if result[i][j] > result[i+1][j] :
new_data[i][j] = 0
else :
new_data[i][j] = 1
new_datadf = pd.DataFrame(new_data)
new_datadf.to_csv("train.csv",header=True)
| gpl-3.0 |
AndersNymarkChristensen/BlobDetectionForBloodFlow | blobFunct.py | 1 | 14704 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 12:53:28 2015
@author: Anders Nymark Christensen
based on Matlab code by Anders Nymark Christensen & William Sebastian Henning Benzon 2014
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
from skimage import morphology
from skimage.measure import regionprops
from scipy import misc
def BlobFunct(im, plotopt):
"""
The BlobFunct is the main function in the blobdetection feature created
at Rigshospitalet during Julie Bjerglund Andersen's project regarding
bloodflow estimation in piglets using red, green and blue microspheres.
In this script several parameters used for microsphere detection can be
tweaked and modified to various situations.
Some of the adjustable parameters:
minBlobSize - Minimum Blobsize
maxBlobSize - Maximum Blobsize
edgeSigma - Used in edge detections
shapeThreshold - Roundness measure
channelThreshold - Minimum RGB values
INPUT:
im = RGB image (uint)
    plotopt = [0,1]; if visual output is wanted during the script, set plotopt = 1,
    otherwise plotopt = 0.
OUTPUT:
amount - total amount of microspheres detected [Red Green Blue]
mask - Binary tissue mask
arealsize = size of tissue (pixels)
red - Coordinates for all red detected microspheres ([X Y])
green - Coordinates for all green detected microspheres ([X Y])
blue - Coordinates for all blue detected microspheres ([X Y])
SUBFUNCTIONS:
For this function to work, following functions are needed:
areal_est
RGBcheck
Image Processing Toolbox(By MathWorks)
NOTE:
    This function calls RGBcheck, which also affects the outcome very
    strongly. If adjusting parameters in this script does not change the
    outcome, please change the settings in RGBcheck instead.
"""
# Settings
nLoops = 1
minBlobSize = 35.0
maxBlobSize = 80.0
edgeSigma = 3.0 # Used in the combined edge and blobdetection
    shapeThreshold = 2.0  # maximum deviation from circular: abs(perimeter/minor axis - pi) < shapeThreshold
    channelThreshold = [0, 0, 0]  # previously [50, 90, 60]
im = np.array(im)
IM = np.copy(im)
IM = IM.astype(np.float64, copy=False)
mask, arealsize = areal_est(IM)
for index in range(0,3):
if index == 1:
maxBlobSize = 150
else:
maxBlobSize = 80
# Find edges
BW = edgeLoG(IM[:,:,index],[],edgeSigma)
# The blobs are closed contours. We find them by filling the holes and
# subtracting the original edges, leaving only closed contours in the
# image.
        BW2 = scipy.ndimage.morphology.binary_fill_holes(BW) ^ BW  # filled holes XOR edges -> closed contours (bool-safe)
# We remove areas smaller and larger than what we have specified
IMsmall = morphology.remove_small_objects(BW2, min_size=minBlobSize, connectivity=8)
IMbig = morphology.remove_small_objects(BW2, min_size=maxBlobSize, connectivity=8)
        IMblobs = IMsmall & ~IMbig  # keep components with size between minBlobSize and maxBlobSize
IMtemp, num = scipy.ndimage.label(IMblobs)
# Label the remaining areas, if below the channel threshold they are
# removed
        for nBlob in range(1, num + 1):
            # use the current channel's threshold (label 0 is the background;
            # the original channelThreshold[nLoops] ran past the list on the
            # blue channel)
            if np.max(IM[IMtemp == nBlob]) < channelThreshold[index]:
                IMtemp[IMtemp == nBlob] = 0
IMtemp2, num = scipy.ndimage.label(IMtemp)
# Find centroid for plotting
props = regionprops(IMtemp2) #'Centroid','Area','Perimeter','MinorAxisLength');
allProps = np.zeros( (num, 2) )
n = 0
for nProps in range(0,num):
allProps[n,:] = np.asarray(props[nProps].centroid)
n += 1
if index == 0:
newProps = np.zeros( (num, 2) )
# remove points that are not circular
n = 0
for nProps in range(0,num):
if abs(props[nProps].perimeter/props[nProps].minor_axis_length - np.pi) < shapeThreshold:
newProps[n,:] = np.asarray(props[nProps].centroid)
n += 1
newProps = newProps[~np.all(newProps == 0,axis = 1), :]
allProps = newProps
        xy1 = np.round(allProps).astype(int)  # integer pixel indices for the mask lookup and RGBcheck
deletionList = []
for i in range(0,np.size(xy1,0)):
if mask[xy1[i,0],xy1[i,1]]==0:
deletionList.append(i)
xy1 = np.delete(xy1,deletionList,0)
## Blob-check
if index == 0:
red = np.copy(xy1)
red = RGBcheck(im,red,'red')
elif index ==1:
green = np.copy(xy1)
green = RGBcheck(im,green,'green')
elif index ==2:
blue = np.copy(xy1)
blue = RGBcheck(im,blue,'blue')
nLoops = nLoops +1
amount = [len(red), len(green), len(blue)]
if plotopt==1:
plt.figure
plt.imshow(im)
plt.plot(red[:,1],red[:,0],'rx')
plt.plot(green[:,1],green[:,0],'gx')
plt.plot(blue[:,1],blue[:,0],'bx')
plt.show()
print('Plotting...')
return(amount,mask,arealsize,red,green,blue)
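# --- Illustrative usage sketch (added; not part of the original module). The
# file name and the use of scipy.misc.imread are assumptions -- any loader that
# returns an RGB numpy array will do.
def demo_blobfunct(image_path='slide.png'):
    im = misc.imread(image_path)   # RGB uint8 image, shape (H, W, 3)
    amount, mask, arealsize, red, green, blue = BlobFunct(im, plotopt=0)
    print('Detected microspheres [R, G, B]:', amount)
    print('Tissue area (pixels):', arealsize)
    return amount, red, green, blue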
def areal_est(im):
"""
Uses image to create binary image-mask of tissue corresponding to value 1
and non-tissue = value zero.
INPUT: im = RGB-image
OUTPUT: BW = Binary mask image
arealsize = amount of pixels in BW = 1
"""
from skimage.exposure import rescale_intensity
from skimage.filter import threshold_otsu
from skimage.morphology import remove_small_objects
from skimage.morphology import disk
from skimage.morphology import binary_closing
minSlideSize = 200000
imScaled = rescale_intensity(im[:,:,1], in_range=(np.mean(im[:,:,1])-9*np.std(im[:,:,1]), np.mean(im[:,:,1])+9*np.std(im[:,:,1])))
# Calculating optimal binary threshold level
level = threshold_otsu(imScaled)
# Initial binary mask
mask = imScaled > level
# removing smal segments
mask = remove_small_objects(mask, min_size=minSlideSize, connectivity=8)
se = disk(10)
mask = binary_closing(mask,se)
# Isolating largest binary piece of tissue
    L, num = scipy.ndimage.label(mask)
    props = regionprops(L)
    largest = 0
    largestLabel = 1
    for index in range(0, num):
        if props[index].area > largest:
            largest = props[index].area
            largestLabel = props[index].label
    # Isolating the primary label for the binary image (ndimage labels start at 1)
    BW = (L == largestLabel)
# Securing tissue-value = 1
if BW[0,0] == True:
BW = ~BW
    arealsize = largest
return(BW, arealsize)
def RGBcheck(im, blobs, color):
"""
RGBcheck
RGBcheck tests wheter each blob fulfill color-composition requirements in
RGB-components. Blobs must be rounded coordinates!
INPUT:
im: Must be RGB image
blobs: 2-dimensional coordinates with index not exceeding image size
and rounded to nearest whole-number
color: Must be a string, either "red", "green" or "blue".
OUTPUT:
blobs: Same vector as "blobs", but blobs which did not meet
requirements have been removed.
Red
    If the center blob-pixel has higher intensity in green or blue, or if the
    pixel intensity is below Rthresh1, the red blob is deleted. To ensure
    homogeneity in the blob, the standard deviation must not be above Rthresh2
    in a 3-by-3 matrix with the centroid as center pixel.
Green
    The blob is deleted if the center pixel has higher intensity in red, if the
    green intensity times Gthresh1 is lower than the blue intensity, or if the
    green intensity times Gthresh2 is higher than the red intensity.
    Furthermore, the blob is discarded if the mean green blob intensity in a
    3-by-3 matrix is less than Gthresh3.
Blue
    If the center pixel intensity is higher in the red or green channel, the blob
    is discarded. If the center pixel does not have at least Bthresh1 times the
    mean blue intensity of the image, the blob is discarded. If the sum of the red
    and green intensities is higher than the blue intensity in the center pixel,
    the blob is discarded. Finally, if a 3-by-3 matrix centered on the blob center
    does not have a minimum blue intensity of Bthresh2, the blob is discarded.
"""
# Threshold values:
Rthresh1 = 0;
Rthresh2 = 19;
Gthresh1 = 1.15; # 1.25
Gthresh2 = 0.55;
Gthresh3 = 80;
Bthresh1 = 1;
Bthresh2 = 110; # 125
a = np.size(blobs,0)
deletionarray = []
# RED
# Remove if green is more intense
if color == 'red':
for i in range(0,a):
RGBs = im[blobs[i,0],blobs[i,1],:]
if RGBs[1] > RGBs[0]:
deletionarray.append(i)
elif RGBs[2] > RGBs[0]:
deletionarray.append(i)
elif RGBs[0] < Rthresh1:
deletionarray.append(i)
blobs = np.delete(blobs,deletionarray,0)
deletionarray = []
a = np.size(blobs,0)
for i in range(0,a):
if blobs.any:
areal = sections_area(blobs[i,:],im,1)
if np.mean(areal[:,:,0]) < Rthresh1:
deletionarray.append(i)
blobs = np.delete(blobs,deletionarray,0)
deletionarray = []
a = np.size(blobs,0)
for i in range(0,a):
if blobs.any:
areal = sections_area(blobs[i,:],im,1)
RedVect = areal[:,:,0]
if np.std( RedVect.astype(np.float64, copy=False) ) > Rthresh2:
deletionarray.append(i)
blobs = np.delete(blobs,deletionarray,0)
# GREEN
# Remove if red is more intense OR if blue is more than 1.25x intense
# Remove green if average green value in blob matrix (9x9) is less than 100
elif color == 'green':
for i in range(0,a):
RGBs = im[blobs[i,0],blobs[i,1],:]
if RGBs[0] > RGBs[1] or RGBs[2] > Gthresh1*RGBs[1] or RGBs[0] < Gthresh2*RGBs[1]:
deletionarray.append(i)
a = np.size(blobs,0)
for i in range(0,a):
if blobs.any:
areal = sections_area(blobs[i,:],im,1)
if np.mean(areal[:,:,1]) < Gthresh3:
deletionarray.append(i)
blobs = np.delete(blobs,deletionarray,0)
# BLUE
# If red is more intense OR green is more intense
# If blue is less than 2 times the average blue
# If blue is less than the sum of green and red
elif color == 'blue':
for i in range(0,a):
deleted = 0;
RGBs = im[blobs[i,0],blobs[i,1],:]
if RGBs[1] > RGBs[2] or RGBs[0] > RGBs[2]:
deletionarray.append(i)
deleted = 1
if deleted != 1:
if RGBs[2] < Bthresh1*np.mean(im[:,:,2]):
deletionarray.append(i)
deleted = 1
if deleted != 1:
                # per the docstring: discard if blue is below the sum of red and green
                if RGBs[2] < int(RGBs[0]) + int(RGBs[1]):
deletionarray.append(i)
deleted = 1
a = np.size(blobs,0)
for i in range(0,a):
if blobs.any:
areal = sections_area(blobs[i,:],im,1)
if np.mean(areal[:,:,2]) < Bthresh2:
deletionarray.append(i)
blobs = np.delete(blobs,deletionarray,0)
return(blobs)
def sections_area(blobs,im,sidelength):
"""
sections
Used for extracting features for a specific area given a size,
coordinates and image. output-features are mean values and standard
deviation.
INPUT:
    blobs: must be an x times 2 matrix with coordinates
    image: must be an A x B x 3, uint8 image
    sidelength: advised to be somewhere between 1 and 20
    The function takes image-border coordinates into account and crops the
    evaluated sections to fit the image dimensions without raising an error.
By William Sebastian Henning Benzon 2014
"""
rB = np.round(blobs)
g = np.copy(im)
# a = np.size(rB,0)
x = np.size(g,0)
y = np.size(g,1)
# for i in range(0,1):
if rB[1]-sidelength < 1:
if rB[0]-sidelength < 1:
section = g[1:rB[1]+sidelength,1:rB[0]+sidelength,:]
elif rB[0]+sidelength > y:
section = g[1:rB[1]+sidelength,rB[0]-sidelength:y,:]
else:
section = g[1:rB[1]+sidelength,rB[0]-sidelength:rB[0]+sidelength,:]
elif rB[1]+sidelength > x:
if rB[0]-sidelength < 1:
section = g[rB[1]-sidelength:x,1:rB[0]+sidelength,:]
elif rB[0]+sidelength > y:
section = g[rB[1]-sidelength:x,rB[0]-sidelength:y,:]
else:
section = g[rB[1]-sidelength:x,rB[0]-sidelength:rB[0]+sidelength,:]
elif rB[0]-sidelength < 1:
section = g[rB[1]-sidelength:rB[1]+sidelength,1:rB[0]+sidelength,:]
elif rB[0]+sidelength > y:
section = g[rB[1]-sidelength:rB[1]+sidelength,rB[0]-sidelength:y,:]
else:
# Not border coordinates
section = g[rB[1]-sidelength:rB[1]+sidelength,rB[0]-sidelength:rB[0]+sidelength,:]
return(section)
def edgeLoG(im, thres, edgeSigma):
"""
    Detect edges using Laplacian of Gaussian
INPUT:
im: single channel image, type=float32/64
thres: threshold for edges
edgeSigma: Kernel used in Laplacian of Gaussian
OUTPUT:
fc: Binary image with edges
Anders Nymark Christensen, 2015
"""
import scipy.ndimage as nd
if not edgeSigma:
edgeSigma = 2.0
LoG = nd.gaussian_laplace(im, edgeSigma)
if not thres:
thres = np.absolute(LoG).mean() * 0.75
# Find zero-crossing
zc1 = (np.diff(np.sign(LoG),1,0) == -2) & (np.abs(np.diff(LoG,1,0)) > thres )
zc2 = (np.diff(np.sign(LoG),1,0) == 2) & (np.abs(np.diff(LoG,1,0)) > thres )
zc12 = np.pad(np.logical_or(zc1,zc2),((0,1),(0,0)),'constant', constant_values=((False,False),(False,False)))
zc3 = (np.diff(np.sign(LoG),1,1) == -2) & (np.abs(np.diff(LoG,1,1)) > thres )
zc4 = (np.diff(np.sign(LoG),1,1) == 2) & (np.abs(np.diff(LoG,1,1)) > thres )
zc34 = np.pad(np.logical_or(zc3,zc4),((0,0),(0,1)),'constant', constant_values=((False,False),(False,False)))
zc = np.logical_or(zc12,zc34)
zc = np.logical_or(zc, LoG == 0)
    return zc
| bsd-3-clause |
MohammedWasim/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
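# --- Hedged usage sketch (added for illustration; not part of scikit-learn).
# It shows img_to_graph connecting neighbouring pixels of a small toy image,
# with edge weights given by the local gradient. The 3x3 image is an assumption.
def _demo_img_to_graph():
    img = np.arange(9, dtype=float).reshape(3, 3)
    graph = img_to_graph(img)          # sparse COO adjacency matrix, shape (9, 9)
    print(graph.shape, graph.nnz)
    return graph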
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
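# --- Hedged usage sketch (added for illustration; not part of scikit-learn).
# A 3x3 grid yields a 9x9 adjacency matrix whose off-diagonal entries mark
# 4-connected neighbours; the grid size is an assumption.
def _demo_grid_to_graph():
    adjacency = grid_to_graph(3, 3)
    print(adjacency.shape)             # (9, 9)
    return adjacency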
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
The image with
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
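# --- Hedged usage sketch (added for illustration; not part of scikit-learn).
# Extracting overlapping length-3 windows from a 1-D array with a step of 2;
# the array size, window length and step are assumptions.
def _demo_extract_patches_1d():
    arr = np.arange(10)
    windows = extract_patches(arr, patch_shape=3, extraction_step=2)
    print(windows.shape)               # (4, 3): windows start at 0, 2, 4, 6
    return windows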
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
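# --- Hedged usage sketch (added for illustration; not part of scikit-learn).
# Extracting all 2x2 patches of a small image and reconstructing it should
# recover the original up to floating-point averaging; the 4x4 image is an
# assumption.
def _demo_patch_roundtrip():
    one_image = np.arange(16, dtype=float).reshape(4, 4)
    patches = extract_patches_2d(one_image, (2, 2))
    rebuilt = reconstruct_from_patches_2d(patches, (4, 4))
    print(np.allclose(one_image, rebuilt))   # expected: True
    return rebuilt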
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
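# --- Hedged usage sketch (added for illustration; not part of scikit-learn).
# Running PatchExtractor on a small stack of random grayscale "images"; the
# image shapes and max_patches value are assumptions.
def _demo_patch_extractor():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 16, 16)                        # 5 grayscale 16x16 images
    extractor = PatchExtractor(patch_size=(4, 4), max_patches=10,
                               random_state=0)
    patches = extractor.transform(X)               # shape (5 * 10, 4, 4)
    print(patches.shape)
    return patches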
| bsd-3-clause |
ababino/efe | segunda/info2.py | 1 | 5030 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Resuleve los ejercicio 4 y 9 de la guia 3 y 10 de la guía 4."""
from __future__ import unicode_literals
import random
from matplotlib import pyplot as plt
import numpy as np
import argparse
from math import atan2
import seaborn as sns
def exp_random_variable(l):
"""
Exponential random variable.
"""
x = random.random()
y = - np.log(1-x)/l
return y
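# --- Illustrative check (added; not part of the original script): the inverse
# transform above should produce samples whose mean is close to 1/l. The
# sample size and rate are assumptions.
def check_exp_random_variable(l=0.25, n=10000):
    samples = [exp_random_variable(l) for _ in range(n)]
    print('sample mean:', np.mean(samples), 'expected:', 1.0 / l)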
def my_hist(data, bins, err_type='poisson', **kwargs):
"""Histogram with poissonian error bars."""
y, bin_edges = np.histogram(data, bins=bins)
width = bin_edges[1:] - bin_edges[0:-1]
normalization = width[0] * sum(y)
if err_type == 'poisson':
yerr = np.sqrt(y) / normalization
elif err_type == 'binomial':
yerr = np.sqrt(abs(y * ( 1 - y / normalization))) / normalization
y = y.astype(np.float) / normalization
plt.bar(bin_edges[:-1], y, yerr=yerr, width=width, ecolor='r', **kwargs)
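# --- Illustrative example (added; not part of the original script): drawing a
# normalized histogram of standard normal samples with the error bars defined
# above. The bin edges and sample size are assumptions.
def demo_my_hist(n=1000):
    data = np.random.randn(n)
    my_hist(data, bins=np.linspace(-4, 4, 21), label='normal samples')
    plt.legend()
    plt.show()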
def cauchy(x):
y = 1.0 / (1.0 + x**2)
y = y / np.pi
return y
def normal(x, mu, sigma):
a = 1 / np.sqrt(2 * np.pi * sigma**2)
y = a * np.exp(-(x - mu)**2 / (2 * sigma**2))
return y
def ej4a(argshow):
x = np.linspace(-6, 6, 1000)
y_cauchy = cauchy(x) / cauchy(0)
y_normal = normal(x, 0, 0.75) / normal(0, 0, 0.75)
plt.plot(x, y_cauchy, 'b', label="Cauchy")
plt.plot(x, y_normal, 'r', label="Normal (0, 0.75)")
plt.legend()
plt.grid(True)
plt.savefig('ej4a.jpg')
if argshow:
plt.show()
else:
plt.close()
def ej4b(argshow):
x = np.linspace(-10, 10, 1000)
y_cauchy = cauchy(x) / cauchy(0)
y_normal = 0.5 * normal(x, 0, 0.75) + 0.5 * normal(x, 0, 3)
y_normal = y_normal / (0.5 * normal(0, 0, 0.75) + 0.5 * normal(0, 0, 3))
plt.plot(x, y_cauchy, 'b', label="Cauchy")
plt.plot(x, y_normal, 'r', label="0.5 * Normal (0, 0.75) + 0.5 * Normal (0, 3)")
plt.xticks(range(-10, 10))
plt.legend()
plt.grid(True)
plt.savefig('ej4b.jpg')
if argshow:
plt.show()
else:
plt.close()
def ej9(argshow):
n = 500
l = 0.25
exp_rand = [exp_random_variable(l) for x in range(n)]
x = np.arange(0.5, 30.5)
y = l*np.exp(-l*x)
plt.figure(1)
my_hist(exp_rand, bins=np.arange(0, 31), label='Simulación',
err_type='binomial')
plt.plot(x, y, 'k--*', label='Distribución Teórica')
plt.ylabel('Frecuencia')
plt.legend(loc='upper left')
plt.savefig('ej9b_1.jpg')
if argshow:
plt.show()
else:
plt.close()
exp_rand = [exp_random_variable(l) for x in range(n)]
x = np.arange(0.5, 30.5)
y = l*np.exp(-l*x)
bins = np.concatenate([np.arange(0, 15), np.arange(15, 31, 2)])
plt.figure(2)
my_hist(exp_rand, bins=bins, label='Simulación',
err_type='binomial')
plt.plot(x, y, 'k--*', label='Distribución Teórica')
plt.ylabel('Frecuencia')
plt.legend(loc='upper left')
plt.savefig('ej9b_2.jpg')
if argshow:
plt.show()
else:
plt.close()
def densidad(r):
if r<0 or r>1:
rho = 0
else:
rho = 1.0 / (1 + r**2)
rho = rho / (np.pi * (4 - np.pi))
return rho
def random_densidad(n):
vecs = []
while True:
(x, y, z) = 2 * np.random.rand(3) - 1
u = np.random.rand(1) / (np.pi * (4 - np.pi))
r = np.sqrt(x**2 + y**2 + z**2)
if u <= densidad(r):
vecs.append((x, y, z))
if len(vecs) == n:
break
return vecs
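# --- Illustrative check (added; not part of the original script): because the
# density vanishes for r > 1, every accepted point should lie inside the unit
# ball. The sample size is an assumption.
def check_random_densidad(n=200):
    vecs = random_densidad(n)
    radii = [np.sqrt(x**2 + y**2 + z**2) for (x, y, z) in vecs]
    print('max radius:', max(radii), '(should be <= 1)')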
def r_marginal(r):
return 4* r**2 / ((4 - np.pi)*(1 + r**2))
def theta_marginal(theta):
return 0.5 * np.sin(theta)
def phi_marginal(phi):
return [1.0 / (2 * np.pi) for i in range(len(phi))]
def g4ej10(argshow):
v = random_densidad(1000)
r = [np.sqrt(x**2 + y**2 + z**2) for (x, y, z) in v]
phi = [atan2(y, x) + np.pi for (x, y, z) in v]
theta = [atan2(z, np.sqrt(x**2 + y**2)) + np.pi/2 for (x, y, z) in v]
r_x = np.linspace(0, 1, 1000)
r_y = r_marginal(r_x)
phi_x = np.linspace(0, 2*np.pi, 1000)
phi_y = phi_marginal(phi_x)
theta_x = np.linspace(0, np.pi, 1000)
theta_y = theta_marginal(theta_x)
plt.figure(1)
my_hist(r, bins=np.linspace(0, 1, 20), label='Simulación',
err_type='binomial')
plt.plot(r_x, r_y, '--k', label='Distribución Teórica' )
plt.xlabel('r')
plt.legend()
plt.savefig('g4ej10_r.jpg')
plt.figure(2)
my_hist(phi, bins=np.linspace(0, 2*np.pi, 20), label='Simulación',
err_type='binomial')
plt.plot(phi_x, phi_y, '--k', label='Distribución Teórica')
plt.xlabel('$\phi$')
plt.legend()
plt.savefig('g4ej10_phi.jpg')
plt.figure(3)
my_hist(theta, bins=np.linspace(0, np.pi, 20), label='Simulación',
err_type='binomial')
plt.plot(theta_x, theta_y, '--k', label='Distribución Teórica')
plt.xlabel(r'$\theta$')
plt.legend()
plt.savefig('g4ej10_theta.jpg')
if argshow:
plt.show()
else:
plt.close()
return
def main(args):
print('-----Ejercicio 4-------')
print('-----b)-------')
ej4a(args.show)
ej4b(args.show)
print('-----Ejercicio 9-------')
print('-----b)-------')
ej9(args.show)
g4ej10(args.show)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="""Resuelve los ejercicios 4 y 9
    de la guía 3 y 10 de la guía 4.""")
parser.add_argument('-show', action='store_true', help='muestra los gráficos')
args = parser.parse_args()
main(args)
| mit |
trungnt13/scikit-learn | examples/applications/face_recognition.py | 15 | 5394 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Gerhard_Schroeder 0.91 0.75 0.82 28
Donald_Rumsfeld 0.84 0.82 0.83 33
Tony_Blair 0.65 0.82 0.73 34
Colin_Powell 0.78 0.88 0.83 58
George_W_Bush 0.93 0.86 0.90 129
avg / total 0.86 0.84 0.85 282
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split the data into a training set and a test set (random 75/25 split)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
Ziqi-Li/bknqgis | bokeh/bokeh/core/compat/mplexporter/renderers/vega_renderer.py | 10 | 5272 | import warnings
import json
import random
from .base import Renderer
from ..exporter import Exporter
class VegaRenderer(Renderer):
def open_figure(self, fig, props):
self.props = props
self.figwidth = int(props['figwidth'] * props['dpi'])
self.figheight = int(props['figheight'] * props['dpi'])
self.data = []
self.scales = []
self.axes = []
self.marks = []
def open_axes(self, ax, props):
if len(self.axes) > 0:
warnings.warn("multiple axes not yet supported")
self.axes = [dict(type="x", scale="x", ticks=10),
dict(type="y", scale="y", ticks=10)]
self.scales = [dict(name="x",
domain=props['xlim'],
type="linear",
range="width",
),
dict(name="y",
domain=props['ylim'],
type="linear",
range="height",
),]
def draw_line(self, data, coordinates, style, label, mplobj=None):
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({'name': dataname,
'values': [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append({'type': 'line',
'from': {'data': dataname},
'properties': {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"stroke": {"value": style['color']},
"strokeOpacity": {"value": style['alpha']},
"strokeWidth": {"value": style['linewidth']},
}
}
})
def draw_markers(self, data, coordinates, style, label, mplobj=None):
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({'name': dataname,
'values': [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append({'type': 'symbol',
'from': {'data': dataname},
'properties': {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"fill": {"value": style['facecolor']},
"fillOpacity": {"value": style['alpha']},
"stroke": {"value": style['edgecolor']},
"strokeOpacity": {"value": style['alpha']},
"strokeWidth": {"value": style['edgewidth']},
}
}
})
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
if text_type == 'xlabel':
self.axes[0]['title'] = text
elif text_type == 'ylabel':
self.axes[1]['title'] = text
class VegaHTML(object):
def __init__(self, renderer):
self.specification = dict(width=renderer.figwidth,
height=renderer.figheight,
data=renderer.data,
scales=renderer.scales,
axes=renderer.axes,
marks=renderer.marks)
def html(self):
"""Build the HTML representation for IPython."""
id = random.randint(0, 2 ** 16)
html = '<div id="vis%d"></div>' % id
html += '<script>\n'
html += VEGA_TEMPLATE % (json.dumps(self.specification), id)
html += '</script>\n'
return html
def _repr_html_(self):
return self.html()
def fig_to_vega(fig, notebook=False):
"""Convert a matplotlib figure to vega dictionary
if notebook=True, then return an object which will display in a notebook
otherwise, return an HTML string.
"""
renderer = VegaRenderer()
Exporter(renderer).run(fig)
vega_html = VegaHTML(renderer)
if notebook:
return vega_html
else:
return vega_html.html()
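# Hypothetical usage sketch (added note, not part of the original module):
# assuming a matplotlib figure is available, converting it to Vega might look
# roughly like the commented lines below; the variable names are made up.
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot([0, 1, 2], [0, 1, 4])
#     html_snippet = fig_to_vega(fig, notebook=False)  # plain HTML string
#     nb_object = fig_to_vega(fig, notebook=True)      # renders in IPython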
VEGA_TEMPLATE = """
( function() {
var _do_plot = function() {
if ( (typeof vg == 'undefined') && (typeof IPython != 'undefined')) {
$([IPython.events]).on("vega_loaded.vincent", _do_plot);
return;
}
vg.parse.spec(%s, function(chart) {
chart({el: "#vis%d"}).update();
});
};
_do_plot();
})();
"""
| gpl-2.0 |
alexrudy/AstroObject | tests/test_anaspec.py | 1 | 17993 | # -*- coding: utf-8 -*-
#
# Test_anaspec.py
# ObjectModel
#
# Created by Alexander Rudy on 2011-10-31.
# Copyright 2011 Alexander Rudy. All rights reserved.
# Version 0.6.1
#
import numpy as np
import pyfits as pf
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.axes
import os,copy
import nose.tools as nt
from nose.plugins.skip import Skip,SkipTest
from tests.apitests import *
import AstroObject.anaspec
class equality_AnalyticFrame(equality_Base):
"""Equality methods for FITSFrames"""
def data_eq_data(self,data,other):
"""Return whether these two are the same data"""
return np.allclose(data,other)
def frame_eq_frame(self,frame,other):
"""Return whether these two FITS frames are the same"""
return frame.label == other.label
def data_eq_frame(self,data,frame):
"""Return whether this data is the same as the data in this frame."""
return False
class equality_InterpolatedSpectraFrame(equality_AnalyticFrame):
"""Equality methods for FITSFrames"""
def frame_eq_frame(self,frame,other):
"""Return whether these two FITS frames are the same"""
return np.allclose(frame.data,other.data)
def data_eq_frame(self,data,frame):
"""Return whether this data is the same as the data in this frame."""
return np.allclose(frame.data,data)
class API_anaspec(equality_AnalyticFrame,API_Base_Frame):
"""Set up and basic tests for analytic spectra"""
def test_init_with_wavelengths(self):
"""__init__() works with wavelengths"""
SFrame = self.frame(wavelengths=self.WAVELENGTHS)
        assert not (np.abs(self.WAVELENGTHS - SFrame.requested_wavelengths) > 1e-6).any()
def test_call_with_arbitrary_arguments(self):
"""__call__() accepts arbitrary keyword arguments"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS,other=1,arbitrary="str",arguments="blah")
@nt.raises(ValueError)
def test_call(self):
"""__call__() fails"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame()
def test_call_with_kwargs(self):
"""__call__(**kwargs) yields data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS)
def test_init_empty(self):
"""__init__() abstract frame works without data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
def test_add_objects(self):
"""__add__() Objects respont to + operator"""
SFrame1 = self.frame()
SFrame2 = self.frame()
SFrame3 = SFrame1 + SFrame2
assert isinstance(SFrame3,AstroObject.anaspec.CompositeSpectra)
def test_sub_objects(self):
"""__sub__() Objects respont to - operator"""
SFrame1 = self.frame()
SFrame2 = self.frame()
SFrame3 = SFrame1 - SFrame2
assert isinstance(SFrame3,AstroObject.anaspec.CompositeSpectra)
def test_mul_objects(self):
"""__mul__() Objects respont to * operator"""
SFrame1 = self.frame()
SFrame2 = self.frame()
SFrame3 = SFrame1 * SFrame2
assert isinstance(SFrame3,AstroObject.anaspec.CompositeSpectra)
def test_add_other(self):
"""__add__() Handles adding of other simple classes"""
SFrame1 = self.frame()
SFrame2 = 10.0
SFrame3 = SFrame1 + SFrame2
assert isinstance(SFrame3,AstroObject.anaspec.CompositeSpectra)
class API_InterpolatedSpectrumBase(equality_InterpolatedSpectraFrame,API_anaspec):
"""API_InterpolatedSpectrumBase"""
def save_or_compare(self,data,filename,skip=True):
"""Save or compare data"""
fname = filename % self.__class__.__name__
try:
old_data = np.load(fname)
except IOError:
np.save(fname,data)
if skip:
raise SkipTest
else:
return True
else:
passed = self.data_eq_data(data,old_data)
if not passed:
print npArrayInfo(data,"New Calc")
print npArrayInfo(old_data,"Old Data")
return passed
def test_call(self):
"""__call__() yields valid data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
WL = self.WAVELENGHTS_LOWR
data = AFrame(wavelengths=WL[:-1],resolution=(WL[:-1]/np.diff(WL))/4,method='resample')
assert self.save_or_compare(data,"tests/data/%s-call.npy")
def test_call_with_kwargs(self):
"""__call__() accepts arbitrary keyword arguments"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
WL = self.WAVELENGHTS_LOWR
data = AFrame(wavelengths=WL[:-1],resolution=(WL[:-1]/np.diff(WL))/4,other=1,arbitrary="str",arguments="blah",method='resample')
assert self.save_or_compare(data,"tests/data/%s-call.npy",skip=False)
def test_call_resample(self):
"""__call__(method='resample') yields valid data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
WL = self.WAVELENGHTS_LOWR
data = AFrame(wavelengths=WL[:-1],resolution=(WL[:-1]/np.diff(WL))/4,method='resample')
assert self.save_or_compare(data,"tests/data/%s-resample.npy")
def test_call_resample_with_arbitrary_arguments(self):
"""__call__(method='resample') accepts arbitrary keyword arguments"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
WL = self.WAVELENGHTS_LOWR
data = AFrame(wavelengths=WL[:-1],resolution=(WL[:-1]/np.diff(WL))/4,other=1,arbitrary="str",arguments="blah",method='resample')
assert self.save_or_compare(data,"tests/data/%s-resample2.npy",skip=False)
def test_call_integrate_quad(self):
"""__call__(method='integrate_quad') yields valid data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS[:-1],resolution=(self.WAVELENGTHS[:-1]/np.diff(self.WAVELENGTHS)),method="integrate_quad")
assert self.save_or_compare(data,"tests/data/%s-integrateQ.npy")
def test_call_integrate_quad_with_arbitrary_arguments(self):
"""__call__(method='integrate_quad') accepts arbitrary keyword arguments"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS[:-1],resolution=np.diff(self.WAVELENGTHS),other=1,arbitrary="str",arguments="blah",method="integrate_quad")
assert self.save_or_compare(data,"tests/data/%s-integrateQ2.npy",skip=False)
def test_call_integrate_hist_with_arbitrary_arguments(self):
"""__call__(method='integrate_hist') accepts arbitrary keyword arguments"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS[:-1],resolution=np.diff(self.WAVELENGTHS),other=1,arbitrary="str",arguments="blah",method="integrate_hist")
assert self.save_or_compare(data,"tests/data/%s-integrateH2.npy",skip=False)
def test_call_integrate_hist(self):
"""__call__(method='integrate_hist') yields valid data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS[:-1],resolution=np.diff(self.WAVELENGTHS),method="integrate_hist")
assert self.save_or_compare(data,"tests/data/%s-integrateH.npy")
def test_call_interpolate_with_arbitrary_arguments(self):
"""__call__(method='interpolate') accepts arbitrary keyword arguments"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS[:-1],resolution=np.diff(self.WAVELENGTHS),other=1,arbitrary="str",arguments="blah",method="interpolate")
assert self.save_or_compare(data,"tests/data/%s-interpolate2.npy",skip=False)
def test_call_interpolate(self):
"""__call__(method='interpolate') yields valid data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS[:-1],resolution=np.diff(self.WAVELENGTHS),method="interpolate")
assert self.save_or_compare(data,"tests/data/%s-interpolate.npy")
def test_call_polyfit_with_arbitrary_arguments(self):
"""__call__(method='polyfit') accepts arbitrary keyword arguments"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS[:-1],resolution=np.diff(self.WAVELENGTHS),other=1,arbitrary="str",arguments="blah",method="polyfit")
assert self.save_or_compare(data,"tests/data/%s-polyfit2.npy",skip=False)
def test_call_polyfit(self):
"""__call__(method='polyfit') yields valid data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS[:-1],resolution=np.diff(self.WAVELENGTHS),method="polyfit")
assert self.save_or_compare(data,"tests/data/%s-polyfit.npy")
def test_call_resolve_with_arbitrary_arguments(self):
"""__call__(method='resolve') accepts arbitrary keyword arguments"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS[:-1],resolution=np.diff(self.WAVELENGTHS),other=1,arbitrary="str",arguments="blah",method="resolve")
assert self.save_or_compare(data,"tests/data/%s-resolve2.npy",skip=False)
def test_call_resolve(self):
"""__call__(method='resolve') yields valid data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS[:-1],resolution=np.diff(self.WAVELENGTHS),method="resolve")
assert self.save_or_compare(data,"tests/data/%s-resolve.npy")
def test_call_resolve_and_integrate_arguments(self):
"""__call__(method='resolve_and_integrate') accepts arbitrary keyword arguments"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS[:-1],resolution=np.diff(self.WAVELENGTHS),other=1,arbitrary="str",arguments="blah",method="resolve_and_integrate")
assert self.save_or_compare(data,"tests/data/%s-resolve_and_integrate2.npy",skip=False)
def test_call_resolve_and_integrate(self):
"""__call__(method='resolve_and_integrate') yields valid data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
data = AFrame(wavelengths=self.WAVELENGTHS[:-1],resolution=np.diff(self.WAVELENGTHS),method="resolve_and_integrate")
assert self.save_or_compare(data,"tests/data/%s-resolve_and_integrate.npy")
class test_InterpolatedSpectrum(API_InterpolatedSpectrumBase,API_General_Frame):
"""AnalyticSpecra.InterpolatedSpectrum"""
def setup(self):
"""Sets up the test with some basic image data"""
self.WAVELENGTHS = ((np.arange(98)+1)/2.0 + 1.0) * 1e-7
self.WAVELENGHTS_LOWR = ((np.arange(23)+1)*2.0 + 1.0) * 1e-7
self.VALID = np.array([(np.arange(50) + 1.0) * 1e-7,np.sin(np.arange(50))+2.0])
self.FRAME = AstroObject.anaspec.InterpolatedSpectrum
self.INVALID = 20
self.FRAMESTR = "<'InterpolatedSpectrum' labeled 'Valid'>"
self.HDUTYPE = pf.ImageHDU
self.SHOWTYPE = mpl.axes.Subplot
self.FLABEL = "Valid"
self.RKWARGS = {'wavelengths':self.WAVELENGTHS}
super(test_InterpolatedSpectrum,self).setup()
class test_UnitarySpectrum(API_AnalyticMixin,API_InterpolatedSpectrumBase):
"""anaspec.UnitarySpectrum"""
def setup(self):
"""Sets up the test with some basic image data"""
self.WAVELENGTHS = ((np.arange(98)+1)/2.0 + 1.0) * 1e-7
self.WAVELENGHTS_LOWR = ((np.arange(23)+1)*2.0 + 1.0) * 1e-7
self.VALID = AstroObject.anaspec.InterpolatedSpectrum(np.array([(np.arange(50) + 1.0) * 1e-7,np.sin(np.arange(50))+2.0]),"Valid")
self.FRAME = AstroObject.anaspec.UnitarySpectrum
self.INVALID = 20
self.FRAMESTR = "<'UnitarySpectrum' labeled '[Valid]'>"
self.HDUTYPE = pf.ImageHDU
self.SHOWTYPE = mpl.axes.Subplot
self.RKWARGS = { 'wavelengths' : self.WAVELENGTHS }
self.FLABEL = "[Valid]"
super(test_UnitarySpectrum,self).setup()
def frame(self,**kwargs):
"""Return a valid frame"""
return self.FRAME(self.VALID,**kwargs)
def test_init_nolabel(self):
"""__init__() succeeds with valid data but no label"""
AFrame = self.FRAME(self.VALID,label=None)
assert AFrame.label == self.FLABEL
@nt.raises(AttributeError)
def test_init_empty(self):
"""__init__() works without data"""
self.FRAME(data=None,label="Label")
def test_call(self):
"""__call__() yields valid data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
WL = self.WAVELENGHTS_LOWR
data = AFrame(wavelengths=WL[:-1],resolution=(WL[:-1]/np.diff(WL))/4)
assert self.save_or_compare(data,"tests/data/%s-call.npy")
def test_call_with_kwargs(self):
"""__call__() accepts arbitrary keyword arguments"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
WL = self.WAVELENGHTS_LOWR
data = AFrame(wavelengths=WL[:-1],resolution=(WL[:-1]/np.diff(WL))/4,other=1,arbitrary="str",arguments="blah")
assert self.save_or_compare(data,"tests/data/%s-call.npy",skip=False)
def test_call_resample(self):
"""__call__(method='resample') yields valid data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
WL = self.WAVELENGHTS_LOWR
data = AFrame(wavelengths=WL[:-1],resolution=(WL[:-1]/np.diff(WL))/4,method='resample',upsample=True)
assert self.save_or_compare(data,"tests/data/%s-resample.npy")
def test_call_resample_with_arbitrary_arguments(self):
"""__call__(method='resample') accepts arbitrary keyword arguments"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
WL = self.WAVELENGHTS_LOWR
data = AFrame(wavelengths=WL[:-1],resolution=(WL[:-1]/np.diff(WL))/4,other=1,arbitrary="str",arguments="blah",method='resample',upsample=True)
assert self.save_or_compare(data,"tests/data/%s-resample2.npy",skip=False)
class test_Resolver(API_InterpolatedSpectrumBase,API_General_Frame):
"""anaspec.Resolver"""
def setup(self):
"""Sets up the test with some basic image data"""
self.WAVELENGTHS = ((np.arange(98)+1)/2.0 + 1.0) * 1e-7
self.WAVELENGHTS_LOWR = ((np.arange(23)+1)*2.0 + 1.0) * 1e-7
self.VALIDF = AstroObject.anaspec.InterpolatedSpectrum(np.array([(np.arange(50) + 1.0) * 1e-7,np.sin(np.arange(50))+2.0]),"Valid")
self.VALID = self.VALIDF.data
self.FRAME = AstroObject.anaspec.Resolver
self.INVALID = 20
self.FRAMESTR = "<'Resolver' labeled 'R[Valid]'>"
self.HDUTYPE = pf.ImageHDU
self.SHOWTYPE = mpl.axes.Subplot
self.RKWARGS = { 'wavelengths' : self.VALID[0] }
self.FLABEL = "R[Valid]"
super(test_Resolver,self).setup()
def frame(self,**kwargs):
"""Return a valid frame"""
nkwargs = {}
nkwargs.update(self.RKWARGS)
nkwargs.update(kwargs)
return self.FRAME(self.VALIDF,**nkwargs)
def test_init_nolabel(self):
"""__init__() succeeds with valid data but no label"""
AFrame = self.FRAME(self.VALIDF,label=None,**self.RKWARGS)
assert AFrame.label == self.FLABEL
@nt.raises(AttributeError)
def test_init_empty(self):
"""__init__() works without data"""
self.FRAME(data=None,label="Label",**self.RKWARGS)
def test_call(self):
"""__call__() yields valid data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
WL = self.WAVELENGHTS_LOWR
data = AFrame(wavelengths=WL[:-1],resolution=(WL[:-1]/np.diff(WL))/4)
assert self.save_or_compare(data,"tests/data/%s-call.npy")
def test_call_with_kwargs(self):
"""__call__() accepts arbitrary keyword arguments"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
WL = self.WAVELENGHTS_LOWR
data = AFrame(wavelengths=WL[:-1],resolution=(WL[:-1]/np.diff(WL))/4,other=1,arbitrary="str",arguments="blah")
assert self.save_or_compare(data,"tests/data/%s-call.npy",skip=False)
def test_call_resample(self):
"""__call__(method='resample') yields valid data"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
WL = self.WAVELENGHTS_LOWR
data = AFrame(wavelengths=WL[:-1],resolution=(WL[:-1]/np.diff(WL))/4,method='resample',upsample=True)
assert self.save_or_compare(data,"tests/data/%s-resample.npy")
def test_call_resample_with_arbitrary_arguments(self):
"""__call__(method='resample') accepts arbitrary keyword arguments"""
AFrame = self.frame()
assert AFrame.label == self.FLABEL
WL = self.WAVELENGHTS_LOWR
data = AFrame(wavelengths=WL[:-1],resolution=(WL[:-1]/np.diff(WL))/4,other=1,arbitrary="str",arguments="blah",method='resample',upsample=True)
assert self.save_or_compare(data,"tests/data/%s-resample2.npy",skip=False)
| gpl-3.0 |
hdmetor/scikit-learn | sklearn/utils/extmath.py | 142 | 21102 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sampling the range of A using by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case).
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
A random number generator instance to make behavior
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
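# Hypothetical usage sketch (added comment, not in the original module); the
# matrix and component count below are made up purely for illustration:
#
#     M = np.random.randn(100, 40)
#     U, s, V = randomized_svd(M, n_components=5, random_state=0)
#     U.shape, s.shape, V.shape   # -> (100, 5), (5,), (5, 40)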
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
    1.5 and 2: the sum of these is 3.5.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
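# Illustrative check (added comment, not in the original module): flipping the
# signs column-wise in u and row-wise in v leaves any reconstruction of the
# form np.dot(u * s, v) unchanged, e.g.:
#
#     u, s, v = linalg.svd(np.random.randn(5, 3), full_matrices=False)
#     u2, v2 = svd_flip(u.copy(), v.copy())
#     np.allclose(np.dot(u * s, v), np.dot(u2 * s, v2))  # -> True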
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
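# Illustrative check (added comment, not in the original module): for moderate
# inputs the numerically stable form agrees with the naive formula:
#
#     x = np.array([[-3.0, 0.0, 3.0]])
#     np.allclose(log_logistic(x), np.log(1.0 / (1.0 + np.exp(-x))))  # -> True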
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
        analysis and recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
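# Illustrative check (added comment, not in the original module): for a stream
# split into two batches X1 and X2, the incremental update should agree with
# the plain batch statistics of the stacked data up to floating point error:
#
#     mean12, var12, n12 = _batch_mean_variance_update(
#         X2, X1.mean(axis=0), X1.var(axis=0), X1.shape[0])
#     np.allclose(mean12, np.vstack([X1, X2]).mean(axis=0))  # -> True
#     np.allclose(var12, np.vstack([X1, X2]).var(axis=0))    # -> True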
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
brenthuisman/phd_tools | epid.phasespace.photonfluence.py | 1 | 1751 | #!/usr/bin/env python
import argparse,numpy as np,matplotlib,dump,plot
parser = argparse.ArgumentParser(description='Plot 2D hist from ROOT TTree.')
parser.add_argument('files', nargs='*')
args = parser.parse_args()
rootfiles = args.files
for rootfile in rootfiles:
all = dump.scaletreefast(rootfile,['X','Y'])
f, ((ax1 ,ax2), (ax3,ax4)) = plot.subplots(nrows=2, ncols=2, sharex=False, sharey=False)#,figsize=(28,10))
f.subplots_adjust(hspace=.5,wspace=.5)
ax1.set_title("Positions")
ax2.set_title("Cutout")
panelsize = 410. #mm
numpix = 256
pixsize = panelsize/numpix #mm
xbins = np.linspace(-panelsize/2.,panelsize/2.,numpix+1)
ybins = xbins
xbins2 = plot.chopcentral(xbins,5)
ybins2 = xbins2
xbins3 = np.linspace(-5,5,10+1)
ybins3 = xbins3
xbins4 = np.linspace(-10,0,10+1)
ybins4 = xbins4
print xbins2
whole = plot.plot2dhist( ax1, all['X'], all['Y'], xbins=xbins,ybins=ybins, log=True)
plot.plot2dhist( ax2, all['X'], all['Y'], xbins=xbins2,ybins=ybins2, log=True)
per_mm2 = plot.plot2dhist( ax3, all['X'], all['Y'], xbins=xbins3,ybins=ybins3, log=True)
per_mm2_kwadrant = plot.plot2dhist( ax4, all['X'], all['Y'], xbins=xbins4,ybins=ybins4, log=True)
print 'mean. per mm2,whole',np.mean(whole.flatten())
print 'std. per mm2,whole',np.std(whole.flatten())
print 'mean. per mm2',np.mean(per_mm2.flatten())
print 'std. per mm2',np.std(per_mm2.flatten())
print 'mean. per mm2',np.mean(per_mm2_kwadrant.flatten())
print 'std. per mm2',np.std(per_mm2_kwadrant.flatten())
print per_mm2_kwadrant
f.savefig(rootfile+'.pdf', bbox_inches='tight')
plot.close('all')
| lgpl-3.0 |
dariox2/CADL | session-5/s5p5-trump.py | 1 | 17377 |
# Session 5, part 5 (notebook part 2)
print("Begin import...")
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
#from skimage import data # ERROR: Cannot load libmkl_def.so
from scipy.misc import imresize
from scipy.ndimage.filters import gaussian_filter
print("Loading tensorflow...")
import tensorflow as tf
from libs import utils, gif, datasets, dataset_utils, nb_utils
# dja
plt.style.use('bmh')
#import datetime
#np.set_printoptions(threshold=np.inf) # display FULL array (infinite)
plt.ion()
plt.figure(figsize=(4, 4))
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from matplotlib.cbook import MatplotlibDeprecationWarning
warnings.filterwarnings("ignore", category=MatplotlibDeprecationWarning)
def wait(n):
#plt.pause(n)
plt.pause(3)
#input("(press enter)")
##
## Part 5 - Pretrained Char-RNN of Donald Trump
##
# Rather than stick around to let a model train, let's now explore
# one I've trained for you Donald Trump. If you've trained your own
# model on your own text corpus then great! You should be able to use
# that in place of the one I've provided and still continue with the
# rest of the notebook.
#
# For the Donald Trump corpus, there are a lot of video transcripts
# that you can find online. I've searched for a few of these, put
# them in a giant text file, made everything lowercase, and removed
# any extraneous letters/symbols to help reduce the vocabulary (not
# that it's not very large to begin with, ha).
#
# I used the code exactly as above to train on the text I gathered
# and left it to train for about 2 days. The only modification is
# that I also used "dropout" which you can see in the libs/charrnn.py
# file. Let's explore it now and we'll see how we can play with
# "sampling" the model to generate new phrases, and how to "prime"
# the model (a psychological term referring to when someone is
# exposed to something shortly before another event).
#
# First, let's clean up any existing graph:
tf.reset_default_graph()
#
# Getting the Trump Data
#
# Now let's load the text. This is included in the repo or can be
# downloaded from:
with open('trump.txt', 'r') as fp:
txt = fp.read()
# Let's take a look at what's going on in here:
# In[ ]:
print("txt 100: ", txt[:100])
# dja - from s5p4:
from collections import OrderedDict
vocab = list(set(txt))
vocab.sort()
print("len vocab: ", len(vocab))
print("vocab: ", vocab)
encoder = OrderedDict(zip(vocab, range(len(vocab))))
#
# Basic Text Analysis
#
# We can do some basic data analysis to get a sense of what kind of
# vocabulary we're working with. It's really important to look at
# your data in as many ways as possible. This helps ensure there
# isn't anything unexpected going on. Let's find every unique word he
# uses:
words = set(txt.split(' '))
#print("words: ", words)
# Now let's count their occurrences:
counts = {word_i: 0 for word_i in words}
for word_i in txt.split(' '):
counts[word_i] += 1
#print("counts: ", counts)
# We can sort this like so:
##[(word_i, counts[word_i]) for word_i in sorted(counts, key=counts.get, reverse=True)]
print("Sorted counts:")
n=0
for word_i in sorted(counts, key=counts.get, reverse=True):
print(word_i, counts[word_i])
n+=1
if n>10:
break
# As we should expect, "the" is the most common word, as it is in the
# English language:
# https://en.wikipedia.org/wiki/Most_common_words_in_English
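# Added illustrative check (not in the original notebook): the same word
# counts can be obtained with the standard library's Counter, which is a
# handy sanity check on the hand-rolled dictionary above.
from collections import Counter
word_counter = Counter(txt.split(' '))
assert word_counter == counts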
#
#
# Loading the Pre-trained Trump Model
#
# Let's load the pretrained model. Rather than provide a tfmodel
# export, I've provided the checkpoint so you can also experiment
# with training it more if you wish. We'll rebuild the graph using
# the `charrnn` module in the `libs` directory:
from libs import charrnn
# Let's get the checkpoint and build the model then restore the
# variables from the checkpoint. The only parameters of consequence
# are `n_layers` and `n_cells` which define the total size and layout
# of the model. The rest are flexible. We'll set the `batch_size` and
# `sequence_length` to 1, meaning we can feed in a single character
# at a time only, and get back 1 character denoting the very next
# character's prediction.
# In[ ]:
ckpt_name = 'trump.ckpt'
g = tf.Graph()
n_layers = 3
n_cells = 512
"""
with tf.Session(graph=g) as sess:
model = charrnn.build_model(txt=txt,
batch_size=1,
sequence_length=1,
n_layers=n_layers,
n_cells=n_cells,
gradient_clip=10.0)
saver = tf.train.Saver()
if os.path.exists(ckpt_name):
saver.restore(sess, ckpt_name)
print("Model restored (1).")
# Let's now take a look at the model:
# nb_utils.show_graph(g.as_graph_def())
"""
n_iterations = 100
#
# Inference: Keeping Track of the State
#
# Now recall from Part 4 when we created our LSTM network, we had an
# `initial_state` variable which would set the LSTM's `c` and `h`
# state vectors, as well as the final output state which was the
# output of the `c` and `h` state vectors after having passed through
# the network. When we input to the network some letter, say 'n', we
# can set the `initial_state` to zeros, but then after having input
# the letter `n`, we'll have as output a new state vector for `c` and
# `h`. On the next letter, we'll then want to set the `initial_state`
# to this new state, and set the input to the previous letter's
# output. That is how we ensure the network keeps track of time and
# knows what has happened in the past, and let it continually
# generate.
print("Inference...")
curr_states = None
g = tf.Graph()
with tf.Session(graph=g) as sess:
model = charrnn.build_model(txt=txt,
batch_size=1,
sequence_length=1,
n_layers=n_layers,
n_cells=n_cells,
gradient_clip=10.0)
saver = tf.train.Saver()
if os.path.exists(ckpt_name):
saver.restore(sess, ckpt_name)
print("Model restored (2).")
# Get every tf.Tensor for the initial state
init_states = []
for s_i in model['initial_state']:
init_states.append(s_i.c)
init_states.append(s_i.h)
# Similarly, for every state after inference
final_states = []
for s_i in model['final_state']:
final_states.append(s_i.c)
final_states.append(s_i.h)
    # Let's start with a space character and see what comes out:
synth = [[encoder[' ']]]
for i in range(n_iterations):
# We'll create a feed_dict parameter which includes what to
# input to the network, model['X'], as well as setting
# dropout to 1.0, meaning no dropout.
feed_dict = {model['X']: [synth[-1]],
model['keep_prob']: 1.0}
# Now we'll check if we currently have a state as a result
# of a previous inference, and if so, add to our feed_dict
# parameter the mapping of the init_state to the previous
# output state stored in "curr_states".
if curr_states:
feed_dict.update(
{init_state_i: curr_state_i
for (init_state_i, curr_state_i) in
zip(init_states, curr_states)})
# Now we can infer and see what letter we get
p = sess.run(model['probs'], feed_dict=feed_dict)[0]
# And make sure we also keep track of the new state
curr_states = sess.run(final_states, feed_dict=feed_dict)
# Find the most likely character
p = np.argmax(p)
# Append to string
synth.append([p])
# Print out the decoded letter
print(model['decoder'][p], end='')
sys.stdout.flush()
print("")
#
# Probabilistic Sampling
#
# Run the above cell a couple times. What you should find is that it
# is deterministic. We always pick *the* most likely character. But
# we can do something else which will make things less deterministic
# and a bit more interesting: we can sample from our probabilistic
# measure from our softmax layer. This means if we have the letter
# 'a' as 0.4, and the letter 'o' as 0.2, we'll have a 40% chance of
# picking the letter 'a', and 20% chance of picking the letter 'o',
# rather than simply always picking the letter 'a' since it is the
# most probable.
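# Minimal standalone sketch (added, not in the original notebook): sampling
# from a made-up softmax output instead of taking the argmax. The
# probabilities below are purely illustrative.
toy_probs = np.array([0.4, 0.2, 0.2, 0.1, 0.1])
toy_pick = np.argmax(np.random.multinomial(1, toy_probs))
# toy_pick is index 0 only about 40% of the time, whereas
# np.argmax(toy_probs) would always return index 0.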
print("Probabilistic Sampling...")
curr_states = None
g = tf.Graph()
with tf.Session(graph=g) as sess:
model = charrnn.build_model(txt=txt,
batch_size=1,
sequence_length=1,
n_layers=n_layers,
n_cells=n_cells,
gradient_clip=10.0)
saver = tf.train.Saver()
if os.path.exists(ckpt_name):
saver.restore(sess, ckpt_name)
print("Model restored (3).")
# Get every tf.Tensor for the initial state
init_states = []
for s_i in model['initial_state']:
init_states.append(s_i.c)
init_states.append(s_i.h)
# Similarly, for every state after inference
final_states = []
for s_i in model['final_state']:
final_states.append(s_i.c)
final_states.append(s_i.h)
    # Let's start with a space character and see what comes out:
synth = [[encoder[' ']]]
for i in range(n_iterations):
# We'll create a feed_dict parameter which includes what to
# input to the network, model['X'], as well as setting
# dropout to 1.0, meaning no dropout.
feed_dict = {model['X']: [synth[-1]],
model['keep_prob']: 1.0}
# Now we'll check if we currently have a state as a result
# of a previous inference, and if so, add to our feed_dict
# parameter the mapping of the init_state to the previous
# output state stored in "curr_states".
if curr_states:
feed_dict.update(
{init_state_i: curr_state_i
for (init_state_i, curr_state_i) in
zip(init_states, curr_states)})
# Now we can infer and see what letter we get
p = sess.run(model['probs'], feed_dict=feed_dict)[0]
# And make sure we also keep track of the new state
curr_states = sess.run(final_states, feed_dict=feed_dict)
# Now instead of finding the most likely character,
# we'll sample with the probabilities of each letter
p = p.astype(np.float64)
p = np.random.multinomial(1, p.ravel() / p.sum())
p = np.argmax(p)
# Append to string
synth.append([p])
# Print out the decoded letter
print(model['decoder'][p], end='')
sys.stdout.flush()
print("")
#
# Inference: Temperature
#
# When performing probabilistic sampling, we can also use a parameter
# known as temperature which comes from simulated annealing. The
# basic idea is that as the temperature is high and very hot, we have
# a lot more free energy to use to jump around more, and as we cool
# down, we have less energy and then become more deterministic. We
# can use temperature by scaling our log probabilities like so:
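# Minimal standalone sketch (added, not in the original notebook): the same
# made-up distribution rescaled with two temperatures. A low temperature
# sharpens the distribution towards its argmax; a high one flattens it.
toy_p = np.array([0.4, 0.2, 0.2, 0.1, 0.1])
toy_cold = np.exp(np.log(toy_p) / 0.5)
toy_cold /= toy_cold.sum()  # sharper: the 0.4 entry rises to roughly 0.62
toy_hot = np.exp(np.log(toy_p) / 2.0)
toy_hot /= toy_hot.sum()    # flatter: all entries move towards uniform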
print("Temperature...")
temperature = 0.5
curr_states = None
g = tf.Graph()
with tf.Session(graph=g) as sess:
model = charrnn.build_model(txt=txt,
batch_size=1,
sequence_length=1,
n_layers=n_layers,
n_cells=n_cells,
gradient_clip=10.0)
saver = tf.train.Saver()
if os.path.exists(ckpt_name):
saver.restore(sess, ckpt_name)
print("Model restored (4).")
# Get every tf.Tensor for the initial state
init_states = []
for s_i in model['initial_state']:
init_states.append(s_i.c)
init_states.append(s_i.h)
# Similarly, for every state after inference
final_states = []
for s_i in model['final_state']:
final_states.append(s_i.c)
final_states.append(s_i.h)
# Let's start with the letter 't' and see what comes out:
synth = [[encoder[' ']]]
for i in range(n_iterations):
# We'll create a feed_dict parameter which includes what to
# input to the network, model['X'], as well as setting
# dropout to 1.0, meaning no dropout.
feed_dict = {model['X']: [synth[-1]],
model['keep_prob']: 1.0}
# Now we'll check if we currently have a state as a result
# of a previous inference, and if so, add to our feed_dict
# parameter the mapping of the init_state to the previous
# output state stored in "curr_states".
if curr_states:
feed_dict.update(
{init_state_i: curr_state_i
for (init_state_i, curr_state_i) in
zip(init_states, curr_states)})
# Now we can infer and see what letter we get
p = sess.run(model['probs'], feed_dict=feed_dict)[0]
# And make sure we also keep track of the new state
curr_states = sess.run(final_states, feed_dict=feed_dict)
# Now instead of finding the most likely character,
# we'll sample with the probabilities of each letter
p = p.astype(np.float64)
p = np.log(p) / temperature
p = np.exp(p) / np.sum(np.exp(p))
p = np.random.multinomial(1, p.ravel() / p.sum())
p = np.argmax(p)
# Append to string
synth.append([p])
# Print out the decoded letter
print(model['decoder'][p], end='')
sys.stdout.flush()
print("")
#
# Inference: Priming
#
# Let's now work on "priming" the model with some text, and see what
# kind of state it is in and leave it to synthesize from there. We'll
# do more or less what we did before, but feed in our own text
# instead of the last letter of the synthesis from the model.
print("Priming...")
prime = "obama"
temperature = 1.0
curr_states = None
n_iterations = 500
g = tf.Graph()
with tf.Session(graph=g) as sess:
model = charrnn.build_model(txt=txt,
batch_size=1,
sequence_length=1,
n_layers=n_layers,
n_cells=n_cells,
gradient_clip=10.0)
saver = tf.train.Saver()
if os.path.exists(ckpt_name):
saver.restore(sess, ckpt_name)
print("Model restored (5).")
# Get every tf.Tensor for the initial state
init_states = []
for s_i in model['initial_state']:
init_states.append(s_i.c)
init_states.append(s_i.h)
# Similarly, for every state after inference
final_states = []
for s_i in model['final_state']:
final_states.append(s_i.c)
final_states.append(s_i.h)
# Now we'll keep track of the state as we feed it one
# letter at a time.
curr_states = None
for ch in prime:
feed_dict = {model['X']: [[model['encoder'][ch]]],
model['keep_prob']: 1.0}
if curr_states:
feed_dict.update(
{init_state_i: curr_state_i
for (init_state_i, curr_state_i) in
zip(init_states, curr_states)})
# Now we can infer and see what letter we get
p = sess.run(model['probs'], feed_dict=feed_dict)[0]
p = p.astype(np.float64)
p = np.log(p) / temperature
p = np.exp(p) / np.sum(np.exp(p))
p = np.random.multinomial(1, p.ravel() / p.sum())
p = np.argmax(p)
# And make sure we also keep track of the new state
curr_states = sess.run(final_states, feed_dict=feed_dict)
# Now we're ready to do what we were doing before but with the
# last predicted output stored in `p`, and the current state of
# the model.
synth = [[p]]
print(prime + model['decoder'][p], end='')
for i in range(n_iterations):
# Input to the network
feed_dict = {model['X']: [synth[-1]],
model['keep_prob']: 1.0}
# Also feed our current state
feed_dict.update(
{init_state_i: curr_state_i
for (init_state_i, curr_state_i) in
zip(init_states, curr_states)})
# Inference
p = sess.run(model['probs'], feed_dict=feed_dict)[0]
# Keep track of the new state
curr_states = sess.run(final_states, feed_dict=feed_dict)
# Sample
p = p.astype(np.float64)
p = np.log(p) / temperature
p = np.exp(p) / np.sum(np.exp(p))
p = np.random.multinomial(1, p.ravel() / p.sum())
p = np.argmax(p)
# Append to string
synth.append([p])
# Print out the decoded letter
print(model['decoder'][p], end='')
sys.stdout.flush()
print("")
# eop
| apache-2.0 |
HolgerPeters/scikit-learn | benchmarks/bench_tree.py | 131 | 3647 | """
To run this, you'll need to have the following installed:
* scikit-learn
This script runs two benchmarks.
First, we fix a training set, increase the number of
samples to classify and plot the time taken as a function of the
number of samples.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import matplotlib.pyplot as plt
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
plt.figure('scikit-learn tree benchmark results')
plt.subplot(211)
plt.title('Learning with varying number of samples')
plt.plot(xx, scikit_classifier_results, 'g-', label='classification')
plt.plot(xx, scikit_regressor_results, 'r-', label='regression')
plt.legend(loc='upper left')
plt.xlabel('number of samples')
plt.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
plt.subplot(212)
plt.title('Learning in high dimensional spaces')
plt.plot(xx, scikit_classifier_results, 'g-', label='classification')
plt.plot(xx, scikit_regressor_results, 'r-', label='regression')
plt.legend(loc='upper left')
plt.xlabel('number of dimensions')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
| bsd-3-clause |
dimkal/mne-python | examples/forward/plot_read_forward.py | 21 | 2422 | """
====================================================
Read a forward operator and display sensitivity maps
====================================================
Forward solutions can be read using read_forward_solution in Python.
"""
# Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
subjects_dir = data_path + '/subjects'
fwd = mne.read_forward_solution(fname, surf_ori=True)
leadfield = fwd['sol']['data']
print("Leadfield size : %d x %d" % leadfield.shape)
###############################################################################
# Show gain matrix a.k.a. leadfield matrix with sensitivity map
picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
cmap='RdBu_r')
ax.set_title(ch_type.upper())
ax.set_xlabel('sources')
ax.set_ylabel('sensors')
plt.colorbar(im, ax=ax, cmap='RdBu_r')
###############################################################################
# Show sensitivity of each sensor type to dipoles in the source space
grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
plt.figure()
plt.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
color=['c', 'b', 'k'])
plt.title('Normal orientation sensitivity')
plt.xlabel('sensitivity')
plt.ylabel('count')
plt.legend()
# Cautious smoothing to see actual dipoles
grad_map.plot(time_label='Gradiometer sensitivity', subjects_dir=subjects_dir,
clim=dict(lims=[0, 50, 100]))
# Note. The source space uses min-dist and therefore discards most
# superficial dipoles. This is why parts of the gyri are not covered.
| bsd-3-clause |
samzhang111/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
adelomana/cassandra | conditionedFitness/figureMenadione/script.2.py | 2 | 6309 | import matplotlib,numpy,sys,scipy
import matplotlib.pyplot
sys.path.append('/Users/alomana/gDrive2/projects/centers/ap/src/assessmentGraphs/publicationFigures/lib')
import calculateStatistics
### MAIN
matplotlib.rcParams.update({'font.size':36,'font.family':'Times New Roman','xtick.labelsize':28,'ytick.labelsize':28})
thePointSize=12
# menadione 1
xSignal=numpy.array([[151,179,186,194,186],[114,108,96,127,123]])
xNoSignal=numpy.array([[159,203,192,225,168],[105,119,97,109,119]])
cf_mu_0, cf_sd_0, pvalue_0 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[117,115,116,118,130],[61,87,63,85,69]])
xNoSignal=numpy.array([[139,111,100,102,112],[73,46,61,69,62]])
cf_mu_50, cf_sd_50, pvalue_50 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[155,156,132],[86,102,120]])
xNoSignal=numpy.array([[170,180,175],[82,89,89]])
cf_mu_190, cf_sd_190, pvalue_190 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[119,79,50,95,83],[136,128,120,142,141]])
xNoSignal=numpy.array([[74,92,96,81,74],[141,97,127,114,132]])
cf_mu_300, cf_sd_300, pvalue_300 = calculateStatistics.main(xSignal, xNoSignal)
x = [0, 50, 190, 300]
y = [cf_mu_0, cf_mu_50, cf_mu_190, cf_mu_300]
z = [cf_sd_0, cf_sd_50, cf_sd_190, cf_sd_300]
w = [pvalue_0, pvalue_50, pvalue_190, pvalue_300]
matplotlib.pyplot.errorbar(x,y,yerr=z,fmt=':o',color='orange',ecolor='orange',markeredgecolor='orange',capsize=0,ms=thePointSize,mew=0)
matplotlib.pyplot.tight_layout()
for i in range(len(w)):
if y[i] > 0.:
sp=y[i]+z[i]+0.02
else:
sp=y[i]-z[i]-0.02
if w[i] < 0.05 and w[i] >= 0.01:
matplotlib.pyplot.scatter(x[i], sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
if w[i] < 0.01:
matplotlib.pyplot.scatter(x[i]-3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
matplotlib.pyplot.scatter(x[i]+3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
matplotlib.pyplot.plot([0,300],[0,0],'--',color='black')
matplotlib.pyplot.xlim([-25,325])
matplotlib.pyplot.ylim([-0.5,0.5])
matplotlib.pyplot.yticks([-0.4,-0.2,0,0.2,0.4])
matplotlib.pyplot.xlabel('Generation')
matplotlib.pyplot.ylabel('Conditioned Fitness')
matplotlib.pyplot.tight_layout(pad=0.5)
matplotlib.pyplot.savefig('figure.menadione.1.pdf')
matplotlib.pyplot.clf()
# menadione 2
xSignal=numpy.array([[188,179,157,189,175],[102,120,94,96,99]])
xNoSignal=numpy.array([[192,197,198,173,223],[84,87,76,95,85]])
cf_mu_0, cf_sd_0, pvalue_0 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[135,142,146,110,134],[100,92,110,105,111]])
xNoSignal=numpy.array([[125,159,123,129,116],[75,79,71,66,63]])
cf_mu_50, cf_sd_50, pvalue_50 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[151,151,174],[58,74,64]])
xNoSignal=numpy.array([[104,117,125],[86,93,94]])
cf_mu_190, cf_sd_190, pvalue_190 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[59,94,79,100,83],[91,114,104,115,109]])
xNoSignal=numpy.array([[89,57,50,71,97],[108,120,103,116,106]])
cf_mu_300, cf_sd_300, pvalue_300 = calculateStatistics.main(xSignal, xNoSignal)
x = [0, 50, 190, 300]
y = [cf_mu_0, cf_mu_50, cf_mu_190, cf_mu_300]
z = [cf_sd_0, cf_sd_50, cf_sd_190, cf_sd_300]
w = [pvalue_0, pvalue_50, pvalue_190, pvalue_300]
matplotlib.pyplot.errorbar(x,y,yerr=z,fmt=':o',color='orange',ecolor='orange',markeredgecolor='orange',capsize=0,ms=thePointSize,mew=0)
for i in range(len(w)):
if y[i] > 0.:
sp=y[i]+0.1
else:
sp=y[i]-0.1
if w[i] < 0.05 and w[i] >= 0.01:
matplotlib.pyplot.scatter(x[i], sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
if w[i] < 0.01:
matplotlib.pyplot.scatter(x[i]-3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
matplotlib.pyplot.scatter(x[i]+3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
matplotlib.pyplot.plot([0,300],[0,0],'--',color='black')
matplotlib.pyplot.xlim([-25,325])
matplotlib.pyplot.ylim([-0.5,0.5])
matplotlib.pyplot.yticks([-0.4,-0.2,0,0.2,0.4])
matplotlib.pyplot.tight_layout(pad=0.5)
matplotlib.pyplot.xlabel('Generation')
matplotlib.pyplot.ylabel('Conditioned Fitness')
matplotlib.pyplot.tight_layout(pad=0.5)
matplotlib.pyplot.savefig('figure.menadione.2.pdf')
matplotlib.pyplot.clf()
# menadione 3
xSignal=numpy.array([[199,159,173,153,207],[95,108,110,100,98]])
xNoSignal=numpy.array([[148,190,124,145,146],[99,76,91,95,72]])
cf_mu_0, cf_sd_0, pvalue_0 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[107,154,115,143,142],[110,99,107,112,99]])
xNoSignal=numpy.array([[109,122,135,152,133],[76,65,89,100,80]])
cf_mu_50, cf_sd_50, pvalue_50 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[150,155,198],[117,129,137]])
xNoSignal=numpy.array([[184,186,202],[123,153,166]])
cf_mu_190, cf_sd_190, pvalue_190 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[129,164,194,157,186],[182,213,203,163,207]])
xNoSignal=numpy.array([[203,195,149,187,128],[180,187,196,188,194]])
cf_mu_300, cf_sd_300, pvalue_300 = calculateStatistics.main(xSignal, xNoSignal)
x = [0, 50, 190, 300]
y = [cf_mu_0, cf_mu_50, cf_mu_190, cf_mu_300]
z = [cf_sd_0, cf_sd_50, cf_sd_190, cf_sd_300]
w = [pvalue_0, pvalue_50, pvalue_190, pvalue_300]
matplotlib.pyplot.errorbar(x,y,yerr=z,fmt=':o',color='orange',ecolor='orange',markeredgecolor='orange',capsize=0,ms=thePointSize,mew=0)
for i in range(len(w)):
if y[i] > 0.:
sp=y[i]+0.1
else:
sp=y[i]-0.1
if w[i] < 0.05 and w[i] >= 0.01:
matplotlib.pyplot.scatter(x[i], sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
if w[i] < 0.01:
matplotlib.pyplot.scatter(x[i]-3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
matplotlib.pyplot.scatter(x[i]+3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
matplotlib.pyplot.plot([0,300],[0,0],'--',color='black')
matplotlib.pyplot.xlim([-25,325])
matplotlib.pyplot.ylim([-0.5,0.5])
matplotlib.pyplot.yticks([-0.4,-0.2,0,0.2,0.4])
matplotlib.pyplot.tight_layout(pad=0.5)
matplotlib.pyplot.xlabel('Generation')
matplotlib.pyplot.ylabel('Conditioned Fitness')
matplotlib.pyplot.tight_layout(pad=0.5)
matplotlib.pyplot.savefig('figure.menadione.3.pdf')
matplotlib.pyplot.clf()
| gpl-3.0 |
appapantula/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/pylab_examples/axes_zoom_effect.py | 3 | 3293 | from matplotlib.transforms import Bbox, TransformedBbox, \
blended_transform_factory
from mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnector,\
BboxConnectorPatch
def connect_bbox(bbox1, bbox2,
loc1a, loc2a, loc1b, loc2b,
prop_lines, prop_patches=None):
if prop_patches is None:
prop_patches = prop_lines.copy()
prop_patches["alpha"] = prop_patches.get("alpha", 1)*0.2
c1 = BboxConnector(bbox1, bbox2, loc1=loc1a, loc2=loc2a, **prop_lines)
c1.set_clip_on(False)
c2 = BboxConnector(bbox1, bbox2, loc1=loc1b, loc2=loc2b, **prop_lines)
c2.set_clip_on(False)
bbox_patch1 = BboxPatch(bbox1, **prop_patches)
bbox_patch2 = BboxPatch(bbox2, **prop_patches)
p = BboxConnectorPatch(bbox1, bbox2,
#loc1a=3, loc2a=2, loc1b=4, loc2b=1,
loc1a=loc1a, loc2a=loc2a, loc1b=loc1b, loc2b=loc2b,
**prop_patches)
p.set_clip_on(False)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect01(ax1, ax2, xmin, xmax, **kwargs):
u"""
ax1 : the main axes
ax2 : the zoomed axes
(xmin,xmax) : the limits of the colored area in both plot axes.
connect ax1 & ax2. The x-range of (xmin, xmax) in both axes will
be marked. The keyword parameters will be used to create
patches.
"""
trans1 = blended_transform_factory(ax1.transData, ax1.transAxes)
trans2 = blended_transform_factory(ax2.transData, ax2.transAxes)
bbox = Bbox.from_extents(xmin, 0, xmax, 1)
mybbox1 = TransformedBbox(bbox, trans1)
mybbox2 = TransformedBbox(bbox, trans2)
prop_patches=kwargs.copy()
prop_patches["ec"]="none"
prop_patches["alpha"]=0.2
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bbox(mybbox1, mybbox2,
loc1a=3, loc2a=2, loc1b=4, loc2b=1,
prop_lines=kwargs, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect02(ax1, ax2, **kwargs):
u"""
ax1 : the main axes
ax2 : the zoomed axes
Similar to zoom_effect01. The xmin & xmax will be taken from the
ax1.viewLim.
"""
tt = ax1.transScale + (ax1.transLimits + ax2.transAxes)
trans = blended_transform_factory(ax2.transData, tt)
mybbox1 = ax1.bbox
mybbox2 = TransformedBbox(ax1.viewLim, trans)
prop_patches=kwargs.copy()
prop_patches["ec"]="none"
prop_patches["alpha"]=0.2
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bbox(mybbox1, mybbox2,
loc1a=3, loc2a=2, loc1b=4, loc2b=1,
prop_lines=kwargs, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
import matplotlib.pyplot as plt
plt.figure(1, figsize=(5,5))
ax1 = plt.subplot(221)
ax2 = plt.subplot(212)
ax1.set_xlim(0, 1)
ax2.set_xlim(0, 5)
zoom_effect01(ax1, ax2, 0.2, 0.8)
ax1 = plt.subplot(222)
ax1.set_xlim(2, 3)
ax2.set_xlim(0, 5)
zoom_effect02(ax1, ax2)
plt.show()
| gpl-2.0 |
interactiveaudiolab/nussl | recipes/wham/chimera.py | 1 | 7122 | """
This recipe trains and evaluates a chimera model (joint mask inference
and deep clustering) on the clean data from the WHAM dataset at 8 kHz.
It's divided into three big chunks: data preparation, training, and
evaluation.
Final output of this script: per-mixture evaluation scores written as
JSON files in the results directory, plus a printed table of the mean
SAR/SDR/SIR over the WHAM test set.
"""
import nussl
from nussl import ml, datasets, utils, separation, evaluation
import os
import torch
import multiprocessing
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from torch import optim
import logging
import matplotlib.pyplot as plt
import shutil
import json
import tqdm
import glob
import numpy as np
import termtables
# ----------------------------------------------------
# ------------------- SETTING UP ---------------------
# ----------------------------------------------------
# seed this recipe for reproducibility
utils.seed(0)
# set up logging
logging.basicConfig(
format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S', level=logging.INFO)
# make sure this is set to WHAM root directory
WHAM_ROOT = os.getenv("WHAM_ROOT")
CACHE_ROOT = os.getenv("CACHE_ROOT")
NUM_WORKERS = multiprocessing.cpu_count() // 4
OUTPUT_DIR = os.path.expanduser('~/.nussl/recipes/wham_chimera/run14_1e-2_1e3_1')
RESULTS_DIR = os.path.join(OUTPUT_DIR, 'results')
MODEL_PATH = os.path.join(OUTPUT_DIR, 'checkpoints', 'best.model.pth')
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
BATCH_SIZE = 25
MAX_EPOCHS = 100
CACHE_POPULATED = True
LEARNING_RATE = 1e-3
PATIENCE = 5
GRAD_NORM = 1e-2
shutil.rmtree(os.path.join(RESULTS_DIR), ignore_errors=True)
os.makedirs(RESULTS_DIR, exist_ok=True)
shutil.rmtree(os.path.join(OUTPUT_DIR, 'tensorboard'), ignore_errors=True)
def construct_transforms(cache_location):
# stft will be 32ms wlen, 8ms hop, sqrt-hann, at 8khz sample rate by default
tfm = datasets.transforms.Compose([
datasets.transforms.MagnitudeSpectrumApproximation(), # take stfts and get ibm
datasets.transforms.MagnitudeWeights(), # get magnitude weights
datasets.transforms.ToSeparationModel(), # convert to tensors
datasets.transforms.Cache(cache_location), # up to here gets cached
datasets.transforms.GetExcerpt(400) # get 400 frame excerpts (3.2 seconds)
])
return tfm
def cache_dataset(_dataset):
cache_dataloader = torch.utils.data.DataLoader(
_dataset, num_workers=NUM_WORKERS, batch_size=BATCH_SIZE)
ml.train.cache_dataset(cache_dataloader)
_dataset.cache_populated = True
tfm = construct_transforms(os.path.join(CACHE_ROOT, 'tr'))
dataset = datasets.WHAM(WHAM_ROOT, split='tr', transform=tfm,
cache_populated=CACHE_POPULATED)
tfm = construct_transforms(os.path.join(CACHE_ROOT, 'cv'))
val_dataset = datasets.WHAM(WHAM_ROOT, split='cv', transform=tfm,
cache_populated=CACHE_POPULATED)
if not CACHE_POPULATED:
# cache datasets for speed
cache_dataset(dataset)
cache_dataset(val_dataset)
# ----------------------------------------------------
# -------------------- TRAINING ----------------------
# ----------------------------------------------------
# reload after caching
train_sampler = torch.utils.data.sampler.RandomSampler(dataset)
val_sampler = torch.utils.data.sampler.RandomSampler(val_dataset)
dataloader = torch.utils.data.DataLoader(dataset, num_workers=NUM_WORKERS,
batch_size=BATCH_SIZE, sampler=train_sampler)
val_dataloader = torch.utils.data.DataLoader(val_dataset, num_workers=NUM_WORKERS,
batch_size=BATCH_SIZE, sampler=val_sampler)
n_features = dataset[0]['mix_magnitude'].shape[1]
# builds a baseline model with 4 recurrent layers, 600 hidden units, bidirectional
# and 20 dimensional embedding
config = ml.networks.builders.build_recurrent_chimera(
n_features, 600, 4, True, 0.3, 20, ['sigmoid', 'unit_norm'],
2, ['sigmoid'], normalization_class='BatchNorm'
)
model = ml.SeparationModel(config).to(DEVICE)
logging.info(model)
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, factor=0.5, patience=PATIENCE)
# set up the loss function
loss_dictionary = {
'PermutationInvariantLoss': {'args': ['L1Loss'], 'weight': 1e3},
'DeepClusteringLoss': {'weight': 1.0}
}
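# Note (added comment): together these two terms form the chimera objective:
# an L1 loss between the model's estimates and the reference sources, made
# permutation invariant, plus a deep clustering loss on the embedding head.
# The 1e3 vs 1.0 weighting simply mirrors the dictionary above and is a
# tuning choice, not a fixed requirement.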
# set up closures for the forward and backward pass on one batch
train_closure = ml.train.closures.TrainClosure(
loss_dictionary, optimizer, model)
val_closure = ml.train.closures.ValidationClosure(
loss_dictionary, model)
# set up engines for training and validation
trainer, validator = ml.train.create_train_and_validation_engines(
train_closure, val_closure, device=DEVICE)
# attach handlers for visualizing output and saving the model
ml.train.add_stdout_handler(trainer, validator)
ml.train.add_validate_and_checkpoint(
OUTPUT_DIR, model, optimizer, dataset,
trainer, val_data=val_dataloader, validator=validator)
ml.train.add_tensorboard_handler(OUTPUT_DIR, trainer)
# add a handler to set up patience
@trainer.on(ml.train.ValidationEvents.VALIDATION_COMPLETED)
def step_scheduler(trainer):
val_loss = trainer.state.epoch_history['validation/loss'][-1]
scheduler.step(val_loss)
# add a handler to set up gradient clipping
@trainer.on(ml.train.BackwardsEvents.BACKWARDS_COMPLETED)
def clip_gradient(trainer):
torch.nn.utils.clip_grad_norm_(model.parameters(), GRAD_NORM)
# train the model
trainer.run(dataloader, max_epochs=MAX_EPOCHS)
# ----------------------------------------------------
# ------------------- EVALUATION ---------------------
# ----------------------------------------------------
test_dataset = datasets.WHAM(WHAM_ROOT, sample_rate=8000, split='tt')
# make a deep clustering separator with an empty audio signal initially
# this one will live on gpu and be used in a threadpool for speed
dme = separation.deep.DeepMaskEstimation(
nussl.AudioSignal(), model_path=MODEL_PATH, device='cuda')
def forward_on_gpu(audio_signal):
# set the audio signal of the object to this item's mix
dme.audio_signal = audio_signal
masks = dme.forward()
return masks
def separate_and_evaluate(item, masks):
separator = separation.deep.DeepMaskEstimation(item['mix'])
estimates = separator(masks)
evaluator = evaluation.BSSEvalScale(
list(item['sources'].values()), estimates, compute_permutation=True)
scores = evaluator.evaluate()
output_path = os.path.join(RESULTS_DIR, f"{item['mix'].file_name}.json")
with open(output_path, 'w') as f:
json.dump(scores, f)
pool = ThreadPoolExecutor(max_workers=NUM_WORKERS)
for i, item in enumerate(tqdm.tqdm(test_dataset)):
masks = forward_on_gpu(item['mix'])
if i == 0:
separate_and_evaluate(item, masks)
else:
pool.submit(separate_and_evaluate, item, masks)
pool.shutdown(wait=True)
json_files = glob.glob(f"{RESULTS_DIR}/*.json")
df = evaluation.aggregate_score_files(json_files)
overall = df.mean()
headers = ["", f"OVERALL (N = {df.shape[0]})", ""]
metrics = ["SAR", "SDR", "SIR"]
data = np.array(df.mean()).T
data = [metrics, data]
termtables.print(data, header=headers, padding=(0, 1), alignment="ccc")
| mit |
griffinfoster/shapelets | scripts/solveShapelet.py | 1 | 9766 | #!/usr/bin/env python
"""
Solve for shapelet coefficients based on beta, xc, phi, and n_max
"""
import sys
import numpy as np
import shapelets
if __name__ == '__main__':
from optparse import OptionParser
o = OptionParser()
o.set_usage('%prog [options] FITS_IMAGE')
o.set_description(__doc__)
o.add_option('-r', '--region', dest='region', default=None,
help='Region of image to decompose into shapelets, (xmin,xmax,ymin,ymax), default: None')
o.add_option('-N', '--noise_region', dest='nregion', default=None,
help='Region of image to use to create a noise map, if set to None the entire image is used, this is not used in the script, (xmin,xmax,ymin,ymax), default: None')
o.add_option('-x', '--xc', dest='xc', default=None,
help='set a x,y pixel position for shapelet center, if using a region it is based on the relative position, default: centroid of image/region')
o.add_option('-m', '--mode', dest='mode', default='cart',
help='Set the shapelet mode, cartesian or polar, default: cartesian')
o.add_option('-n', '--nmax', dest='nmax', default='5',
help='Size of coefficient dimensions, can be two values i.e. \'4,5\', default: 5')
o.add_option('-b', '--beta', dest='beta', default=None,
help='Beta value, can be two values i.e. \'25.0,30.5\', default: None, guess is made based on Gaussian fit')
o.add_option('-p','--phi', dest='phi', default=0., type='float',
help='Rotation angle (radians), only used when beta is manually input, default: 0')
o.add_option('-o', '--outfile', dest='ofn', default='shapeletCoeffs.pkl',
help='Coefficients output filename, default: shapeletCoeffs.pkl')
o.add_option('--centroid', dest='centroid', action="store_true", default=False,
help='Use the centroid position instead of max intensity position')
o.add_option('-s', '--savefig', dest='savefig', default=None,
help='Save the figure, requires filename')
o.add_option('--noplot', dest='noplot', action='store_true',
help='Do no show plots')
opts, args = o.parse_args(sys.argv[1:])
#import matplotlib if needed
show_plots = not opts.noplot
if show_plots:
from matplotlib import pyplot as plt
import matplotlib.patches
ifn=args[0]
im0,hdr=shapelets.fileio.readFITS(ifn,hdr=True)
extent=[0,im0.shape[0],0,im0.shape[1]]
if not (opts.region is None):
extent=map(int, opts.region.split(','))
im=shapelets.img.selPxRange(im0, [extent[2],extent[3],extent[0],extent[1]]) #numpy axis flip)
else:
im=im0
#noise map
if opts.nregion is None:
#sample the entire image for noise estimation
mean,std=shapelets.img.estimateNoise(im0,mode='sample')
nm=shapelets.img.makeNoiseMap(im.shape,mean,std)
else:
#use a specific region for noise estimation
nextent=map(int, opts.nregion.split(','))
mean,std=shapelets.img.estimateNoise(shapelets.img.selPxRange(im0,[nextent[2],nextent[3],nextent[0],nextent[1]]),mode='basic')
nm=shapelets.img.makeNoiseMap(im.shape,mean,std)
#select initial beta, phi, and xc
if opts.beta==None:
beta0,phi0,nmax0=shapelets.decomp.initParams(im,mode='fit',hdr=hdr)
else:
beta0=map(float,opts.beta.split(','))
phi0=float(opts.phi)
if len(beta0)==1:
beta0=[beta0[0],beta0[0]]
else:
beta0=[beta0[1],beta0[0]] #input to numpy flip
if opts.centroid:
xc=shapelets.img.centroid(im)
elif opts.xc==None:
xc=shapelets.img.maxPos(im)
else:
xc=map(float,opts.xc.split(','))
xc=[xc[1],xc[0]] #input to numpy flip
nmax=opts.nmax.split(',')
if len(nmax)==1:
nmax=[int(nmax[0])+1,int(nmax[0])+1]
else:
nmax=[int(nmax[1])+1,int(nmax[0])+1] #input to numpy flip
print 'Using beta: (%f,%f) :: \tphi: %f radians :: \tcentre: x,y=(%f,%f) :: \tnmax: (%i,%i)'%(beta0[1],beta0[0],phi0,xc[1],xc[0],nmax[1]-1,nmax[0]-1)
#determine (RA,dec) coordinates for centroid position
#TODO: this is correct for when the FITS header is delta RA<0 and delta Dec>0, this may need to be generalized
if extent is None:
#radec=hdr['wcs'].wcs_pix2sky(np.array([ [xc[1]+1,xc[0]+1] ]),1)[0] #unit: degrees, FITS conventions: first pixel is (1,1)
radec=hdr['wcs'].all_pix2world(np.array([ [xc[1]+1,xc[0]+1] ]),1)[0] #unit: degrees, FITS conventions: first pixel is (1,1)
else:
#radec=hdr['wcs'].wcs_pix2sky(np.array([ [xc[1]+extent[0]+1,im0.shape[0]-(extent[2]+xc[0])] ]),1)[0] #unit: degrees, FITS conventions: first pixel is (1,1)
radec=hdr['wcs'].all_pix2world(np.array([ [xc[1]+extent[0]+1,im0.shape[0]-(extent[2]+xc[0])] ]),1)[0] #unit: degrees, FITS conventions: first pixel is (1,1)
print 'Centroid RA: %f (deg) Dec: %f (deg)'%(radec[0],radec[1])
if opts.mode.startswith('pol'):
r0,th0=shapelets.shapelet.polarArray(xc,im.shape)
#plot: data, model, residual: model-data, coeffs
if show_plots:
fig = plt.figure()
ax = fig.add_subplot(221)
plt.title('Image')
plt.imshow(im)
e=matplotlib.patches.Ellipse(xy=[xc[1],xc[0]],width=2.*beta0[1],height=2.*beta0[0],angle=(180.*phi0/np.pi)) #numpy to matplotlib flip
e.set_clip_box(ax.bbox)
e.set_alpha(0.3)
e.set_facecolor('black')
ax.add_artist(e)
plt.text(xc[1],xc[0],'+',horizontalalignment='center',verticalalignment='center') #numpy to matplotlib flip
plt.colorbar()
plt.xlabel('X/RA')
plt.ylabel('Y/Dec')
plt.subplot(222)
plt.title('Model')
bvals=shapelets.decomp.genPolarBasisMatrix(beta0,nmax,phi0,r0,th0)
coeffs=shapelets.decomp.solveCoeffs(bvals,im)
mdl=np.abs(shapelets.img.constructModel(bvals,coeffs,im.shape))
plt.imshow(mdl)
plt.text(xc[1],xc[0],'+',horizontalalignment='center',verticalalignment='center') #numpy to matplotlib flip
plt.colorbar()
plt.xlabel('X/RA')
plt.ylabel('Y/Dec')
plt.subplot(223)
plt.title('Residual')
res=im-mdl
plt.imshow(res)
plt.colorbar()
plt.xlabel('X/RA')
plt.ylabel('Y/Dec')
plt.subplot(224)
plt.title('Coefficients')
cimR=shapelets.img.polarCoeffImg(coeffs.real,nmax)
cimI=shapelets.img.polarCoeffImg(coeffs.imag,nmax)
cimI=np.fliplr(cimI)
cim=np.concatenate((cimR,cimI),axis=1)
#plt.pcolor(cim)
plt.imshow(cim,interpolation='nearest',origin='lower')
plt.colorbar()
else:
bvals=shapelets.decomp.genPolarBasisMatrix(beta0,nmax,phi0,r0,th0)
coeffs=shapelets.decomp.solveCoeffs(bvals,im)
ofn=opts.ofn
print 'Writing to file:',ofn
shapelets.fileio.writeLageurreCoeffs(ofn,coeffs,xc,im.shape,beta0,phi0,nmax,info=ifn,pos=[radec[0],radec[1],hdr['dra'],hdr['ddec']])
elif opts.mode.startswith('cart'):
#plot: data, model, residual: model-data, coeffs
if show_plots:
fig = plt.figure()
ax = fig.add_subplot(221)
plt.title('Image')
plt.imshow(im)
e=matplotlib.patches.Ellipse(xy=[xc[1],xc[0]],width=2.*beta0[1],height=2.*beta0[0],angle=(180.*phi0/np.pi)) #numpy to matplotlib flip
e.set_clip_box(ax.bbox)
e.set_alpha(0.3)
e.set_facecolor('black')
ax.add_artist(e)
plt.text(xc[1],xc[0],'+',horizontalalignment='center',verticalalignment='center') #numpy to matplotlib flip
plt.colorbar()
plt.xlabel('X/RA')
plt.ylabel('Y/Dec')
plt.subplot(222)
plt.title('Model')
ry=np.array(range(0,im.shape[0]),dtype=float)-xc[0]
rx=np.array(range(0,im.shape[1]),dtype=float)-xc[1]
yy,xx=shapelets.shapelet.xy2Grid(ry,rx)
bvals=shapelets.decomp.genBasisMatrix(beta0,nmax,phi0,yy,xx)
coeffs=shapelets.decomp.solveCoeffs(bvals,im)
mdl=shapelets.img.constructModel(bvals,coeffs,im.shape)
plt.imshow(mdl)
plt.text(xc[1],xc[0],'+',horizontalalignment='center',verticalalignment='center') #numpy to matplotlib flip
plt.colorbar()
plt.xlabel('X/RA')
plt.ylabel('Y/Dec')
plt.subplot(223)
plt.title('Residual')
res=im-mdl
plt.imshow(res)
plt.colorbar()
plt.xlabel('X/RA')
plt.ylabel('Y/Dec')
plt.subplot(224)
plt.title('Coefficients')
sqCoeffs=np.reshape(coeffs,nmax)
#plt.pcolor(sqCoeffs)
plt.imshow(sqCoeffs,interpolation='nearest',origin='lower')
plt.colorbar()
else:
ry=np.array(range(0,im.shape[0]),dtype=float)-xc[0]
rx=np.array(range(0,im.shape[1]),dtype=float)-xc[1]
yy,xx=shapelets.shapelet.xy2Grid(ry,rx)
bvals=shapelets.decomp.genBasisMatrix(beta0,nmax,phi0,yy,xx)
coeffs=shapelets.decomp.solveCoeffs(bvals,im)
ofn=opts.ofn
print 'Writing to file:',ofn
shapelets.fileio.writeHermiteCoeffs(ofn,coeffs,xc,im.shape,beta0,phi0,nmax,info=ifn,pos=[radec[0],radec[1],hdr['dra'],hdr['ddec']])
if show_plots:
if not (opts.savefig is None):
plt.savefig(opts.savefig)
else: plt.show()
| bsd-3-clause |
WillArmentrout/galSims | plotting/PlotWISESim.py | 1 | 1630 | import pylab as p
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib.pyplot import Rectangle # Used to make dummy legend
import math
# Open CSV File
datafileS = open('3DHiiRegions.csv', 'r')
csvFileS = []
for row in datafileS:
csvFileS.append(row.strip().split(','))
xdataS = list()
ydataS = list()
zdataS = list()
indexS = 0
while indexS < len(csvFileS) :
xdataS.append(float(csvFileS[indexS][1]))
ydataS.append(float(csvFileS[indexS][2]))
zdataS.append(float(csvFileS[indexS][3]))
indexS += 1
datafileW = open('wise_hii_V1.0.csv', 'r')
csvFileW = []
for row in datafileW:
csvFileW.append(row.strip().split(','))
# Convert each WISE (GLong, GLat, distance) entry to Cartesian x, y, z lists
xdataW = list()
ydataW = list()
zdataW = list()
indexW = 1
# GLong in column 2
# GLat in column 3
# Distance in Column 13
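# The loop below converts each (GLong, GLat, distance) triple to Cartesian
# x, y, z: GLat is turned into a colatitude (pi/2 - b), GLong is rotated by
# 90 degrees to line up with the simulated frame, and 8.4 (an assumed
# Sun-to-Galactic-centre distance in kpc) is subtracted from y so the origin
# sits at the Galactic centre.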
while indexW < len(csvFileW) :
try:
d = float(csvFileW[indexW][13])
l = (float(csvFileW[indexW][2])+90)*math.pi/180
b = math.pi/2-float(csvFileW[indexW][3])*math.pi/180
xdataW.append(d*math.sin(b)*math.cos(l))
ydataW.append(d*math.sin(b)*math.sin(l)-8.4)
zdataW.append(d*math.cos(b))
except:
pass
indexW += 1
fig=p.figure()
ax = p3.Axes3D(fig)
comb = ax.scatter(xdataS, ydataS, zdataS, s=3, label='Simulated Regions', facecolor='r', lw = 0)
orig = ax.scatter(xdataW, ydataW, zdataW, s=3, label='Wise Data', facecolor='g', lw = 0)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
prox1 = Rectangle((0, 0), 1, 1, fc="g")
prox2 = Rectangle((0, 0), 1, 1, fc="r")
ax.legend([prox1, prox2], ["Wise Data", "Simulated Regions"])
p.show()
| gpl-2.0 |