def listen(self, func):
"""
Listen for parameter changes.
Parameters
----------
func : callable
Function to be called when a parameter changes.
"""
self._C0.listen(func)
self._C1.listen(func)
def _LhD(self):
"""
Implements Lₕ and D.
Returns
-------
Lh : ndarray
Uₕᵀ S₁⁻½ U₁ᵀ.
D : ndarray
(Sₕ ⊗ Sₓ + Iₕₓ)⁻¹.
De : ndarray
(Sₕ ⊗ Sₓₑ + Iₕₓ)⁻¹.
"""
from numpy_sugar.linalg import ddot
self._init_svd()
if self._cache["LhD"] is not None:
return self._cache["LhD"]
S1, U1 = self.C1.eigh()
U1S1 = ddot(U1, 1 / sqrt(S1))
Sh, Uh = eigh(U1S1.T @ self.C0.value() @ U1S1)
self._cache["LhD"] = {
"Lh": (U1S1 @ Uh).T,
"D": 1 / (kron(Sh, self._Sx) + 1),
"De": 1 / (kron(Sh, self._Sxe) + 1),
}
return self._cache["LhD"]
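# A minimal, self-contained numpy sketch of the simultaneous diagonalization
# performed by _LhD above: Lh whitens C1 and diagonalizes C0 at the same time.
# C0 and C1 here are random SPD stand-ins, not the model's actual matrices.
from numpy import diag, eye, allclose, sqrt
from numpy.linalg import eigh
from numpy.random import RandomState

rng = RandomState(0)
A = rng.randn(3, 3)
C0 = A @ A.T + eye(3)
B = rng.randn(3, 3)
C1 = B @ B.T + eye(3)

S1, U1 = eigh(C1)
U1S1 = U1 * (1 / sqrt(S1))  # scale columns: U1 @ diag(1/sqrt(S1))
Sh, Uh = eigh(U1S1.T @ C0 @ U1S1)
Lh = (U1S1 @ Uh).T

assert allclose(Lh @ C1 @ Lh.T, eye(3))    # C1 is whitened
assert allclose(Lh @ C0 @ Lh.T, diag(Sh))  # C0 becomes diagonal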
def value(self):
"""
Covariance matrix K = C₀ ⊗ GGᵀ + C₁ ⊗ I.
Returns
-------
K : ndarray
C₀ ⊗ GGᵀ + C₁ ⊗ I.
"""
C0 = self._C0.value()
C1 = self._C1.value()
return kron(C0, self._GG) + kron(C1, self._I)
def gradient(self):
"""
Gradient of K.
Returns
-------
C0 : ndarray
Derivative of C₀ over its parameters.
C1 : ndarray
Derivative of C₁ over its parameters.
"""
self._init_svd()
C0 = self._C0.gradient()["Lu"].T
C1 = self._C1.gradient()["Lu"].T
grad = {"C0.Lu": kron(C0, self._X).T, "C1.Lu": kron(C1, self._I).T}
return grad
def gradient_dot(self, v):
"""
Implements ∂K⋅v.
Parameters
----------
v : array_like
Vector from ∂K⋅v.
Returns
-------
C0.Lu : ndarray
∂K⋅v, where the gradient is taken over the C₀ parameters.
C1.Lu : ndarray
∂K⋅v, where the gradient is taken over the C₁ parameters.
"""
self._init_svd()
V = unvec(v, (self.G.shape[0], -1) + v.shape[1:])
r = {}
C = self._C0.gradient()["Lu"]
r["C0.Lu"] = tensordot(V.T @ self.G @ self.G.T, C, axes=([-2], [0]))
r["C0.Lu"] = r["C0.Lu"].reshape(V.shape[2:] + (-1,) + (C.shape[-1],), order="F")
C = self._C1.gradient()["Lu"]
r["C1.Lu"] = tensordot(V.T, C, axes=([-2], [0]))
r["C1.Lu"] = r["C1.Lu"].reshape(V.shape[2:] + (-1,) + (C.shape[-1],), order="F")
return r
def solve(self, v):
"""
Implements the product K⁻¹⋅v.
Parameters
----------
v : array_like
Array to be multiplied.
Returns
-------
x : ndarray
Solution x to the equation K⋅x = y.
"""
from numpy_sugar.linalg import ddot
self._init_svd()
L = kron(self.Lh, self.Lx)
return L.T @ ddot(self.D, L @ v, left=True)
def logdet(self):
"""
Implements log|K| = - log|D| + n⋅log|C₁|.
Returns
-------
logdet : float
Log-determinant of K.
"""
self._init_svd()
return -log(self._De).sum() + self.G.shape[0] * self.C1.logdet()
def logdet_gradient(self):
"""
Implements ∂log|K| = Tr[K⁻¹∂K].
It can be shown that::
∂log|K| = diag(D)ᵀdiag(L(∂K)Lᵀ) = diag(D)ᵀ(diag(Lₕ∂C₀Lₕᵀ)⊗diag(LₓGGᵀLₓᵀ)),
when the derivative is over the parameters of C₀. Similarly,
∂log|K| = diag(D)ᵀdiag(L(∂K)Lᵀ) = diag(D)ᵀ(diag(Lₕ∂C₁Lₕᵀ)⊗diag(I)),
over the parameters of C₁.
Returns
-------
C0 : ndarray
Derivative of C₀ over its parameters.
C1 : ndarray
Derivative of C₁ over its parameters.
"""
from numpy_sugar.linalg import dotd
self._init_svd()
dC0 = self._C0.gradient()["Lu"]
grad_C0 = zeros_like(self._C0.Lu)
for i in range(self._C0.Lu.shape[0]):
t = kron(dotd(self.Lh, dC0[..., i] @ self.Lh.T), self._diag_LxGGLxe)
grad_C0[i] = (self._De * t).sum()
dC1 = self._C1.gradient()["Lu"]
grad_C1 = zeros_like(self._C1.Lu)
p = self._Sxe.shape[0]
np = self._G.shape[0] - p
for i in range(self._C1.Lu.shape[0]):
t = (dotd(self.Lh, dC1[..., i] @ self.Lh.T) * np).sum()
t1 = kron(dotd(self.Lh, dC1[..., i] @ self.Lh.T), eye(p))
t += (self._De * t1).sum()
grad_C1[i] = t
return {"C0.Lu": grad_C0, "C1.Lu": grad_C1}
def LdKL_dot(self, v, v1=None):
"""
Implements L(∂K)Lᵀv.
The array v can have one or two dimensions and the first dimension has to have
size n⋅p.
Let vec(V) = v. We have
L(∂K)Lᵀ⋅v = ((Lₕ∂C₀Lₕᵀ) ⊗ (LₓGGᵀLₓᵀ))vec(V) = vec(LₓGGᵀLₓᵀVLₕ∂C₀Lₕᵀ),
when the derivative is over the parameters of C₀. Similarly,
L(∂K)Lᵀv = ((Lₕ∂C₁Lₕᵀ) ⊗ (LₓLₓᵀ))vec(V) = vec(LₓLₓᵀVLₕ∂C₁Lₕᵀ),
over the parameters of C₁.
"""
self._init_svd()
def dot(a, b):
r = tensordot(a, b, axes=([1], [0]))
if a.ndim > b.ndim:
return r.transpose([0, 2, 1])
return r
Lh = self.Lh
V = unvec(v, (self.Lx.shape[0], -1) + v.shape[1:])
LdKL_dot = {
"C0.Lu": empty((v.shape[0],) + v.shape[1:] + (self._C0.Lu.shape[0],)),
"C1.Lu": empty((v.shape[0],) + v.shape[1:] + (self._C1.Lu.shape[0],)),
}
dC0 = self._C0.gradient()["Lu"]
for i in range(self._C0.Lu.shape[0]):
t = dot(self._LxG, dot(self._LxG.T, dot(V, Lh @ dC0[..., i] @ Lh.T)))
LdKL_dot["C0.Lu"][..., i] = t.reshape((-1,) + t.shape[2:], order="F")
dC1 = self._C1.gradient()["Lu"]
for i in range(self._C1.Lu.shape[0]):
t = dot(V, Lh @ dC1[..., i] @ Lh.T)
LdKL_dot["C1.Lu"][..., i] = t.reshape((-1,) + t.shape[2:], order="F")
return LdKL_dot
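# The vec–Kronecker identity these methods rely on, checked standalone:
# (A ⊗ B)·vec(V) = vec(B·V·Aᵀ), with vec stacking columns (Fortran order).
# A, B, and V below are random illustrative matrices.
from numpy import kron, allclose
from numpy.random import RandomState

rng = RandomState(0)
A = rng.randn(3, 3)
B = rng.randn(4, 4)
V = rng.randn(4, 3)

def vec(X):
    return X.reshape(-1, order="F")

assert allclose(kron(A, B) @ vec(V), vec(B @ V @ A.T))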
def rsolve(A, y):
"""
Robust solve Ax=y.
"""
from numpy_sugar.linalg import rsolve as _rsolve
try:
beta = _rsolve(A, y)
except LinAlgError:
msg = "Could not converge to solve Ax=y."
msg += " Setting x to zero."
warnings.warn(msg, RuntimeWarning)
beta = zeros(A.shape[0])
return beta
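# Hedged usage sketch: rsolve behaves like a linear solver but degrades
# gracefully, returning zeros (with a RuntimeWarning) if the underlying
# solver raises LinAlgError. The matrix below is illustrative.
from numpy import array

A = array([[1.0, 2.0], [3.0, 4.0]])
y = array([1.0, 2.0])
beta = rsolve(A, y)  # beta solves A @ beta = y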
def multivariate_normal(random, mean, cov):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
random : np.random.RandomState instance
Random state.
mean : array_like
Mean of the n-dimensional distribution.
cov : array_like
Covariance matrix of the distribution. It must be symmetric and
positive-definite for proper sampling.
Returns
-------
out : ndarray
The drawn sample.
"""
from numpy.linalg import cholesky
L = cholesky(cov)
return L @ random.randn(L.shape[0]) + mean
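# Usage sketch for multivariate_normal above (values are illustrative):
from numpy import array
from numpy.random import RandomState

random = RandomState(0)
mean = array([0.0, 1.0])
cov = array([[1.0, 0.3], [0.3, 2.0]])  # symmetric positive-definite
sample = multivariate_normal(random, mean, cov)  # ndarray of shape (2,)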
def gradient(self):
"""
Sum of covariance function derivatives.
Returns
-------
dict
∂K₀ + ∂K₁ + ⋯
"""
grad = {}
for i, f in enumerate(self._covariances):
for varname, g in f.gradient().items():
grad[f"{self._name}[{i}].{varname}"] = g
return grad
def value(self):
"""
Covariance matrix.
Returns
-------
K : ndarray
s⋅XXᵀ.
"""
X = self.X
return self.scale * (X @ X.T)
def B(self):
"""
Effect-sizes parameter, B.
"""
return unvec(self._vecB.value, (self.X.shape[1], self.A.shape[0]))
def bernoulli_sample(
offset,
G,
heritability=0.5,
causal_variants=None,
causal_variance=0,
random_state=None,
):
r"""Bernoulli likelihood sampling.
Sample according to
.. math::
\mathbf y \sim \prod_{i=1}^n
\text{Bernoulli}(\mu_i = \text{logit}(z_i))
\mathcal N(~ o \mathbf 1 + \mathbf a^\intercal \boldsymbol\alpha;
~ (h^2 - v_c)\mathrm G^\intercal\mathrm G +
(1-h^2-v_c)\mathrm I ~)
using the canonical Logit link function to define the conditional Bernoulli
mean :math:`\mu_i`.
The causal :math:`\mathbf a` covariates and the corresponding effect-sizes
are randomly drawn as follows. The ``causal_variants``, if given, are first
normalized to mean zero and standard deviation one, and then have their
elements divided by the square root of the number of variants::
causal_variants = _stdnorm(causal_variants, axis=0)
causal_variants /= sqrt(causal_variants.shape[1])
The causal effect-sizes :math:`\boldsymbol\alpha` are drawn from
:math:`\{-1, +1\}` and subsequently normalized to mean zero and standard
deviation one.
Parameters
----------
random_state : random_state
Set the initial random state.
Example
-------
.. doctest::
>>> from glimix_core.random import bernoulli_sample
>>> from numpy.random import RandomState
>>> offset = 5
>>> G = [[1, -1], [2, 1]]
>>> bernoulli_sample(offset, G, random_state=RandomState(0))
array([1., 1.])
"""
link = LogitLink()
mean, cov = _mean_cov(
offset, G, heritability, causal_variants, causal_variance, random_state
)
lik = BernoulliProdLik(link)
sampler = GGPSampler(lik, mean, cov)
return sampler.sample(random_state)
def poisson_sample(
offset,
G,
heritability=0.5,
causal_variants=None,
causal_variance=0,
random_state=None,
):
"""Poisson likelihood sampling.
Parameters
----------
random_state : random_state
Set the initial random state.
Example
-------
.. doctest::
>>> from glimix_core.random import poisson_sample
>>> from numpy.random import RandomState
>>> offset = -0.5
>>> G = [[0.5, -1], [2, 1]]
>>> poisson_sample(offset, G, random_state=RandomState(0))
array([0, 6])
"""
mean, cov = _mean_cov(
offset, G, heritability, causal_variants, causal_variance, random_state
)
link = LogLink()
lik = PoissonProdLik(link)
sampler = GGPSampler(lik, mean, cov)
return sampler.sample(random_state)
def L(self):
r"""Cholesky decomposition of :math:`\mathrm B`.
.. math::
\mathrm B = \mathrm Q^{\intercal}\tilde{\mathrm{T}}\mathrm Q
+ \mathrm{S}^{-1}
"""
from numpy_sugar.linalg import ddot, sum2diag
if self._L_cache is not None:
return self._L_cache
s = self._cov["scale"]
d = self._cov["delta"]
Q = self._cov["QS"][0][0]
S = self._cov["QS"][1]
ddot(self.A * self._site.tau, Q, left=True, out=self._NxR)
B = dot(Q.T, self._NxR, out=self._RxR)
B *= 1 - d
sum2diag(B, 1.0 / S / s, out=B)
self._L_cache = _cho_factor(B)
return self._L_cache
def fit(self, verbose=True, factr=1e5, pgtol=1e-7):
r"""Maximise the marginal likelihood.
Parameters
----------
verbose : bool
``True`` for progress output; ``False`` otherwise.
Defaults to ``True``.
factr : float, optional
The iteration stops when
``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, where ``eps`` is
the machine precision.
pgtol : float, optional
The iteration will stop when ``max{|proj g_i | i = 1, ..., n} <= pgtol``
where ``pg_i`` is the i-th component of the projected gradient.
Notes
-----
Please, refer to :func:`scipy.optimize.fmin_l_bfgs_b` for further information
about ``factr`` and ``pgtol``.
"""
self._maximize(verbose=verbose, factr=factr, pgtol=pgtol)
def covariance(self):
r"""Covariance of the prior.
Returns
-------
:class:`numpy.ndarray`
:math:`v_0 \mathrm K + v_1 \mathrm I`.
"""
from numpy_sugar.linalg import ddot, sum2diag
Q0 = self._QS[0][0]
S0 = self._QS[1]
return sum2diag(dot(ddot(Q0, self.v0 * S0), Q0.T), self.v1)
def fit(self, verbose=True, factr=1e5, pgtol=1e-7):
r"""Maximise the marginal likelihood.
Parameters
----------
verbose : bool
``True`` for progress output; ``False`` otherwise.
Defaults to ``True``.
factr : float, optional
The iteration stops when
``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, where ``eps`` is
the machine precision.
pgtol : float, optional
The iteration will stop when ``max{|proj g_i | i = 1, ..., n} <= pgtol``
where ``pg_i`` is the i-th component of the projected gradient.
Notes
-----
Please, refer to :func:`scipy.optimize.fmin_l_bfgs_b` for further information
about ``factr`` and ``pgtol``.
"""
self._verbose = verbose
self._maximize(verbose=verbose, factr=factr, pgtol=pgtol)
self._verbose = False
def posteriori_mean(self):
r""" Mean of the estimated posteriori.
This is also the maximum a posteriori estimation of the latent variable.
"""
from numpy_sugar.linalg import rsolve
Sigma = self.posteriori_covariance()
eta = self._ep._posterior.eta
return dot(Sigma, eta + rsolve(GLMM.covariance(self), self.mean()))
def posteriori_covariance(self):
r""" Covariance of the estimated posteriori."""
K = GLMM.covariance(self)
tau = self._ep._posterior.tau
return pinv(pinv(K) + diag(1 / tau))
def _bstar_1effect(beta, alpha, yTBy, yTBX, yTBM, XTBX, XTBM, MTBM):
"""
Same as :func:`_bstar_set` but for single-effect.
"""
from numpy_sugar import epsilon
from numpy_sugar.linalg import dotd
from numpy import sum
r = full(MTBM[0].shape[0], yTBy)
r -= 2 * add.reduce([dot(i, beta) for i in yTBX])
r -= 2 * add.reduce([i * alpha for i in yTBM])
r += add.reduce([dotd(beta.T, dot(i, beta)) for i in XTBX])
r += add.reduce([dotd(beta.T, i * alpha) for i in XTBM])
r += add.reduce([sum(alpha * i * beta, axis=0) for i in XTBM])
r += add.reduce([alpha * i.ravel() * alpha for i in MTBM])
return clip(r, epsilon.tiny, inf)
def _bstar_set(beta, alpha, yTBy, yTBX, yTBM, XTBX, XTBM, MTBM):
"""
Compute -2𝐲ᵀBEⱼ𝐛ⱼ + (𝐛ⱼEⱼ)ᵀBEⱼ𝐛ⱼ.
For 𝐛ⱼ = [𝜷ⱼᵀ 𝜶ⱼᵀ]ᵀ.
"""
from numpy_sugar import epsilon
r = yTBy
r -= 2 * add.reduce([i @ beta for i in yTBX])
r -= 2 * add.reduce([i @ alpha for i in yTBM])
r += add.reduce([beta.T @ i @ beta for i in XTBX])
r += 2 * add.reduce([beta.T @ i @ alpha for i in XTBM])
r += add.reduce([alpha.T @ i @ alpha for i in MTBM])
return clip(r, epsilon.tiny, inf)
def null_lml(self):
"""
Log of the marginal likelihood for the null hypothesis.
It is implemented as ::
2·log(p(Y)) = -n·log(2𝜋s) - log|D| - n,
for which s and 𝜷 are optimal.
Returns
-------
lml : float
Log of the marginal likelihood.
"""
n = self._nsamples
scale = self.null_scale
return (self._static_lml() - n * log(scale)) / 2
def null_beta(self):
"""
Optimal 𝜷 according to the marginal likelihood.
It is computed by solving the equation ::
(XᵀBX)𝜷 = XᵀB𝐲.
Returns
-------
beta : ndarray
Optimal 𝜷.
"""
ETBE = self._ETBE
yTBX = self._yTBX
A = sum(i.XTBX for i in ETBE)
b = sum(yTBX)
return rsolve(A, b)
def null_beta_covariance(self):
"""
Covariance of the optimal 𝜷 according to the marginal likelihood.
Returns
-------
beta_covariance : ndarray
(Xᵀ(s(K + vI))⁻¹X)⁻¹.
"""
A = sum(i @ j.T for (i, j) in zip(self._XTQDi, self._XTQ))
return self.null_scale * pinv(A)
def null_scale(self):
"""
Optimal s according to the marginal likelihood.
The optimal s is given by ::
s = n⁻¹𝐲ᵀB(𝐲 - X𝜷),
where 𝜷 is optimal.
Returns
-------
scale : float
Optimal scale.
"""
n = self._nsamples
beta = self.null_beta
sqrdot = self._yTBy - dot(sum(self._yTBX), beta)
return sqrdot / n
def fast_scan(self, M, verbose=True):
"""
LMLs, fixed-effect sizes, and scales for single-marker scan.
Parameters
----------
M : array_like
Matrix of fixed-effects across columns.
verbose : bool, optional
``True`` for progress information; ``False`` otherwise.
Defaults to ``True``.
Returns
-------
lmls : ndarray
Log of the marginal likelihoods.
effsizes0 : ndarray
Covariate fixed-effect sizes.
effsizes1 : ndarray
Candidate set fixed-effect sizes.
scales : ndarray
Scales.
"""
from tqdm import tqdm
if M.ndim != 2:
raise ValueError("`M` array must be bidimensional.")
p = M.shape[1]
lmls = empty(p)
effsizes0 = empty((p, self._XTQ[0].shape[0]))
effsizes0_se = empty((p, self._XTQ[0].shape[0]))
effsizes1 = empty(p)
effsizes1_se = empty(p)
scales = empty(p)
if verbose:
nchunks = min(p, 30)
else:
nchunks = min(p, 1)
chunk_size = (p + nchunks - 1) // nchunks
for i in tqdm(range(nchunks), desc="Scanning", disable=not verbose):
start = i * chunk_size
stop = min(start + chunk_size, M.shape[1])
r = self._fast_scan_chunk(M[:, start:stop])
lmls[start:stop] = r["lml"]
effsizes0[start:stop, :] = r["effsizes0"]
effsizes0_se[start:stop, :] = r["effsizes0_se"]
effsizes1[start:stop] = r["effsizes1"]
effsizes1_se[start:stop] = r["effsizes1_se"]
scales[start:stop] = r["scale"]
return {
"lml": lmls,
"effsizes0": effsizes0,
"effsizes0_se": effsizes0_se,
"effsizes1": effsizes1,
"effsizes1_se": effsizes1_se,
"scale": scales,
}
def scan(self, M):
"""
LML, fixed-effect sizes, and scale of the candidate set.
Parameters
----------
M : array_like
Fixed-effects set.
Returns
-------
lml : float
Log of the marginal likelihood.
effsizes0 : ndarray
Covariates fixed-effect sizes.
effsizes0_se : ndarray
Covariates fixed-effect size standard errors.
effsizes1 : ndarray
Candidate set fixed-effect sizes.
effsizes1_se : ndarray
Candidate fixed-effect size standard errors.
scale : ndarray
Optimal scale.
"""
from numpy_sugar.linalg import ddot
from numpy_sugar import is_all_finite
M = asarray(M, float)
if M.shape[1] == 0:
return {
"lml": self.null_lml(),
"effsizes0": self.null_beta,
"effsizes0_se": self.null_beta_se,
"effsizes1": empty((0)),
"effsizes1_se": empty((0)),
"scale": self.null_scale,
}
if not is_all_finite(M):
raise ValueError("M parameter has non-finite elements.")
MTQ = [dot(M.T, Q) for Q in self._QS[0] if Q.size > 0]
yTBM = [dot(i, j.T) for (i, j) in zip(self._yTQDi, MTQ)]
XTBM = [dot(i, j.T) for (i, j) in zip(self._XTQDi, MTQ)]
D = self._D
MTBM = [ddot(i, 1 / j) @ i.T for i, j in zip(MTQ, D) if j.min() > 0]
return self._multicovariate_set(yTBM, XTBM, MTBM)
def null_lml(self):
"""
Log of the marginal likelihood for the null hypothesis.
It is implemented as ::
2·log(p(Y)) = -n·p·log(2𝜋s) - log|K| - n·p,
for which s and 𝚩 are optimal.
Returns
-------
lml : float
Log of the marginal likelihood.
"""
np = self._nsamples * self._ntraits
scale = self.null_scale
return self._static_lml() / 2 - np * safe_log(scale) / 2 - np / 2
def null_scale(self):
"""
Optimal s according to the marginal likelihood.
The optimal s is given by
s = (n·p)⁻¹𝐲ᵀK⁻¹(𝐲 - 𝐦),
where 𝐦 = (A ⊗ X)vec(𝚩) and 𝚩 is optimal.
Returns
-------
scale : float
Optimal scale.
"""
np = self._nsamples * self._ntraits
b = vec(self.null_beta)
mKiy = b.T @ self._MKiy
sqrtdot = self._yKiy - mKiy
scale = sqrtdot / np
return scale
def scan(self, A1, X1):
"""
LML, fixed-effect sizes, and scale of the candidate set.
Parameters
----------
A1 : (p, e) array_like
Trait-by-environments design matrix.
X1 : (n, m) array_like
Variants set matrix.
Returns
-------
lml : float
Log of the marginal likelihood for the set.
effsizes0 : (c, p) ndarray
Fixed-effect sizes for the covariates.
effsizes0_se : (c, p) ndarray
Fixed-effect size standard errors for the covariates.
effsizes1 : (m, e) ndarray
Fixed-effect sizes for the candidates.
effsizes1_se : (m, e) ndarray
Fixed-effect size standard errors for the candidates.
scale : float
Optimal scale.
"""
from numpy import empty
from numpy.linalg import multi_dot
from numpy_sugar import epsilon, is_all_finite
from scipy.linalg import cho_solve
A1 = asarray(A1, float)
X1 = asarray(X1, float)
if not is_all_finite(A1):
raise ValueError("A1 parameter has non-finite elements.")
if not is_all_finite(X1):
raise ValueError("X1 parameter has non-finite elements.")
if A1.shape[1] == 0:
beta_se = sqrt(self.null_beta_covariance.diagonal())
return {
"lml": self.null_lml(),
"effsizes0": unvec(self.null_beta, (self._ncovariates, -1)),
"effsizes0_se": unvec(beta_se, (self._ncovariates, -1)),
"effsizes1": empty((0,)),
"effsizes1_se": empty((0,)),
"scale": self.null_scale,
}
X1X1 = X1.T @ X1
XX1 = self._X.T @ X1
AWA1 = self._WA.T @ A1
A1W = A1.T @ self._W
GX1 = self._G.T @ X1
MRiM1 = kron(AWA1, XX1)
M1RiM1 = kron(A1W @ A1, X1X1)
M1Riy = vec(multi_dot([X1.T, self._Y, A1W.T]))
XRiM1 = kron(self._WL0.T @ A1, GX1)
ZiXRiM1 = cho_solve(self._Lz, XRiM1)
MRiXZiXRiM1 = self._XRiM.T @ ZiXRiM1
M1RiXZiXRiM1 = XRiM1.T @ ZiXRiM1
M1RiXZiXRiy = XRiM1.T @ self._ZiXRiy
T0 = [[self._MRiM, MRiM1], [MRiM1.T, M1RiM1]]
T1 = [[self._MRiXZiXRiM, MRiXZiXRiM1], [MRiXZiXRiM1.T, M1RiXZiXRiM1]]
T2 = [self._MRiy, M1Riy]
T3 = [self._MRiXZiXRiy, M1RiXZiXRiy]
MKiM = block(T0) - block(T1)
MKiy = block(T2) - block(T3)
beta = rsolve(MKiM, MKiy)
mKiy = beta.T @ MKiy
cp = self._ntraits * self._ncovariates
effsizes0 = unvec(beta[:cp], (self._ncovariates, self._ntraits))
effsizes1 = unvec(beta[cp:], (X1.shape[1], A1.shape[1]))
np = self._nsamples * self._ntraits
sqrtdot = self._yKiy - mKiy
scale = clip(sqrtdot / np, epsilon.tiny, inf)
lml = self._static_lml() / 2 - np * safe_log(scale) / 2 - np / 2
effsizes_se = sqrt(clip(scale * pinv(MKiM).diagonal(), epsilon.tiny, inf))
effsizes0_se = unvec(effsizes_se[:cp], (self._ncovariates, self._ntraits))
effsizes1_se = unvec(effsizes_se[cp:], (X1.shape[1], A1.shape[1]))
return {
"lml": lml,
"effsizes0": effsizes0,
"effsizes1": effsizes1,
"scale": scale,
"effsizes0_se": effsizes0_se,
"effsizes1_se": effsizes1_se,
}
def sample(self, random_state=None):
r"""Sample from the specified distribution.
Parameters
----------
random_state : random_state
Set the initial random state.
Returns
-------
numpy.ndarray
Sample.
"""
from numpy_sugar import epsilon
from numpy_sugar.linalg import sum2diag
from numpy_sugar.random import multivariate_normal
if random_state is None:
random_state = RandomState()
m = self._mean.value()
K = self._cov.value().copy()
sum2diag(K, +epsilon.small, out=K)
return self._lik.sample(multivariate_normal(m, K, random_state), random_state)
def economic_qs_zeros(n):
"""Eigen decomposition of a zero matrix."""
Q0 = empty((n, 0))
Q1 = eye(n)
S0 = empty(0)
return ((Q0, Q1), S0)
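# Usage sketch: the "economic" eigendecomposition of an n×n zero matrix has
# no nonzero eigenpairs, so Q0 is empty and Q1 spans everything.
(Q0, Q1), S0 = economic_qs_zeros(3)
# Q0.shape == (3, 0); Q1 is eye(3); S0.shape == (0,)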
def get_fast_scanner(self):
"""
Return :class:`.FastScanner` for association scan.
Returns
-------
:class:`.FastScanner`
Instance of a class designed to perform very fast association scan.
"""
terms = self._terms
return KronFastScanner(self._Y, self._mean.A, self._mean.X, self._cov.Ge, terms)
def lml(self):
"""
Log of the marginal likelihood.
Let 𝐲 = vec(Y), M = A⊗X, and H = MᵀK⁻¹M. The restricted log of the marginal
likelihood is given by [R07]_::
2⋅log(p(𝐲)) = -(n⋅p - c⋅p) log(2π) + log(|MᵀM|) - log(|K|) - log(|H|)
- (𝐲-𝐦)ᵀ K⁻¹ (𝐲-𝐦),
where 𝐦 = M𝛃 for 𝛃 = H⁻¹MᵀK⁻¹𝐲.
For implementation purpose, let X = (L₀ ⊗ G) and R = (L₁ ⊗ I)(L₁ ⊗ I)ᵀ.
The covariance can be written as::
K = XXᵀ + R.
From the Woodbury matrix identity, we have
𝐲ᵀK⁻¹𝐲 = 𝐲ᵀR⁻¹𝐲 - 𝐲ᵀR⁻¹XZ⁻¹XᵀR⁻¹𝐲,
where Z = I + XᵀR⁻¹X. Note that R⁻¹ = (U₁S₁⁻¹U₁ᵀ) ⊗ I and ::
XᵀR⁻¹𝐲 = (L₀ᵀW ⊗ Gᵀ)𝐲 = vec(GᵀYWL₀),
where W = U₁S₁⁻¹U₁ᵀ. The term GᵀY can be calculated only once and it will form a
r×p matrix. We similarly have ::
XᵀR⁻¹M = (L₀ᵀWA) ⊗ (GᵀX),
for which GᵀX is pre-computed.
The log-determinant of the covariance matrix is given by
log(|K|) = log(|Z|) - log(|R⁻¹|) = log(|Z|) - 2·n·log(|U₁S₁⁻½|).
The log of the marginal likelihood can be rewritten as::
2⋅log(p(𝐲)) = -(n⋅p - c⋅p) log(2π) + log(|MᵀM|)
- log(|Z|) + 2·n·log(|U₁S₁⁻½|)
- log(|MᵀR⁻¹M - MᵀR⁻¹XZ⁻¹XᵀR⁻¹M|)
- 𝐲ᵀR⁻¹𝐲 + (𝐲ᵀR⁻¹X)Z⁻¹(XᵀR⁻¹𝐲)
- 𝐦ᵀR⁻¹𝐦 + (𝐦ᵀR⁻¹X)Z⁻¹(XᵀR⁻¹𝐦)
+ 2𝐲ᵀR⁻¹𝐦 - 2(𝐲ᵀR⁻¹X)Z⁻¹(XᵀR⁻¹𝐦).
Returns
-------
lml : float
Log of the marginal likelihood.
References
----------
.. [R07] LaMotte, L. R. (2007). A direct derivation of the REML likelihood
function. Statistical Papers, 48(2), 321-327.
"""
terms = self._terms
yKiy = terms["yKiy"]
mKiy = terms["mKiy"]
mKim = terms["mKim"]
lml = -self._df * log2pi + self._logdet_MM - self._logdetK
lml -= self._logdetH
lml += -yKiy - mKim + 2 * mKiy
return lml / 2
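# Standalone numpy check of the Woodbury step used in the docstring above:
# K = XXᵀ + R  ⇒  K⁻¹ = R⁻¹ - R⁻¹X·Z⁻¹·XᵀR⁻¹  with  Z = I + XᵀR⁻¹X.
# X and R below are small illustrative matrices, not the model's.
from numpy import eye, allclose
from numpy.linalg import inv
from numpy.random import RandomState

rng = RandomState(0)
X = rng.randn(5, 2)
R = 2.0 * eye(5)
Ri = inv(R)
Z = eye(2) + X.T @ Ri @ X
K = X @ X.T + R
assert allclose(inv(K), Ri - Ri @ X @ inv(Z) @ X.T @ Ri)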
def _lml_gradient(self):
"""
Gradient of the log of the marginal likelihood.
Let 𝐲 = vec(Y), 𝕂 = K⁻¹∂(K)K⁻¹, and H = MᵀK⁻¹M. The gradient is given by::
2⋅∂log(p(𝐲)) = -tr(K⁻¹∂K) - tr(H⁻¹∂H) + 𝐲ᵀ𝕂𝐲 - 𝐦ᵀ𝕂(2⋅𝐲-𝐦)
- 2⋅(𝐦-𝐲)ᵀK⁻¹∂(𝐦).
Observe that
∂𝛃 = -H⁻¹(∂H)𝛃 - H⁻¹Mᵀ𝕂𝐲 and ∂H = -Mᵀ𝕂M.
Let Z = I + XᵀR⁻¹X and 𝓡 = R⁻¹(∂K)R⁻¹. We use Woodbury matrix identity to
write ::
𝐲ᵀ𝕂𝐲 = 𝐲ᵀ𝓡𝐲 - 2(𝐲ᵀ𝓡X)Z⁻¹(XᵀR⁻¹𝐲) + (𝐲ᵀR⁻¹X)Z⁻¹(Xᵀ𝓡X)Z⁻¹(XᵀR⁻¹𝐲)
Mᵀ𝕂M = Mᵀ𝓡M - 2(Mᵀ𝓡X)Z⁻¹(XᵀR⁻¹M) + (MᵀR⁻¹X)Z⁻¹(Xᵀ𝓡X)Z⁻¹(XᵀR⁻¹M)
Mᵀ𝕂𝐲 = Mᵀ𝓡𝐲 - (MᵀR⁻¹X)Z⁻¹(Xᵀ𝓡𝐲) - (Mᵀ𝓡X)Z⁻¹(XᵀR⁻¹𝐲)
+ (MᵀR⁻¹X)Z⁻¹(Xᵀ𝓡X)Z⁻¹(XᵀR⁻¹𝐲)
H = MᵀR⁻¹M - (MᵀR⁻¹X)Z⁻¹(XᵀR⁻¹M),
where we have used parentheses to separate expressions
that we will compute separately. For example, we have ::
𝐲ᵀ𝓡𝐲 = 𝐲ᵀ(U₁S₁⁻¹U₁ᵀ ⊗ I)(∂C₀ ⊗ GGᵀ)(U₁S₁⁻¹U₁ᵀ ⊗ I)𝐲
= 𝐲ᵀ(U₁S₁⁻¹U₁ᵀ∂C₀ ⊗ G)(U₁S₁⁻¹U₁ᵀ ⊗ Gᵀ)𝐲
= vec(GᵀYU₁S₁⁻¹U₁ᵀ∂C₀)ᵀvec(GᵀYU₁S₁⁻¹U₁ᵀ),
when the derivative is over the parameters of C₀. Otherwise, we have
𝐲ᵀ𝓡𝐲 = vec(YU₁S₁⁻¹U₁ᵀ∂C₁)ᵀvec(YU₁S₁⁻¹U₁ᵀ).
The above equations can be more compactly written as
𝐲ᵀ𝓡𝐲 = vec(EᵢᵀYW∂Cᵢ)ᵀvec(EᵢᵀYW),
where W = U₁S₁⁻¹U₁ᵀ, E₀ = G, and E₁ = I. We will now just state the results for
the other instances of the aBc form, which follow similar derivations::
Xᵀ𝓡X = (L₀ᵀW∂CᵢWL₀) ⊗ (GᵀEᵢEᵢᵀG)
Mᵀ𝓡y = (AᵀW∂Cᵢ⊗XᵀEᵢ)vec(EᵢᵀYW) = vec(XᵀEᵢEᵢᵀYW∂CᵢWA)
Mᵀ𝓡X = AᵀW∂CᵢWL₀ ⊗ XᵀEᵢEᵢᵀG
Mᵀ𝓡M = AᵀW∂CᵢWA ⊗ XᵀEᵢEᵢᵀX
Xᵀ𝓡𝐲 = GᵀEᵢEᵢᵀYW∂CᵢWL₀
From Woodbury matrix identity and Kronecker product properties we have ::
tr(K⁻¹∂K) = tr[W∂Cᵢ]tr[EᵢEᵢᵀ] - tr[Z⁻¹(Xᵀ𝓡X)]
tr(H⁻¹∂H) = -tr[H⁻¹(Mᵀ𝕂M)], with H as above.
Note also that ::
∂𝛃 = H⁻¹Mᵀ𝕂M𝛃 - H⁻¹Mᵀ𝕂𝐲.
Returns
-------
C0.Lu : ndarray
Gradient of the log of the marginal likelihood over C₀ parameters.
C1.Lu : ndarray
Gradient of the log of the marginal likelihood over C₁ parameters.
"""
from scipy.linalg import cho_solve
terms = self._terms
dC0 = self._cov.C0.gradient()["Lu"]
dC1 = self._cov.C1.gradient()["Lu"]
b = terms["b"]
W = terms["W"]
Lh = terms["Lh"]
Lz = terms["Lz"]
WA = terms["WA"]
WL0 = terms["WL0"]
YW = terms["YW"]
MRiM = terms["MRiM"]
MRiy = terms["MRiy"]
XRiM = terms["XRiM"]
XRiy = terms["XRiy"]
ZiXRiM = terms["ZiXRiM"]
ZiXRiy = terms["ZiXRiy"]
WdC0 = _mdot(W, dC0)
WdC1 = _mdot(W, dC1)
AWdC0 = _mdot(WA.T, dC0)
AWdC1 = _mdot(WA.T, dC1)
# Mᵀ𝓡M
MR0M = _mkron(_mdot(AWdC0, WA), self._XGGX)
MR1M = _mkron(_mdot(AWdC1, WA), self._XX)
# Mᵀ𝓡X
MR0X = _mkron(_mdot(AWdC0, WL0), self._XGGG)
MR1X = _mkron(_mdot(AWdC1, WL0), self._GX.T)
# Mᵀ𝓡𝐲 = (AᵀW∂Cᵢ⊗XᵀEᵢ)vec(EᵢᵀYW) = vec(XᵀEᵢEᵢᵀYW∂CᵢWA)
MR0y = vec(_mdot(self._XGGY, _mdot(WdC0, WA)))
MR1y = vec(_mdot(self._XY, WdC1, WA))
# Xᵀ𝓡X
XR0X = _mkron(_mdot(WL0.T, dC0, WL0), self._GGGG)
XR1X = _mkron(_mdot(WL0.T, dC1, WL0), self._GG)
# Xᵀ𝓡𝐲
XR0y = vec(_mdot(self._GGGY, WdC0, WL0))
XR1y = vec(_mdot(self._GY, WdC1, WL0))
# 𝐲ᵀ𝓡𝐲 = vec(EᵢᵀYW∂Cᵢ)ᵀvec(EᵢᵀYW)
yR0y = vec(_mdot(self._GY, WdC0)).T @ vec(self._GY @ W)
yR1y = (YW.T * _mdot(self._Y, WdC1).T).T.sum(axis=(0, 1))
ZiXR0X = cho_solve(Lz, XR0X)
ZiXR1X = cho_solve(Lz, XR1X)
ZiXR0y = cho_solve(Lz, XR0y)
ZiXR1y = cho_solve(Lz, XR1y)
# Mᵀ𝕂y = Mᵀ𝓡𝐲 - (MᵀR⁻¹X)Z⁻¹(Xᵀ𝓡𝐲) - (Mᵀ𝓡X)Z⁻¹(XᵀR⁻¹𝐲)
# + (MᵀR⁻¹X)Z⁻¹(Xᵀ𝓡X)Z⁻¹(XᵀR⁻¹𝐲)
MK0y = MR0y - _mdot(XRiM.T, ZiXR0y) - _mdot(MR0X, ZiXRiy)
MK0y += _mdot(XRiM.T, ZiXR0X, ZiXRiy)
MK1y = MR1y - _mdot(XRiM.T, ZiXR1y) - _mdot(MR1X, ZiXRiy)
MK1y += _mdot(XRiM.T, ZiXR1X, ZiXRiy)
# 𝐲ᵀ𝕂𝐲 = 𝐲ᵀ𝓡𝐲 - 2(𝐲ᵀ𝓡X)Z⁻¹(XᵀR⁻¹𝐲) + (𝐲ᵀR⁻¹X)Z⁻¹(Xᵀ𝓡X)Z⁻¹(XᵀR⁻¹𝐲)
yK0y = yR0y - 2 * XR0y.T @ ZiXRiy + ZiXRiy.T @ _mdot(XR0X, ZiXRiy)
yK1y = yR1y - 2 * XR1y.T @ ZiXRiy + ZiXRiy.T @ _mdot(XR1X, ZiXRiy)
# Mᵀ𝕂M = Mᵀ𝓡M - (Mᵀ𝓡X)Z⁻¹(XᵀR⁻¹M) - (MᵀR⁻¹X)Z⁻¹(Xᵀ𝓡M)
# + (MᵀR⁻¹X)Z⁻¹(Xᵀ𝓡X)Z⁻¹(XᵀR⁻¹M)
MR0XZiXRiM = _mdot(MR0X, ZiXRiM)
MK0M = MR0M - MR0XZiXRiM - MR0XZiXRiM.transpose([1, 0, 2])
MK0M += _mdot(ZiXRiM.T, XR0X, ZiXRiM)
MR1XZiXRiM = _mdot(MR1X, ZiXRiM)
MK1M = MR1M - MR1XZiXRiM - MR1XZiXRiM.transpose([1, 0, 2])
MK1M += _mdot(ZiXRiM.T, XR1X, ZiXRiM)
MK0m = _mdot(MK0M, b)
mK0y = b.T @ MK0y
mK0m = b.T @ MK0m
MK1m = _mdot(MK1M, b)
mK1y = b.T @ MK1y
mK1m = b.T @ MK1m
XRim = XRiM @ b
MRim = MRiM @ b
db = {"C0.Lu": cho_solve(Lh, MK0m - MK0y), "C1.Lu": cho_solve(Lh, MK1m - MK1y)}
grad = {
"C0.Lu": -trace(WdC0) * self._trGG + trace(ZiXR0X),
"C1.Lu": -trace(WdC1) * self.nsamples + trace(ZiXR1X),
}
if self._restricted:
grad["C0.Lu"] += cho_solve(Lh, MK0M).diagonal().sum(1)
grad["C1.Lu"] += cho_solve(Lh, MK1M).diagonal().sum(1)
mKiM = MRim.T - XRim.T @ ZiXRiM
yKiM = MRiy.T - XRiy.T @ ZiXRiM
grad["C0.Lu"] += yK0y - 2 * mK0y + mK0m - 2 * _mdot(mKiM, db["C0.Lu"])
grad["C0.Lu"] += 2 * _mdot(yKiM, db["C0.Lu"])
grad["C1.Lu"] += yK1y - 2 * mK1y + mK1m - 2 * _mdot(mKiM, db["C1.Lu"])
grad["C1.Lu"] += 2 * _mdot(yKiM, db["C1.Lu"])
grad["C0.Lu"] /= 2
grad["C1.Lu"] /= 2
return grad
def gradient(self):
r"""Gradient of the log of the marginal likelihood.
Returns
-------
dict
Map between variables to their gradient values.
"""
self._update_approx()
g = self._ep.lml_derivatives(self._X)
ed = exp(-self.logitdelta)
es = exp(self.logscale)
grad = dict()
grad["logitdelta"] = g["delta"] * (ed / (1 + ed)) / (1 + ed)
grad["logscale"] = g["scale"] * es
grad["beta"] = g["mean"]
return grad
def gradient(self):
"""
Derivative of the covariance matrix over the lower triangular, flat part of L.
It is equal to
∂K/∂Lᵢⱼ = ALᵀ + LAᵀ,
where A is an n×m matrix of zeros except at [A]ᵢⱼ = 1.
Returns
-------
Lu : ndarray
Derivative of K over the lower-triangular, flat part of L.
"""
L = self.L
n = self.L.shape[0]
grad = {"Lu": zeros((n, n, n * self._L.shape[1]))}
for ii in range(self._L.shape[0] * self._L.shape[1]):
row = ii // self._L.shape[1]
col = ii % self._L.shape[1]
grad["Lu"][row, :, ii] = L[:, col]
grad["Lu"][:, row, ii] += L[:, col]
return grad
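# Hedged finite-difference check of the derivative above for a small,
# illustrative 3×2 L; standalone numpy, independent of the class internals.
from numpy import zeros, allclose
from numpy.random import RandomState

rng = RandomState(0)
n, m = 3, 2
L = rng.randn(n, m)
i, j, eps = 2, 1, 1e-7
A = zeros((n, m))
A[i, j] = 1.0

analytic = A @ L.T + L @ A.T  # ∂K/∂Lᵢⱼ for K = LLᵀ
numeric = ((L + eps * A) @ (L + eps * A).T - L @ L.T) / eps
assert allclose(analytic, numeric, atol=1e-5)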
def beta(self):
"""
Fixed-effect sizes.
Returns
-------
effect-sizes : numpy.ndarray
Optimal fixed-effect sizes.
Notes
-----
Setting the derivative of log(p(𝐲)) over effect sizes equal
to zero leads to solutions 𝜷 from equation ::
(QᵀX)ᵀD⁻¹(QᵀX)𝜷 = (QᵀX)ᵀD⁻¹(Qᵀ𝐲).
"""
from numpy_sugar.linalg import rsolve
return rsolve(self._X["VT"], rsolve(self._X["tX"], self.mean()))
def beta_covariance(self):
"""
Estimates the covariance-matrix of the optimal beta.
Returns
-------
beta-covariance : ndarray
(Xᵀ(s((1-𝛿)K + 𝛿I))⁻¹X)⁻¹.
References
----------
.. Rencher, A. C., & Schaalje, G. B. (2008). Linear models in statistics. John
Wiley & Sons.
"""
from numpy_sugar.linalg import ddot
tX = self._X["tX"]
Q = concatenate(self._QS[0], axis=1)
S0 = self._QS[1]
D = self.v0 * S0 + self.v1
D = D.tolist() + [self.v1] * (len(self._y) - len(D))
D = asarray(D)
A = inv(tX.T @ (Q @ ddot(1 / D, Q.T @ tX)))
VT = self._X["VT"]
H = lstsq(VT, A, rcond=None)[0]
return lstsq(VT, H.T, rcond=None)[0]
def fix(self, param):
"""
Disable parameter optimization.
Parameters
----------
param : str
Possible values are ``"delta"``, ``"beta"``, and ``"scale"``.
"""
if param == "delta":
super()._fix("logistic")
else:
self._fix[param] = True
def unfix(self, param):
"""
Enable parameter optimization.
Parameters
----------
param : str
Possible values are ``"delta"``, ``"beta"``, and ``"scale"``.
"""
if param == "delta":
self._unfix("logistic")
else:
self._fix[param] = False
def fit(self, verbose=True):
"""
Maximise the marginal likelihood.
Parameters
----------
verbose : bool, optional
``True`` for progress output; ``False`` otherwise.
Defaults to ``True``.
"""
if not self._isfixed("logistic"):
self._maximize_scalar(desc="LMM", rtol=1e-6, atol=1e-6, verbose=verbose)
if not self._fix["beta"]:
self._update_beta()
if not self._fix["scale"]:
self._update_scale()
def get_fast_scanner(self):
"""
Return :class:`.FastScanner` for association scan.
Returns
-------
fast-scanner : :class:`.FastScanner`
Instance of a class designed to perform very fast association scan.
"""
v0 = self.v0
v1 = self.v1
QS = (self._QS[0], v0 * self._QS[1])
return FastScanner(self._y, self.X, QS, v1)
def value(self):
"""
Internal use only.
"""
if not self._fix["beta"]:
self._update_beta()
if not self._fix["scale"]:
self._update_scale()
return self.lml()
def lml(self):
"""
Log of the marginal likelihood.
Returns
-------
lml : float
Log of the marginal likelihood.
Notes
-----
The log of the marginal likelihood is given by ::
2⋅log(p(𝐲)) = -n⋅log(2π) - n⋅log(s) - log|D| - (Qᵀ𝐲)ᵀs⁻¹D⁻¹(Qᵀ𝐲)
+ 2⋅(Qᵀ𝐲)ᵀs⁻¹D⁻¹(QᵀX𝜷) - (QᵀX𝜷)ᵀs⁻¹D⁻¹(QᵀX𝜷).
By using the optimal 𝜷, the log of the marginal likelihood can be rewritten
as::
2⋅log(p(𝐲)) = -n⋅log(2π) - n⋅log(s) - log|D| + (Qᵀ𝐲)ᵀs⁻¹D⁻¹Qᵀ(X𝜷-𝐲).
In the extreme case where 𝜷 is such that 𝐲 = X𝜷, the maximum is attained as
s→0.
For optimal 𝜷 and s, the log of the marginal likelihood can be further
simplified to ::
2⋅log(p(𝐲; 𝜷, s)) = -n⋅log(2π) - n⋅log s - log|D| - n.
"""
reml = (self._logdetXX() - self._logdetH()) / 2
if self._optimal["scale"]:
lml = self._lml_optimal_scale()
else:
lml = self._lml_arbitrary_scale()
return lml + reml
def delta(self):
"""
Variance ratio between ``K`` and ``I``.
"""
v = float(self._logistic.value)
if v > 0.0:
v = 1 / (1 + exp(-v))
else:
v = exp(v)
v = v / (v + 1.0)
return min(max(v, epsilon.tiny), 1 - epsilon.tiny)
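# The two branches above compute the same logistic function, split so exp
# is never called on a large positive argument; quick standalone check:
from math import exp, isclose

def stable_logistic(v):
    if v > 0.0:
        return 1 / (1 + exp(-v))
    e = exp(v)
    return e / (e + 1.0)

assert isclose(stable_logistic(-3.0), 1 / (1 + exp(3.0)))
assert stable_logistic(-1000.0) == 0.0  # exp(-1000) underflows safely to 0,
                                        # while math.exp(1000) would overflow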
def _logdetXX(self):
"""
log(|XᵀX|).
"""
if not self._restricted:
return 0.0
ldet = slogdet(self._X["tX"].T @ self._X["tX"])
if ldet[0] != 1.0:
raise ValueError("The determinant of XᵀX should be positive.")
return ldet[1]
def _logdetH(self):
"""
log(|H|) for H = s⁻¹XᵀQD⁻¹QᵀX.
"""
if not self._restricted:
return 0.0
ldet = slogdet(sum(self._XTQDiQTX) / self.scale)
if ldet[0] != 1.0:
raise ValueError("The determinant of H should be positive.")
return ldet[1]
def _lml_optimal_scale(self):
"""
Log of the marginal likelihood for optimal scale.
Implementation for the unrestricted LML::
2⋅log(p(𝐲)) = -n⋅log(2π) - n⋅log(s) - log|D| - n.
Returns
-------
lml : float
Log of the marginal likelihood.
"""
assert self._optimal["scale"]
n = len(self._y)
lml = -self._df * log2pi - self._df - n * log(self.scale)
lml -= sum(npsum(log(D)) for D in self._D)
return lml / 2
def _lml_arbitrary_scale(self):
"""
Log of the marginal likelihood for arbitrary scale.
Returns
-------
lml : float
Log of the marginal likelihood.
"""
s = self.scale
D = self._D
n = len(self._y)
lml = -self._df * log2pi - n * log(s)
lml -= sum(npsum(log(d)) for d in D)
d = (mTQ - yTQ for (mTQ, yTQ) in zip(self._mTQ, self._yTQ))
lml -= sum((i / j) @ i for (i, j) in zip(d, D)) / s
return lml / 2
def _df(self):
"""
Degrees of freedom.
"""
if not self._restricted:
return self.nsamples
return self.nsamples - self._X["tX"].shape[1]
def get_fast_scanner(self):
r"""Return :class:`glimix_core.lmm.FastScanner` for the current
delta."""
from numpy_sugar.linalg import ddot, economic_qs, sum2diag
y = self.eta / self.tau
if self._QS is None:
K = eye(y.shape[0]) / self.tau
else:
Q0 = self._QS[0][0]
S0 = self._QS[1]
K = dot(ddot(Q0, self.v0 * S0), Q0.T)
K = sum2diag(K, 1 / self.tau)
return FastScanner(y, self._X, economic_qs(K), self.v1)
def value(self):
r"""Log of the marginal likelihood.
Formally,
.. math::
- \frac{n}{2}\log{2\pi} - \frac{1}{2} \log{\left|
v_0 \mathrm K + v_1 \mathrm I + \tilde{\Sigma} \right|}
- \frac{1}{2}
\left(\tilde{\boldsymbol\mu} -
\mathrm X\boldsymbol\beta\right)^{\intercal}
\left( v_0 \mathrm K + v_1 \mathrm I +
\tilde{\Sigma} \right)^{-1}
\left(\tilde{\boldsymbol\mu} -
\mathrm X\boldsymbol\beta\right)
Returns
-------
float
:math:`\log{p(\tilde{\boldsymbol\mu})}`
"""
from numpy_sugar.linalg import ddot, sum2diag
if self._cache["value"] is not None:
return self._cache["value"]
scale = exp(self.logscale)
delta = 1 / (1 + exp(-self.logitdelta))
v0 = scale * (1 - delta)
v1 = scale * delta
mu = self.eta / self.tau
n = len(mu)
if self._QS is None:
K = zeros((n, n))
else:
Q0 = self._QS[0][0]
S0 = self._QS[1]
K = dot(ddot(Q0, S0), Q0.T)
A = sum2diag(sum2diag(v0 * K, v1), 1 / self.tau)
m = mu - self.mean()
v = -n * log(2 * pi)
v -= slogdet(A)[1]
v -= dot(m, solve(A, m))
self._cache["value"] = v / 2
return self._cache["value"]
def _initialize(self):
r"""Initialize the mean and covariance of the posterior.
Given that :math:`\tilde{\mathrm T}` is a matrix of zeros right before
the first EP iteration, we have
.. math::
\boldsymbol\mu = \mathrm K^{-1} \mathbf m ~\text{ and }~
\Sigma = \mathrm K
as the initial posterior mean and covariance.
"""
if self._mean is None or self._cov is None:
return
Q = self._cov["QS"][0][0]
S = self._cov["QS"][1]
if S.size > 0:
self.tau[:] = 1 / npsum((Q * sqrt(S)) ** 2, axis=1)
else:
self.tau[:] = 0.0
self.eta[:] = self._mean
self.eta[:] *= self.tau
def L(self):
r"""Cholesky decomposition of :math:`\mathrm B`.
.. math::
\mathrm B = \mathrm Q^{\intercal}\tilde{\mathrm{T}}\mathrm Q
+ \mathrm{S}^{-1}
"""
from scipy.linalg import cho_factor
from numpy_sugar.linalg import ddot, sum2diag
if self._L_cache is not None:
return self._L_cache
Q = self._cov["QS"][0][0]
S = self._cov["QS"][1]
B = dot(Q.T, ddot(self._site.tau, Q, left=True))
sum2diag(B, 1.0 / S, out=B)
self._L_cache = cho_factor(B, lower=True)[0]
return self._L_cache
def build_engine_session(connection, echo=False, autoflush=None, autocommit=None, expire_on_commit=None,
scopefunc=None):
"""Build an engine and a session.
:param str connection: An RFC-1738 database connection string
:param bool echo: Turn on echoing SQL
:param Optional[bool] autoflush: Defaults to False if not specified in kwargs or configuration.
:param Optional[bool] autocommit: Defaults to False if not specified in kwargs or configuration.
:param Optional[bool] expire_on_commit: Defaults to True if not specified in kwargs or configuration.
:param scopefunc: Scoped function to pass to :func:`sqlalchemy.orm.scoped_session`
:rtype: tuple[Engine,Session]
From the Flask-SQLAlchemy documentation:
An extra key ``'scopefunc'`` can be set on the ``options`` dict to
specify a custom scope function. If it's not provided, Flask's app
context stack identity is used. This will ensure that sessions are
created and removed with the request/response cycle, and should be fine
in most cases.
"""
if connection is None:
raise ValueError('can not build engine when connection is None')
engine = create_engine(connection, echo=echo)
autoflush = autoflush if autoflush is not None else False
autocommit = autocommit if autocommit is not None else False
expire_on_commit = expire_on_commit if expire_on_commit is not None else True
log.debug('auto flush: %s, auto commit: %s, expire on commit: %s', autoflush, autocommit, expire_on_commit)
#: A SQLAlchemy session maker
session_maker = sessionmaker(
bind=engine,
autoflush=autoflush,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
)
#: A SQLAlchemy session object
session = scoped_session(
session_maker,
scopefunc=scopefunc
)
return engine, session
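# Hedged usage sketch with an in-memory SQLite URL (illustrative):
engine, session = build_engine_session('sqlite://')
# `engine` is a SQLAlchemy Engine; `session` is a scoped session bound to
# it, ready for session.add(...) / session.query(...).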
def _get_connection(cls, connection: Optional[str] = None) -> str:
"""Get a default connection string.
Wraps :func:`bio2bel.utils.get_connection`, passing this class's :data:`module_name` to it.
"""
return get_connection(cls.module_name, connection=connection)
def setup_smtp_factory(**settings):
""" expects a dictionary with 'mail.' keys to create an appropriate smtplib.SMTP instance"""
return CustomSMTP(
host=settings.get('mail.host', 'localhost'),
port=int(settings.get('mail.port', 25)),
user=settings.get('mail.user'),
password=settings.get('mail.password'),
timeout=float(settings.get('mail.timeout', 60)),
)
def sendMultiPart(smtp, gpg_context, sender, recipients, subject, text, attachments):
""" a helper method that composes and sends an email with attachments
requires a pre-configured smtplib.SMTP instance"""
sent = 0
for to in recipients:
if not to.startswith('<'):
uid = '<%s>' % to
else:
uid = to
if not checkRecipient(gpg_context, uid):
continue
msg = MIMEMultipart()
msg['From'] = sender
msg['To'] = to
msg['Subject'] = subject
msg["Date"] = formatdate(localtime=True)
msg.preamble = u'This is an email in encrypted multipart format.'
attach = MIMEText(str(gpg_context.encrypt(text.encode('utf-8'), uid, always_trust=True)))
attach.set_charset('UTF-8')
msg.attach(attach)
for attachment in attachments:
with open(attachment, 'rb') as fp:
attach = MIMEBase('application', 'octet-stream')
attach.set_payload(str(gpg_context.encrypt_file(fp, uid, always_trust=True)))
attach.add_header('Content-Disposition', 'attachment', filename=basename('%s.pgp' % attachment))
msg.attach(attach)
# TODO: need to catch exception?
# yes :-) we need to adjust the status accordingly (>500 so it will be destroyed)
smtp.begin()
smtp.sendmail(sender, to, msg.as_string())
smtp.quit()
sent += 1
return sent
def begin(self):
""" connects and optionally authenticates a connection."""
self.connect(self.host, self.port)
if self.user:
self.starttls()
self.login(self.user, self.password)
def make_downloader(url: str, path: str) -> Callable[[bool], str]: # noqa: D202
"""Make a function that downloads the data for you, or uses a cached version at the given path.
:param url: The URL of some data
:param path: The path of the cached data, or where data is cached if it does not already exist
:return: A function that downloads the data and returns the path of the data
"""
def download_data(force_download: bool = False) -> str:
"""Download the data.
:param force_download: If true, overwrites a previously cached file
"""
if os.path.exists(path) and not force_download:
log.info('using cached data at %s', path)
else:
log.info('downloading %s to %s', url, path)
urlretrieve(url, path)
return path
return download_data
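# Hedged usage sketch (URL and path are illustrative):
download = make_downloader('https://example.com/data.tsv', '/tmp/data.tsv')
path = download()                     # downloads on first call
path = download()                     # reuses the cached file afterwards
path = download(force_download=True)  # re-downloads, overwriting the cache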
def make_df_getter(data_url: str, data_path: str, **kwargs) -> Callable[[Optional[str], bool, bool], pd.DataFrame]:
"""Build a function that handles downloading tabular data and parsing it into a pandas DataFrame.
:param data_url: The URL of the data
:param data_path: The path where the data should get stored
:param kwargs: Any other arguments to pass to :func:`pandas.read_csv`
"""
download_function = make_downloader(data_url, data_path)
def get_df(url: Optional[str] = None, cache: bool = True, force_download: bool = False) -> pd.DataFrame:
"""Get the data as a pandas DataFrame.
:param url: The URL (or file path) to download.
:param cache: If true, the data is downloaded to the file system, else it is loaded from the internet
:param force_download: If true, overwrites a previously cached file
"""
if url is None and cache:
url = download_function(force_download=force_download)
return pd.read_csv(
url or data_url,
**kwargs
)
return get_df
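# Hedged usage sketch (URL, path, and separator are illustrative):
get_df = make_df_getter(
    'https://example.com/data.tsv',
    '/tmp/data.tsv',
    sep='\t',  # forwarded to pandas.read_csv
)
df = get_df()  # downloads (or reuses the cache) and parses the DataFrame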
def generate(self, **kwargs):
'''
Generate a :term:`URI` based on parameters passed.
:param id: The id of the concept or collection.
:param type: What we're generating a :term:`URI` for: `concept`
or `collection`.
:rtype: string
'''
if kwargs['type'] not in ['concept', 'collection']:
raise ValueError('Type %s is invalid' % kwargs['type'])
return (
self.pattern % (self.vocabulary_id, kwargs['type'], kwargs['id'])
).lower()
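# Minimal sketch of a host class for generate() above; the pattern and
# vocabulary id are illustrative, not the library's defaults.
class UriPatternGenerator:
    def __init__(self, pattern, vocabulary_id):
        self.pattern = pattern
        self.vocabulary_id = vocabulary_id

    def generate(self, **kwargs):
        if kwargs['type'] not in ['concept', 'collection']:
            raise ValueError('Type %s is invalid' % kwargs['type'])
        return (
            self.pattern % (self.vocabulary_id, kwargs['type'], kwargs['id'])
        ).lower()

gen = UriPatternGenerator('urn:x-vocab:%s:%s:%s', 'TREES')
assert gen.generate(type='concept', id=1) == 'urn:x-vocab:trees:concept:1'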
def has_address(start: int, data_length: int) -> bool:
"""
Determine whether the packet has an "address" encoded into it.
There exists an undocumented bug/edge case in the spec - some packets
with 0x82 as _start_, still encode the address into the packet, and thus
throws off decoding. This edge case is handled explicitly.
"""
return bool(0x01 & start) or (start == 0x82 and data_length == 16)
def decode_timestamp(data: str) -> datetime.datetime:
"""
Decode timestamp using bespoke decoder.
Cannot use simple strptime since the ness panel contains a bug
that P199E zone and state updates emitted on the hour cause a minute
value of `60` to be sent, causing strptime to fail. This decoder handles
this edge case.
"""
year = 2000 + int(data[0:2])
month = int(data[2:4])
day = int(data[4:6])
hour = int(data[6:8])
minute = int(data[8:10])
second = int(data[10:12])
if minute == 60:
minute = 0
hour += 1
return datetime.datetime(year=year, month=month, day=day, hour=hour,
minute=minute, second=second)
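# Usage sketch exercising the minute-60 edge case the decoder exists for
# (timestamp strings are illustrative):
import datetime

assert decode_timestamp('190523114500') == datetime.datetime(2019, 5, 23, 11, 45, 0)
# On-the-hour panel bug: minute 60 rolls over into the next hour.
assert decode_timestamp('190523116000') == datetime.datetime(2019, 5, 23, 12, 0, 0)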
def create_application(connection: Optional[str] = None) -> Flask:
"""Create a Flask application."""
app = Flask(__name__)
flask_bootstrap.Bootstrap(app)
Admin(app)
connection = connection or DEFAULT_CACHE_CONNECTION
engine, session = build_engine_session(connection)
for name, add_admin in add_admins.items():
url = '/{}'.format(name)
add_admin(app, session, url=url, endpoint=name, name=name)
log.debug('added %s - %s to %s', name, add_admin, url)
app.register_blueprint(ui)
return app
def register_provider(self, provider):
'''
Register a :class:`skosprovider.providers.VocabularyProvider`.
:param skosprovider.providers.VocabularyProvider provider: The provider
to register.
:raises RegistryException: A provider with this id or uri has already
been registered.
'''
if provider.get_vocabulary_id() in self.providers:
raise RegistryException(
'A provider with this id has already been registered.'
)
self.providers[provider.get_vocabulary_id()] = provider
if provider.concept_scheme.uri in self.concept_scheme_uri_map:
raise RegistryException(
'A provider with URI %s has already been registered.' % provider.concept_scheme.uri
)
self.concept_scheme_uri_map[provider.concept_scheme.uri] = provider.get_vocabulary_id()
def remove_provider(self, id):
'''
Remove the provider with the given id or :term:`URI`.
:param str id: The identifier for the provider.
:returns: A :class:`skosprovider.providers.VocabularyProvider` or
`False` if the id is unknown.
'''
if id in self.providers:
p = self.providers.get(id, False)
del self.providers[id]
del self.concept_scheme_uri_map[p.concept_scheme.uri]
return p
elif id in self.concept_scheme_uri_map:
id = self.concept_scheme_uri_map[id]
return self.remove_provider(id)
else:
return False
def get_provider(self, id):
'''
Get a provider by id or :term:`uri`.
:param str id: The identifier for the provider. This can either be the
id with which it was registered or the :term:`uri` of the conceptscheme
that the provider services.
:returns: A :class:`skosprovider.providers.VocabularyProvider`
or `False` if the id or uri is unknown.
'''
if id in self.providers:
return self.providers.get(id, False)
elif is_uri(id) and id in self.concept_scheme_uri_map:
return self.providers.get(self.concept_scheme_uri_map[id], False)
return False
def get_providers(self, **kwargs):
'''Get all providers registered.
If keyword `ids` is present, get only the providers with these ids.
If keyword `subject` is present, get only the providers that have this subject.
.. code-block:: python
# Get all providers with subject 'biology'
registry.get_providers(subject='biology')
# Get all providers with id 1 or 2
registry.get_providers(ids=[1,2])
# Get all providers with id 1 or 2 and subject 'biology'
registry.get_providers(ids=[1,2], subject='biology')
:param list ids: Only return providers with one of the Ids or :term:`URIs <uri>`.
:param str subject: Only return providers with this subject.
:returns: A list of :class:`providers <skosprovider.providers.VocabularyProvider>`
'''
if 'ids' in kwargs:
ids = [self.concept_scheme_uri_map.get(id, id) for id in kwargs['ids']]
providers = [
self.providers[k] for k in self.providers.keys() if k in ids
]
else:
providers = list(self.providers.values())
if 'subject' in kwargs:
providers = [p for p in providers if kwargs['subject'] in p.metadata['subject']]
return providers
def find(self, query, **kwargs):
'''Launch a query across all or a selection of providers.
.. code-block:: python
# Find anything that has a label of church in any provider.
registry.find({'label': 'church'})
# Find anything that has a label of church with the BUILDINGS provider.
# Attention, this syntax was deprecated in version 0.3.0
registry.find({'label': 'church'}, providers=['BUILDINGS'])
# Find anything that has a label of church with the BUILDINGS provider.
registry.find({'label': 'church'}, providers={'ids': ['BUILDINGS']})
# Find anything that has a label of church with a provider
# marked with the subject 'architecture'.
registry.find({'label': 'church'}, providers={'subject': 'architecture'})
# Find anything that has a label of church in any provider.
# If possible, display the results with a Dutch label.
registry.find({'label': 'church'}, language='nl')
:param dict query: The query parameters that will be passed on to each
:meth:`~skosprovider.providers.VocabularyProvider.find` method of
the selected :class:`providers <skosprovider.providers.VocabularyProvider>`.
:param dict providers: Optional. If present, it should be a dictionary.
This dictionary can contain any of the keyword arguments available
to the :meth:`get_providers` method. The query will then only
be passed to the providers confirming to these arguments.
:param string language: Optional. If present, it should be a
:term:`language-tag`. This language-tag is passed on to the
underlying providers and used when selecting the label to display
for each concept.
:returns: a list of :class:`dict`.
Each dict has two keys: id and concepts.
'''
if 'providers' not in kwargs:
providers = self.get_providers()
else:
pargs = kwargs['providers']
if isinstance(pargs, list):
providers = self.get_providers(ids=pargs)
else:
providers = self.get_providers(**pargs)
kwarguments = {}
if 'language' in kwargs:
kwarguments['language'] = kwargs['language']
return [{'id': p.get_vocabulary_id(), 'concepts': p.find(query, **kwarguments)}
for p in providers]
def get_all(self, **kwargs):
'''Get all concepts from all providers.
.. code-block:: python
# get all concepts in all providers.
registry.get_all()
# get all concepts in all providers.
# If possible, display the results with a Dutch label.
registry.get_all(language='nl')
:param string language: Optional. If present, it should be a
:term:`language-tag`. This language-tag is passed on to the
underlying providers and used when selecting the label to display
for each concept.
:returns: a list of :class:`dict`.
Each dict has two keys: id and concepts.
'''
kwarguments = {}
if 'language' in kwargs:
kwarguments['language'] = kwargs['language']
return [{'id': p.get_vocabulary_id(), 'concepts': p.get_all(**kwarguments)}
for p in self.providers.values()]
def get_by_uri(self, uri):
'''Get a concept or collection by its uri.
Returns a single concept or collection if one exists with this uri.
Returns False otherwise.
:param string uri: The uri to find a concept or collection for.
:raises ValueError: The uri is invalid.
:rtype: :class:`skosprovider.skos.Concept` or
:class:`skosprovider.skos.Collection`
'''
if not is_uri(uri):
raise ValueError('%s is not a valid URI.' % uri)
# Check if there's a provider that's more likely to have the URI
csuris = [csuri for csuri in self.concept_scheme_uri_map.keys() if uri.startswith(csuri)]
for csuri in csuris:
c = self.get_provider(csuri).get_by_uri(uri)
if c:
return c
# Check all providers
for p in self.providers.values():
c = p.get_by_uri(uri)
if c:
return c
return False
def find_module(self, fullname, path=None):
"""Find a module if its name starts with :code:`self.group` and is registered."""
if not fullname.startswith(self._group_with_dot):
return
end_name = fullname[len(self._group_with_dot):]
for entry_point in iter_entry_points(group=self.group, name=None):
if entry_point.name == end_name:
return self
def load_module(self, fullname):
"""Load a module if its name starts with :code:`self.group` and is registered."""
if fullname in sys.modules:
return sys.modules[fullname]
end_name = fullname[len(self._group_with_dot):]
for entry_point in iter_entry_points(group=self.group, name=end_name):
mod = entry_point.load()
sys.modules[fullname] = mod
return mod
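# Hedged sketch of wiring up this finder/loader pair (all names below are
# hypothetical, including the importer class and the entry-point group):
# a plugin package registers itself under the group in its setup.py, e.g.
#     entry_points={'myns': ['plugin_a = plugin_a_pkg.module']}
# and the host application installs the importer on sys.meta_path so that
# `import myns.plugin_a` resolves to the registered module:
import sys
sys.meta_path.append(EntryPointImporter(group='myns'))  # hypothetical class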
def upload_theme():
""" upload and/or update the theme with the current git state"""
get_vars()
with fab.settings():
local_theme_path = path.abspath(
path.join(fab.env['config_base'],
fab.env.instance.config['local_theme_path']))
rsync(
'-av',
'--delete',
'%s/' % local_theme_path,
'{{host_string}}:{themes_dir}/{ploy_theme_name}'.format(**AV)
)
briefkasten_ctl('restart')
def upload_pgp_keys():
""" upload and/or update the PGP keys for editors, import them into PGP"""
get_vars()
upload_target = '/tmp/pgp_pubkeys.tmp'
with fab.settings(fab.hide('running')):
fab.run('rm -rf %s' % upload_target)
fab.run('mkdir %s' % upload_target)
local_key_path = path.join(fab.env['config_base'], fab.env.instance.config['local_pgpkey_path'])
remote_key_path = '/var/briefkasten/pgp_pubkeys/'.format(**AV)
rsync('-av', local_key_path, '{host_string}:%s' % upload_target)
fab.run('chown -R %s %s' % (AV['appuser'], remote_key_path))
fab.run('chmod 700 %s' % remote_key_path)
with fab.shell_env(GNUPGHOME=remote_key_path):
fab.sudo('''gpg --import %s/*.*''' % upload_target,
user=AV['appuser'], shell_escape=False)
fab.run('rm -rf %s' % upload_target)
def upload_backend(index='dev', user=None):
"""
Build the backend and upload it to the remote server at the given index
"""
get_vars()
use_devpi(index=index)
with fab.lcd('../application'):
fab.local('make upload')
def update_backend(use_pypi=False, index='dev', build=True, user=None, version=None):
"""
Install the backend from the given devpi index at the given version on the target host and restart the service.
If version is None, it defaults to the latest version
Optionally, build and upload the application first from local sources. This requires a
full backend development environment on the machine running this command (pyramid etc.)
"""
get_vars()
if value_asbool(build):
upload_backend(index=index, user=user)
with fab.cd('{apphome}'.format(**AV)):
if value_asbool(use_pypi):
command = 'bin/pip install --upgrade briefkasten'
else:
command = 'bin/pip install --upgrade --pre -i {ploy_default_publish_devpi}/briefkasten/{index}/+simple/ briefkasten'.format(
index=index,
user=user,
**AV)
if version:
command = '%s==%s' % (command, version)
fab.sudo(command)
briefkasten_ctl('restart')
def _sort(self, concepts, sort=None, language='any', reverse=False):
'''
Returns a sorted version of a list of concepts. Will leave the original
list unsorted.
:param list concepts: A list of concepts and collections.
:param string sort: What to sort on: `id`, `label` or `sortlabel`
:param string language: Language to use when sorting on `label` or
`sortlabel`.
:param boolean reverse: Reverse the sort order?
:rtype: list
'''
sorted = copy.copy(concepts)
if sort:
sorted.sort(key=methodcaller('_sortkey', sort, language), reverse=reverse)
return sorted
def _include_in_find(self, c, query):
'''
:param c: A :class:`skosprovider.skos.Concept` or
:class:`skosprovider.skos.Collection`.
:param query: A dict that can be used to express a query.
:rtype: boolean
'''
include = True
if include and 'type' in query:
include = query['type'] == c.type
if include and 'label' in query:
def finder(l, query):
if not self.case_insensitive:
return l.label.find(query['label'])
else:
return l.label.upper().find(query['label'].upper())
include = any([finder(l, query) >= 0 for l in c.labels])
if include and 'collection' in query:
coll = self.get_by_id(query['collection']['id'])
if not coll or not isinstance(coll, Collection):
raise ValueError(
'You are searching for items in a nonexistent collection.'
)
if 'depth' in query['collection'] and query['collection']['depth'] == 'all':
members = self.expand(coll.id)
else:
members = coll.members
include = any([True for id in members if str(id) == str(c.id)])
return include
def _get_find_dict(self, c, **kwargs):
'''
Return a dict that can be used in the return list of the :meth:`find`
method.
:param c: A :class:`skosprovider.skos.Concept` or
:class:`skosprovider.skos.Collection`.
:rtype: dict
'''
language = self._get_language(**kwargs)
return {
'id': c.id,
'uri': c.uri,
'type': c.type,
'label': None if c.label() is None else c.label(language).label
}
async def update(self) -> None:
"""Force update of alarm status and zones"""
_LOGGER.debug("Requesting state update from server (S00, S14)")
await asyncio.gather(
# List unsealed Zones
self.send_command('S00'),
# Arming status update
self.send_command('S14'),
)
async def _update_loop(self) -> None:
"""Schedule a state update to keep the connection alive"""
await asyncio.sleep(self._update_interval)
while not self._closed:
await self.update()
await asyncio.sleep(self._update_interval)
def add_cli_to_bel_namespace(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``upload_bel_namespace`` command to main :mod:`click` function."""
@main.command()
@click.option('-u', '--update', is_flag=True)
@click.pass_obj
def upload(manager: BELNamespaceManagerMixin, update):
"""Upload names/identifiers to terminology store."""
namespace = manager.upload_bel_namespace(update=update)
click.echo(f'uploaded [{namespace.id}] {namespace.keyword}')
return main
def add_cli_clear_bel_namespace(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``clear_bel_namespace`` command to main :mod:`click` function."""
@main.command()
@click.pass_obj
def drop(manager: BELNamespaceManagerMixin):
"""Clear names/identifiers to terminology store."""
namespace = manager.drop_bel_namespace()
if namespace:
click.echo(f'namespace {namespace} was cleared')
return main
def add_cli_write_bel_namespace(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``write_bel_namespace`` command to main :mod:`click` function."""
@main.command()
@click.option('-d', '--directory', type=click.Path(file_okay=False, dir_okay=True), default=os.getcwd(),
help='output directory')
@click.pass_obj
def write(manager: BELNamespaceManagerMixin, directory: str):
"""Write a BEL namespace names/identifiers to terminology store."""
manager.write_directory(directory)
return main
def add_cli_write_bel_annotation(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``write_bel_annotation`` command to main :mod:`click` function."""
@main.command()
@click.option('-d', '--directory', type=click.Path(file_okay=False, dir_okay=True), default=os.getcwd(),
help='output directory')
@click.pass_obj
def write(manager: BELNamespaceManagerMixin, directory: str):
"""Write a BEL annotation."""
with open(os.path.join(directory, manager.identifiers_namespace), 'w') as file:
manager.write_bel_annotation(file)
return main
def _iterate_namespace_models(self, **kwargs) -> Iterable:
"""Return an iterator over the models to be converted to the namespace."""
return tqdm(
self._get_query(self.namespace_model),
total=self._count_model(self.namespace_model),
**kwargs
)
def _get_default_namespace(self) -> Optional[Namespace]:
"""Get the reference BEL namespace if it exists."""
return self._get_query(Namespace).filter(Namespace.url == self._get_namespace_url()).one_or_none()
def _make_namespace(self) -> Namespace:
"""Make a namespace."""
namespace = Namespace(
name=self._get_namespace_name(),
keyword=self._get_namespace_keyword(),
url=self._get_namespace_url(),
version=str(time.asctime()),
)
self.session.add(namespace)
entries = self._get_namespace_entries(namespace)
self.session.add_all(entries)
t = time.time()
log.info('committing models')
self.session.commit()
log.info('committed models in %.2f seconds', time.time() - t)
return namespace
def _get_old_entry_identifiers(namespace: Namespace) -> Set[NamespaceEntry]:
"""Convert a PyBEL generalized namespace entries to a set.
Default to using the identifier, but can be overridden to use the name instead.
>>> {term.identifier for term in namespace.entries}
"""
return {term.identifier for term in namespace.entries}
def _update_namespace(self, namespace: Namespace) -> None:
"""Update an already-created namespace.
Note: only call this if the namespace is not None!
"""
old_entry_identifiers = self._get_old_entry_identifiers(namespace)
new_count = 0
skip_count = 0
for model in self._iterate_namespace_models():
if self._get_identifier(model) in old_entry_identifiers:
continue
entry = self._create_namespace_entry_from_model(model, namespace=namespace)
if entry is None or entry.name is None:
skip_count += 1
continue
new_count += 1
self.session.add(entry)
t = time.time()
log.info('got %d new entries. skipped %d entries missing names. committing models', new_count, skip_count)
self.session.commit()
log.info('committed models in %.2f seconds', time.time() - t)
def add_namespace_to_graph(self, graph: BELGraph) -> Namespace:
"""Add this manager's namespace to the graph."""
namespace = self.upload_bel_namespace()
graph.namespace_url[namespace.keyword] = namespace.url
# Add this manager as an annotation, too
self._add_annotation_to_graph(graph)
return namespace
def _add_annotation_to_graph(self, graph: BELGraph) -> None:
"""Add this manager as an annotation to the graph."""
if 'bio2bel' not in graph.annotation_list:
graph.annotation_list['bio2bel'] = set()
graph.annotation_list['bio2bel'].add(self.module_name)
def upload_bel_namespace(self, update: bool = False) -> Namespace:
"""Upload the namespace to the PyBEL database.
:param update: Should the namespace be updated first?
"""
if not self.is_populated():
self.populate()
namespace = self._get_default_namespace()
if namespace is None:
log.info('making namespace for %s', self._get_namespace_name())
return self._make_namespace()
if update:
self._update_namespace(namespace)
return namespace
def drop_bel_namespace(self) -> Optional[Namespace]:
"""Remove the default namespace if it exists."""
namespace = self._get_default_namespace()
if namespace is not None:
for entry in tqdm(namespace.entries, desc=f'deleting entries in {self._get_namespace_name()}'):
self.session.delete(entry)
self.session.delete(namespace)
log.info('committing deletions')
self.session.commit()
return namespace