code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---
def minimize_lbfgs(
fun: Callable,
x0: ArrayLikeTree,
maxiter: int = 30,
maxcor: float = 10,
gtol: float = 1e-08,
ftol: float = 1e-05,
maxls: int = 1000,
**lbfgs_kwargs,
) -> tuple[OptStep, LBFGSHistory]:
"""
Minimize a function using L-BFGS
Parameters
----------
fun:
function of the form f(x) where x is a pytree and returns a real scalar.
The function should be composed of operations with vjp defined.
x0:
initial guess
maxiter:
maximum number of iterations
maxcor:
maximum number of metric corrections ("history size")
ftol:
terminates the minimization when `(f_k - f_{k+1}) < ftol`
gtol:
terminates the minimization when `|g_k|_norm < gtol`
maxls:
maximum number of line search steps (per iteration)
**lbfgs_kwargs
other keyword arguments passed to `jaxopt.LBFGS`.
Returns
-------
Optimization results and optimization path
"""
# Ravel pytree into flat array.
x0_raveled, unravel_fn = ravel_pytree(x0)
unravel_fn_mapped = jax.vmap(unravel_fn)
# Run LBFGS optimizer on flat input.
last_step_raveled, history_raveled = _minimize_lbfgs(
lambda x: fun(unravel_fn(x)),
x0_raveled,
maxiter,
maxcor,
gtol,
ftol,
maxls,
**lbfgs_kwargs,
)
# Unravel final optimization step.
last_step = OptStep(
params=unravel_fn(last_step_raveled.params),
state=LbfgsState(
iter_num=last_step_raveled.state.iter_num,
value=last_step_raveled.state.value,
grad=unravel_fn(last_step_raveled.state.grad),
stepsize=last_step_raveled.state.stepsize,
error=last_step_raveled.state.error,
s_history=unravel_fn_mapped(last_step_raveled.state.s_history),
y_history=unravel_fn_mapped(last_step_raveled.state.y_history),
rho_history=last_step_raveled.state.rho_history,
gamma=last_step_raveled.state.gamma,
aux=last_step_raveled.state.aux,
),
)
# Unravel optimization path history.
history = LBFGSHistory(
x=unravel_fn_mapped(history_raveled.x),
f=history_raveled.f,
g=unravel_fn_mapped(history_raveled.g),
alpha=unravel_fn_mapped(history_raveled.alpha),
update_mask=jax.tree.map(
lambda x: x.astype(history_raveled.update_mask.dtype),
unravel_fn_mapped(history_raveled.update_mask.astype(x0_raveled.dtype)),
),
)
return last_step, history
|
Minimize a function using L-BFGS
Parameters
----------
fun:
function of the form f(x) where x is a pytree and returns a real scalar.
The function should be composed of operations with vjp defined.
x0:
initial guess
maxiter:
maximum number of iterations
maxcor:
maximum number of metric corrections ("history size")
ftol:
terminates the minimization when `(f_k - f_{k+1}) < ftol`
gtol:
terminates the minimization when `|g_k|_norm < gtol`
maxls:
maximum number of line search steps (per iteration)
**lbfgs_kwargs
other keyword arguments passed to `jaxopt.LBFGS`.
Returns
-------
Optimization results and optimization path
|
minimize_lbfgs
|
python
|
blackjax-devs/blackjax
|
blackjax/optimizers/lbfgs.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/optimizers/lbfgs.py
|
Apache-2.0
|
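A minimal usage sketch for `minimize_lbfgs` (not part of the dataset row above): it assumes `blackjax.optimizers.lbfgs` imports as shown from an installed blackjax, and uses a toy quadratic loss over a pytree; `loss` and `x0` are illustrative names.

```python
import jax.numpy as jnp
from blackjax.optimizers.lbfgs import minimize_lbfgs

def loss(x):
    # scalar objective over a pytree of parameters
    return jnp.sum((x["w"] - 3.0) ** 2) + jnp.sum(x["b"] ** 2)

x0 = {"w": jnp.zeros(4), "b": jnp.zeros(2)}
last_step, history = minimize_lbfgs(loss, x0, maxiter=50)
# last_step.params has the same pytree structure as x0; history records the optimization path
```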
def lbfgs_recover_alpha(alpha_lm1, s_l, z_l, epsilon=1e-12):
"""
Compute diagonal elements of the inverse Hessian approximation from optimization path.
It implements the inner loop body of Algorithm 3 in :cite:p:`zhang2022pathfinder`.
Parameters
----------
alpha_lm1
The diagonal element of the inverse Hessian approximation of the previous iteration
s_l
The update of the position (current position - previous position)
z_l
The update of the gradient (current gradient - previous gradient). Note that in :cite:p:`zhang2022pathfinder`
it is defined as the negative of the update of the gradient, but since we are optimizing
the negative log-probability function, taking the update of the gradient is correct here.
Returns
-------
alpha_l
The diagonal element of the inverse Hessian approximation of the current iteration
mask_l
The indicator of whether the update of position and gradient are included in
the inverse-Hessian approximation or not.
"""
def compute_next_alpha(s_l, z_l, alpha_lm1):
a = z_l.T @ jnp.diag(alpha_lm1) @ z_l
b = z_l.T @ s_l
c = s_l.T @ jnp.diag(1.0 / alpha_lm1) @ s_l
inv_alpha_l = (
a / (b * alpha_lm1)
+ z_l**2 / b
- (a * s_l**2) / (b * c * alpha_lm1**2)
)
return 1.0 / inv_alpha_l
pred = s_l.T @ z_l > (epsilon * jnp.linalg.norm(z_l, 2))
alpha_l = lax.cond(
pred, compute_next_alpha, lambda *_: alpha_lm1, s_l, z_l, alpha_lm1
)
mask_l = jnp.where(
pred,
jnp.ones_like(alpha_lm1, dtype=bool),
jnp.zeros_like(alpha_lm1, dtype=bool),
)
return alpha_l, mask_l
|
Compute diagonal elements of the inverse Hessian approximation from optimization path.
It implements the inner loop body of Algorithm 3 in :cite:p:`zhang2022pathfinder`.
Parameters
----------
alpha_lm1
The diagonal element of the inverse Hessian approximation of the previous iteration
s_l
The update of the position (current position - previous position)
z_l
The update of the gradient (current gradient - previous gradient). Note that in :cite:p:`zhang2022pathfinder`
it is defined as the negative of the update of the gradient, but since we are optimizing
the negative log-probability function, taking the update of the gradient is correct here.
Returns
-------
alpha_l
The diagonal element of the inverse Hessian approximation of the current iteration
mask_l
The indicator of whether the update of position and gradient are included in
the inverse-Hessian approximation or not.
|
lbfgs_recover_alpha
|
python
|
blackjax-devs/blackjax
|
blackjax/optimizers/lbfgs.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/optimizers/lbfgs.py
|
Apache-2.0
|
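A hedged sketch of calling `lbfgs_recover_alpha` on a single update; the vectors below are made-up values just to exercise the update and the mask.

```python
import jax.numpy as jnp
from blackjax.optimizers.lbfgs import lbfgs_recover_alpha

alpha_lm1 = jnp.ones(3)                 # previous diagonal inverse-Hessian estimate
s_l = jnp.array([0.10, -0.20, 0.05])    # position difference
z_l = jnp.array([0.30, -0.10, 0.20])    # gradient difference
alpha_l, mask_l = lbfgs_recover_alpha(alpha_lm1, s_l, z_l)
# mask_l is all True when s_l.T @ z_l is large enough for the update to be kept
```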
def lbfgs_inverse_hessian_factors(S, Z, alpha):
"""
Calculates factors for inverse hessian factored representation.
It implements formula II.2 of:
Pathfinder: Parallel quasi-newton variational inference, Lu Zhang et al., arXiv:2108.03782
"""
param_dims = S.shape[-1]
StZ = S.T @ Z
R = jnp.triu(StZ) + jnp.eye(param_dims) * jnp.finfo(S.dtype).eps
eta = jnp.diag(StZ)
beta = jnp.hstack([jnp.diag(alpha) @ Z, S])
minvR = -jnp.linalg.inv(R)
alphaZ = jnp.diag(jnp.sqrt(alpha)) @ Z
block_dd = minvR.T @ (alphaZ.T @ alphaZ + jnp.diag(eta)) @ minvR
gamma = jnp.block(
[[jnp.zeros((param_dims, param_dims)), minvR], [minvR.T, block_dd]]
)
return beta, gamma
|
Calculates factors for inverse hessian factored representation.
It implements formula II.2 of:
Pathfinder: Parallel quasi-newton variational inference, Lu Zhang et al., arXiv:2108.03782
|
lbfgs_inverse_hessian_factors
|
python
|
blackjax-devs/blackjax
|
blackjax/optimizers/lbfgs.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/optimizers/lbfgs.py
|
Apache-2.0
|
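A small sketch of the shapes `lbfgs_inverse_hessian_factors` expects: `S` and `Z` hold one stored update per column (parameter dimension by history length); the random values are placeholders.

```python
import jax
import jax.numpy as jnp
from blackjax.optimizers.lbfgs import lbfgs_inverse_hessian_factors

n, m = 5, 3                                   # parameter dimension, history length
key_s, key_z = jax.random.split(jax.random.PRNGKey(0))
S = 0.1 * jax.random.normal(key_s, (n, m))    # columns of position differences
Z = 0.1 * jax.random.normal(key_z, (n, m))    # columns of gradient differences
alpha = jnp.ones(n)                           # diagonal inverse-Hessian estimate
beta, gamma = lbfgs_inverse_hessian_factors(S, Z, alpha)
# beta has shape (n, 2 * m), gamma has shape (2 * m, 2 * m)
```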
def lbfgs_inverse_hessian_formula_2(alpha, beta, gamma):
"""
Calculates inverse hessian from factors as in formula II.3 of:
Pathfinder: Parallel quasi-newton variational inference, Lu Zhang et al., arXiv:2108.03782
"""
param_dims = alpha.shape[0]
dsqrt_alpha = jnp.diag(jnp.sqrt(alpha))
idsqrt_alpha = jnp.diag(1 / jnp.sqrt(alpha))
return (
dsqrt_alpha
@ (jnp.eye(param_dims) + idsqrt_alpha @ beta @ gamma @ beta.T @ idsqrt_alpha)
@ dsqrt_alpha
)
|
Calculates inverse hessian from factors as in formula II.3 of:
Pathfinder: Parallel quasi-newton variational inference, Lu Zhang et al., arXiv:2108.03782
|
lbfgs_inverse_hessian_formula_2
|
python
|
blackjax-devs/blackjax
|
blackjax/optimizers/lbfgs.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/optimizers/lbfgs.py
|
Apache-2.0
|
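A sanity-check sketch for `lbfgs_inverse_hessian_formula_2`: with zero correction factors the reconstruction should reduce to `diag(alpha)`; the inputs are illustrative, not taken from the paper.

```python
import jax.numpy as jnp
from blackjax.optimizers.lbfgs import lbfgs_inverse_hessian_formula_2

n, m = 4, 2
alpha = jnp.full((n,), 2.0)
beta = jnp.zeros((n, 2 * m))
gamma = jnp.zeros((2 * m, 2 * m))
inv_hessian = lbfgs_inverse_hessian_formula_2(alpha, beta, gamma)
# with beta = gamma = 0 this is just diag(alpha)
```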
def bfgs_sample(rng_key, num_samples, position, grad_position, alpha, beta, gamma):
"""
Draws approximate samples of target distribution.
It implements Algorithm 4 in:
Pathfinder: Parallel quasi-newton variational inference, Lu Zhang et al., arXiv:2108.03782
"""
if not isinstance(num_samples, tuple):
num_samples = (num_samples,)
Q, R = jnp.linalg.qr(jnp.diag(jnp.sqrt(1 / alpha)) @ beta)
param_dims = beta.shape[0]
Id = jnp.identity(R.shape[0])
L = jnp.linalg.cholesky(Id + R @ gamma @ R.T)
logdet = jnp.log(jnp.prod(alpha)) + 2 * jnp.log(jnp.linalg.det(L))
mu = (
position
+ jnp.diag(alpha) @ grad_position
+ beta @ gamma @ beta.T @ grad_position
)
u = jax.random.normal(rng_key, num_samples + (param_dims, 1))
phi = mu[..., None] + jnp.diag(jnp.sqrt(alpha)) @ (Q @ (L - Id) @ (Q.T @ u) + u)
logdensity = -0.5 * (
logdet
+ jnp.einsum("...ji,...ji->...", u, u)
+ param_dims * jnp.log(2.0 * jnp.pi)
)
return phi[..., 0], logdensity
|
Draws approximate samples of target distribution.
It implements Algorithm 4 in:
Pathfinder: Parallel quasi-newton variational inference, Lu Zhang et al., arXiv:2108.03782
|
bfgs_sample
|
python
|
blackjax-devs/blackjax
|
blackjax/optimizers/lbfgs.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/optimizers/lbfgs.py
|
Apache-2.0
|
def as_top_level_api(
logdensity_estimator: Callable,
gradient_estimator: Callable,
zeta: float = 1,
num_partitions: int = 512,
energy_gap: float = 100,
min_energy: float = 0,
) -> SamplingAlgorithm:
r"""Implements the (basic) user interface for the Contour SGLD kernel.
Parameters
----------
logdensity_estimator
A function that returns an estimation of the model's logdensity given
a position and a batch of data.
gradient_estimator
A function that takes a position, a batch of data and returns an estimation
of the gradient of the log-density at this position.
zeta
Hyperparameter that controls the geometric property of the flattened
density. If `zeta=0` the function reduces to the SGLD step function.
temperature
Temperature parameter.
num_partitions
The number of partitions we divide the energy landscape into.
energy_gap
The difference in energy :math:`\Delta u` between the successive
partitions. Can be determined by running e.g. an optimizer to determine
the range of energies. `num_partitions` * `energy_gap` should match this
range.
min_energy
A rough estimate of the minimum energy in a dataset, which should be
strictly smaller than the exact minimum energy! For example, if the minimum
energy of a dataset is 3456, we can set min_energy to any value
smaller than 3456. Setting it to 0 is acceptable but not efficient; the
closer min_energy is to the exact minimum, the better.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(num_partitions, energy_gap, min_energy)
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position, num_partitions)
def step_fn(
rng_key: PRNGKey,
state: ContourSGLDState,
minibatch: ArrayLikeTree,
step_size_diff: float,
step_size_stoch: float,
temperature: float = 1.0,
) -> ContourSGLDState:
return kernel(
rng_key,
state,
logdensity_estimator,
gradient_estimator,
minibatch,
step_size_diff,
step_size_stoch,
zeta,
temperature,
)
return SamplingAlgorithm(init_fn, step_fn) # type: ignore[arg-type]
|
Implements the (basic) user interface for the Contour SGLD kernel.
Parameters
----------
logdensity_estimator
A function that returns an estimation of the model's logdensity given
a position and a batch of data.
gradient_estimator
A function that takes a position, a batch of data and returns an estimation
of the gradient of the log-density at this position.
zeta
Hyperparameter that controls the geometric property of the flattened
density. If `zeta=0` the function reduces to the SGLD step function.
temperature
Temperature parameter.
num_partitions
The number of partitions we divide the energy landscape into.
energy_gap
The difference in energy :math:`\Delta u` between the successive
partitions. Can be determined by running e.g. an optimizer to determine
the range of energies. `num_partitions` * `energy_gap` should match this
range.
min_energy
A rough estimate of the minimum energy in a dataset, which should be
strictly smaller than the exact minimum energy! For example, if the minimum
energy of a dataset is 3456, we can set min_energy to any value
smaller than 3456. Setting it to 0 is acceptable but not efficient; the
closer min_energy is to the exact minimum, the better.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/csgld.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/csgld.py
|
Apache-2.0
|
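A hedged end-to-end sketch of the Contour SGLD interface. It assumes the top-level `blackjax.csgld` export and the gradient helpers from `blackjax.sgmcmc.gradients` shown further down; the Gaussian toy model, `data`, and step sizes are made up for illustration.

```python
import jax
import jax.numpy as jnp
import jax.scipy.stats as stats
import blackjax
from blackjax.sgmcmc.gradients import grad_estimator, logdensity_estimator

data = jax.random.normal(jax.random.PRNGKey(0), (1000,))
logprior_fn = lambda theta: stats.norm.logpdf(theta, 0.0, 10.0)
loglikelihood_fn = lambda theta, x: stats.norm.logpdf(x, theta, 1.0)

logdensity_est = logdensity_estimator(logprior_fn, loglikelihood_fn, data_size=1000)
grad_est = grad_estimator(logprior_fn, loglikelihood_fn, data_size=1000)

csgld = blackjax.csgld(logdensity_est, grad_est, zeta=1.0, num_partitions=512)
state = csgld.init(jnp.array(0.0))
state = csgld.step(
    jax.random.PRNGKey(1),
    state,
    data[:32],
    step_size_diff=1e-3,
    step_size_stoch=1e-3,
)
```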
def overdamped_langevin():
"""Euler solver for overdamped Langevin diffusion.
This algorithm was ported from :cite:p:`coullon2022sgmcmcjax`.
"""
def one_step(
rng_key: PRNGKey,
position: ArrayLikeTree,
logdensity_grad: ArrayLikeTree,
step_size: float,
temperature: float = 1.0,
) -> ArrayTree:
noise = generate_gaussian_noise(rng_key, position)
position = jax.tree_util.tree_map(
lambda p, g, n: p
+ step_size * g
+ jnp.sqrt(2 * temperature * step_size) * n,
position,
logdensity_grad,
noise,
)
return position
return one_step
|
Euler solver for overdamped Langevin diffusion.
This algorithm was ported from :cite:p:`coullon2022sgmcmcjax`.
|
overdamped_langevin
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/diffusions.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/diffusions.py
|
Apache-2.0
|
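A minimal sketch of calling the Euler integrator directly; the pytree `position` and the gradient estimate are placeholders.

```python
import jax
import jax.numpy as jnp
from blackjax.sgmcmc.diffusions import overdamped_langevin

integrator = overdamped_langevin()
position = {"theta": jnp.zeros(3)}
logdensity_grad = {"theta": jnp.ones(3)}   # a gradient estimate at `position`
new_position = integrator(
    jax.random.PRNGKey(0), position, logdensity_grad, step_size=1e-3
)
```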
def sghmc(alpha: float = 0.01, beta: float = 0):
"""Euler solver for the diffusion equation of the SGHMC algorithm :cite:p:`chen2014stochastic`,
with parameters alpha and beta scaled according to :cite:p:`ma2015complete`.
This algorithm was ported from :cite:p:`coullon2022sgmcmcjax`.
"""
def one_step(
rng_key: PRNGKey,
position: ArrayLikeTree,
momentum: ArrayLikeTree,
logdensity_grad: ArrayLikeTree,
step_size: float,
temperature: float = 1.0,
):
noise = generate_gaussian_noise(rng_key, position)
position = jax.tree_util.tree_map(
lambda x, p: x + step_size * p, position, momentum
)
momentum = jax.tree_util.tree_map(
lambda p, g, n: (1.0 - alpha * step_size) * p
+ step_size * g
+ jnp.sqrt(
step_size * temperature * (2 * alpha - step_size * temperature * beta)
)
* n,
momentum,
logdensity_grad,
noise,
)
return position, momentum
return one_step
|
Euler solver for the diffusion equation of the SGHMC algorithm :cite:p:`chen2014stochastic`,
with parameters alpha and beta scaled according to :cite:p:`ma2015complete`.
This algorithm was ported from :cite:p:`coullon2022sgmcmcjax`.
|
sghmc
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/diffusions.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/diffusions.py
|
Apache-2.0
|
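The SGHMC integrator works the same way but also advances a momentum pytree; a hedged sketch with placeholder values:

```python
import jax
import jax.numpy as jnp
from blackjax.sgmcmc.diffusions import sghmc

integrator = sghmc(alpha=0.01, beta=0.0)
position = {"theta": jnp.zeros(3)}
momentum = {"theta": jnp.zeros(3)}
logdensity_grad = {"theta": jnp.ones(3)}
new_position, new_momentum = integrator(
    jax.random.PRNGKey(0), position, momentum, logdensity_grad, step_size=1e-3
)
```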
def sgnht(alpha: float = 0.01, beta: float = 0):
"""Euler solver for the diffusion equation of the SGNHT algorithm :cite:p:`ding2014bayesian`.
This algorithm was ported from :cite:p:`coullon2022sgmcmcjax`.
"""
def one_step(
rng_key: PRNGKey,
position: ArrayLikeTree,
momentum: ArrayLikeTree,
xi: float,
logdensity_grad: ArrayLikeTree,
step_size: float,
temperature: float = 1.0,
):
noise = generate_gaussian_noise(rng_key, position)
position = jax.tree_util.tree_map(
lambda x, p: x + step_size * p, position, momentum
)
momentum = jax.tree_util.tree_map(
lambda p, g, n: (1.0 - xi * step_size) * p
+ step_size * g
+ jnp.sqrt(
step_size * temperature * (2 * alpha - step_size * temperature * beta)
)
* n,
momentum,
logdensity_grad,
noise,
)
momentum_dot = jax.tree_util.tree_reduce(
operator.add, jax.tree_util.tree_map(lambda x: jnp.sum(x * x), momentum)
)
d = pytree_size(momentum)
xi = xi + step_size * (momentum_dot / d - temperature)
return position, momentum, xi
return one_step
|
Euler solver for the diffusion equation of the SGNHT algorithm :cite:p:`ding2014bayesian`.
This algorithm was ported from :cite:p:`coullon2022sgmcmcjax`.
|
sgnht
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/diffusions.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/diffusions.py
|
Apache-2.0
|
def logdensity_estimator(
logprior_fn: Callable, loglikelihood_fn: Callable, data_size: int
) -> Callable:
"""Builds a simple estimator for the log-density.
This estimator first appeared in :cite:p:`robbins1951stochastic`. The `logprior_fn` function has a
single argument: the current position (value of parameters). The
`loglikelihood_fn` takes two arguments: the current position and a batch of
data; if there are several variables (as, for instance, in a supervised
learning context), they are passed in a tuple.
This algorithm was ported from :cite:p:`coullon2022sgmcmcjax`.
Parameters
----------
logprior_fn
The log-probability density function corresponding to the prior
distribution.
loglikelihood_fn
The log-probability density function corresponding to the likelihood.
data_size
The number of items in the full dataset.
"""
def logdensity_estimator_fn(
position: ArrayLikeTree, minibatch: ArrayLikeTree
) -> ArrayTree:
"""Return an approximation of the log-posterior density.
Parameters
----------
position
The current value of the random variables.
minibatch
The current batch of data
Returns
-------
An approximation of the value of the log-posterior density function for
the current value of the random variables.
"""
logprior = logprior_fn(position)
batch_loglikelihood = jax.vmap(loglikelihood_fn, in_axes=(None, 0))
return logprior + data_size * jnp.mean(
batch_loglikelihood(position, minibatch), axis=0
)
return logdensity_estimator_fn
|
Builds a simple estimator for the log-density.
This estimator first appeared in :cite:p:`robbins1951stochastic`. The `logprior_fn` function has a
single argument: the current position (value of parameters). The
`loglikelihood_fn` takes two arguments: the current position and a batch of
data; if there are several variables (as, for instance, in a supervised
learning context), they are passed in a tuple.
This algorithm was ported from :cite:p:`coullon2022sgmcmcjax`.
Parameters
----------
logprior_fn
The log-probability density function corresponding to the prior
distribution.
loglikelihood_fn
The log-probability density function corresponding to the likelihood.
data_size
The number of items in the full dataset.
|
logdensity_estimator
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/gradients.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/gradients.py
|
Apache-2.0
|
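A hedged sketch with a one-dimensional Gaussian toy model; `logprior_fn`, `loglikelihood_fn`, and the data are illustrative.

```python
import jax
import jax.numpy as jnp
import jax.scipy.stats as stats
from blackjax.sgmcmc.gradients import logdensity_estimator

data = jax.random.normal(jax.random.PRNGKey(0), (1000,))
logprior_fn = lambda theta: stats.norm.logpdf(theta, 0.0, 10.0)
loglikelihood_fn = lambda theta, x: stats.norm.logpdf(x, theta, 1.0)

estimator = logdensity_estimator(logprior_fn, loglikelihood_fn, data_size=1000)
minibatch = data[:32]
approx_logposterior = estimator(0.5, minibatch)   # scalar estimate at theta = 0.5
```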
def logdensity_estimator_fn(
position: ArrayLikeTree, minibatch: ArrayLikeTree
) -> ArrayTree:
"""Return an approximation of the log-posterior density.
Parameters
----------
position
The current value of the random variables.
minibatch
The current batch of data
Returns
-------
An approximation of the value of the log-posterior density function for
the current value of the random variables.
"""
logprior = logprior_fn(position)
batch_loglikelihood = jax.vmap(loglikelihood_fn, in_axes=(None, 0))
return logprior + data_size * jnp.mean(
batch_loglikelihood(position, minibatch), axis=0
)
|
Return an approximation of the log-posterior density.
Parameters
----------
position
The current value of the random variables.
minibatch
The current batch of data
Returns
-------
An approximation of the value of the log-posterior density function for
the current value of the random variables.
|
logdensity_estimator_fn
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/gradients.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/gradients.py
|
Apache-2.0
|
def grad_estimator(
logprior_fn: Callable, loglikelihood_fn: Callable, data_size: int
) -> Callable:
"""Build a simple estimator for the gradient of the log-density."""
logdensity_estimator_fn = logdensity_estimator(
logprior_fn, loglikelihood_fn, data_size
)
return jax.grad(logdensity_estimator_fn)
|
Build a simple estimator for the gradient of the log-density.
|
grad_estimator
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/gradients.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/gradients.py
|
Apache-2.0
|
def control_variates(
logdensity_grad_estimator: Callable,
centering_position: ArrayLikeTree,
data: ArrayLikeTree,
) -> Callable:
"""Builds a control variate gradient estimator :cite:p:`baker2019control`.
This algorithm was ported from :cite:p:`coullon2022sgmcmcjax`.
Parameters
----------
logdensity_grad_estimator
A function that approximates the target's gradient function.
data
The full dataset.
centering_position
Centering position for the control variates (typically the MAP).
"""
cv_grad_value = logdensity_grad_estimator(centering_position, data)
def cv_grad_estimator_fn(
position: ArrayLikeTree, minibatch: ArrayLikeTree
) -> ArrayTree:
"""Return an approximation of the log-posterior density.
Parameters
----------
position
The current value of the random variables.
minibatch
The current batch of data. The first dimension is assumed to be the
batch dimension.
Returns
-------
An estimate of the gradient of the log-posterior density at
the current value of the random variables.
"""
grad_estimate = logdensity_grad_estimator(position, minibatch)
center_grad_estimate = logdensity_grad_estimator(centering_position, minibatch)
return jax.tree.map(
lambda grad_est, cv_grad_est, cv_grad: cv_grad + grad_est - cv_grad_est,
grad_estimate,
center_grad_estimate,
cv_grad_value,
)
return cv_grad_estimator_fn
|
Builds a control variate gradient estimator :cite:p:`baker2019control`.
This algorithm was ported from :cite:p:`coullon2022sgmcmcjax`.
Parameters
----------
logdensity_grad_estimator
A function that approximates the target's gradient function.
data
The full dataset.
centering_position
Centering position for the control variates (typically the MAP).
|
control_variates
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/gradients.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/gradients.py
|
Apache-2.0
|
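A hedged sketch of wrapping a gradient estimator with control variates; the toy model, centering position, and data are placeholders.

```python
import jax
import jax.numpy as jnp
import jax.scipy.stats as stats
from blackjax.sgmcmc.gradients import control_variates, grad_estimator

data = jax.random.normal(jax.random.PRNGKey(0), (1000, 1))
logprior_fn = lambda theta: stats.norm.logpdf(theta, 0.0, 10.0).sum()
loglikelihood_fn = lambda theta, x: stats.norm.logpdf(x, theta, 1.0).sum()

grad_fn = grad_estimator(logprior_fn, loglikelihood_fn, data_size=1000)
centering_position = jnp.zeros(1)                 # e.g. a MAP estimate
cv_grad_fn = control_variates(grad_fn, centering_position, data)
grad_value = cv_grad_fn(jnp.array([0.5]), data[:32])
```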
def cv_grad_estimator_fn(
position: ArrayLikeTree, minibatch: ArrayLikeTree
) -> ArrayTree:
"""Return an approximation of the log-posterior density.
Parameters
----------
position
The current value of the random variables.
minibatch
The current batch of data. The first dimension is assumed to be the
batch dimension.
Returns
-------
An estimate of the gradient of the log-posterior density at
the current value of the random variables.
"""
grad_estimate = logdensity_grad_estimator(position, minibatch)
center_grad_estimate = logdensity_grad_estimator(centering_position, minibatch)
return jax.tree.map(
lambda grad_est, cv_grad_est, cv_grad: cv_grad + grad_est - cv_grad_est,
grad_estimate,
center_grad_estimate,
cv_grad_value,
)
|
Return a control-variate estimate of the gradient of the log-posterior density.
Parameters
----------
position
The current value of the random variables.
minibatch
The current batch of data. The first dimension is assumed to be the
batch dimension.
Returns
-------
An estimate of the gradient of the log-posterior density at
the current value of the random variables.
|
cv_grad_estimator_fn
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/gradients.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/gradients.py
|
Apache-2.0
|
def build_kernel(alpha: float = 0.01, beta: float = 0) -> Callable:
"""Stochastic gradient Hamiltonian Monte Carlo (SgHMC) algorithm."""
integrator = diffusions.sghmc(alpha, beta)
def kernel(
rng_key: PRNGKey,
position: ArrayLikeTree,
grad_estimator: Callable,
minibatch: ArrayLikeTree,
step_size: float,
num_integration_steps: int,
temperature: float = 1.0,
) -> ArrayTree:
def body_fn(state, rng_key):
position, momentum = state
logdensity_grad = grad_estimator(position, minibatch)
position, momentum = integrator(
rng_key, position, momentum, logdensity_grad, step_size, temperature
)
return ((position, momentum), position)
momentum = generate_gaussian_noise(rng_key, position)
keys = jax.random.split(rng_key, num_integration_steps)
(position, momentum), _ = jax.lax.scan(body_fn, (position, momentum), keys)
return position
return kernel
|
Stochastic gradient Hamiltonian Monte Carlo (SgHMC) algorithm.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/sghmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/sghmc.py
|
Apache-2.0
|
def as_top_level_api(
grad_estimator: Callable,
num_integration_steps: int = 10,
alpha: float = 0.01,
beta: float = 0,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the SGHMC kernel.
The general sghmc kernel builder (:meth:`blackjax.sgmcmc.sghmc.build_kernel`, alias
`blackjax.sghmc.build_kernel`) can be cumbersome to manipulate. Since most users
only need to specify the kernel parameters at initialization time, we
provide a helper function that specializes the general kernel.
Example
-------
To initialize a SGHMC kernel one needs to specify a schedule function, which
returns a step size at each sampling step, and a gradient estimator
function. Here for a constant step size, and `data_size` data samples:
.. code::
grad_estimator = blackjax.sgmcmc.gradients.grad_estimator(logprior_fn, loglikelihood_fn, data_size)
We can now initialize the sghmc kernel and the state. Like HMC, SGHMC needs the user to specify a number of integration steps.
.. code::
sghmc = blackjax.sghmc(grad_estimator, num_integration_steps)
Assuming we have an iterator `batches` that yields batches of data we can
perform one step:
.. code::
step_size = 1e-3
minibatch = next(batches)
new_position = sghmc.step(rng_key, position, minibatch, step_size)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(sghmc.step)
new_position, info = step(rng_key, position, minibatch, step_size)
Parameters
----------
grad_estimator
A function that takes a position, a batch of data and returns an estimation
of the gradient of the log-density at this position.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(alpha, beta)
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position)
def step_fn(
rng_key: PRNGKey,
state: ArrayLikeTree,
minibatch: ArrayLikeTree,
step_size: float,
temperature: float = 1,
) -> ArrayTree:
return kernel(
rng_key,
state,
grad_estimator,
minibatch,
step_size,
num_integration_steps,
temperature,
)
return SamplingAlgorithm(init_fn, step_fn) # type: ignore[arg-type]
|
Implements the (basic) user interface for the SGHMC kernel.
The general sghmc kernel builder (:meth:`blackjax.sgmcmc.sghmc.build_kernel`, alias
`blackjax.sghmc.build_kernel`) can be cumbersome to manipulate. Since most users
only need to specify the kernel parameters at initialization time, we
provide a helper function that specializes the general kernel.
Example
-------
To initialize a SGHMC kernel one needs to specify a schedule function, which
returns a step size at each sampling step, and a gradient estimator
function. Here for a constant step size, and `data_size` data samples:
.. code::
grad_estimator = blackjax.sgmcmc.gradients.grad_estimator(logprior_fn, loglikelihood_fn, data_size)
We can now initialize the sghmc kernel and the state. Like HMC, SGHMC needs the user to specify a number of integration steps.
.. code::
sghmc = blackjax.sghmc(grad_estimator, num_integration_steps)
Assuming we have an iterator `batches` that yields batches of data we can
perform one step:
.. code::
step_size = 1e-3
minibatch = next(batches)
new_position = sghmc.step(rng_key, position, minibatch, step_size)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(sghmc.step)
new_position, info = step(rng_key, position, minibatch, step_size)
Parameters
----------
grad_estimator
A function that takes a position, a batch of data and returns an estimation
of the gradient of the log-density at this position.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/sghmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/sghmc.py
|
Apache-2.0
|
def build_kernel() -> Callable:
"""Stochastic gradient Langevin Dynamics (SgLD) algorithm."""
integrator = diffusions.overdamped_langevin()
def kernel(
rng_key: PRNGKey,
position: ArrayLikeTree,
grad_estimator: Callable,
minibatch: ArrayLikeTree,
step_size: float,
temperature: float = 1.0,
) -> ArrayTree:
logdensity_grad = grad_estimator(position, minibatch)
new_position = integrator(
rng_key, position, logdensity_grad, step_size, temperature
)
return new_position
return kernel
|
Stochastic gradient Langevin Dynamics (SgLD) algorithm.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/sgld.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/sgld.py
|
Apache-2.0
|
def as_top_level_api(
grad_estimator: Callable,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the SGLD kernel.
The general sgld kernel builder (:meth:`blackjax.sgmcmc.sgld.build_kernel`, alias
`blackjax.sgld.build_kernel`) can be cumbersome to manipulate. Since most users
only need to specify the kernel parameters at initialization time, we
provide a helper function that specializes the general kernel.
Example
-------
To initialize a SGLD kernel one needs to specify a schedule function, which
returns a step size at each sampling step, and a gradient estimator
function. Here for a constant step size, and `data_size` data samples:
.. code::
grad_fn = blackjax.sgmcmc.gradients.grad_estimator(logprior_fn, loglikelihood_fn, data_size)
We can now initialize the sgld kernel and the state:
.. code::
sgld = blackjax.sgld(grad_fn)
Assuming we have an iterator `batches` that yields batches of data we can
perform one step:
.. code::
step_size = 1e-3
minibatch = next(batches)
new_position = sgld.step(rng_key, position, minibatch, step_size)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(sgld.step)
new_position, info = step(rng_key, position, minibatch, step_size)
Parameters
----------
grad_estimator
A function that takes a position, a batch of data and returns an estimation
of the gradient of the log-density at this position.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel()
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position)
def step_fn(
rng_key: PRNGKey,
state: ArrayLikeTree,
minibatch: ArrayLikeTree,
step_size: float,
temperature: float = 1,
) -> ArrayTree:
return kernel(rng_key, state, grad_estimator, minibatch, step_size, temperature)
return SamplingAlgorithm(init_fn, step_fn) # type: ignore[arg-type]
|
Implements the (basic) user interface for the SGLD kernel.
The general sgld kernel builder (:meth:`blackjax.sgmcmc.sgld.build_kernel`, alias
`blackjax.sgld.build_kernel`) can be cumbersome to manipulate. Since most users
only need to specify the kernel parameters at initialization time, we
provide a helper function that specializes the general kernel.
Example
-------
To initialize a SGLD kernel one needs to specify a schedule function, which
returns a step size at each sampling step, and a gradient estimator
function. Here for a constant step size, and `data_size` data samples:
.. code::
grad_fn = blackjax.sgmcmc.gradients.grad_estimator(logprior_fn, loglikelihood_fn, data_size)
We can now initialize the sgld kernel and the state:
.. code::
sgld = blackjax.sgld(grad_fn)
Assuming we have an iterator `batches` that yields batches of data we can
perform one step:
.. code::
step_size = 1e-3
minibatch = next(batches)
new_position = sgld.step(rng_key, position, minibatch, step_size)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(sgld.step)
new_position, info = step(rng_key, position, minibatch, step_size)
Parameters
----------
grad_estimator
A function that takes a position, a batch of data and returns an estimation
of the gradient of the log-density at this position.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/sgld.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/sgld.py
|
Apache-2.0
|
def as_top_level_api(
grad_estimator: Callable,
alpha: float = 0.01,
beta: float = 0.0,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the SGNHT kernel.
The general sgnht kernel (:meth:`blackjax.sgmcmc.sgnht.build_kernel`, alias
`blackjax.sgnht.build_kernel`) can be cumbersome to manipulate. Since most users
only need to specify the kernel parameters at initialization time, we
provide a helper function that specializes the general kernel.
Example
-------
To initialize a SGNHT kernel one needs to specify a schedule function, which
returns a step size at each sampling step, and a gradient estimator
function. Here for a constant step size, and `data_size` data samples:
.. code::
grad_estimator = blackjax.sgmcmc.gradients.grad_estimator(logprior_fn, loglikelihood_fn, data_size)
We can now initialize the sgnht kernel and the state.
.. code::
sgnht = blackjax.sgnht(grad_estimator)
state = sgnht.init(rng_key, position)
Assuming we have an iterator `batches` that yields batches of data we can
perform one step:
.. code::
step_size = 1e-3
minibatch = next(batches)
new_state = sgnht.step(rng_key, state, minibatch, step_size)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(sgnht.step)
new_state = step(rng_key, state, minibatch, step_size)
Parameters
----------
grad_estimator
A function that takes a position, a batch of data and returns an estimation
of the gradient of the log-density at this position.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(alpha, beta)
def init_fn(
position: ArrayLikeTree,
rng_key: PRNGKey,
init_xi: Union[None, float] = None,
):
return init(position, rng_key, init_xi or alpha)
def step_fn(
rng_key: PRNGKey,
state: SGNHTState,
minibatch: ArrayLikeTree,
step_size: float,
temperature: float = 1,
) -> SGNHTState:
return kernel(rng_key, state, grad_estimator, minibatch, step_size, temperature)
return SamplingAlgorithm(init_fn, step_fn) # type: ignore[arg-type]
|
Implements the (basic) user interface for the SGNHT kernel.
The general sgnht kernel (:meth:`blackjax.sgmcmc.sgnht.build_kernel`, alias
`blackjax.sgnht.build_kernel`) can be cumbersome to manipulate. Since most users
only need to specify the kernel parameters at initialization time, we
provide a helper function that specializes the general kernel.
Example
-------
To initialize a SGNHT kernel one needs to specify a schedule function, which
returns a step size at each sampling step, and a gradient estimator
function. Here for a constant step size, and `data_size` data samples:
.. code::
grad_estimator = blackjax.sgmcmc.gradients.grad_estimator(logprior_fn, loglikelihood_fn, data_size)
We can now initialize the sgnht kernel and the state.
.. code::
sgnht = blackjax.sgnht(grad_estimator)
state = sgnht.init(rng_key, position)
Assuming we have an iterator `batches` that yields batches of data we can
perform one step:
.. code::
step_size = 1e-3
minibatch = next(batches)
new_state = sgnht.step(rng_key, state, minibatch, step_size)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(sgnht.step)
new_state = step(rng_key, state, minibatch, step_size)
Parameters
----------
grad_estimator
A function that takes a position, a batch of data and returns an estimation
of the gradient of the log-density at this position.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/sgmcmc/sgnht.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/sgmcmc/sgnht.py
|
Apache-2.0
|
def build_kernel(
logprior_fn: Callable,
loglikelihood_fn: Callable,
mcmc_step_fn: Callable,
mcmc_init_fn: Callable,
resampling_fn: Callable,
target_ess: float,
root_solver: Callable = solver.dichotomy,
**extra_parameters,
) -> Callable:
r"""Build a Tempered SMC step using an adaptive schedule.
Parameters
----------
logprior_fn: Callable
A function that computes the log-prior density.
loglikelihood_fn: Callable
A function that returns the log-likelihood density.
mcmc_step_fn: Callable
The MCMC step function used to update the particles; it takes a
log-probability density function as an argument at call time.
mcmc_init_fn: Callable
A function that creates a new MCMC state from a position and a
log-probability density function.
resampling_fn: Callable
A random function that resamples generated particles based on their weights
target_ess: float
The target ESS for the adaptive MCMC tempering
root_solver: Callable, optional
A solver utility to find delta matching the target ESS. Signature is
`root_solver(fun, delta_0, min_delta, max_delta)`, default is a dichotomy solver
use_log_ess: bool, optional
Use ESS in log space to solve for delta, default is `True`.
This is usually more stable when using gradient based solvers.
Returns
-------
A callable that takes a rng_key and a TemperedSMCState that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
def compute_delta(state: tempered.TemperedSMCState) -> float:
lmbda = state.lmbda
max_delta = 1 - lmbda
delta = ess.ess_solver(
jax.vmap(loglikelihood_fn),
state.particles,
target_ess,
max_delta,
root_solver,
)
delta = jnp.clip(delta, 0.0, max_delta)
return delta
tempered_kernel = tempered.build_kernel(
logprior_fn,
loglikelihood_fn,
mcmc_step_fn,
mcmc_init_fn,
resampling_fn,
**extra_parameters,
)
def kernel(
rng_key: PRNGKey,
state: tempered.TemperedSMCState,
num_mcmc_steps: int,
mcmc_parameters: dict,
) -> tuple[tempered.TemperedSMCState, base.SMCInfo]:
delta = compute_delta(state)
lmbda = delta + state.lmbda
return tempered_kernel(rng_key, state, num_mcmc_steps, lmbda, mcmc_parameters)
return kernel
|
Build a Tempered SMC step using an adaptive schedule.
Parameters
----------
logprior_fn: Callable
A function that computes the log-prior density.
loglikelihood_fn: Callable
A function that returns the log-likelihood density.
mcmc_step_fn: Callable
The MCMC step function used to update the particles; it takes a
log-probability density function as an argument at call time.
mcmc_init_fn: Callable
A function that creates a new MCMC state from a position and a
log-probability density function.
resampling_fn: Callable
A random function that resamples generated particles based on their weights
target_ess: float
The target ESS for the adaptive MCMC tempering
root_solver: Callable, optional
A solver utility to find delta matching the target ESS. Signature is
`root_solver(fun, delta_0, min_delta, max_delta)`, default is a dichotomy solver
use_log_ess: bool, optional
Use ESS in log space to solve for delta, default is `True`.
This is usually more stable when using gradient based solvers.
Returns
-------
A callable that takes a rng_key and a TemperedSMCState that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/adaptive_tempered.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/adaptive_tempered.py
|
Apache-2.0
|
def as_top_level_api(
logprior_fn: Callable,
loglikelihood_fn: Callable,
mcmc_step_fn: Callable,
mcmc_init_fn: Callable,
mcmc_parameters: dict,
resampling_fn: Callable,
target_ess: float,
root_solver: Callable = solver.dichotomy,
num_mcmc_steps: int = 10,
**extra_parameters,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the Adaptive Tempered SMC kernel.
Parameters
----------
logprior_fn
The log-prior function of the model we wish to draw samples from.
loglikelihood_fn
The log-likelihood function of the model we wish to draw samples from.
mcmc_step_fn
The MCMC step function used to update the particles.
mcmc_init_fn
The MCMC init function used to build a MCMC state from a particle position.
mcmc_parameters
The parameters of the MCMC step function. Parameters with leading dimension
length of 1 are shared amongst the particles.
resampling_fn
The function used to resample the particles.
target_ess
The number of effective sample size to aim for at each step.
root_solver
The solver used to adaptively compute the temperature given a target number
of effective samples.
num_mcmc_steps
The number of times the MCMC kernel is applied to the particles per step.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(
logprior_fn,
loglikelihood_fn,
mcmc_step_fn,
mcmc_init_fn,
resampling_fn,
target_ess,
root_solver,
**extra_parameters,
)
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position)
def step_fn(rng_key: PRNGKey, state):
return kernel(
rng_key,
state,
num_mcmc_steps,
mcmc_parameters,
)
return SamplingAlgorithm(init_fn, step_fn)
|
Implements the (basic) user interface for the Adaptive Tempered SMC kernel.
Parameters
----------
logprior_fn
The log-prior function of the model we wish to draw samples from.
loglikelihood_fn
The log-likelihood function of the model we wish to draw samples from.
mcmc_step_fn
The MCMC step function used to update the particles.
mcmc_init_fn
The MCMC init function used to build a MCMC state from a particle position.
mcmc_parameters
The parameters of the MCMC step function. Parameters with leading dimension
length of 1 are shared amongst the particles.
resampling_fn
The function used to resample the particles.
target_ess
The number of effective sample size to aim for at each step.
root_solver
The solver used to adaptively compute the temperature given a target number
of effective samples.
num_mcmc_steps
The number of times the MCMC kernel is applied to the particles per step.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/adaptive_tempered.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/adaptive_tempered.py
|
Apache-2.0
|
def step(
rng_key: PRNGKey,
state: SMCState,
update_fn: Callable,
weight_fn: Callable,
resample_fn: Callable,
num_resampled: Optional[int] = None,
) -> tuple[SMCState, SMCInfo]:
"""General SMC sampling step.
`update_fn` here corresponds to the Markov kernel $M_{t+1}$, and `weight_fn`
corresponds to the potential function $G_t$. We first use `update_fn` to
generate new particles from the current ones, weigh these particles using
`weight_fn` and resample them with `resample_fn`.
The `update_fn` and `weight_fn` functions must be batched by the caller, either
using `jax.vmap` or `jax.pmap`.
In Feynman-Kac terms, the algorithm goes roughly as follows:
.. code::
M_t: update_fn
G_t: weight_fn
R_t: resample_fn
idx = R_t(weights)
x_t = x_tm1[idx]
x_{t+1} = M_t(x_t)
weights = G_t(x_{t+1})
Parameters
----------
rng_key
Key used to generate pseudo-random numbers.
state
Current state of the SMC sampler: particles and their respective
log-weights
update_fn
Function that takes an array of keys and particles and returns
new particles.
weight_fn
Function that assigns a weight to the particles.
resample_fn
Function that resamples the particles.
num_resampled
The number of particles to resample. This can be used to implement
Waste-Free SMC :cite:p:`dau2020waste`, in which case we resample a number :math:`M<N`
of particles, and the update function is in charge of returning
:math:`N` samples.
Returns
-------
new_particles
An array that contains the new particles generated by this SMC step.
info
An `SMCInfo` object that contains extra information about the SMC
transition.
"""
updating_key, resampling_key = jax.random.split(rng_key, 2)
num_particles = state.weights.shape[0]
if num_resampled is None:
num_resampled = num_particles
resampling_idx = resample_fn(resampling_key, state.weights, num_resampled)
particles = jax.tree.map(lambda x: x[resampling_idx], state.particles)
keys = jax.random.split(updating_key, num_resampled)
particles, update_info = update_fn(keys, particles, state.update_parameters)
log_weights = weight_fn(particles)
logsum_weights = jax.scipy.special.logsumexp(log_weights)
normalizing_constant = logsum_weights - jnp.log(num_particles)
weights = jnp.exp(log_weights - logsum_weights)
return SMCState(particles, weights, state.update_parameters), SMCInfo(
resampling_idx, normalizing_constant, update_info
)
|
General SMC sampling step.
`update_fn` here corresponds to the Markov kernel $M_{t+1}$, and `weight_fn`
corresponds to the potential function $G_t$. We first use `update_fn` to
generate new particles from the current ones, weigh these particles using
`weight_fn` and resample them with `resample_fn`.
The `update_fn` and `weight_fn` functions must be batched by the caller, either
using `jax.vmap` or `jax.pmap`.
In Feynman-Kac terms, the algorithm goes roughly as follows:
.. code::
M_t: update_fn
G_t: weight_fn
R_t: resample_fn
idx = R_t(weights)
x_t = x_tm1[idx]
x_{t+1} = M_t(x_t)
weights = G_t(x_{t+1})
Parameters
----------
rng_key
Key used to generate pseudo-random numbers.
state
Current state of the SMC sampler: particles and their respective
log-weights
update_fn
Function that takes an array of keys and particles and returns
new particles.
weight_fn
Function that assigns a weight to the particles.
resample_fn
Function that resamples the particles.
num_resampled
The number of particles to resample. This can be used to implement
Waste-Free SMC :cite:p:`dau2020waste`, in which case we resample a number :math:`M<N`
of particles, and the update function is in charge of returning
:math:`N` samples.
Returns
-------
new_particles
An array that contains the new particles generated by this SMC step.
info
An `SMCInfo` object that contains extra information about the SMC
transition.
|
step
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/base.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/base.py
|
Apache-2.0
|
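A hedged, self-contained sketch of one generic SMC step with a trivial Gaussian-perturbation `update_fn` and a standard-normal `weight_fn`; it assumes `SMCState` and `resampling.systematic` import as shown.

```python
import jax
import jax.numpy as jnp
from blackjax.smc import resampling
from blackjax.smc.base import SMCState, step

n = 100
particles = jax.random.normal(jax.random.PRNGKey(0), (n,))
state = SMCState(particles, jnp.ones(n) / n, {})

def update_fn(keys, particles, update_parameters):
    # perturb each particle with a little Gaussian noise (the Markov kernel M_t)
    moved = particles + 0.1 * jax.vmap(jax.random.normal)(keys)
    return moved, None

def weight_fn(particles):
    # unnormalized log-weights under a standard normal target (the potential G_t)
    return -0.5 * particles**2

new_state, info = step(
    jax.random.PRNGKey(1), state, update_fn, weight_fn, resampling.systematic
)
```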
def update_and_take_last(
mcmc_init_fn,
tempered_logposterior_fn,
shared_mcmc_step_fn,
num_mcmc_steps,
n_particles,
):
"""Given N particles, runs num_mcmc_steps of a kernel starting at each particle, and
returns the last values, wasting the previous num_mcmc_steps-1
samples per chain.
"""
def mcmc_kernel(rng_key, position, step_parameters):
state = mcmc_init_fn(position, tempered_logposterior_fn)
def body_fn(state, rng_key):
new_state, info = shared_mcmc_step_fn(
rng_key, state, tempered_logposterior_fn, **step_parameters
)
return new_state, info
keys = jax.random.split(rng_key, num_mcmc_steps)
last_state, info = jax.lax.scan(body_fn, state, keys)
return last_state.position, info
return jax.vmap(mcmc_kernel), n_particles
|
Given N particles, runs num_mcmc_steps of a kernel starting at each particle, and
returns the last values, wasting the previous num_mcmc_steps-1
samples per chain.
|
update_and_take_last
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/base.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/base.py
|
Apache-2.0
|
def log_ess(log_weights: Array) -> float:
"""Compute the effective sample size.
Parameters
----------
log_weights: 1D Array
log-weights of the sample
Returns
-------
log_ess: float
The logarithm of the effective sample size
"""
return 2 * jsp.special.logsumexp(log_weights) - jsp.special.logsumexp(
2 * log_weights
)
|
Compute the effective sample size.
Parameters
----------
log_weights: 1D Array
log-weights of the sample
Returns
-------
log_ess: float
The logarithm of the effective sample size
|
log_ess
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/ess.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/ess.py
|
Apache-2.0
|
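A quick check of `log_ess`: with equal weights the effective sample size equals the number of particles.

```python
import jax.numpy as jnp
from blackjax.smc.ess import log_ess

log_weights = jnp.log(jnp.full(4, 0.25))
ess = jnp.exp(log_ess(log_weights))   # -> 4.0 for four equally weighted particles
```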
def ess_solver(
logdensity_fn: Callable,
particles: ArrayLikeTree,
target_ess: float,
max_delta: float,
root_solver: Callable,
):
"""ESS solver for computing the next increment of SMC tempering.
Parameters
----------
logdensity_fn: Callable
The log probability function we wish to sample from.
particles: SMCState
Current state of the tempered SMC algorithm
target_ess: float
The relative ESS targeted for the next increment of SMC tempering
max_delta: float
Max acceptable delta increment
root_solver: Callable, optional
A solver to find the root of a function, takes a function `f`, a starting point `delta0`,
a min value `min_delta`, and a max value `max_delta`.
Default is `BFGS` minimization of `f ** 2` and ignores `min_delta` and `max_delta`.
Returns
-------
delta: float
The increment that solves for the target ESS
"""
logprob = logdensity_fn(particles)
n_particles = logprob.shape[0]
target_val = jnp.log(n_particles * target_ess)
def fun_to_solve(delta):
log_weights = jnp.nan_to_num(-delta * logprob)
ess_val = log_ess(log_weights)
return ess_val - target_val
estimated_delta = root_solver(fun_to_solve, 0.0, max_delta)
return estimated_delta
|
ESS solver for computing the next increment of SMC tempering.
Parameters
----------
logdensity_fn: Callable
The log probability function we wish to sample from.
particles: SMCState
Current state of the tempered SMC algorithm
target_ess: float
The relative ESS targeted for the next increment of SMC tempering
max_delta: float
Max acceptable delta increment
root_solver: Callable, optional
A solver to find the root of a function, takes a function `f`, a starting point `delta0`,
a min value `min_delta`, and a max value `max_delta`.
Default is `BFGS` minimization of `f ** 2` and ignores `min_delta` and `max_delta`.
Returns
-------
delta: float
The increment that solves for the target ESS
|
ess_solver
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/ess.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/ess.py
|
Apache-2.0
|
def unshared_parameters_and_step_fn(mcmc_parameters, mcmc_step_fn):
"""Splits MCMC parameters into two dictionaries. The shared dictionary
represents the parameters common to all chains, and the unshared are
different per chain.
Binds the step fn using the shared parameters.
"""
shared_mcmc_parameters = {}
unshared_mcmc_parameters = {}
for k, v in mcmc_parameters.items():
if v.shape[0] == 1:
shared_mcmc_parameters[k] = v[0, ...]
else:
unshared_mcmc_parameters[k] = v
shared_mcmc_step_fn = partial(mcmc_step_fn, **shared_mcmc_parameters)
return unshared_mcmc_parameters, shared_mcmc_step_fn
|
Splits MCMC parameters into two dictionaries. The shared dictionary
represents the parameters common to all chains, and the unshared are
different per chain.
Binds the step fn using the shared parameters.
|
unshared_parameters_and_step_fn
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/from_mcmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/from_mcmc.py
|
Apache-2.0
|
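A hedged sketch of the shared/unshared split; the dummy `mcmc_step_fn` and parameter names are placeholders, not taken from the source above.

```python
import jax.numpy as jnp
from blackjax.smc.from_mcmc import unshared_parameters_and_step_fn

def mcmc_step_fn(rng_key, state, logdensity_fn, step_size, inverse_mass_matrix):
    ...  # placeholder kernel, never called here

mcmc_parameters = {
    "step_size": jnp.array([1e-2]),              # leading dim 1: shared by all chains
    "inverse_mass_matrix": jnp.ones((100, 2)),   # one row per chain: unshared
}
unshared, bound_step_fn = unshared_parameters_and_step_fn(mcmc_parameters, mcmc_step_fn)
# unshared == {"inverse_mass_matrix": ...}; step_size is already bound into bound_step_fn
```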
def build_kernel(
mcmc_step_fn: Callable,
mcmc_init_fn: Callable,
resampling_fn: Callable,
update_strategy: Callable = update_and_take_last,
):
"""SMC step from MCMC kernels.
Builds MCMC kernels from the input parameters, which may change across iterations.
Moreover, it defines the way such kernels are used to update the particles. This layer
adapts an API defined in terms of kernels (mcmc_step_fn and mcmc_init_fn) into an API
that depends on an update function over the set of particles.
Returns
-------
A callable that takes a rng_key and a state with .particles and .weights and returns a base.SMCState
and base.SMCInfo pair.
"""
def step(
rng_key: PRNGKey,
state,
num_mcmc_steps: int,
mcmc_parameters: dict,
logposterior_fn: Callable,
log_weights_fn: Callable,
) -> tuple[smc.base.SMCState, smc.base.SMCInfo]:
unshared_mcmc_parameters, shared_mcmc_step_fn = unshared_parameters_and_step_fn(
mcmc_parameters, mcmc_step_fn
)
update_fn, num_resampled = update_strategy(
mcmc_init_fn,
logposterior_fn,
shared_mcmc_step_fn,
n_particles=state.weights.shape[0],
num_mcmc_steps=num_mcmc_steps,
)
return smc.base.step(
rng_key,
SMCState(state.particles, state.weights, unshared_mcmc_parameters),
update_fn,
jax.vmap(log_weights_fn),
resampling_fn,
num_resampled,
)
return step
|
SMC step from MCMC kernels.
Builds MCMC kernels from the input parameters, which may change across iterations.
Moreover, it defines the way such kernels are used to update the particles. This layer
adapts an API defined in terms of kernels (mcmc_step_fn and mcmc_init_fn) into an API
that depends on an update function over the set of particles.
Returns
-------
A callable that takes a rng_key and a state with .particles and .weights and returns a base.SMCState
and base.SMCInfo pair.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/from_mcmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/from_mcmc.py
|
Apache-2.0
|
def build_kernel(
smc_algorithm,
logprior_fn: Callable,
loglikelihood_fn: Callable,
mcmc_step_fn: Callable,
mcmc_init_fn: Callable,
resampling_fn: Callable,
mcmc_parameter_update_fn: Callable[
[PRNGKey, SMCState, SMCInfo], Dict[str, ArrayTree]
],
num_mcmc_steps: int = 10,
smc_returns_state_with_parameter_override=False,
**extra_parameters,
) -> Callable:
"""In the context of an SMC sampler (whose step_fn returning state has a .particles attribute), there's an inner
MCMC that is used to perturbate/update each of the particles. This adaptation tunes some parameter of that MCMC,
based on particles. The parameter type must be a valid JAX type.
Parameters
----------
smc_algorithm
Either blackjax.adaptive_tempered_smc or blackjax.tempered_smc (or any other implementation of
a sampling algorithm that returns an SMCState and SMCInfo pair). It is also possible for this
to return a StateWithParameterOverride, in which case smc_returns_state_with_parameter_override needs to be True.
logprior_fn
A function that computes the log density of the prior distribution
loglikelihood_fn
A function that returns the log-likelihood at a given position.
mcmc_step_fn:
The transition kernel, should take as parameters the dictionary output of mcmc_parameter_update_fn.
mcmc_step_fn(rng_key, state, tempered_logposterior_fn, **mcmc_parameter_update_fn())
mcmc_init_fn
A callable that initializes the inner kernel
mcmc_parameter_update_fn
A callable that takes the SMCState and SMCInfo at step i and constructs a parameter to be used by the inner kernel at iteration i+1.
extra_parameters:
parameters to be used for the creation of the smc_algorithm.
smc_returns_state_with_parameter_override:
A boolean indicating that the underlying smc_algorithm returns a StateWithParameterOverride.
This is used to compose different adaptation mechanisms, such as pretuning with tuning.
"""
if smc_returns_state_with_parameter_override:
def extract_state_for_delegate(state):
return state
def compose_new_state(new_state, new_parameter_override):
composed_parameter_override = (
new_state.parameter_override | new_parameter_override
)
return StateWithParameterOverride(
new_state.sampler_state, composed_parameter_override
)
else:
def extract_state_for_delegate(state):
return state.sampler_state
def compose_new_state(new_state, new_parameter_override):
return StateWithParameterOverride(new_state, new_parameter_override)
def kernel(
rng_key: PRNGKey, state: StateWithParameterOverride, **extra_step_parameters
) -> Tuple[StateWithParameterOverride, SMCInfo]:
step_fn = smc_algorithm(
logprior_fn=logprior_fn,
loglikelihood_fn=loglikelihood_fn,
mcmc_step_fn=mcmc_step_fn,
mcmc_init_fn=mcmc_init_fn,
mcmc_parameters=state.parameter_override,
resampling_fn=resampling_fn,
num_mcmc_steps=num_mcmc_steps,
**extra_parameters,
).step
parameter_update_key, step_key = jax.random.split(rng_key, 2)
new_state, info = step_fn(
step_key, extract_state_for_delegate(state), **extra_step_parameters
)
new_parameter_override = mcmc_parameter_update_fn(
parameter_update_key, new_state, info
)
return compose_new_state(new_state, new_parameter_override), info
return kernel
|
In the context of an SMC sampler (whose step_fn returns a state with a .particles attribute), there's an inner
MCMC kernel that is used to perturb/update each of the particles. This adaptation tunes some parameter of that MCMC,
based on particles. The parameter type must be a valid JAX type.
Parameters
----------
smc_algorithm
Either blackjax.adaptive_tempered_smc or blackjax.tempered_smc (or any other implementation of
a sampling algorithm that returns an SMCState and SMCInfo pair). It is also possible for this
to return a StateWithParameterOverride, in which case smc_returns_state_with_parameter_override needs to be True.
logprior_fn
A function that computes the log density of the prior distribution
loglikelihood_fn
A function that returns the log-likelihood at a given position.
mcmc_step_fn:
The transition kernel, should take as parameters the dictionary output of mcmc_parameter_update_fn.
mcmc_step_fn(rng_key, state, tempered_logposterior_fn, **mcmc_parameter_update_fn())
mcmc_init_fn
A callable that initializes the inner kernel
mcmc_parameter_update_fn
A callable that takes the SMCState and SMCInfo at step i and constructs a parameter to be used by the inner kernel at iteration i+1.
extra_parameters:
parameters to be used for the creation of the smc_algorithm.
smc_returns_state_with_parameter_override:
A boolean indicating that the underlying smc_algorithm returns a StateWithParameterOverride.
This is used to compose different adaptation mechanisms, such as pretuning with tuning.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/inner_kernel_tuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/inner_kernel_tuning.py
|
Apache-2.0
|
def as_top_level_api(
smc_algorithm,
logprior_fn: Callable,
loglikelihood_fn: Callable,
mcmc_step_fn: Callable,
mcmc_init_fn: Callable,
resampling_fn: Callable,
mcmc_parameter_update_fn: Callable[
[PRNGKey, SMCState, SMCInfo], Dict[str, ArrayTree]
],
initial_parameter_value,
num_mcmc_steps: int = 10,
smc_returns_state_with_parameter_override=False,
**extra_parameters,
) -> SamplingAlgorithm:
"""In the context of an SMC sampler (whose step_fn returning state
has a .particles attribute), there's an inner MCMC that is used
to perturbate/update each of the particles. This adaptation tunes some
parameter of that MCMC, based on particles.
The parameter type must be a valid JAX type.
Parameters
----------
smc_algorithm
Either blackjax.adaptive_tempered_smc or blackjax.tempered_smc (or any other implementation of
a sampling algorithm that returns an SMCState and SMCInfo pair). See blackjax.smc_family
logprior_fn
A function that computes the log density of the prior distribution
loglikelihood_fn
A function that returns the probability at a given position.
mcmc_step_fn
The transition kernel, should take as parameters the dictionary output of mcmc_parameter_update_fn.
mcmc_init_fn
A callable that initializes the inner kernel
mcmc_parameter_update_fn
A callable that takes the SMCState and SMCInfo at step i and constructs a parameter to be used by the
inner kernel at iteration i+1.
initial_parameter_value
Parameter to be used by the mcmc_factory before the first iteration.
extra_parameters:
parameters to be used for the creation of the smc_algorithm.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(
smc_algorithm,
logprior_fn,
loglikelihood_fn,
mcmc_step_fn,
mcmc_init_fn,
resampling_fn,
mcmc_parameter_update_fn,
num_mcmc_steps,
smc_returns_state_with_parameter_override,
**extra_parameters,
)
def init_fn(position, rng_key=None):
del rng_key
return init(smc_algorithm.init, position, initial_parameter_value)
def step_fn(
rng_key: PRNGKey, state, **extra_step_parameters
) -> Tuple[StateWithParameterOverride, SMCInfo]:
return kernel(rng_key, state, **extra_step_parameters)
return SamplingAlgorithm(init_fn, step_fn)
|
In the context of an SMC sampler (whose step_fn returning state
has a .particles attribute), there's an inner MCMC that is used
to perturbate/update each of the particles. This adaptation tunes some
parameter of that MCMC, based on particles.
The parameter type must be a valid JAX type.
Parameters
----------
smc_algorithm
Either blackjax.adaptive_tempered_smc or blackjax.tempered_smc (or any other implementation of
a sampling algorithm that returns an SMCState and SMCInfo pair). See blackjax.smc_family
logprior_fn
A function that computes the log density of the prior distribution
loglikelihood_fn
A function that returns the probability at a given position.
mcmc_step_fn
The transition kernel, should take as parameters the dictionary output of mcmc_parameter_update_fn.
mcmc_init_fn
A callable that initializes the inner kernel
mcmc_parameter_update_fn
A callable that takes the SMCState and SMCInfo at step i and constructs a parameter to be used by the
inner kernel at iteration i+1.
initial_parameter_value
Parameter to be used by the mcmc_factory before the first iteration.
extra_parameters:
parameters to be used for the creation of the smc_algorithm.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/inner_kernel_tuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/inner_kernel_tuning.py
|
Apache-2.0
|
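The row above describes the tuning wrapper abstractly; the sketch below shows how it might be wired up. It is only a minimal sketch: it assumes blackjax's HMC kernel (blackjax.hmc.build_kernel / blackjax.hmc.init), blackjax.tempered_smc, systematic resampling, and the convention that a leading dimension of length 1 marks an MCMC parameter as shared across particles. The toy densities and the step-size heuristic are purely illustrative, not part of the library.

import jax
import jax.numpy as jnp
import blackjax
import blackjax.smc.resampling as resampling
from blackjax.smc.inner_kernel_tuning import as_top_level_api as inner_kernel_tuning

# Toy 1-D model: standard-normal prior, Gaussian likelihood centred at 1.
logprior_fn = lambda x: -0.5 * jnp.sum(x**2)
loglikelihood_fn = lambda x: -0.5 * jnp.sum((x - 1.0) ** 2)

# A leading dimension of length 1 means the value is shared by every particle.
hmc_parameters = {
    "step_size": jnp.array([0.1]),
    "inverse_mass_matrix": jnp.eye(1)[None, ...],
    "num_integration_steps": jnp.array([10]),
}

def mcmc_parameter_update_fn(rng_key, state, info):
    # Illustrative heuristic only: scale the step size with the particles' spread.
    new_step_size = jnp.atleast_1d(0.5 * jnp.std(state.particles))
    return {**hmc_parameters, "step_size": new_step_size}

algorithm = inner_kernel_tuning(
    smc_algorithm=blackjax.tempered_smc,
    logprior_fn=logprior_fn,
    loglikelihood_fn=loglikelihood_fn,
    mcmc_step_fn=blackjax.hmc.build_kernel(),
    mcmc_init_fn=blackjax.hmc.init,
    resampling_fn=resampling.systematic,
    mcmc_parameter_update_fn=mcmc_parameter_update_fn,
    initial_parameter_value=hmc_parameters,
    num_mcmc_steps=5,
)

key = jax.random.PRNGKey(0)
state = algorithm.init(jax.random.normal(key, (100, 1)))
state, info = algorithm.step(jax.random.PRNGKey(1), state, lmbda=0.5)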
def init(particles: ArrayLikeTree, num_datapoints: int) -> PartialPosteriorsSMCState:
"""num_datapoints are the number of observations that could potentially be
used in a partial posterior. Since the initial data_mask is all 0s, it
means that no likelihood term will be added (only prior).
"""
num_particles = jax.tree_util.tree_flatten(particles)[0][0].shape[0]
weights = jnp.ones(num_particles) / num_particles
return PartialPosteriorsSMCState(particles, weights, jnp.zeros(num_datapoints))
|
num_datapoints are the number of observations that could potentially be
used in a partial posterior. Since the initial data_mask is all 0s, it
means that no likelihood term will be added (only prior).
|
init
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/partial_posteriors_path.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/partial_posteriors_path.py
|
Apache-2.0
|
def build_kernel(
mcmc_step_fn: Callable,
mcmc_init_fn: Callable,
resampling_fn: Callable,
num_mcmc_steps: Optional[int],
mcmc_parameters: ArrayTree,
partial_logposterior_factory: Callable[[Array], Callable],
update_strategy=update_and_take_last,
) -> Callable:
"""Build the Partial Posteriors (data tempering) SMC kernel.
The distribution's trajectory includes increasingly adding more
datapoints to the likelihood. See Section 2.2 of https://arxiv.org/pdf/2007.11936
Parameters
----------
mcmc_step_fn
The MCMC step function used to update the particles.
mcmc_init_fn
The MCMC init function used to build an MCMC state from a particle position.
resampling_fn
A random function that resamples generated particles based on weights
num_mcmc_steps
Number of iterations in the MCMC chain.
mcmc_parameters
A dictionary of parameters to be used by the inner MCMC kernels
partial_logposterior_factory:
A callable that given an array of 0 and 1, returns a function logposterior(x).
The array represents which values to include in the logposterior calculation. The logposterior
must be jax compilable.
Returns
-------
A callable that takes a rng_key and PartialPosteriorsSMCState and selectors for
the current and previous posteriors, and returns a data-tempered SMC state.
"""
delegate = smc_from_mcmc(mcmc_step_fn, mcmc_init_fn, resampling_fn, update_strategy)
def step(
key, state: PartialPosteriorsSMCState, data_mask: Array
) -> Tuple[PartialPosteriorsSMCState, smc.base.SMCInfo]:
logposterior_fn = partial_logposterior_factory(data_mask)
previous_logposterior_fn = partial_logposterior_factory(state.data_mask)
def log_weights_fn(x):
return logposterior_fn(x) - previous_logposterior_fn(x)
state, info = delegate(
key, state, num_mcmc_steps, mcmc_parameters, logposterior_fn, log_weights_fn
)
return (
PartialPosteriorsSMCState(state.particles, state.weights, data_mask),
info,
)
return step
|
Build the Partial Posteriors (data tempering) SMC kernel.
The distribution's trajectory includes increasingly adding more
datapoints to the likelihood. See Section 2.2 of https://arxiv.org/pdf/2007.11936
Parameters
----------
mcmc_step_fn
The MCMC step function used to update the particles.
mcmc_init_fn
The MCMC init function used to build an MCMC state from a particle position.
resampling_fn
A random function that resamples generated particles based on weights
num_mcmc_steps
Number of iterations in the MCMC chain.
mcmc_parameters
A dictionary of parameters to be used by the inner MCMC kernels
partial_logposterior_factory:
A callable that given an array of 0 and 1, returns a function logposterior(x).
The array represents which values to include in the logposterior calculation. The logposterior
must be jax compilable.
Returns
-------
A callable that takes a rng_key and PartialPosteriorsSMCState and selectors for
the current and previous posteriors, and returns a data-tempered SMC state.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/partial_posteriors_path.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/partial_posteriors_path.py
|
Apache-2.0
|
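The data-tempering kernel above is parameterised by partial_logposterior_factory; a minimal sketch of such a factory is given below for a hypothetical i.i.d. Gaussian model. The observations and the prior are made up; the essential point is that the 0/1 mask selects which likelihood terms enter the log posterior.

import jax.numpy as jnp

observations = jnp.array([0.3, -1.2, 0.7, 2.1])  # hypothetical data

def partial_logposterior_factory(data_mask):
    def logposterior(x):
        logprior = -0.5 * jnp.sum(x**2)
        pointwise_loglik = -0.5 * (observations - x) ** 2
        # Only the observations whose mask entry is 1 contribute.
        return logprior + jnp.sum(data_mask * pointwise_loglik)
    return logposterior

# The data_mask grows from all zeros (prior only) towards all ones (full posterior).
half_posterior_fn = partial_logposterior_factory(jnp.array([1.0, 1.0, 0.0, 0.0]))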
def as_top_level_api(
mcmc_step_fn: Callable,
mcmc_init_fn: Callable,
mcmc_parameters: dict,
resampling_fn: Callable,
num_mcmc_steps,
partial_logposterior_factory: Callable,
update_strategy=update_and_take_last,
) -> SamplingAlgorithm:
"""A factory that wraps the kernel into a SamplingAlgorithm object.
See build_kernel for full documentation on the parameters.
"""
kernel = build_kernel(
mcmc_step_fn,
mcmc_init_fn,
resampling_fn,
num_mcmc_steps,
mcmc_parameters,
partial_logposterior_factory,
update_strategy,
)
def init_fn(position: ArrayLikeTree, num_observations, rng_key=None):
del rng_key
return init(position, num_observations)
def step(key: PRNGKey, state: PartialPosteriorsSMCState, data_mask: Array):
return kernel(key, state, data_mask)
return SamplingAlgorithm(init_fn, step) # type: ignore[arg-type]
|
A factory that wraps the kernel into a SamplingAlgorithm object.
See build_kernel for full documentation on the parameters.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/partial_posteriors_path.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/partial_posteriors_path.py
|
Apache-2.0
|
def esjd(m):
"""Implements ESJD (expected squared jumping distance). Inner Mahalanobis distance
is computed using the Cholesky decomposition of M=LLt, and then inverting L.
Whenever M is symmetric positive definite, a Cholesky decomposition exists.
For example, if M is the Covariance Matrix of Metropolis-Hastings or
the Inverse Mass Matrix of Hamiltonian Monte Carlo.
"""
L = jnp.linalg.cholesky(m)
def measure(previous_position, next_position, acceptance_probability):
difference = ravel_pytree(previous_position)[0] - ravel_pytree(next_position)[0]
difference_by_matrix = jnp.matmul(L, difference)
norm = jnp.linalg.norm(difference_by_matrix, 2)
return acceptance_probability * jnp.power(norm, 2)
return jax.vmap(measure)
|
Implements ESJD (expected squared jumping distance). Inner Mahalanobis distance
is computed using the Cholesky decomposition of M=LLt, and then inverting L.
Whenever M is symmetric positive definite, a Cholesky decomposition exists.
For example, if M is the Covariance Matrix of Metropolis-Hastings or
the Inverse Mass Matrix of Hamiltonian Monte Carlo.
|
esjd
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/pretuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/pretuning.py
|
Apache-2.0
|
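A small numeric check of the measure returned by esjd, using the identity matrix so that the Mahalanobis norm reduces to the Euclidean one; the positions and acceptance probabilities below are made up.

import jax.numpy as jnp
from blackjax.smc.pretuning import esjd

measure = esjd(jnp.eye(2))

previous = jnp.array([[0.0, 0.0], [1.0, 1.0]])   # two chains in two dimensions
proposed = jnp.array([[1.0, 0.0], [1.0, 3.0]])
acceptance = jnp.array([0.5, 1.0])

# Per chain: acceptance * ||L (x - x')||^2 -> [0.5 * 1.0, 1.0 * 4.0] = [0.5, 4.0]
print(measure(previous, proposed, acceptance))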
def update_parameter_distribution(
key: PRNGKey,
previous_param_samples: ArrayLikeTree,
previous_particles: ArrayLikeTree,
latest_particles: ArrayLikeTree,
measure_of_chain_mixing: Callable,
alpha: float,
sigma_parameters: ArrayLikeTree,
acceptance_probability: Array,
):
"""Given an existing parameter distribution that was used to mutate previous_particles
into latest_particles, updates that parameter distribution by resampling from previous_param_samples after adding
noise to those samples. The weights used are a linear function of the measure of chain mixing.
Only works with float parameters, not integers.
See Equation 4 in https://arxiv.org/pdf/1005.1193.pdf
Parameters
----------
previous_param_samples:
samples of the parameters of SMC inner MCMC chains. To be updated.
previous_particles:
particles from which the kernel step started
latest_particles:
particles after the step was performed
measure_of_chain_mixing: Callable
a callable that can compute a performance measure per chain
alpha:
a scalar to add to the weighting. See paper for details
sigma_parameters:
noise to add to the population of parameters to mutate them. Must have the same shape as
previous_param_samples.
acceptance_probability:
the energy difference for each of the chains when taking a step from previous_particles
into latest_particles.
"""
noise_key, resampling_key = jax.random.split(key, 2)
noises = jax.tree.map(
lambda x, s: generate_gaussian_noise(noise_key, x.astype("float32"), sigma=s),
previous_param_samples,
sigma_parameters,
)
new_samples = jax.tree.map(lambda x, y: x + y, noises, previous_param_samples)
chain_mixing_measurement = measure_of_chain_mixing(
previous_particles, latest_particles, acceptance_probability
)
weights = alpha + chain_mixing_measurement
weights = weights / jnp.sum(weights)
resampling_idx = stratified(resampling_key, weights, len(chain_mixing_measurement))
return (
jax.tree.map(lambda x: x[resampling_idx], new_samples),
chain_mixing_measurement,
)
|
Given an existing parameter distribution that was used to mutate previous_particles
into latest_particles, updates that parameter distribution by resampling from previous_param_samples after adding
noise to those samples. The weights used are a linear function of the measure of chain mixing.
Only works with float parameters, not integers.
See Equation 4 in https://arxiv.org/pdf/1005.1193.pdf
Parameters
----------
previous_param_samples:
samples of the parameters of SMC inner MCMC chains. To be updated.
previous_particles:
particles from which the kernel step started
latest_particles:
particles after the step was performed
measure_of_chain_mixing: Callable
a callable that can compute a performance measure per chain
alpha:
a scalar to add to the weighting. See paper for details
sigma_parameters:
noise to add to the population of parameters to mutate them. Must have the same shape as
previous_param_samples.
acceptance_probability:
the energy difference for each of the chains when taking a step from previous_particles
into latest_particles.
|
update_parameter_distribution
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/pretuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/pretuning.py
|
Apache-2.0
|
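A sketch of how the resampling-with-noise update above might be invoked, using esjd from the same module as the chain-mixing measure. The chains, step sizes, noise scale, and acceptance probabilities are hypothetical.

import jax
import jax.numpy as jnp
from blackjax.smc.pretuning import esjd, update_parameter_distribution

key = jax.random.PRNGKey(0)
n_chains = 4
previous = jnp.zeros((n_chains, 2))
latest = jax.random.normal(key, (n_chains, 2))

new_samples, mixing = update_parameter_distribution(
    key,
    previous_param_samples={"step_size": jnp.linspace(0.1, 1.0, n_chains)},
    previous_particles=previous,
    latest_particles=latest,
    measure_of_chain_mixing=esjd(jnp.eye(2)),
    alpha=1.0,                                   # weights are alpha + mixing measure
    sigma_parameters={"step_size": jnp.array(0.05)},
    acceptance_probability=jnp.ones(n_chains),
)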
def build_pretune(
mcmc_init_fn: Callable,
mcmc_step_fn: Callable,
alpha: float,
sigma_parameters: ArrayLikeTree,
n_particles: int,
performance_of_chain_measure_factory: Callable = default_measure_factory,
natural_parameters: Optional[List[str]] = None,
positive_parameters: Optional[List[str]] = None,
):
"""Implements Buchholz et al https://arxiv.org/pdf/1808.07730 pretuning procedure.
The goal is to maintain a probability distribution of parameters, in order
to assign different values to each inner MCMC chain.
To have performant parameters for the distribution at step t, it takes a single step, measures
the chain mixing, and reweights the probability distribution of parameters accordingly.
Note that although similar, this strategy is different than inner_kernel_tuning. The latter updates
the parameters based on the particles and transition information after the SMC step is executed. This
implementation runs a single MCMC step which gets discarded, to then proceed with the SMC step execution.
"""
if natural_parameters is None:
round_to_integer_fn = lambda x: x
else:
def round_to_integer_fn(x):
for k in natural_parameters:
x[k] = jax.tree.map(lambda a: jnp.abs(jnp.round(a).astype(int)), x[k])
return x
if positive_parameters is None:
make_positive_fn = lambda x: x
else:
def make_positive_fn(x):
for k in positive_parameters:
x[k] = jax.tree.map(jnp.abs, x[k])
return x
def pretune(key, state, logposterior):
unshared_mcmc_parameters, shared_mcmc_step_fn = unshared_parameters_and_step_fn(
state.parameter_override, mcmc_step_fn
)
one_step_fn, _ = update_and_take_last(
mcmc_init_fn, logposterior, shared_mcmc_step_fn, 1, n_particles
)
new_state, info = one_step_fn(
jax.random.split(key, n_particles),
state.sampler_state.particles,
unshared_mcmc_parameters,
)
performance_of_chain_measure = performance_of_chain_measure_factory(state)
(
new_parameter_distribution,
chain_mixing_measurement,
) = update_parameter_distribution(
key,
previous_param_samples={
key: state.parameter_override[key] for key in sigma_parameters
},
previous_particles=state.sampler_state.particles,
latest_particles=new_state,
measure_of_chain_mixing=performance_of_chain_measure,
alpha=alpha,
sigma_parameters=sigma_parameters,
acceptance_probability=info.acceptance_rate,
)
return (
make_positive_fn(round_to_integer_fn(new_parameter_distribution)),
chain_mixing_measurement,
)
def pretune_and_update(key, state: StateWithParameterOverride, logposterior):
"""
Updates the parameters that need to be pretuned and returns the rest.
"""
new_parameter_distribution, chain_mixing_measurement = pretune(
key, state, logposterior
)
old_parameter_distribution = state.parameter_override
updated_parameter_distribution = old_parameter_distribution
for k in new_parameter_distribution:
updated_parameter_distribution[k] = new_parameter_distribution[k]
return updated_parameter_distribution
return pretune_and_update
|
Implements Buchholz et al https://arxiv.org/pdf/1808.07730 pretuning procedure.
The goal is to maintain a probability distribution of parameters, in order
to assign different values to each inner MCMC chain.
To have performant parameters for the distribution at step t, it takes a single step, measures
the chain mixing, and reweights the probability distribution of parameters accordingly.
Note that although similar, this strategy is different than inner_kernel_tuning. The latter updates
the parameters based on the particles and transition information after the SMC step is executed. This
implementation runs a single MCMC step which gets discarded, to then proceed with the SMC step execution.
|
build_pretune
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/pretuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/pretuning.py
|
Apache-2.0
|
def pretune_and_update(key, state: StateWithParameterOverride, logposterior):
"""
Updates the parameters that need to be pretuned and returns the rest.
"""
new_parameter_distribution, chain_mixing_measurement = pretune(
key, state, logposterior
)
old_parameter_distribution = state.parameter_override
updated_parameter_distribution = old_parameter_distribution
for k in new_parameter_distribution:
updated_parameter_distribution[k] = new_parameter_distribution[k]
return updated_parameter_distribution
|
Updates the parameters that need to be pretuned and returns the rest.
|
pretune_and_update
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/pretuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/pretuning.py
|
Apache-2.0
|
def build_kernel(
smc_algorithm,
logprior_fn: Callable,
loglikelihood_fn: Callable,
mcmc_step_fn: Callable,
mcmc_init_fn: Callable,
resampling_fn: Callable,
pretune_fn: Callable,
num_mcmc_steps: int = 10,
update_strategy=update_and_take_last,
**extra_parameters,
) -> Callable:
"""In the context of an SMC sampler (whose step_fn returning state has a .particles attribute), there's an inner
MCMC that is used to perturbate/update each of the particles. This adaptation tunes some parameter of that MCMC,
based on particles. The parameter type must be a valid JAX type.
Parameters
----------
smc_algorithm
Either blackjax.adaptive_tempered_smc or blackjax.tempered_smc (or any other implementation of
a sampling algorithm that returns an SMCState and SMCInfo pair).
logprior_fn
A function that computes the log density of the prior distribution
loglikelihood_fn
A function that returns the probability at a given position.
mcmc_step_fn:
The transition kernel, should take as parameters the dictionary output of mcmc_parameter_update_fn.
mcmc_step_fn(rng_key, state, tempered_logposterior_fn, **mcmc_parameter_update_fn())
mcmc_init_fn
A callable that initializes the inner kernel
pretune_fn:
A callable that can update the probability distribution of parameters.
extra_parameters:
parameters to be used for the creation of the smc_algorithm.
"""
delegate = smc_from_mcmc(mcmc_step_fn, mcmc_init_fn, resampling_fn, update_strategy)
def pretuned_step(
rng_key: PRNGKey,
state,
num_mcmc_steps: int,
mcmc_parameters: dict,
logposterior_fn: Callable,
log_weights_fn: Callable,
) -> tuple[smc.base.SMCState, SMCInfoWithParameterDistribution]:
"""Wraps the output of smc.from_mcmc.build_kernel into a pretuning + step method.
This one should be a subtype of the former, in the sense that a usage of the former
can be replaced with an instance of this one.
"""
pretune_key, step_key = jax.random.split(rng_key, 2)
pretuned_parameters = pretune_fn(
pretune_key,
StateWithParameterOverride(state, mcmc_parameters),
logposterior_fn,
)
state, info = delegate(
rng_key,
state,
num_mcmc_steps,
pretuned_parameters,
logposterior_fn,
log_weights_fn,
)
return state, SMCInfoWithParameterDistribution(info, pretuned_parameters)
def kernel(
rng_key: PRNGKey, state: StateWithParameterOverride, **extra_step_parameters
) -> Tuple[StateWithParameterOverride, SMCInfo]:
extra_parameters["update_particles_fn"] = pretuned_step
step_fn = smc_algorithm(
logprior_fn=logprior_fn,
loglikelihood_fn=loglikelihood_fn,
mcmc_step_fn=mcmc_step_fn,
mcmc_init_fn=mcmc_init_fn,
mcmc_parameters=state.parameter_override,
resampling_fn=resampling_fn,
num_mcmc_steps=num_mcmc_steps,
**extra_parameters,
).step
new_state, info = step_fn(rng_key, state.sampler_state, **extra_step_parameters)
return (
StateWithParameterOverride(new_state, info.parameter_override),
info.smc_info,
)
return kernel
|
In the context of an SMC sampler (whose step_fn returning state has a .particles attribute), there's an inner
MCMC that is used to perturbate/update each of the particles. This adaptation tunes some parameter of that MCMC,
based on particles. The parameter type must be a valid JAX type.
Parameters
----------
smc_algorithm
Either blackjax.adaptive_tempered_smc or blackjax.tempered_smc (or any other implementation of
a sampling algorithm that returns an SMCState and SMCInfo pair).
logprior_fn
A function that computes the log density of the prior distribution
loglikelihood_fn
A function that returns the probability at a given position.
mcmc_step_fn:
The transition kernel, should take as parameters the dictionary output of mcmc_parameter_update_fn.
mcmc_step_fn(rng_key, state, tempered_logposterior_fn, **mcmc_parameter_update_fn())
mcmc_init_fn
A callable that initializes the inner kernel
pretune_fn:
A callable that can update the probability distribution of parameters.
extra_parameters:
parameters to be used for the creation of the smc_algorithm.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/pretuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/pretuning.py
|
Apache-2.0
|
def pretuned_step(
rng_key: PRNGKey,
state,
num_mcmc_steps: int,
mcmc_parameters: dict,
logposterior_fn: Callable,
log_weights_fn: Callable,
) -> tuple[smc.base.SMCState, SMCInfoWithParameterDistribution]:
"""Wraps the output of smc.from_mcmc.build_kernel into a pretuning + step method.
This one should be a subtype of the former, in the sense that a usage of the former
can be replaced with an instance of this one.
"""
pretune_key, step_key = jax.random.split(rng_key, 2)
pretuned_parameters = pretune_fn(
pretune_key,
StateWithParameterOverride(state, mcmc_parameters),
logposterior_fn,
)
state, info = delegate(
rng_key,
state,
num_mcmc_steps,
pretuned_parameters,
logposterior_fn,
log_weights_fn,
)
return state, SMCInfoWithParameterDistribution(info, pretuned_parameters)
|
Wraps the output of smc.from_mcmc.build_kernel into a pretuning + step method.
This one should be a subtype of the former, in the sense that a usage of the former
can be replaced with an instance of this one.
|
pretuned_step
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/pretuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/pretuning.py
|
Apache-2.0
|
def as_top_level_api(
smc_algorithm,
logprior_fn: Callable,
loglikelihood_fn: Callable,
mcmc_step_fn: Callable,
mcmc_init_fn: Callable,
resampling_fn: Callable,
num_mcmc_steps: int,
initial_parameter_value: ArrayLikeTree,
pretune_fn: Callable,
**extra_parameters,
):
"""In the context of an SMC sampler (whose step_fn returning state has a .particles attribute), there's an inner
MCMC that is used to perturbate/update each of the particles. This adaptation tunes some parameter of that MCMC,
based on particles. The parameter type must be a valid JAX type.
Parameters
----------
smc_algorithm
Either blackjax.adaptive_tempered_smc or blackjax.tempered_smc (or any other implementation of
a sampling algorithm that returns an SMCState and SMCInfo pair).
logprior_fn
A function that computes the log density of the prior distribution
loglikelihood_fn
A function that returns the probability at a given position.
mcmc_step_fn:
The transition kernel, should take as parameters the dictionary output of mcmc_parameter_update_fn.
mcmc_step_fn(rng_key, state, tempered_logposterior_fn, **mcmc_parameter_update_fn())
mcmc_init_fn
A callable that initializes the inner kernel
pretune_fn:
A callable that can update the probability distribution of parameters.
extra_parameters:
parameters to be used for the creation of the smc_algorithm.
"""
kernel = build_kernel(
smc_algorithm,
logprior_fn,
loglikelihood_fn,
mcmc_step_fn,
mcmc_init_fn,
resampling_fn,
pretune_fn,
num_mcmc_steps,
**extra_parameters,
)
def init_fn(position, rng_key=None):
del rng_key
return init(smc_algorithm.init, position, initial_parameter_value)
def step_fn(
rng_key: PRNGKey, state, **extra_step_parameters
) -> Tuple[StateWithParameterOverride, SMCInfo]:
return kernel(rng_key, state, **extra_step_parameters)
return SamplingAlgorithm(init_fn, step_fn)
|
In the context of an SMC sampler (whose step_fn returning state has a .particles attribute), there's an inner
MCMC that is used to perturbate/update each of the particles. This adaptation tunes some parameter of that MCMC,
based on particles. The parameter type must be a valid JAX type.
Parameters
----------
smc_algorithm
Either blackjax.adaptive_tempered_smc or blackjax.tempered_smc (or any other implementation of
a sampling algorithm that returns an SMCState and SMCInfo pair).
logprior_fn
A function that computes the log density of the prior distribution
loglikelihood_fn
A function that returns the probability at a given position.
mcmc_step_fn:
The transition kernel, should take as parameters the dictionary output of mcmc_parameter_update_fn.
mcmc_step_fn(rng_key, state, tempered_logposterior_fn, **mcmc_parameter_update_fn())
mcmc_init_fn
A callable that initializes the inner kernel
pretune_fn:
A callable that can update the probability distribution of parameters.
extra_parameters:
parameters to be used for the creation of the smc_algorithm.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/pretuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/pretuning.py
|
Apache-2.0
|
def dichotomy(fun, min_delta, max_delta, eps=1e-4, max_iter=100):
"""Solves for delta by dichotomy.
If max_delta is such that fun(max_delta) > 0, then we assume that max_delta
can be used as an increment in the tempering.
Parameters
----------
fun: Callable
The decreasing function to solve, we must have fun(min_delta) > 0, fun(max_delta) < 0
min_delta: float
Starting point of the interval search
max_delta: float
End point of the interval search
eps: float
Tolerance for :math:`|f(a) - f(b)|`
max_iter: int
Maximum number of iterations in the dichotomy search
Returns
-------
delta: Array, shape (,)
The root of `fun`
"""
def body(carry):
i, a, b, f_a, f_b = carry
mid = 0.5 * (a + b)
f_mid = fun(mid)
a, b, f_a, f_b = jax.lax.cond(
f_mid < 0,
lambda _: (a, mid, f_a, f_mid),
lambda _: (mid, b, f_mid, f_b),
None,
)
return i + 1, a, b, f_a, f_b
def cond(carry):
i, a, b, f_a, f_b = carry
return jnp.logical_and(i < max_iter, f_a - f_b > eps)
f_min_delta, f_max_delta = fun(min_delta), fun(max_delta)
if_no_opt = lambda _: max_delta
def if_opt(_):
_, res_a, res_b, fun_res_a, fun_res_b = jax.lax.while_loop(
cond, body, (0, min_delta, max_delta, f_min_delta, f_max_delta)
)
return res_a
# if the upper end of the interval returns positive already, just return it,
# otherwise search the optimum as long as the start of the interval is positive.
return jax.lax.cond(
f_max_delta > 0,
if_no_opt,
lambda _: jax.lax.cond(f_min_delta > 0, if_opt, lambda _: np.nan, None),
None,
)
|
Solves for delta by dichotomy.
If max_delta is such that fun(max_delta) > 0, then we assume that max_delta
can be used as an increment in the tempering.
Parameters
----------
fun: Callable
The decreasing function to solve, we must have fun(min_delta) > 0, fun(max_delta) < 0
min_delta: float
Starting point of the interval search
max_delta: float
End point of the interval search
eps: float
Tolerance for :math:`|f(a) - f(b)|`
max_iter: int
Maximum number of iterations in the dichotomy search
Returns
-------
delta: Array, shape (,)
The root of `fun`
|
dichotomy
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/solver.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/solver.py
|
Apache-2.0
|
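A toy invocation of the solver above, following the signature shown in this row: a decreasing affine function with root 0.25, so that fun(min_delta) > 0 and fun(max_delta) < 0 and the bisection converges to roughly 0.25. In adaptive tempered SMC the function would instead be an ESS criterion in the tempering increment.

import jax.numpy as jnp
from blackjax.smc.solver import dichotomy

fun = lambda delta: 0.5 - 2.0 * delta     # decreasing, root at 0.25
delta = dichotomy(fun, min_delta=0.0, max_delta=1.0)
print(delta)                               # ~0.25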
def build_kernel(
logprior_fn: Callable,
loglikelihood_fn: Callable,
mcmc_step_fn: Callable,
mcmc_init_fn: Callable,
resampling_fn: Callable,
update_strategy: Callable = update_and_take_last,
update_particles_fn: Optional[Callable] = None,
) -> Callable:
"""Build the base Tempered SMC kernel.
Tempered SMC uses tempering to sample from a distribution given by
.. math::
p(x) \\propto p_0(x) \\exp(-V(x)) \\mathrm{d}x
where :math:`p_0` is the prior distribution, typically easy to sample from
and for which the density is easy to compute, and :math:`\\exp(-V(x))` is an
unnormalized likelihood term for which :math:`V(x)` is easy to compute
pointwise.
Parameters
----------
logprior_fn
A function that computes the log density of the prior distribution
loglikelihood_fn
A function that returns the probability at a given
position.
mcmc_step_fn
A function that creates a mcmc kernel from a log-probability density function.
mcmc_init_fn: Callable
A function that creates a new mcmc state from a position and a
log-probability density function.
resampling_fn
A random function that resamples generated particles based on weights
num_mcmc_iterations
Number of iterations in the MCMC chain.
Returns
-------
A callable that takes a rng_key and a TemperedSMCState that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
update_particles = (
smc_from_mcmc.build_kernel(
mcmc_step_fn, mcmc_init_fn, resampling_fn, update_strategy
)
if update_particles_fn is None
else update_particles_fn
)
def kernel(
rng_key: PRNGKey,
state: TemperedSMCState,
num_mcmc_steps: int,
lmbda: float,
mcmc_parameters: dict,
) -> tuple[TemperedSMCState, smc.base.SMCInfo]:
"""Move the particles one step using the Tempered SMC algorithm.
Parameters
----------
rng_key
JAX PRNGKey for randomness
state
Current state of the tempered SMC algorithm
lmbda
Current value of the tempering parameter
mcmc_parameters
The parameters of the MCMC step function. Parameters with leading dimension
length of 1 are shared amongst the particles.
Returns
-------
state
The new state of the tempered SMC algorithm
info
Additional information on the SMC step
"""
delta = lmbda - state.lmbda
def log_weights_fn(position: ArrayLikeTree) -> float:
return delta * loglikelihood_fn(position)
def tempered_logposterior_fn(position: ArrayLikeTree) -> float:
logprior = logprior_fn(position)
tempered_loglikelihood = state.lmbda * loglikelihood_fn(position)
return logprior + tempered_loglikelihood
smc_state, info = update_particles(
rng_key,
state,
num_mcmc_steps,
mcmc_parameters,
tempered_logposterior_fn,
log_weights_fn,
)
tempered_state = TemperedSMCState(
smc_state.particles, smc_state.weights, state.lmbda + delta
)
return tempered_state, info
return kernel
|
Build the base Tempered SMC kernel.
Tempered SMC uses tempering to sample from a distribution given by
.. math::
p(x) \propto p_0(x) \exp(-V(x)) \mathrm{d}x
where :math:`p_0` is the prior distribution, typically easy to sample from
and for which the density is easy to compute, and :math:`\exp(-V(x))` is an
unnormalized likelihood term for which :math:`V(x)` is easy to compute
pointwise.
Parameters
----------
logprior_fn
A function that computes the log density of the prior distribution
loglikelihood_fn
A function that returns the probability at a given
position.
mcmc_step_fn
A function that creates a mcmc kernel from a log-probability density function.
mcmc_init_fn: Callable
A function that creates a new mcmc state from a position and a
log-probability density function.
resampling_fn
A random function that resamples generated particles based on weights
num_mcmc_iterations
Number of iterations in the MCMC chain.
Returns
-------
A callable that takes a rng_key and a TemperedSMCState that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/tempered.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/tempered.py
|
Apache-2.0
|
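A standalone sketch of the two densities the kernel above manipulates, for a toy Gaussian model: the incremental importance weight depends only on the change in temperature, while the inner MCMC targets the posterior tempered at the previous lambda. The densities and temperatures are illustrative.

import jax.numpy as jnp

logprior_fn = lambda x: -0.5 * jnp.sum(x**2)
loglikelihood_fn = lambda x: -0.5 * jnp.sum((x - 2.0) ** 2)

lmbda_prev, lmbda_new = 0.3, 0.5
delta = lmbda_new - lmbda_prev

# Incremental log-weight when moving from temperature lmbda_prev to lmbda_new.
log_weights_fn = lambda x: delta * loglikelihood_fn(x)

# Tempered target used by the inner MCMC moves at the previous temperature.
tempered_logposterior_fn = lambda x: logprior_fn(x) + lmbda_prev * loglikelihood_fn(x)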
def kernel(
rng_key: PRNGKey,
state: TemperedSMCState,
num_mcmc_steps: int,
lmbda: float,
mcmc_parameters: dict,
) -> tuple[TemperedSMCState, smc.base.SMCInfo]:
"""Move the particles one step using the Tempered SMC algorithm.
Parameters
----------
rng_key
JAX PRNGKey for randomness
state
Current state of the tempered SMC algorithm
lmbda
Current value of the tempering parameter
mcmc_parameters
The parameters of the MCMC step function. Parameters with leading dimension
length of 1 are shared amongst the particles.
Returns
-------
state
The new state of the tempered SMC algorithm
info
Additional information on the SMC step
"""
delta = lmbda - state.lmbda
def log_weights_fn(position: ArrayLikeTree) -> float:
return delta * loglikelihood_fn(position)
def tempered_logposterior_fn(position: ArrayLikeTree) -> float:
logprior = logprior_fn(position)
tempered_loglikelihood = state.lmbda * loglikelihood_fn(position)
return logprior + tempered_loglikelihood
smc_state, info = update_particles(
rng_key,
state,
num_mcmc_steps,
mcmc_parameters,
tempered_logposterior_fn,
log_weights_fn,
)
tempered_state = TemperedSMCState(
smc_state.particles, smc_state.weights, state.lmbda + delta
)
return tempered_state, info
|
Move the particles one step using the Tempered SMC algorithm.
Parameters
----------
rng_key
JAX PRNGKey for randomness
state
Current state of the tempered SMC algorithm
lmbda
Current value of the tempering parameter
mcmc_parameters
The parameters of the MCMC step function. Parameters with leading dimension
length of 1 are shared amongst the particles.
Returns
-------
state
The new state of the tempered SMC algorithm
info
Additional information on the SMC step
|
kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/tempered.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/tempered.py
|
Apache-2.0
|
def as_top_level_api(
logprior_fn: Callable,
loglikelihood_fn: Callable,
mcmc_step_fn: Callable,
mcmc_init_fn: Callable,
mcmc_parameters: dict,
resampling_fn: Callable,
num_mcmc_steps: Optional[int] = 10,
update_strategy=update_and_take_last,
update_particles_fn=None,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the Adaptive Tempered SMC kernel.
Parameters
----------
logprior_fn
The log-prior function of the model we wish to draw samples from.
loglikelihood_fn
The log-likelihood function of the model we wish to draw samples from.
mcmc_step_fn
The MCMC step function used to update the particles.
mcmc_init_fn
The MCMC init function used to build a MCMC state from a particle position.
mcmc_parameters
The parameters of the MCMC step function. Parameters with leading dimension
length of 1 are shared amongst the particles.
resampling_fn
The function used to resample the particles.
num_mcmc_steps
The number of times the MCMC kernel is applied to the particles per step.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(
logprior_fn,
loglikelihood_fn,
mcmc_step_fn,
mcmc_init_fn,
resampling_fn,
update_strategy,
update_particles_fn,
)
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position)
def step_fn(rng_key: PRNGKey, state, lmbda):
return kernel(
rng_key,
state,
num_mcmc_steps,
lmbda,
mcmc_parameters,
)
return SamplingAlgorithm(init_fn, step_fn) # type: ignore[arg-type]
|
Implements the (basic) user interface for the Adaptive Tempered SMC kernel.
Parameters
----------
logprior_fn
The log-prior function of the model we wish to draw samples from.
loglikelihood_fn
The log-likelihood function of the model we wish to draw samples from.
mcmc_step_fn
The MCMC step function used to update the particles.
mcmc_init_fn
The MCMC init function used to build a MCMC state from a particle position.
mcmc_parameters
The parameters of the MCMC step function. Parameters with leading dimension
length of 1 are shared amongst the particles.
resampling_fn
The function used to resample the particles.
num_mcmc_steps
The number of times the MCMC kernel is applied to the particles per step.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/tempered.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/tempered.py
|
Apache-2.0
|
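The docstring above notes that parameters with a leading dimension of length 1 are shared among the particles; a small illustration of that convention with made-up HMC parameters follows.

import jax.numpy as jnp

n_particles = 100

mcmc_parameters = {
    "step_size": jnp.full((n_particles,), 0.1),    # one value per particle
    "inverse_mass_matrix": jnp.eye(2)[None, ...],   # leading dim 1: shared by all particles
    "num_integration_steps": jnp.array([10]),       # leading dim 1: shared by all particles
}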
def update_waste_free(
mcmc_init_fn,
logposterior_fn,
mcmc_step_fn,
n_particles: int,
p: int,
num_resampled,
num_mcmc_steps=None,
):
"""
Given M particles, mutates them using p-1 steps. Returns M*p particles,
consisting of the initial particles plus all the intermediate steps, thus implementing a
waste-free update function
See Algorithm 2: https://arxiv.org/abs/2011.02328
"""
if num_mcmc_steps is not None:
raise ValueError(
"Can't use waste free SMC with a num_mcmc_steps parameter, set num_mcmc_steps = None"
)
num_mcmc_steps = p - 1
def mcmc_kernel(rng_key, position, step_parameters):
state = mcmc_init_fn(position, logposterior_fn)
def body_fn(state, rng_key):
new_state, info = mcmc_step_fn(
rng_key, state, logposterior_fn, **step_parameters
)
return new_state, (new_state, info)
_, (states, infos) = jax.lax.scan(
body_fn, state, jax.random.split(rng_key, num_mcmc_steps)
)
return states, infos
def update(rng_key, position, step_parameters):
"""
Given the initial particles, runs a chain starting at each.
Then combines the initial particles with all the particles generated
at each step of each chain.
"""
states, infos = jax.vmap(mcmc_kernel)(rng_key, position, step_parameters)
# step_particles has shape (num_resampled, num_mcmc_steps, dimension_of_variable);
# we want to reshape it into (num_resampled * num_mcmc_steps, dimension_of_variable)
def reshape_step_particles(x):
_num_resampled, num_mcmc_steps, *dimension_of_variable = x.shape
return x.reshape((_num_resampled * num_mcmc_steps, *dimension_of_variable))
step_particles = jax.tree.map(reshape_step_particles, states.position)
new_particles = jax.tree.map(
lambda x, y: jnp.concatenate([x, y]), position, step_particles
)
return new_particles, infos
return update, num_resampled
|
Given M particles, mutates them using p-1 steps. Returns M*p particles,
consisting of the initial particles plus all the intermediate steps, thus implementing a
waste-free update function
See Algorithm 2: https://arxiv.org/abs/2011.02328
|
update_waste_free
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/waste_free.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/waste_free.py
|
Apache-2.0
|
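The particle bookkeeping implied by the docstring above, with illustrative numbers (assuming the total particle budget is divisible by p):

n_particles = 1000                 # total particle budget N
p = 10                             # chain length, including the starting particle
num_resampled = n_particles // p   # 100 resampled starting points
num_mcmc_steps = p - 1             # 9 MCMC steps per chain
total = num_resampled * (1 + num_mcmc_steps)
assert total == n_particles        # initial states plus every intermediate state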
def update(rng_key, position, step_parameters):
"""
Given the initial particles, runs a chain starting at each.
Then combines the initial particles with all the particles generated
at each step of each chain.
"""
states, infos = jax.vmap(mcmc_kernel)(rng_key, position, step_parameters)
# step_particles has shape (num_resampled, num_mcmc_steps, dimension_of_variable);
# we want to reshape it into (num_resampled * num_mcmc_steps, dimension_of_variable)
def reshape_step_particles(x):
_num_resampled, num_mcmc_steps, *dimension_of_variable = x.shape
return x.reshape((_num_resampled * num_mcmc_steps, *dimension_of_variable))
step_particles = jax.tree.map(reshape_step_particles, states.position)
new_particles = jax.tree.map(
lambda x, y: jnp.concatenate([x, y]), position, step_particles
)
return new_particles, infos
|
Given the initial particles, runs a chain starting at each.
Then combines the initial particles with all the particles generated
at each step of each chain.
|
update
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/waste_free.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/waste_free.py
|
Apache-2.0
|
def update_scale_from_acceptance_rate(
scales: jax.Array,
acceptance_rates: jax.Array,
target_acceptance_rate: float = 0.234,
) -> jax.Array:
"""
Given N chains from some MCMC algorithm like Random Walk Metropolis
and N scale factors, each associated to a different chain.
Updates the scale factors taking into account acceptance rates and
the average acceptance rate.
Under certain assumptions it is known that the optimal acceptance rate
of Metropolis Hastings is 0.4 for 1 dimension and converges to
0.234 in infinite dimensions. In practice, 0.234 is a reasonable
assumption for 5 or more dimensions.
If a certain chain is below the optimal acceptance rate, its scale will decrease;
if it is above, its scale will increase.
Parameters
----------
scales
(n_chains) array consisting of N scale factors, associated to N markov chains
acceptance_rates
(n_chains) acceptance rate of the N markov chains
target_acceptance_rate
a float with a desirable acceptance rate for the chains.
Returns
-------
(n_chains) new scales, with the aim of getting acceptance rates closer to target
if the chains were to be run again.
"""
chain_scales = jnp.exp(jnp.log(scales) + acceptance_rates - target_acceptance_rate)
return 0.5 * (chain_scales + chain_scales.mean())
|
Given N chains from some MCMC algorithm like Random Walk Metropolis
and N scale factors, each associated to a different chain.
Updates the scale factors taking into account acceptance rates and
the average acceptance rate.
Under certain assumptions it is known that the optimal acceptance rate
of Metropolis Hastings is 0.4 for 1 dimension and converges to
0.234 in infinite dimensions. In practice, 0.234 is a reasonable
assumption for 5 or more dimensions.
If a certain chain is below the optimal acceptance rate, its scale will decrease;
if it is above, its scale will increase.
Parameters
----------
scales
(n_chains) array consisting of N scale factors, associated to N markov chains
acceptance_rates
(n_chains) acceptance rate of the N markov chains
target_acceptance_rate
a float with a desirable acceptance rate for the chains.
Returns
-------
(n_chains) new scales, with the aim of getting acceptance rates closer to target
if the chains were to be run again.
|
update_scale_from_acceptance_rate
|
python
|
blackjax-devs/blackjax
|
blackjax/smc/tuning/from_kernel_info.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/smc/tuning/from_kernel_info.py
|
Apache-2.0
|
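A quick numeric illustration of the update above: chains below the 0.234 target shrink their scale, chains above it grow, and every scale is then pulled half-way towards the population mean. The input values are made up.

import jax.numpy as jnp
from blackjax.smc.tuning.from_kernel_info import update_scale_from_acceptance_rate

scales = jnp.array([0.5, 0.5, 0.5])
acceptance_rates = jnp.array([0.10, 0.234, 0.50])
new_scales = update_scale_from_acceptance_rate(scales, acceptance_rates)
print(new_scales)   # -> approximately [0.48, 0.51, 0.59]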
def init(
position: ArrayLikeTree,
optimizer: GradientTransformation,
*optimizer_args,
**optimizer_kwargs,
) -> MFVIState:
"""Initialize the mean-field VI state."""
mu = jax.tree.map(jnp.zeros_like, position)
rho = jax.tree.map(lambda x: -2.0 * jnp.ones_like(x), position)
opt_state = optimizer.init((mu, rho))
return MFVIState(mu, rho, opt_state)
|
Initialize the mean-field VI state.
|
init
|
python
|
blackjax-devs/blackjax
|
blackjax/vi/meanfield_vi.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/vi/meanfield_vi.py
|
Apache-2.0
|
def step(
rng_key: PRNGKey,
state: MFVIState,
logdensity_fn: Callable,
optimizer: GradientTransformation,
num_samples: int = 5,
stl_estimator: bool = True,
) -> tuple[MFVIState, MFVIInfo]:
"""Approximate the target density using the mean-field approximation.
Parameters
----------
rng_key
Key for JAX's pseudo-random number generator.
init_state
Initial state of the mean-field approximation.
logdensity_fn
Function that represents the target log-density to approximate.
optimizer
Optax `GradientTransformation` to be used for optimization.
num_samples
The number of samples that are taken from the approximation
at each step to compute the Kullback-Leibler divergence between
the approximation and the target log-density.
stl_estimator
Whether to use stick-the-landing (STL) gradient estimator :cite:p:`roeder2017sticking` for gradient estimation.
The STL estimator has lower gradient variance by removing the score function term
from the gradient. It is suggested by :cite:p:`agrawal2020advances` to always use it for better results.
"""
parameters = (state.mu, state.rho)
def kl_divergence_fn(parameters):
mu, rho = parameters
z = _sample(rng_key, mu, rho, num_samples)
if stl_estimator:
mu = jax.lax.stop_gradient(mu)
rho = jax.lax.stop_gradient(rho)
logq = jax.vmap(generate_meanfield_logdensity(mu, rho))(z)
logp = jax.vmap(logdensity_fn)(z)
return (logq - logp).mean()
elbo, elbo_grad = jax.value_and_grad(kl_divergence_fn)(parameters)
updates, new_opt_state = optimizer.update(elbo_grad, state.opt_state, parameters)
new_parameters = jax.tree.map(lambda p, u: p + u, parameters, updates)
new_state = MFVIState(new_parameters[0], new_parameters[1], new_opt_state)
return new_state, MFVIInfo(elbo)
|
Approximate the target density using the mean-field approximation.
Parameters
----------
rng_key
Key for JAX's pseudo-random number generator.
init_state
Initial state of the mean-field approximation.
logdensity_fn
Function that represents the target log-density to approximate.
optimizer
Optax `GradientTransformation` to be used for optimization.
num_samples
The number of samples that are taken from the approximation
at each step to compute the Kullback-Leibler divergence between
the approximation and the target log-density.
stl_estimator
Whether to use stick-the-landing (STL) gradient estimator :cite:p:`roeder2017sticking` for gradient estimation.
The STL estimator has lower gradient variance by removing the score function term
from the gradient. It is suggested by :cite:p:`agrawal2020advances` to always use it for better results.
|
step
|
python
|
blackjax-devs/blackjax
|
blackjax/vi/meanfield_vi.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/vi/meanfield_vi.py
|
Apache-2.0
|
def as_top_level_api(
logdensity_fn: Callable,
optimizer: GradientTransformation,
num_samples: int = 100,
):
"""High-level implementation of Mean-Field Variational Inference.
Parameters
----------
logdensity_fn
A function that represents the log-density function associated with
the distribution we want to sample from.
optimizer
Optax optimizer to use to optimize the ELBO.
num_samples
Number of samples to take at each step to optimize the ELBO.
Returns
-------
A ``VIAlgorithm``.
"""
def init_fn(position: ArrayLikeTree):
return init(position, optimizer)
def step_fn(rng_key: PRNGKey, state: MFVIState) -> tuple[MFVIState, MFVIInfo]:
return step(rng_key, state, logdensity_fn, optimizer, num_samples)
def sample_fn(rng_key: PRNGKey, state: MFVIState, num_samples: int):
return sample(rng_key, state, num_samples)
return VIAlgorithm(init_fn, step_fn, sample_fn)
|
High-level implementation of Mean-Field Variational Inference.
Parameters
----------
logdensity_fn
A function that represents the log-density function associated with
the distribution we want to sample from.
optimizer
Optax optimizer to use to optimize the ELBO.
num_samples
Number of samples to take at each step to optimize the ELBO.
Returns
-------
A ``VIAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/vi/meanfield_vi.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/vi/meanfield_vi.py
|
Apache-2.0
|
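A minimal end-to-end sketch of the interface above, assuming optax provides the optimizer; the Gaussian target, learning rate, and iteration counts are illustrative only.

import jax
import jax.numpy as jnp
import optax
import blackjax

logdensity_fn = lambda x: -0.5 * jnp.sum((x - 1.0) ** 2)   # toy Gaussian target

mfvi = blackjax.meanfield_vi(logdensity_fn, optax.sgd(1e-2), num_samples=50)
state = mfvi.init(jnp.zeros(2))

for key in jax.random.split(jax.random.PRNGKey(0), 500):
    state, info = mfvi.step(key, state)    # info.elbo tracks the objective being minimised

draws = mfvi.sample(jax.random.PRNGKey(1), state, num_samples=1000)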
def approximate(
rng_key: PRNGKey,
logdensity_fn: Callable,
initial_position: ArrayLikeTree,
num_samples: int = 200,
*, # lgbfs parameters
maxiter=30,
maxcor=10,
maxls=1000,
gtol=1e-08,
ftol=1e-05,
**lbfgs_kwargs,
) -> tuple[PathfinderState, PathfinderInfo]:
"""Pathfinder variational inference algorithm.
Pathfinder locates normal approximations to the target density along a
quasi-Newton optimization path, with local covariance estimated using
the inverse Hessian estimates produced by the L-BFGS optimizer.
Function implements the algorithm 3 in :cite:p:`zhang2022pathfinder`:
Parameters
----------
rng_key
PRNG key
logdensity_fn
(un-normalized) log density function of target distribution to take
approximate samples from
initial_position
starting point of the L-BFGS optimization routine
num_samples
number of samples to draw to estimate ELBO
maxiter
Maximum number of iterations of the L-BFGS algorithm.
maxcor
Maximum number of metric corrections of the L-BFGS algorithm ("history
size")
ftol
The L-BFGS algorithm terminates the minimization when `(f_k - f_{k+1}) <
ftol`
gtol
The L-BFGS algorithm terminates the minimization when `|g_k|_norm < gtol`
maxls
The maximum number of line search steps (per iteration) for the L-BFGS
algorithm
**lbfgs_kwargs
other keyword arguments passed to `jaxopt.LBFGS`.
Returns
-------
A PathfinderState with information on the iteration in the optimization path
whose approximate samples yield the highest ELBO, and PathfinderInfo that
contains all the states traversed.
"""
initial_position_flatten, unravel_fn = ravel_pytree(initial_position)
objective_fn = lambda x: -logdensity_fn(unravel_fn(x))
(_, status), history = _minimize_lbfgs(
objective_fn,
initial_position_flatten,
maxiter,
maxcor,
gtol,
ftol,
maxls,
**lbfgs_kwargs,
)
# Get positions and gradients of the optimization path (including the starting point).
position = history.x
grad_position = history.g
alpha = history.alpha
# Get the update of position and gradient.
update_mask = history.update_mask[1:]
s = jnp.diff(position, axis=0)
z = jnp.diff(grad_position, axis=0)
# Account for the mask
s_masked = jnp.where(update_mask, s, jnp.zeros_like(s))
z_masked = jnp.where(update_mask, z, jnp.zeros_like(z))
# Pad 0 to leading dimension so we have constant shape output
s_padded = jnp.pad(s_masked, ((maxcor, 0), (0, 0)), mode="constant")
z_padded = jnp.pad(z_masked, ((maxcor, 0), (0, 0)), mode="constant")
def path_finder_body_fn(rng_key, S, Z, alpha_l, theta, theta_grad):
"""The for loop body in Algorithm 1 of the Pathfinder paper."""
beta, gamma = lbfgs_inverse_hessian_factors(S.T, Z.T, alpha_l)
phi, logq = bfgs_sample(
rng_key=rng_key,
num_samples=num_samples,
position=theta,
grad_position=theta_grad,
alpha=alpha_l,
beta=beta,
gamma=gamma,
)
logp = -jax.vmap(objective_fn)(phi)
elbo = (logp - logq).mean() # Algorithm 7 of the paper
return elbo, beta, gamma
# Index and reshape S and Z to be sliding window view shape=(maxiter,
# maxcor, param_dim), so we can vmap over all the iterations.
# This is in effect numpy.lib.stride_tricks.sliding_window_view
path_size = maxiter + 1
index = jnp.arange(path_size)[:, None] + jnp.arange(maxcor)[None, :]
s_j = s_padded[index.reshape(path_size, maxcor)].reshape(path_size, maxcor, -1)
z_j = z_padded[index.reshape(path_size, maxcor)].reshape(path_size, maxcor, -1)
rng_keys = jax.random.split(rng_key, path_size)
elbo, beta, gamma = jax.vmap(path_finder_body_fn)(
rng_keys, s_j, z_j, alpha, position, grad_position
)
elbo = jnp.where(
(jnp.arange(path_size) < (status.iter_num)) & jnp.isfinite(elbo),
elbo,
-jnp.inf,
)
unravel_fn_mapped = jax.vmap(unravel_fn)
pathfinder_result = PathfinderState(
elbo,
unravel_fn_mapped(position),
unravel_fn_mapped(grad_position),
alpha,
beta,
gamma,
)
max_elbo_idx = jnp.argmax(elbo)
return jax.tree.map(lambda x: x[max_elbo_idx], pathfinder_result), PathfinderInfo(
pathfinder_result
)
|
Pathfinder variational inference algorithm.
Pathfinder locates normal approximations to the target density along a
quasi-Newton optimization path, with local covariance estimated using
the inverse Hessian estimates produced by the L-BFGS optimizer.
Function implements the algorithm 3 in :cite:p:`zhang2022pathfinder`:
Parameters
----------
rng_key
PRNG key
logdensity_fn
(un-normalized) log density function of target distribution to take
approximate samples from
initial_position
starting point of the L-BFGS optimization routine
num_samples
number of samples to draw to estimate ELBO
maxiter
Maximum number of iterations of the L-BFGS algorithm.
maxcor
Maximum number of metric corrections of the L-BFGS algorithm ("history
size")
ftol
The L-BFGS algorithm terminates the minimization when `(f_k - f_{k+1}) <
ftol`
gtol
The L-BFGS algorithm terminates the minimization when `|g_k|_norm < gtol`
maxls
The maximum number of line search steps (per iteration) for the L-BFGS
algorithm
**lbfgs_kwargs
other keyword arguments passed to `jaxopt.LBFGS`.
Returns
-------
A PathfinderState with information on the iteration in the optimization path
whose approximate samples yield the highest ELBO, and PathfinderInfo that
contains all the states traversed.
|
approximate
|
python
|
blackjax-devs/blackjax
|
blackjax/vi/pathfinder.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/vi/pathfinder.py
|
Apache-2.0
|
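The index construction in approximate above is easy to miss; the small standalone sketch below reproduces it with tiny, made-up sizes to show that row j of the index selects the maxcor most recent rows of the left-padded history.

import jax.numpy as jnp

maxiter, maxcor, dim = 4, 2, 3
path_size = maxiter + 1

# Left-pad the (maxiter, dim) history with maxcor rows of zeros, as in approximate().
history = jnp.arange(1.0, maxiter + 1)[:, None] * jnp.ones((1, dim))
s_padded = jnp.pad(history, ((maxcor, 0), (0, 0)))

index = jnp.arange(path_size)[:, None] + jnp.arange(maxcor)[None, :]
windows = s_padded[index]          # shape (path_size, maxcor, dim): one sliding window per iteration
print(windows.shape)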
def path_finder_body_fn(rng_key, S, Z, alpha_l, theta, theta_grad):
"""The for loop body in Algorithm 1 of the Pathfinder paper."""
beta, gamma = lbfgs_inverse_hessian_factors(S.T, Z.T, alpha_l)
phi, logq = bfgs_sample(
rng_key=rng_key,
num_samples=num_samples,
position=theta,
grad_position=theta_grad,
alpha=alpha_l,
beta=beta,
gamma=gamma,
)
logp = -jax.vmap(objective_fn)(phi)
elbo = (logp - logq).mean() # Algorithm 7 of the paper
return elbo, beta, gamma
|
The for loop body in Algorithm 1 of the Pathfinder paper.
|
path_finder_body_fn
|
python
|
blackjax-devs/blackjax
|
blackjax/vi/pathfinder.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/vi/pathfinder.py
|
Apache-2.0
|
def sample(
rng_key: PRNGKey,
state: PathfinderState,
num_samples: Union[int, tuple[()], tuple[int]] = (),
) -> ArrayTree:
"""Draw from the Pathfinder approximation of the target distribution.
Parameters
----------
rng_key
PRNG key
state
PathfinderState containing information for sampling
num_samples
Number of samples to draw
Returns
-------
Samples drawn from the approximate Pathfinder distribution
"""
position_flatten, unravel_fn = ravel_pytree(state.position)
grad_position_flatten, _ = ravel_pytree(state.grad_position)
phi, logq = bfgs_sample(
rng_key,
num_samples,
position_flatten,
grad_position_flatten,
state.alpha,
state.beta,
state.gamma,
)
if num_samples == ():
return unravel_fn(phi), logq
else:
return jax.vmap(unravel_fn)(phi), logq
|
Draw from the Pathfinder approximation of the target distribution.
Parameters
----------
rng_key
PRNG key
state
PathfinderState containing information for sampling
num_samples
Number of samples to draw
Returns
-------
Samples drawn from the approximate Pathfinder distribution
|
sample
|
python
|
blackjax-devs/blackjax
|
blackjax/vi/pathfinder.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/vi/pathfinder.py
|
Apache-2.0
|
def as_top_level_api(logdensity_fn: Callable) -> PathFinderAlgorithm:
"""Implements the (basic) user interface for the pathfinder kernel.
Pathfinder locates normal approximations to the target density along a
quasi-Newton optimization path, with local covariance estimated using
the inverse Hessian estimates produced by the L-BFGS optimizer.
Pathfinder returns draws from the approximation with the lowest estimated
Kullback-Leibler (KL) divergence to the true posterior.
Note: all the heavy processing is performed in the init function; the step
function just draws a sample from a normal distribution
Parameters
----------
logdensity_fn
A function that represents the log-density of the model we want
to sample from.
Returns
-------
A ``VISamplingAlgorithm``.
"""
def approximate_fn(
rng_key: PRNGKey,
position: ArrayLikeTree,
num_samples: int = 200,
**lbfgs_parameters,
):
return approximate(
rng_key, logdensity_fn, position, num_samples, **lbfgs_parameters
)
def sample_fn(rng_key: PRNGKey, state: PathfinderState, num_samples: int):
return sample(rng_key, state, num_samples)
return PathFinderAlgorithm(approximate_fn, sample_fn)
|
Implements the (basic) user interface for the pathfinder kernel.
Pathfinder locates normal approximations to the target density along a
quasi-Newton optimization path, with local covariance estimated using
the inverse Hessian estimates produced by the L-BFGS optimizer.
Pathfinder returns draws from the approximation with the lowest estimated
Kullback-Leibler (KL) divergence to the true posterior.
Note: all the heavy processing is performed in the init function; the step
function just draws a sample from a normal distribution
Parameters
----------
logdensity_fn
A function that represents the log-density of the model we want
to sample from.
Returns
-------
A ``VISamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/vi/pathfinder.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/vi/pathfinder.py
|
Apache-2.0
|
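A minimal usage sketch of the interface above; the target density, starting point, and sample counts are illustrative, and the attribute names follow the PathFinderAlgorithm returned by this function.

import jax
import jax.numpy as jnp
import blackjax

logdensity_fn = lambda x: -0.5 * jnp.sum((x - 3.0) ** 2)   # toy Gaussian target

pathfinder = blackjax.pathfinder(logdensity_fn)

# All the heavy lifting happens in approximate(); sample() only draws from the chosen normal.
state, info = pathfinder.approximate(jax.random.PRNGKey(0), jnp.zeros(2), num_samples=200)
draws, logq = pathfinder.sample(jax.random.PRNGKey(1), state, num_samples=1000)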
def init(
initial_particles: ArrayLikeTree,
kernel_parameters: dict[str, Any],
optimizer: optax.GradientTransformation,
) -> SVGDState:
"""
Initializes Stein Variational Gradient Descent Algorithm.
Parameters
----------
initial_particles
Initial set of particles to start the optimization
kernel_parameters
Arguments to the kernel function
optimizer
Optax compatible optimizer, which conforms to the `optax.GradientTransformation` protocol
"""
opt_state = optimizer.init(initial_particles)
return SVGDState(initial_particles, kernel_parameters, opt_state)
|
Initializes Stein Variational Gradient Descent Algorithm.
Parameters
----------
initial_particles
Initial set of particles to start the optimization
kernel_parameters
Arguments to the kernel function
optimizer
Optax compatible optimizer, which conforms to the `optax.GradientTransformation` protocol
|
init
|
python
|
blackjax-devs/blackjax
|
blackjax/vi/svgd.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/vi/svgd.py
|
Apache-2.0
|
def kernel(
state: SVGDState,
grad_logdensity_fn: Callable,
kernel: Callable,
**grad_params,
) -> SVGDState:
"""
Performs one step of Stein Variational Gradient Descent.
See Algorithm 1 of :cite:p:`liu2016stein`.
Parameters
----------
state
SVGDState object containing information about previous iteration
grad_logdensity_fn
gradient, or an estimate, of the target log density function to approximately sample from
kernel
positive semi definite kernel
**grad_params
additional parameters for `grad_logdensity_fn` function, for instance a minibatch parameter
on a gradient estimator.
Returns
-------
SVGDState containing new particles, optimizer state and kernel parameters.
"""
particles, kernel_params, opt_state = state
kernel = functools.partial(kernel, **kernel_params)
def phi_star_summand(particle, particle_):
gradient = grad_logdensity_fn(particle, **grad_params)
k, grad_k = jax.value_and_grad(kernel, argnums=0)(particle, particle_)
return jax.tree_util.tree_map(lambda g, gk: -(k * g) - gk, gradient, grad_k)
functional_gradient = jax.vmap(
lambda p_: jax.tree_util.tree_map(
lambda phi_star: phi_star.mean(axis=0),
jax.vmap(lambda p: phi_star_summand(p, p_))(particles),
)
)(particles)
updates, opt_state = optimizer.update(functional_gradient, opt_state, particles)
particles = optax.apply_updates(particles, updates)
return SVGDState(particles, kernel_params, opt_state)
|
Performs one step of Stein Variational Gradient Descent.
See Algorithm 1 of :cite:p:`liu2016stein`.
Parameters
----------
state
SVGDState object containing information about previous iteration
grad_logdensity_fn
gradient, or an estimate, of the target log density function to approximately sample from
kernel
positive semi definite kernel
**grad_params
additional parameters for `grad_logdensity_fn` function, for instance a minibatch parameter
on a gradient estimator.
Returns
-------
SVGDState containing new particles, optimizer state and kernel parameters.
|
kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/vi/svgd.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/vi/svgd.py
|
Apache-2.0
|
def update_median_heuristic(state: SVGDState) -> SVGDState:
"""Median heuristic for setting the bandwidth of RBF kernels.
A reasonable middle-ground for choosing the `length_scale` of the RBF kernel
is to pick the empirical median of the squared distance between particles.
This strategy is called the median heuristic.
"""
position, kernel_parameters, opt_state = state
return SVGDState(position, median_heuristic(kernel_parameters, position), opt_state)
|
Median heuristic for setting the bandwidth of RBF kernels.
A reasonable middle-ground for choosing the `length_scale` of the RBF kernel
is to pick the empirical median of the squared distance between particles.
This strategy is called the median heuristic.
|
update_median_heuristic
|
python
|
blackjax-devs/blackjax
|
blackjax/vi/svgd.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/vi/svgd.py
|
Apache-2.0
|
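To make the heuristic concrete, here is a small illustrative sketch, assuming particles are stored as a flat (n, d) array and that the kernel parameters are a dict with a `length_scale` entry (as in the default shown below); this is not the library's own `median_heuristic` implementation.
import jax.numpy as jnp
def median_heuristic_sketch(kernel_parameters, particles):
    # Pairwise squared Euclidean distances between particles, shape (n, n).
    diffs = particles[:, None, :] - particles[None, :, :]
    squared_distances = jnp.sum(diffs**2, axis=-1)
    # Set the bandwidth to the empirical median of the squared distances.
    return {**kernel_parameters, "length_scale": jnp.median(squared_distances)}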
def as_top_level_api(
grad_logdensity_fn: Callable,
optimizer,
kernel: Callable = rbf_kernel,
update_kernel_parameters: Callable = update_median_heuristic,
):
"""Implements the (basic) user interface for the svgd algorithm :cite:p:`liu2016stein`.
Parameters
----------
grad_logdensity_fn
gradient, or an estimate, of the target log density function to approximately sample from
optimizer
Optax compatible optimizer, which conforms to the `optax.GradientTransformation` protocol
kernel
positive semi definite kernel
update_kernel_parameters
function that updates the kernel parameters given the current state of the particles
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel_ = build_kernel(optimizer)
def init_fn(
initial_position: ArrayLikeTree,
kernel_parameters: dict[str, Any] = {"length_scale": 1.0},
):
return init(initial_position, kernel_parameters, optimizer)
def step_fn(state, **grad_params):
state = kernel_(state, grad_logdensity_fn, kernel, **grad_params)
return update_kernel_parameters(state)
return SamplingAlgorithm(init_fn, step_fn) # type: ignore[arg-type]
|
Implements the (basic) user interface for the svgd algorithm :cite:p:`liu2016stein`.
Parameters
----------
grad_logdensity_fn
gradient, or an estimate, of the target log density function to approximately sample from
optimizer
Optax compatible optimizer, which conforms to the `optax.GradientTransformation` protocol
kernel
positive semi definite kernel
update_kernel_parameters
function that updates the kernel parameters given the current state of the particles
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/vi/svgd.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/vi/svgd.py
|
Apache-2.0
|
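A minimal sketch of how these pieces fit together, assuming `blackjax.svgd` is the exported alias for the interface above and using an illustrative standard-normal target and optax optimizer; none of these choices come from the source.
import jax
import jax.numpy as jnp
import optax
import blackjax
# Illustrative target: 2D standard normal; SVGD only needs the gradient of the log-density.
grad_logdensity_fn = jax.grad(lambda x: -0.5 * jnp.sum(x**2))
svgd = blackjax.svgd(grad_logdensity_fn, optax.sgd(learning_rate=0.1))
# 50 particles in 2 dimensions; the default RBF kernel and median-heuristic update are used.
initial_particles = jax.random.normal(jax.random.key(0), (50, 2))
state = svgd.init(initial_particles)
for _ in range(100):
    state = svgd.step(state)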
def test_hmc(self):
"""Count the number of times the logdensity is compiled when using HMC.
The logdensity is compiled twice: when initializing the state and when
compiling the kernel.
"""
@chex.assert_max_traces(n=2)
def logdensity_fn(x):
return jscipy.stats.norm.logpdf(x)
chex.clear_trace_counter()
rng_key = jax.random.key(0)
state = blackjax.hmc.init(1.0, logdensity_fn)
kernel = blackjax.hmc(
logdensity_fn,
step_size=1e-2,
inverse_mass_matrix=jnp.array([1.0]),
num_integration_steps=10,
)
step = jax.jit(kernel.step)
for i in range(10):
sample_key = jax.random.fold_in(rng_key, i)
state, _ = step(sample_key, state)
|
Count the number of times the logdensity is compiled when using HMC.
The logdensity is compiled twice: when initializing the state and when
compiling the kernel.
|
test_hmc
|
python
|
blackjax-devs/blackjax
|
tests/test_compilation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/test_compilation.py
|
Apache-2.0
|
def test_nuts(self):
"""Count the number of times the logdensity is compiled when using NUTS.
The logdensity is compiled twice: when initializing the state and when
compiling the kernel.
"""
@chex.assert_max_traces(n=2)
def logdensity_fn(x):
return jscipy.stats.norm.logpdf(x)
chex.clear_trace_counter()
rng_key = jax.random.key(0)
state = blackjax.nuts.init(1.0, logdensity_fn)
kernel = blackjax.nuts(
logdensity_fn, step_size=1e-2, inverse_mass_matrix=jnp.array([1.0])
)
step = jax.jit(kernel.step)
for i in range(10):
sample_key = jax.random.fold_in(rng_key, i)
state, _ = step(sample_key, state)
|
Count the number of times the logdensity is compiled when using NUTS.
The logdensity is compiled twice: when initializing the state and when
compiling the kernel.
|
test_nuts
|
python
|
blackjax-devs/blackjax
|
tests/test_compilation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/test_compilation.py
|
Apache-2.0
|
def test_hmc_warmup(self):
"""Count the number of times the logdensity is compiled when using window
adaptation to adapt the value of the step size and the inverse mass
matrix for the HMC algorithm.
"""
@chex.assert_max_traces(n=3)
def logdensity_fn(x):
return jscipy.stats.norm.logpdf(x)
chex.clear_trace_counter()
rng_key = jax.random.key(0)
warmup = blackjax.window_adaptation(
algorithm=blackjax.hmc,
logdensity_fn=logdensity_fn,
target_acceptance_rate=0.8,
num_integration_steps=10,
)
(state, parameters), _ = warmup.run(rng_key, 1.0, num_steps=100)
kernel = jax.jit(blackjax.hmc(logdensity_fn, **parameters).step)
for i in range(10):
sample_key = jax.random.fold_in(rng_key, i)
state, _ = kernel(sample_key, state)
|
Count the number of times the logdensity is compiled when using window
adaptation to adapt the value of the step size and the inverse mass
matrix for the HMC algorithm.
|
test_hmc_warmup
|
python
|
blackjax-devs/blackjax
|
tests/test_compilation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/test_compilation.py
|
Apache-2.0
|
def test_nuts_warmup(self):
"""Count the number of times the logdensity is compiled when using window
adaptation to adapt the value of the step size and the inverse mass
matrix for the NUTS algorithm.
"""
@chex.assert_max_traces(n=3)
def logdensity_fn(x):
return jscipy.stats.norm.logpdf(x)
chex.clear_trace_counter()
rng_key = jax.random.key(0)
warmup = blackjax.window_adaptation(
algorithm=blackjax.nuts,
logdensity_fn=logdensity_fn,
target_acceptance_rate=0.8,
)
(state, parameters), _ = warmup.run(rng_key, 1.0, num_steps=100)
step = jax.jit(blackjax.nuts(logdensity_fn, **parameters).step)
for i in range(10):
sample_key = jax.random.fold_in(rng_key, i)
state, _ = step(sample_key, state)
|
Count the number of times the logdensity is compiled when using window
adaptation to adapt the value of the step size and the inverse mass
matrix for the NUTS algorithm.
|
test_nuts_warmup
|
python
|
blackjax-devs/blackjax
|
tests/test_compilation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/test_compilation.py
|
Apache-2.0
|
def check_compatible(self, initial_state, progress_bar):
"""
Runs 10 steps with `run_inference_algorithm` starting with
`initial_state` and potentially a progress bar.
"""
_ = run_inference_algorithm(
rng_key=self.key,
initial_state=initial_state,
inference_algorithm=self.algorithm,
num_steps=self.num_steps,
progress_bar=progress_bar,
transform=lambda state, info: state.position,
)
|
Runs 10 steps with `run_inference_algorithm` starting with
`initial_state` and potentially a progress bar.
|
check_compatible
|
python
|
blackjax-devs/blackjax
|
tests/test_util.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/test_util.py
|
Apache-2.0
|
def test_preconditioning_matrix(self, seed):
"""Test two different ways of using pre-conditioning matrix has exactly same effect.
We follow the discussion in Appendix G of the Barker 2020 paper.
"""
key = jax.random.key(seed)
init_key, inference_key = jax.random.split(key, 2)
# setup some 2D multivariate normal model
# setup sampling mean and cov
true_x = jnp.array([0.0, 1.0])
data = jax.random.normal(init_key, shape=(1000,)) * true_x[1] + true_x[0]
assert data.shape == (1000,)
# some non-diagonal positive-definite matrix for pre-conditioning
inv_mass_matrix = jnp.array([[1, 0.1], [0.1, 1]])
metric = metrics.default_metric(inv_mass_matrix)
# define barker kernel two ways
# non-scaled, use pre-conditioning
def logdensity(x, data):
mu_prior = stats.norm.logpdf(x[0], loc=0, scale=1)
sigma_prior = stats.uniform.logpdf(x[1], 0.0, 3.0)
return mu_prior + sigma_prior + jnp.sum(stats.norm.logcdf(data, x[0], x[1]))
logposterior_fn1 = functools.partial(logdensity, data=data)
barker1 = blackjax.barker_proposal(logposterior_fn1, 1e-1, inv_mass_matrix)
state1 = barker1.init(true_x)
# scaled, trivial pre-conditioning
def scaled_logdensity(x_scaled, data, metric):
x = metric.scale(x_scaled, x_scaled, inv=False, trans=False)
return logdensity(x, data)
logposterior_fn2 = functools.partial(
scaled_logdensity, data=data, metric=metric
)
barker2 = blackjax.barker_proposal(logposterior_fn2, 1e-1, jnp.eye(2))
true_x_trans = metric.scale(true_x, true_x, inv=True, trans=True)
state2 = barker2.init(true_x_trans)
n_steps = 10
_, states1 = run_inference_algorithm(
rng_key=inference_key,
initial_state=state1,
inference_algorithm=barker1,
transform=lambda state, info: state.position,
num_steps=n_steps,
)
_, states2 = run_inference_algorithm(
rng_key=inference_key,
initial_state=state2,
inference_algorithm=barker2,
transform=lambda state, info: state.position,
num_steps=n_steps,
)
# states should be the exact same with same random key after transforming
states2_trans = []
for ii in range(n_steps):
s = states2[ii]
states2_trans.append(metric.scale(s, s, inv=False, trans=False))
states2_trans = jnp.array(states2_trans)
assert jnp.allclose(states1, states2_trans)
|
Test that two different ways of using the pre-conditioning matrix have exactly the same effect.
We follow the discussion in Appendix G of the Barker 2020 paper.
|
test_preconditioning_matrix
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_barker.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_barker.py
|
Apache-2.0
|
def HarmonicOscillator(inv_mass_matrix, k=1.0, m=1.0):
"""Potential and Kinetic energy of an harmonic oscillator."""
def neg_potential_energy(q):
return -jnp.sum(0.5 * k * jnp.square(q["x"]))
def kinetic_energy(p, position=None):
del position
v = jnp.multiply(inv_mass_matrix, p["x"])
return jnp.sum(0.5 * jnp.dot(v, p["x"]))
return neg_potential_energy, kinetic_energy
|
Potential and Kinetic energy of an harmonic oscillator.
|
HarmonicOscillator
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_integrators.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_integrators.py
|
Apache-2.0
|
def FreeFall(inv_mass_matrix, g=1.0):
"""Potential and kinetic energy of a free-falling object."""
def neg_potential_energy(q):
return -jnp.sum(g * q["x"])
def kinetic_energy(p, position=None):
del position
v = jnp.multiply(inv_mass_matrix, p["x"])
return jnp.sum(0.5 * jnp.dot(v, p["x"]))
return neg_potential_energy, kinetic_energy
|
Potential and kinetic energy of a free-falling object.
|
FreeFall
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_integrators.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_integrators.py
|
Apache-2.0
|
def PlanetaryMotion(inv_mass_matrix):
"""Potential and kinetic energy for planar planetary motion."""
def neg_potential_energy(q):
return 1.0 / jnp.power(q["x"] ** 2 + q["y"] ** 2, 0.5)
def kinetic_energy(p, position=None):
del position
z = jnp.stack([p["x"], p["y"]], axis=-1)
return 0.5 * jnp.dot(inv_mass_matrix, z**2)
return neg_potential_energy, kinetic_energy
|
Potential and kinetic energy for planar planetary motion.
|
PlanetaryMotion
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_integrators.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_integrators.py
|
Apache-2.0
|
def MultivariateNormal(inv_mass_matrix):
"""Potential and kinetic energy for a multivariate normal distribution."""
def log_density(q):
q, _ = ravel_pytree(q)
return stats.multivariate_normal.logpdf(q, jnp.zeros_like(q), inv_mass_matrix)
def kinetic_energy(p, position=None):
del position
p, _ = ravel_pytree(p)
return 0.5 * p.T @ inv_mass_matrix @ p
return log_density, kinetic_energy
|
Potential and kinetic energy for a multivariate normal distribution.
|
MultivariateNormal
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_integrators.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_integrators.py
|
Apache-2.0
|
def test_esh_momentum_update(self, dims):
"""
Test the numerically efficient version of the momentum update currently
implemented match the naive implementation according to the Equation 16 in
:cite:p:`robnik2023microcanonical`
"""
step_size = 1e-3
key0, key1 = jax.random.split(jax.random.key(62))
gradient = jax.random.uniform(key0, shape=(dims,))
momentum = jax.random.uniform(key1, shape=(dims,))
momentum /= jnp.linalg.norm(momentum)
# Naive implementation
gradient_norm = jnp.linalg.norm(gradient)
gradient_normalized = gradient / gradient_norm
delta = step_size * gradient_norm / (dims - 1)
next_momentum = (
momentum
+ gradient_normalized
* (
jnp.sinh(delta)
+ jnp.dot(gradient_normalized, momentum * (jnp.cosh(delta) - 1))
)
) / (jnp.cosh(delta) + jnp.dot(gradient_normalized, momentum * jnp.sinh(delta)))
# Efficient implementation
update_stable = self.variant(
esh_dynamics_momentum_update_one_step(inverse_mass_matrix=1.0)
)
next_momentum1, *_ = update_stable(momentum, gradient, step_size, 1.0)
np.testing.assert_array_almost_equal(next_momentum, next_momentum1)
|
Test the numerically efficient version of the momentum update currently
implemented match the naive implementation according to the Equation 16 in
:cite:p:`robnik2023microcanonical`
|
test_esh_momentum_update
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_integrators.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_integrators.py
|
Apache-2.0
|
def test_non_separable(self):
"""Test the integration of a non-separable Hamiltonian with a known
closed-form solution, as defined in https://arxiv.org/abs/1609.02212.
"""
def neg_potential(q):
return -0.5 * (q**2 + 1)
def kinetic_energy(p, position=None):
return 0.5 * p**2 * (1 + position**2)
step = self.variant(
integrators.implicit_midpoint(neg_potential, kinetic_energy)
)
step_size = 1e-3
q = jnp.array(-1.0)
p = jnp.array(0.0)
initial_state = integrators.IntegratorState(
q, p, neg_potential(q), jax.grad(neg_potential)(q)
)
def scan_body(state, _):
state = step(state, step_size)
return state, state
final_state, traj = jax.lax.scan(
scan_body,
initial_state,
xs=None,
length=10_000,
)
# The closed-form solution is computed as follows:
t = step_size * np.arange(len(traj.position))
expected = q * ellipj(t * np.sqrt(1 + q**2), q**2 / (1 + q**2))[1]
# Check that the trajectory matches the closed-form solution to
# acceptable precision
chex.assert_trees_all_close(traj.position, expected, atol=step_size)
# And check the conservation of energy
energy = -neg_potential(q) + kinetic_energy(p, position=q)
new_energy = -neg_potential(final_state.position) + kinetic_energy(
final_state.momentum, position=final_state.position
)
self.assertAlmostEqual(energy, new_energy, delta=1e-4)
|
Test the integration of a non-separable Hamiltonian with a known
closed-form solution, as defined in https://arxiv.org/abs/1609.02212.
|
test_non_separable
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_integrators.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_integrators.py
|
Apache-2.0
|
def test_invalid(self, shape, is_inv):
"""Test formatting raises error for invalid shapes"""
mass_matrix = jnp.zeros(shape=shape)
with self.assertRaisesRegex(
ValueError, "The mass matrix has the wrong number of dimensions"
):
metrics._format_covariance(mass_matrix, is_inv)
|
Test formatting raises error for invalid shapes
|
test_invalid
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_metrics.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_metrics.py
|
Apache-2.0
|
def test_gaussian_euclidean_ndim_invalid(self, shape):
"""Test Gaussian Euclidean Function returns correct function invalid ndim"""
x = jnp.ones(shape=shape)
with self.assertRaisesRegex(
ValueError, "The mass matrix has the wrong number of dimensions"
):
_ = metrics.gaussian_euclidean(x)
|
Test Gaussian Euclidean Function returns correct function invalid ndim
|
test_gaussian_euclidean_ndim_invalid
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_metrics.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_metrics.py
|
Apache-2.0
|
def test_gaussian_euclidean_dim_1(self):
"""Test Gaussian Euclidean Function with ndim 1"""
inverse_mass_matrix = jnp.asarray([1 / 4], dtype=self.dtype)
momentum, kinetic_energy, _, scale = metrics.gaussian_euclidean(
inverse_mass_matrix
)
arbitrary_position = jnp.asarray([12345], dtype=self.dtype)
momentum_val = self.variant(momentum)(self.key, arbitrary_position)
# 2 is square root inverse of 1/4
expected_momentum_val = 2 * random.normal(self.key)
kinetic_energy_val = self.variant(kinetic_energy)(momentum_val)
velocity = inverse_mass_matrix * momentum_val
expected_kinetic_energy_val = 0.5 * velocity * momentum_val
assert momentum_val == expected_momentum_val
assert kinetic_energy_val == expected_kinetic_energy_val
inv_scaled_momentum = scale(
arbitrary_position, momentum_val, inv=True, trans=False
)
scaled_momentum = scale(
arbitrary_position, momentum_val, inv=False, trans=False
)
expected_scaled_momentum = momentum_val / jnp.sqrt(inverse_mass_matrix)
expected_inv_scaled_momentum = momentum_val * jnp.sqrt(inverse_mass_matrix)
chex.assert_trees_all_close(inv_scaled_momentum, expected_inv_scaled_momentum)
chex.assert_trees_all_close(scaled_momentum, expected_scaled_momentum)
|
Test Gaussian Euclidean Function with ndim 1
|
test_gaussian_euclidean_dim_1
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_metrics.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_metrics.py
|
Apache-2.0
|
def test_gaussian_euclidean_dim_2(self):
"""Test Gaussian Euclidean Function with ndim 2"""
inverse_mass_matrix = jnp.asarray(
[[2 / 3, 0.5], [0.5, 3 / 4]], dtype=self.dtype
)
momentum, kinetic_energy, _, scale = metrics.gaussian_euclidean(
inverse_mass_matrix
)
arbitrary_position = jnp.asarray([12345, 23456], dtype=self.dtype)
momentum_val = self.variant(momentum)(self.key, arbitrary_position)
L_inv = linalg.inv(linalg.cholesky(inverse_mass_matrix, lower=False))
expected_momentum_val = L_inv @ random.normal(self.key, shape=(2,))
kinetic_energy_val = self.variant(kinetic_energy)(momentum_val)
velocity = jnp.dot(inverse_mass_matrix, momentum_val)
expected_kinetic_energy_val = 0.5 * jnp.matmul(velocity, momentum_val)
np.testing.assert_allclose(expected_momentum_val, momentum_val)
np.testing.assert_allclose(kinetic_energy_val, expected_kinetic_energy_val)
inv_scaled_momentum = scale(
arbitrary_position, momentum_val, inv=True, trans=False
)
scaled_momentum = scale(
arbitrary_position, momentum_val, inv=False, trans=False
)
expected_inv_scaled_momentum = jnp.linalg.inv(L_inv).T @ momentum_val
expected_scaled_momentum = L_inv @ momentum_val
chex.assert_trees_all_close(inv_scaled_momentum, expected_inv_scaled_momentum)
chex.assert_trees_all_close(scaled_momentum, expected_scaled_momentum)
|
Test Gaussian Euclidean Function with ndim 2
|
test_gaussian_euclidean_dim_2
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_metrics.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_metrics.py
|
Apache-2.0
|
def test_normal_univariate(self, initial_position):
"""
Move samples are generated in the univariate case,
with std following sigma, and independently of the position.
"""
keys = jax.random.split(self.key, 200)
proposal = normal(sigma=jnp.array([1.0]))
samples = [proposal(key, jnp.array([initial_position])) for key in keys]
self._check_mean_and_std(jnp.array([0.0]), jnp.array([1.0]), samples)
|
Move samples are generated in the univariate case,
with std following sigma, and independently of the position.
|
test_normal_univariate
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_proposal.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_proposal.py
|
Apache-2.0
|
def test_one_step_addition(self):
"""New position is an addition to previous position.
Since the density == 1, the proposal is accepted.
The random step may depend on the previous position
"""
rng_key = jax.random.key(0)
initial_position = jnp.array([50.0])
def random_step(key, position):
assert position == initial_position
return jnp.array([10.0])
def test_logdensity_accepts(position):
"""
a logdensity that gets maximized after the step
"""
return 0.0 if all(position > 59.0) else 0.5
step = build_additive_step()
new_state, _ = step(
rng_key,
RWState(position=initial_position, logdensity=1.0),
test_logdensity_accepts,
random_step,
)
np.testing.assert_allclose(new_state.position, jnp.array([60.0]))
assert new_state.position
|
New position is an addition to previous position.
Since the density == 1, the proposal is accepted.
The random step may depend on the previous position
|
test_one_step_addition
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_random_walk_without_chex.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_random_walk_without_chex.py
|
Apache-2.0
|
def test_proposal_is_independent_of_position(self):
"""New position does not depend on previous position"""
rng_key = jax.random.key(0)
initial_position = jnp.array([50.0])
other_position = jnp.array([15000.0])
step = build_irmh()
for previous_position in [initial_position, other_position]:
new_state, state_info = step(
rng_key,
RWState(position=previous_position, logdensity=1.0),
self.logdensity_accepts,
self.proposal_distribution,
)
np.testing.assert_allclose(new_state.position, jnp.array([10.0]))
np.testing.assert_allclose(state_info.acceptance_rate, 0.367879, rtol=1e-5)
|
New position does not depend on previous position
|
test_proposal_is_independent_of_position
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_random_walk_without_chex.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_random_walk_without_chex.py
|
Apache-2.0
|
def test_non_symmetric_proposal(self):
"""
Given that proposal_logdensity_fn is included,
thus the proposal is non-symmetric.
When computing the acceptance of the proposed state
Then proposal_logdensity_fn value is taken into account
"""
rng_key = jax.random.key(0)
initial_position = jnp.array([50.0])
def test_proposal_logdensity(new_state, prev_state):
return 0.1 if all(new_state.position - 10 < 1e-10) else 0.5
step = build_irmh()
for previous_position in [initial_position]:
_, state_info = step(
rng_key,
RWState(position=previous_position, logdensity=1.0),
self.logdensity_accepts,
self.proposal_distribution,
test_proposal_logdensity,
)
np.testing.assert_allclose(state_info.acceptance_rate, 0.246597)
|
Given that proposal_logdensity_fn is included,
thus the proposal is non-symmetric.
When computing the acceptance of the proposed state
Then proposal_logdensity_fn value is taken into account
|
test_non_symmetric_proposal
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_random_walk_without_chex.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_random_walk_without_chex.py
|
Apache-2.0
|
def test_generate_reject(self):
"""
Steps from previous state,
Builds a proposal from the new state
and given that the sampling rule rejects,
the prev_state is proposed again
"""
rng_key = jax.random.key(0)
prev_state = RWState(jnp.array([30.0]), 15.0)
generate = rmh_proposal(
logdensity_fn=lambda _: 50.0,
transition_distribution=self.transition_distribution,
compute_acceptance_ratio=self.compute_ratio,
sample_proposal=self.reject,
)
sampled_proposal, do_accept, p_accept = generate(rng_key, prev_state)
assert not do_accept
assert p_accept == 0.3
np.testing.assert_allclose(sampled_proposal.position, jnp.array([30.0]))
|
Steps from previous state,
Builds a proposal from the new state
and given that the sampling rule rejects,
the prev_state is proposed again
|
test_generate_reject
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_random_walk_without_chex.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_random_walk_without_chex.py
|
Apache-2.0
|
def test_window_adaptation(
self, case, is_mass_matrix_diagonal, window_adapt_config
):
"""Test the HMC kernel and the Stan warmup."""
rng_key, init_key0, init_key1 = jax.random.split(self.key, 3)
x_data = jax.random.normal(init_key0, shape=(1000, 1))
y_data = 3 * x_data + jax.random.normal(init_key1, shape=x_data.shape)
logposterior_fn_ = functools.partial(
self.regression_logprob, x=x_data, preds=y_data
)
logposterior_fn = lambda x: logposterior_fn_(**x)
warmup_key, inference_key = jax.random.split(rng_key, 2)
warmup = blackjax.window_adaptation(
case["algorithm"],
logposterior_fn,
is_mass_matrix_diagonal,
progress_bar=True,
adaptation_info_fn=window_adapt_config["filter_fn"],
**case["parameters"],
)
(state, parameters), info = warmup.run(
warmup_key,
case["initial_position"],
case["num_warmup_steps"],
)
inference_algorithm = case["algorithm"](logposterior_fn, **parameters)
def check_attrs(attribute, keyset):
for name, param in getattr(info, attribute)._asdict().items():
if name in keyset:
assert param is not None
else:
assert param is None
keysets = window_adapt_config["return_sets"]
if keysets is None:
keysets = (
info.state._fields,
info.info._fields,
info.adaptation_state._fields,
)
for i, attribute in enumerate(["state", "info", "adaptation_state"]):
check_attrs(attribute, keysets[i])
_, (states, _) = run_inference_algorithm(
rng_key=inference_key,
initial_state=state,
inference_algorithm=inference_algorithm,
num_steps=case["num_sampling_steps"],
)
coefs_samples = states.position["coefs"]
scale_samples = np.exp(states.position["log_scale"])
np.testing.assert_allclose(np.mean(scale_samples), 1.0, atol=1e-1)
np.testing.assert_allclose(np.mean(coefs_samples), 3.0, atol=1e-1)
|
Test the HMC kernel and the Stan warmup.
|
test_window_adaptation
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_sampling.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_sampling.py
|
Apache-2.0
|
def __init__(self, d, condition_number):
"""numpy_seed is used to generate a random rotation for the covariance matrix.
If None, the covariance matrix is diagonal."""
self.ndims = d
self.name = "IllConditionedGaussian"
self.condition_number = condition_number
eigs = jnp.logspace(
-0.5 * jnp.log10(condition_number),
0.5 * jnp.log10(condition_number),
d,
)
self.E_x2 = eigs
self.R = jnp.eye(d)
self.Hessian = jnp.diag(1 / eigs)
self.Cov = jnp.diag(eigs)
self.Var_x2 = 2 * jnp.square(self.E_x2)
self.logdensity_fn = lambda x: -0.5 * x.T @ self.Hessian @ x
self.transform = lambda x: x
self.sample_init = lambda key: jax.random.normal(
key, shape=(self.ndims,)
) * jnp.max(jnp.sqrt(eigs))
|
numpy_seed is used to generate a random rotation for the covariance matrix.
If None, the covariance matrix is diagonal.
|
__init__
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_sampling.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_sampling.py
|
Apache-2.0
|
def test_pathfinder_adaptation(
self,
algorithm,
num_warmup_steps,
initial_position,
num_sampling_steps,
parameters,
):
"""Test the HMC kernel and the Stan warmup."""
rng_key, init_key0, init_key1 = jax.random.split(self.key, 3)
x_data = jax.random.normal(init_key0, shape=(1000, 1))
y_data = 3 * x_data + jax.random.normal(init_key1, shape=x_data.shape)
logposterior_fn_ = functools.partial(
self.regression_logprob, x=x_data, preds=y_data
)
logposterior_fn = lambda x: logposterior_fn_(**x)
warmup_key, inference_key = jax.random.split(rng_key, 2)
warmup = blackjax.pathfinder_adaptation(
algorithm,
logposterior_fn,
**parameters,
)
(state, parameters), _ = warmup.run(
warmup_key,
initial_position,
num_warmup_steps,
)
inference_algorithm = algorithm(logposterior_fn, **parameters)
_, states = run_inference_algorithm(
rng_key=inference_key,
initial_state=state,
inference_algorithm=inference_algorithm,
num_steps=num_sampling_steps,
transform=lambda state, info: state.position,
)
coefs_samples = states["coefs"]
scale_samples = np.exp(states["log_scale"])
np.testing.assert_allclose(np.mean(scale_samples), 1.0, atol=1e-1)
np.testing.assert_allclose(np.mean(coefs_samples), 3.0, atol=1e-1)
|
Test the HMC kernel and the Stan warmup.
|
test_pathfinder_adaptation
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_sampling.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_sampling.py
|
Apache-2.0
|
def test_meads(self):
"""Test the MEADS adaptation w/ GHMC kernel."""
rng_key, init_key0, init_key1 = jax.random.split(self.key, 3)
x_data = jax.random.normal(init_key0, shape=(1000, 1))
y_data = 3 * x_data + jax.random.normal(init_key1, shape=x_data.shape)
logposterior_fn_ = functools.partial(
self.regression_logprob, x=x_data, preds=y_data
)
logposterior_fn = lambda x: logposterior_fn_(**x)
init_key, warmup_key, inference_key = jax.random.split(rng_key, 3)
num_chains = 128
warmup = blackjax.meads_adaptation(
logposterior_fn,
num_chains=num_chains,
)
scale_key, coefs_key = jax.random.split(init_key, 2)
log_scales = 1.0 + jax.random.normal(scale_key, (num_chains,))
coefs = 4.0 + jax.random.normal(coefs_key, (num_chains,))
initial_positions = {"log_scale": log_scales, "coefs": coefs}
(last_states, parameters), _ = warmup.run(
warmup_key,
initial_positions,
num_steps=1000,
)
inference_algorithm = blackjax.ghmc(logposterior_fn, **parameters)
chain_keys = jax.random.split(inference_key, num_chains)
_, states = jax.vmap(
lambda key, state: run_inference_algorithm(
rng_key=key,
initial_state=state,
inference_algorithm=inference_algorithm,
transform=lambda state, info: state.position,
num_steps=100,
)
)(chain_keys, last_states)
coefs_samples = states["coefs"]
scale_samples = np.exp(states["log_scale"])
np.testing.assert_allclose(np.mean(scale_samples), 1.0, atol=1e-1)
np.testing.assert_allclose(np.mean(coefs_samples), 3.0, atol=1e-1)
|
Test the MEADS adaptation w/ GHMC kernel.
|
test_meads
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_sampling.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_sampling.py
|
Apache-2.0
|
def test_chees(self, jitter_generator):
"""Test the ChEES adaptation w/ HMC kernel."""
rng_key, init_key0, init_key1 = jax.random.split(self.key, 3)
x_data = jax.random.normal(init_key0, shape=(1000, 1))
y_data = 3 * x_data + jax.random.normal(init_key1, shape=x_data.shape)
logposterior_fn_ = functools.partial(
self.regression_logprob, x=x_data, preds=y_data
)
logposterior_fn = lambda x: logposterior_fn_(**x)
init_key, warmup_key, inference_key = jax.random.split(rng_key, 3)
num_chains = 128
warmup = blackjax.chees_adaptation(
logposterior_fn, num_chains=num_chains, jitter_generator=jitter_generator
)
scale_key, coefs_key = jax.random.split(init_key, 2)
log_scales = 1.0 + jax.random.normal(scale_key, (num_chains,))
coefs = 4.0 + jax.random.normal(coefs_key, (num_chains,))
initial_positions = {"log_scale": log_scales, "coefs": coefs}
(last_states, parameters), _ = warmup.run(
warmup_key,
initial_positions,
step_size=0.001,
optim=optax.adam(learning_rate=0.1),
num_steps=1000,
)
inference_algorithm = blackjax.dynamic_hmc(logposterior_fn, **parameters)
chain_keys = jax.random.split(inference_key, num_chains)
_, states = jax.vmap(
lambda key, state: run_inference_algorithm(
rng_key=key,
initial_state=state,
inference_algorithm=inference_algorithm,
transform=lambda state, info: state.position,
num_steps=100,
)
)(chain_keys, last_states)
coefs_samples = states["coefs"]
scale_samples = np.exp(states["log_scale"])
np.testing.assert_allclose(np.mean(scale_samples), 1.0, atol=1e-1)
np.testing.assert_allclose(np.mean(coefs_samples), 3.0, atol=1e-1)
|
Test the ChEES adaptation w/ HMC kernel.
|
test_chees
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_sampling.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_sampling.py
|
Apache-2.0
|
def generate_multivariate_target(self, rng=None):
"""Genrate a Multivariate Normal distribution as target."""
if rng is None:
loc = jnp.array([0.0, 3])
scale = jnp.array([1.0, 2.0])
rho = jnp.array(0.75)
else:
loc_rng, scale_rng, rho_rng = jax.random.split(rng, 3)
loc = jax.random.normal(loc_rng, [2]) * 10.0
scale = jnp.abs(jax.random.normal(scale_rng, [2])) * 2.5
rho = jax.random.uniform(rho_rng, [], minval=-1.0, maxval=1.0)
cov = jnp.diag(scale**2)
cov = cov.at[0, 1].set(rho * scale[0] * scale[1])
cov = cov.at[1, 0].set(rho * scale[0] * scale[1])
def logdensity_fn(x):
return stats.multivariate_normal.logpdf(x, loc, cov).sum()
return logdensity_fn, loc, scale, rho, cov
|
Generate a Multivariate Normal distribution as target.
|
generate_multivariate_target
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_sampling.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_sampling.py
|
Apache-2.0
|
def test_mcse(self, algorithm, parameters, is_mass_matrix_diagonal):
"""Test convergence using Monte Carlo CLT across multiple chains."""
pos_init_key, sample_key = jax.random.split(self.key)
(
logdensity_fn,
true_loc,
true_scale,
true_rho,
true_cov,
) = self.generate_multivariate_target(None)
if is_mass_matrix_diagonal is not None:
if is_mass_matrix_diagonal:
inverse_mass_matrix = true_scale**2
else:
inverse_mass_matrix = true_cov
inference_algorithm = algorithm(
logdensity_fn,
inverse_mass_matrix=inverse_mass_matrix,
**parameters,
)
else:
inference_algorithm = algorithm(logdensity_fn, **parameters)
num_chains = 10
initial_positions = jax.random.normal(pos_init_key, [num_chains, 2])
initial_states = jax.vmap(inference_algorithm.init, in_axes=(0,))(
initial_positions
)
multi_chain_sample_key = jax.random.split(sample_key, num_chains)
inference_loop_multiple_chains = jax.vmap(
functools.partial(
run_inference_algorithm,
inference_algorithm=inference_algorithm,
transform=lambda state, info: state.position,
num_steps=2_000,
)
)
_, states = inference_loop_multiple_chains(
rng_key=multi_chain_sample_key, initial_state=initial_states
)
posterior_samples = states[:, -1000:]
posterior_delta = posterior_samples - true_loc
posterior_variance = posterior_delta**2.0
posterior_correlation = jnp.prod(posterior_delta, axis=-1, keepdims=True) / (
true_scale[0] * true_scale[1]
)
_ = jax.tree.map(
self.mcse_test,
[posterior_samples, posterior_variance, posterior_correlation],
[true_loc, true_scale**2, true_rho],
)
|
Test convergence using Monte Carlo CLT across multiple chains.
|
test_mcse
|
python
|
blackjax-devs/blackjax
|
tests/mcmc/test_sampling.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/mcmc/test_sampling.py
|
Apache-2.0
|
def test_dual_averaging(self):
"""We test the dual averaging algorithm by searching for the point that
minimizes the gradient of a simple function.
"""
# we need to wrap the gradient in a namedtuple as we optimize for a target
# acceptance probability in the context of HMC.
f = lambda x: (x - 1) ** 2
grad_f = jax.jit(jax.grad(f))
# Our target gradient is 0. we increase the rate of convergence by
# increasing the value of gamma (see documentation of the algorithm).
init, update, final = dual_averaging(gamma=0.3)
update_fn = self.variant(update)
da_state = init(3)
for _ in range(100):
x = jnp.exp(da_state.log_x)
g = grad_f(x)
da_state = update_fn(da_state, g)
self.assertAlmostEqual(final(da_state), 1.0, delta=1e-1)
|
We test the dual averaging algorithm by searching for the point that
minimizes the gradient of a simple function.
|
test_dual_averaging
|
python
|
blackjax-devs/blackjax
|
tests/optimizers/test_optimizers.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/optimizers/test_optimizers.py
|
Apache-2.0
|
def test_minimize_lbfgs(self, maxiter, maxcor):
"""Test if dot product between approximate inverse hessian and gradient is
the same between two loop recursion algorthm of LBFGS and formulas of the
pathfinder paper"""
def regression_logprob(log_scale, coefs, preds, x):
"""Linear regression"""
scale = jnp.exp(log_scale)
scale_prior = stats.expon.logpdf(scale, 0, 1) + log_scale
coefs_prior = stats.norm.logpdf(coefs, 0, 5)
y = jnp.dot(x, coefs)
logpdf = stats.norm.logpdf(preds, y, scale)
return sum(x.sum() for x in [scale_prior, coefs_prior, logpdf])
def regression_model(key):
init_key0, init_key1 = jax.random.split(key, 2)
x_data = jax.random.normal(init_key0, shape=(10_000, 1))
y_data = 3 * x_data + jax.random.normal(init_key1, shape=x_data.shape)
logposterior_fn_ = functools.partial(
regression_logprob, x=x_data, preds=y_data
)
logposterior_fn = lambda x: logposterior_fn_(**x)
return logposterior_fn
fn = regression_model(self.key)
b0 = {"log_scale": 0.0, "coefs": 2.0}
b0_flatten, unravel_fn = ravel_pytree(b0)
objective_fn = lambda x: -fn(unravel_fn(x))
(_, status), history = self.variant(
functools.partial(
minimize_lbfgs, objective_fn, maxiter=maxiter, maxcor=maxcor
)
)(b0_flatten)
history = jax.tree.map(lambda x: x[: status.iter_num + 1], history)
# Test recover alpha
S = jnp.diff(history.x, axis=0)
Z = jnp.diff(history.g, axis=0)
alpha0 = history.alpha[0]
def scan_fn(alpha, val):
alpha_l, mask_l = lbfgs_recover_alpha(alpha, *val)
return alpha_l, (alpha_l, mask_l)
_, (alpha, mask) = jax.lax.scan(scan_fn, alpha0, (S, Z))
np.testing.assert_array_almost_equal(alpha, history.alpha[1:])
np.testing.assert_array_equal(mask, history.update_mask[1:])
# Test inverse hessian product
S_partial = S[-maxcor:].T
Z_partial = Z[-maxcor:].T
alpha = history.alpha[-1]
beta, gamma = lbfgs_inverse_hessian_factors(S_partial, Z_partial, alpha)
inv_hess_1 = lbfgs_inverse_hessian_formula_1(alpha, beta, gamma)
inv_hess_2 = lbfgs_inverse_hessian_formula_2(alpha, beta, gamma)
gamma = compute_gamma(S_partial, Z_partial, -1)
pk = inv_hessian_product(
-history.g[-1],
status.s_history,
status.y_history,
status.rho_history,
gamma,
status.iter_num % maxcor,
)
np.testing.assert_allclose(pk, -inv_hess_1 @ history.g[-1], atol=1e-3)
np.testing.assert_allclose(pk, -inv_hess_2 @ history.g[-1], atol=1e-3)
|
Test if the dot product between the approximate inverse Hessian and the gradient is
the same between the two-loop recursion algorithm of LBFGS and the formulas of the
pathfinder paper
|
test_minimize_lbfgs
|
python
|
blackjax-devs/blackjax
|
tests/optimizers/test_optimizers.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/optimizers/test_optimizers.py
|
Apache-2.0
|
def test_recover_diag_inv_hess(self):
"Compare inverse Hessian estimation from LBFGS with known groundtruth."
nd = 5
mean = np.linspace(3.0, 50.0, nd)
cov = np.diag(np.linspace(1.0, 10.0, nd))
def loss_fn(x):
return -stats.multivariate_normal.logpdf(x, mean, cov)
(result, status), history = self.variant(
functools.partial(minimize_lbfgs, loss_fn, maxiter=50)
)(np.zeros(nd))
history = jax.tree.map(lambda x: x[: status.iter_num + 1], history)
np.testing.assert_allclose(result, mean, rtol=0.01)
S_partial = jnp.diff(history.x, axis=0)[-10:].T
Z_partial = jnp.diff(history.g, axis=0)[-10:].T
alpha = history.alpha[-1]
beta, gamma = lbfgs_inverse_hessian_factors(S_partial, Z_partial, alpha)
inv_hess_1 = lbfgs_inverse_hessian_formula_1(alpha, beta, gamma)
inv_hess_2 = lbfgs_inverse_hessian_formula_2(alpha, beta, gamma)
np.testing.assert_allclose(np.diag(inv_hess_1), np.diag(cov), rtol=0.01)
np.testing.assert_allclose(inv_hess_1, inv_hess_2, rtol=0.01)
|
Compare inverse Hessian estimation from LBFGS with a known ground truth.
|
test_recover_diag_inv_hess
|
python
|
blackjax-devs/blackjax
|
tests/optimizers/test_optimizers.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/optimizers/test_optimizers.py
|
Apache-2.0
|
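For a quick illustration of the optimizer interface exercised in the two tests above, the following minimizes a simple quadratic with a pytree input; the objective and starting point are illustrative, and `minimize_lbfgs` is assumed to be importable from `blackjax.optimizers.lbfgs`, the module recorded in the path column above.
import jax.numpy as jnp
from blackjax.optimizers.lbfgs import minimize_lbfgs
# Illustrative quadratic bowl with its minimum at x = 3, using a pytree input.
def objective_fn(params):
    return jnp.sum((params["x"] - 3.0) ** 2)
(params, opt_state), history = minimize_lbfgs(objective_fn, {"x": jnp.zeros(5)}, maxiter=50)
# params["x"] should now be close to 3 in every coordinate.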
def test_recover_posterior(self, ndim):
"""Test if pathfinder is able to estimate well enough the posterior of a
normal-normal conjugate model"""
def logp_posterior_conjugate_normal_model(
x, observed, prior_mu, prior_prec, true_prec
):
n = observed.shape[0]
posterior_cov = jnp.linalg.inv(prior_prec + n * true_prec)
posterior_mu = (
posterior_cov
@ (
prior_prec @ prior_mu[:, None]
+ n * true_prec @ observed.mean(0)[:, None]
)
)[:, 0]
return stats.multivariate_normal.logpdf(x, posterior_mu, posterior_cov)
def logp_unnormalized_posterior(x, observed, prior_mu, prior_prec, true_cov):
logp = 0.0
logp += stats.multivariate_normal.logpdf(x, prior_mu, prior_prec)
logp += stats.multivariate_normal.logpdf(observed, x, true_cov).sum()
return logp
rng_key_chol, rng_key_observed, rng_key_pathfinder = jax.random.split(
self.key, 3
)
L = jnp.tril(jax.random.normal(rng_key_chol, (ndim, ndim)))
true_mu = jnp.arange(ndim)
true_cov = L @ L.T
true_prec = jnp.linalg.pinv(true_cov)
prior_mu = jnp.zeros(ndim)
prior_prec = jnp.eye(ndim)
observed = jax.random.multivariate_normal(
rng_key_observed, true_mu, true_cov, shape=(10_000,)
)
logp_model = functools.partial(
logp_unnormalized_posterior,
observed=observed,
prior_mu=prior_mu,
prior_prec=prior_prec,
true_cov=true_cov,
)
x0 = jnp.ones(ndim)
pathfinder = blackjax.pathfinder(logp_model)
out, _ = self.variant(pathfinder.approximate)(rng_key_pathfinder, x0)
sim_p, log_p = bfgs_sample(
rng_key_pathfinder,
10_000,
out.position,
out.grad_position,
out.alpha,
out.beta,
out.gamma,
)
log_q = logp_posterior_conjugate_normal_model(
sim_p, observed, prior_mu, prior_prec, true_prec
)
kl = (log_p - log_q).mean()
# TODO(junpenglao): Make this test more robust.
self.assertAlmostEqual(kl, 0.0, delta=2.5)
|
Test if pathfinder is able to estimate the posterior of a
normal-normal conjugate model well enough
|
test_recover_posterior
|
python
|
blackjax-devs/blackjax
|
tests/optimizers/test_pathfinder.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/optimizers/test_pathfinder.py
|
Apache-2.0
|
def test_scale_when_aceptance_below_optimal(self):
"""
Given that the acceptance rate is below optimal,
the scale gets reduced.
"""
np.testing.assert_allclose(
update_scale_from_acceptance_rate(
scales=jnp.array([0.5]), acceptance_rates=jnp.array([0.2])
),
jnp.array([0.483286]),
rtol=1e-4,
)
|
Given that the acceptance rate is below optimal,
the scale gets reduced.
|
test_scale_when_aceptance_below_optimal
|
python
|
blackjax-devs/blackjax
|
tests/smc/test_inner_kernel_tuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/smc/test_inner_kernel_tuning.py
|
Apache-2.0
|
def test_scale_when_aceptance_above_optimal(self):
"""
Given that the acceptance rate is above optimal,
the scale increases.
"""
np.testing.assert_allclose(
update_scale_from_acceptance_rate(
scales=jnp.array([0.5]), acceptance_rates=jnp.array([0.3])
),
jnp.array([0.534113]),
rtol=1e-4,
)
|
Given that the acceptance rate is above optimal,
the scale increases.
|
test_scale_when_aceptance_above_optimal
|
python
|
blackjax-devs/blackjax
|
tests/smc/test_inner_kernel_tuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/smc/test_inner_kernel_tuning.py
|
Apache-2.0
|
def test_scale_mean_smoothes(self):
"""
The end result depends on the mean acceptance rate,
smoothing the results
"""
np.testing.assert_allclose(
update_scale_from_acceptance_rate(
scales=jnp.array([0.5, 0.5]), acceptance_rates=jnp.array([0.3, 0.2])
),
jnp.array([0.521406, 0.495993]),
rtol=1e-4,
)
|
The end result depends on the mean acceptance rate,
smoothing the results
|
test_scale_mean_smoothes
|
python
|
blackjax-devs/blackjax
|
tests/smc/test_inner_kernel_tuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/smc/test_inner_kernel_tuning.py
|
Apache-2.0
|
def test_tuning_pretuning(self):
"""
Tests that we can apply tuning on some parameters
and pretuning on some others at the same time.
"""
(
init_particles,
logprior_fn,
loglikelihood_fn,
) = self.particles_prior_loglikelihood()
n_particles = 100
dimensions = 2
step_size_key, integration_steps_key = jax.random.split(self.key, 2)
# Set initial samples for integration steps and step sizes.
integration_steps_distribution = jnp.round(
jax.random.uniform(
integration_steps_key, (n_particles,), minval=1, maxval=50
)
).astype(int)
step_sizes_distribution = jax.random.uniform(
step_size_key, (n_particles,), minval=1e-1 / 2, maxval=1e-1 * 2
)
# Fixes inverse_mass_matrix and distribution for the other two parameters.
initial_parameters = dict(
inverse_mass_matrix=extend_params(jnp.eye(dimensions)),
step_size=step_sizes_distribution,
num_integration_steps=integration_steps_distribution,
)
pretune = build_pretune(
blackjax.hmc.init,
blackjax.hmc.build_kernel(),
alpha=2,
n_particles=n_particles,
sigma_parameters={
"step_size": jnp.array(0.1),
"num_integration_steps": jnp.array(2.0),
},
natural_parameters=["num_integration_steps"],
positive_parameters=["step_size"],
)
def pretuning_factory(
logprior_fn,
loglikelihood_fn,
mcmc_step_fn,
mcmc_init_fn,
mcmc_parameters,
resampling_fn,
num_mcmc_steps,
initial_parameter_value,
target_ess,
):
# we need to wrap the pretuning into a factory, which is what
# the inner_kernel_tuning expects
return blackjax.pretuning(
blackjax.adaptive_tempered_smc,
logprior_fn,
loglikelihood_fn,
mcmc_step_fn,
mcmc_init_fn,
resampling_fn,
num_mcmc_steps,
initial_parameter_value,
pretune,
target_ess=target_ess,
)
def mcmc_parameter_update_fn(key, state, info):
imm = inverse_mass_matrix_from_particles(state.sampler_state.particles)
return {"inverse_mass_matrix": extend_params(imm)}
step = blackjax.smc.inner_kernel_tuning.build_kernel(
pretuning_factory,
logprior_fn,
loglikelihood_fn,
blackjax.hmc.build_kernel(),
blackjax.hmc.init,
resampling.systematic,
mcmc_parameter_update_fn=mcmc_parameter_update_fn,
initial_parameter_value=initial_parameters,
num_mcmc_steps=10,
target_ess=0.5,
smc_returns_state_with_parameter_override=True,
)
def init(position):
return blackjax.smc.inner_kernel_tuning.init(
blackjax.adaptive_tempered_smc.init, position, initial_parameters
)
init_state = init(init_particles)
smc_kernel = self.variant(step)
_, state = adaptive_tempered_loop(smc_kernel, self.key, init_state)
self.assert_linear_regression_test_case(state.sampler_state)
|
Tests that we can apply tuning on some parameters
and pretuning on some others at the same time.
|
test_tuning_pretuning
|
python
|
blackjax-devs/blackjax
|
tests/smc/test_inner_kernel_tuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/smc/test_inner_kernel_tuning.py
|
Apache-2.0
|
def test_measure_of_chain_mixing_identity(self):
"""
Given the identity matrix and an acceptance probability of 1,
the mixing measure is the squared 2-norm of the jump.
"""
m = np.eye(2)
acceptance_probabilities = np.array([1.0, 1.0])
chain_mixing = esjd(m)(
self.previous_position, self.next_position, acceptance_probabilities
)
np.testing.assert_allclose(chain_mixing[0], 325)
np.testing.assert_allclose(chain_mixing[1], 100)
|
Given the identity matrix and an acceptance probability of 1,
the mixing measure is the squared 2-norm of the jump.
|
test_measure_of_chain_mixing_identity
|
python
|
blackjax-devs/blackjax
|
tests/smc/test_pretuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/smc/test_pretuning.py
|
Apache-2.0
|
def test_measure_of_chain_mixing_with_non_1_acceptance_rate(self):
"""
Given the identity matrix,
the mixing measure is the squared 2-norm of the jump multiplied by the acceptance rate.
"""
m = np.eye(2)
acceptance_probabilities = np.array([0.5, 0.2])
chain_mixing = esjd(m)(
self.previous_position, self.next_position, acceptance_probabilities
)
np.testing.assert_allclose(chain_mixing[0], 162.5)
np.testing.assert_allclose(chain_mixing[1], 20)
|
Given the identity matrix,
the mixing measure is the squared 2-norm of the jump multiplied by the acceptance rate.
|
test_measure_of_chain_mixing_with_non_1_acceptance_rate
|
python
|
blackjax-devs/blackjax
|
tests/smc/test_pretuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/smc/test_pretuning.py
|
Apache-2.0
|
def test_update_param_distribution(self):
"""
Given extremely good mixing on one chain
and an alpha parameter of 0, the parameters
of that chain are reused, up to a slight mutation due to noise.
"""
(
new_parameter_distribution,
chain_mixing_measurement,
) = update_parameter_distribution(
self.key,
jnp.array([1.0, 2.0, 3.0]),
self.previous_position,
self.next_position,
measure_of_chain_mixing=lambda x, y, z: jnp.array([1.0, 0.0, 0.0]),
alpha=0,
sigma_parameters=0.0001,
acceptance_probability=None,
)
np.testing.assert_allclose(
new_parameter_distribution,
np.array([1, 1, 1], dtype="float32"),
rtol=1e-3,
)
np.testing.assert_allclose(
chain_mixing_measurement,
np.array([1, 0, 0], dtype="float32"),
rtol=1e-6,
)
|
Given extremely good mixing on one chain
and an alpha parameter of 0, the parameters
of that chain are reused, up to a slight mutation due to noise.
|
test_update_param_distribution
|
python
|
blackjax-devs/blackjax
|
tests/smc/test_pretuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/smc/test_pretuning.py
|
Apache-2.0
|
def test_update_multi_sigmas(self):
"""
When we have multiple parameters, the performance is attached to their combination,
so sampling must work accordingly.
"""
(
new_parameter_distribution,
chain_mixing_measurement,
) = update_parameter_distribution(
self.key,
{
"param_a": jnp.array([1.0, 2.0, 3.0]),
"param_b": jnp.array([[5.0, 6.0], [6.0, 7.0], [4.0, 5.0]]),
},
self.previous_position,
self.next_position,
measure_of_chain_mixing=lambda x, y, z: jnp.array([1.0, 0.0, 0.0]),
alpha=0,
sigma_parameters={"param_a": 0.0001, "param_b": 0.00001},
acceptance_probability=None,
)
print(chain_mixing_measurement)
np.testing.assert_allclose(chain_mixing_measurement, np.array([1.0, 0, 0]))
np.testing.assert_allclose(
new_parameter_distribution["param_a"], jnp.array([1.0, 1.0, 1.0]), atol=0.1
)
np.testing.assert_allclose(
new_parameter_distribution["param_b"],
jnp.array([[5.0, 6.0], [5.0, 6.0], [5.0, 6.0]]),
atol=0.1,
)
|
When we have multiple parameters, the performance is attached to their combination,
so sampling must work accordingly.
|
test_update_multi_sigmas
|
python
|
blackjax-devs/blackjax
|
tests/smc/test_pretuning.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/smc/test_pretuning.py
|
Apache-2.0
|
def test_ess_solver_multivariate(self, target_ess):
"""
Posterior with more than one variable. Let's assume we want to
sample from P(x) x ~ N(mean, cov) x in R^{2}
"""
num_particles = 1000
mean = jnp.zeros((1, 2))
cov = jnp.diag(jnp.array([1, 1]))
_logdensity_fn = lambda pytree: multivariate_logpdf(pytree, mean=mean, cov=cov)
potential = jax.vmap(_logdensity_fn, in_axes=[0], out_axes=0)
particles = np.random.multivariate_normal(
mean=[0.0, 0.0], cov=[[1.0, 0.0], [0.0, 1.0]], size=num_particles
)
self.ess_solver_test_case(potential, particles, target_ess, num_particles, 10.0)
|
Posterior with more than one variable. Let's assume we want to
sample from P(x) x ~ N(mean, cov) x in R^{2}
|
test_ess_solver_multivariate
|
python
|
blackjax-devs/blackjax
|
tests/smc/test_smc_ess.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/smc/test_smc_ess.py
|
Apache-2.0
|
def test_ess_solver_posterior_signature(self, target_ess):
"""
Posterior with more than one variable. Let's assume we want to
sample from P(x,y) x ~ N(mean, cov) y ~ N(mean, cov)
"""
num_particles = 1000
mean = jnp.zeros((1, 2))
cov = jnp.diag(jnp.array([1, 1]))
def _logdensity_fn(pytree):
return multivariate_logpdf(
pytree[0], mean=mean, cov=cov
) + multivariate_logpdf(pytree[1], mean=mean, cov=cov)
potential = jax.vmap(_logdensity_fn, in_axes=[0], out_axes=0)
particles = [
np.random.multivariate_normal(
mean=[0.0, 0.0], cov=[[1.0, 0.0], [0.0, 1.0]], size=num_particles
),
np.random.multivariate_normal(
mean=[0.0, 0.0], cov=[[1.0, 0.0], [0.0, 1.0]], size=num_particles
),
]
self.ess_solver_test_case(potential, particles, target_ess, num_particles, 10.0)
|
Posterior with more than one variable. Let's assume we want to
sample from P(x,y) x ~ N(mean, cov) y ~ N(mean, cov)
|
test_ess_solver_posterior_signature
|
python
|
blackjax-devs/blackjax
|
tests/smc/test_smc_ess.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/smc/test_smc_ess.py
|
Apache-2.0
|
def normal_logdensity_fn(x, chol_cov):
"""minus log-density of a centered multivariate normal distribution"""
dim = chol_cov.shape[0]
y = jax.scipy.linalg.solve_triangular(chol_cov, x, lower=True)
normalizing_constant = (
np.sum(np.log(np.abs(np.diag(chol_cov)))) + dim * np.log(2 * np.pi) / 2.0
)
norm_y = jnp.sum(y * y, -1)
return -(0.5 * norm_y + normalizing_constant)
|
log-density of a centered multivariate normal distribution
|
normal_logdensity_fn
|
python
|
blackjax-devs/blackjax
|
tests/smc/test_tempered_smc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/tests/smc/test_tempered_smc.py
|
Apache-2.0
|
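As a quick sanity check of the helper above (assuming it is in scope together with its numpy and jax imports), its output can be compared against `jax.scipy.stats.multivariate_normal.logpdf`; the Cholesky factor and evaluation point below are illustrative choices.
import jax
import jax.numpy as jnp
import numpy as np
# Illustrative 3x3 Cholesky factor; the implied covariance is chol_cov @ chol_cov.T.
chol_cov = jnp.array([[1.0, 0.0, 0.0], [0.3, 1.2, 0.0], [-0.5, 0.1, 0.8]])
x = jnp.array([0.4, -1.0, 2.0])
expected = jax.scipy.stats.multivariate_normal.logpdf(x, jnp.zeros(3), chol_cov @ chol_cov.T)
np.testing.assert_allclose(normal_logdensity_fn(x, chol_cov), expected, rtol=1e-5)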