code (stringlengths 66–870k) | docstring (stringlengths 19–26.7k) | func_name (stringlengths 1–138) | language (stringclasses 1 value) | repo (stringlengths 7–68) | path (stringlengths 5–324) | url (stringlengths 46–389) | license (stringclasses 7 values)
---|---|---|---|---|---|---|---
def update(
adaptation_state: WindowAdaptationState,
adaptation_stage: tuple,
position: ArrayLikeTree,
acceptance_rate: float,
) -> WindowAdaptationState:
"""Update the adaptation state and parameter values.
Parameters
----------
adaptation_state
Current adaptation state.
adaptation_stage
The current stage of the warmup: whether this is a fast or a slow
window, and whether we are at the last step of a slow window.
position
Current value of the model parameters.
acceptance_rate
Value of the acceptance rate for the last MCMC step.
Returns
-------
The updated adaptation state.
"""
stage, is_middle_window_end = adaptation_stage
warmup_state = jax.lax.switch(
stage,
(fast_update, slow_update),
position,
acceptance_rate,
adaptation_state,
)
warmup_state = jax.lax.cond(
is_middle_window_end,
slow_final,
lambda x: x,
warmup_state,
)
return warmup_state
|
Update the adaptation state and parameter values.
Parameters
----------
adaptation_state
Current adaptation state.
adaptation_stage
The current stage of the warmup: whether this is a fast or a slow
window, and whether we are at the last step of a slow window.
position
Current value of the model parameters.
acceptance_rate
Value of the acceptance rate for the last MCMC step.
Returns
-------
The updated adaptation state.
|
update
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/window_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/window_adaptation.py
|
Apache-2.0
|
def final(warmup_state: WindowAdaptationState) -> tuple[float, Array]:
"""Return the final values for the step size and mass matrix."""
step_size = jnp.exp(warmup_state.ss_state.log_step_size_avg)
inverse_mass_matrix = warmup_state.imm_state.inverse_mass_matrix
return step_size, inverse_mass_matrix
|
Return the final values for the step size and mass matrix.
|
final
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/window_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/window_adaptation.py
|
Apache-2.0
|
def window_adaptation(
algorithm,
logdensity_fn: Callable,
is_mass_matrix_diagonal: bool = True,
initial_step_size: float = 1.0,
target_acceptance_rate: float = 0.80,
progress_bar: bool = False,
adaptation_info_fn: Callable = return_all_adapt_info,
integrator=mcmc.integrators.velocity_verlet,
**extra_parameters,
) -> AdaptationAlgorithm:
"""Adapt the value of the inverse mass matrix and step size parameters of
algorithms in the HMC fmaily. See Blackjax.hmc_family
Algorithms in the HMC family on a euclidean manifold depend on the value of
at least two parameters: the step size, related to the trajectory
integrator, and the mass matrix, linked to the euclidean metric.
Good tuning is very important, especially for algorithms like NUTS which can
be extremely inefficient with the wrong parameter values. This function
provides a general-purpose algorithm to tune the values of these parameters.
Originally based on Stan's window adaptation, the algorithm has evolved to
improve performance and quality.
Parameters
----------
algorithm
The algorithm whose parameters are being tuned.
logdensity_fn
The log probability density function from which we wish to
sample.
is_mass_matrix_diagonal
Whether we should adapt a diagonal mass matrix.
initial_step_size
The initial step size used in the algorithm.
target_acceptance_rate
The acceptance rate that we target during step size adaptation.
progress_bar
Whether we should display a progress bar.
adaptation_info_fn
Function to select the adaptation info returned. See return_all_adapt_info
and get_filter_adapt_info_fn in blackjax.adaptation.base. By default all
information is saved - this can result in excessive memory usage if the
information is unused.
**extra_parameters
The extra parameters to pass to the algorithm, e.g. the number of
integration steps for HMC.
Returns
-------
A function that runs the adaptation and returns an `AdaptationResult` object.
"""
mcmc_kernel = algorithm.build_kernel(integrator)
adapt_init, adapt_step, adapt_final = base(
is_mass_matrix_diagonal,
target_acceptance_rate=target_acceptance_rate,
)
def one_step(carry, xs):
_, rng_key, adaptation_stage = xs
state, adaptation_state = carry
new_state, info = mcmc_kernel(
rng_key,
state,
logdensity_fn,
adaptation_state.step_size,
adaptation_state.inverse_mass_matrix,
**extra_parameters,
)
new_adaptation_state = adapt_step(
adaptation_state,
adaptation_stage,
new_state.position,
info.acceptance_rate,
)
return (
(new_state, new_adaptation_state),
adaptation_info_fn(new_state, info, new_adaptation_state),
)
def run(rng_key: PRNGKey, position: ArrayLikeTree, num_steps: int = 1000):
init_state = algorithm.init(position, logdensity_fn)
init_adaptation_state = adapt_init(position, initial_step_size)
if progress_bar:
print("Running window adaptation")
scan_fn = gen_scan_fn(num_steps, progress_bar=progress_bar)
start_state = (init_state, init_adaptation_state)
keys = jax.random.split(rng_key, num_steps)
schedule = build_schedule(num_steps)
last_state, info = scan_fn(
one_step,
start_state,
(jnp.arange(num_steps), keys, schedule),
)
last_chain_state, last_warmup_state, *_ = last_state
step_size, inverse_mass_matrix = adapt_final(last_warmup_state)
parameters = {
"step_size": step_size,
"inverse_mass_matrix": inverse_mass_matrix,
**extra_parameters,
}
return (
AdaptationResults(
last_chain_state,
parameters,
),
info,
)
return AdaptationAlgorithm(run)
|
Adapt the value of the inverse mass matrix and step size parameters of
algorithms in the HMC family. See Blackjax.hmc_family.
Algorithms in the HMC family on a Euclidean manifold depend on the value of
at least two parameters: the step size, related to the trajectory
integrator, and the mass matrix, linked to the Euclidean metric.
Good tuning is very important, especially for algorithms like NUTS which can
be extremely inefficient with the wrong parameter values. This function
provides a general-purpose algorithm to tune the values of these parameters.
Originally based on Stan's window adaptation, the algorithm has evolved to
improve performance and quality.
Parameters
----------
algorithm
The algorithm whose parameters are being tuned.
logdensity_fn
The log probability density function from which we wish to
sample.
is_mass_matrix_diagonal
Whether we should adapt a diagonal mass matrix.
initial_step_size
The initial step size used in the algorithm.
target_acceptance_rate
The acceptance rate that we target during step size adaptation.
progress_bar
Whether we should display a progress bar.
adaptation_info_fn
Function to select the adaptation info returned. See return_all_adapt_info
and get_filter_adapt_info_fn in blackjax.adaptation.base. By default all
information is saved - this can result in excessive memory usage if the
information is unused.
**extra_parameters
The extra parameters to pass to the algorithm, e.g. the number of
integration steps for HMC.
Returns
-------
A function that runs the adaptation and returns an `AdaptationResult` object.
|
window_adaptation
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/window_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/window_adaptation.py
|
Apache-2.0
|
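For context, here is a minimal sketch of how `window_adaptation` is typically driven, following the `run` signature above; the standard-normal target and the choice of NUTS are illustrative assumptions, not part of this row:
```python
import jax
import jax.numpy as jnp
import blackjax

# Toy target: a two-dimensional standard normal (illustrative assumption).
logdensity_fn = lambda x: -0.5 * jnp.sum(jnp.square(x))

warmup = blackjax.window_adaptation(blackjax.nuts, logdensity_fn)
# `run` returns AdaptationResults(state, parameters) along with the adaptation info.
(state, parameters), info = warmup.run(jax.random.PRNGKey(0), jnp.zeros(2), num_steps=1000)

# The tuned step size and inverse mass matrix are passed back to the sampler.
nuts = blackjax.nuts(logdensity_fn, **parameters)
new_state, sample_info = jax.jit(nuts.step)(jax.random.PRNGKey(1), state)
```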
def build_schedule(
num_steps: int,
initial_buffer_size: int = 75,
final_buffer_size: int = 50,
first_window_size: int = 25,
) -> list[tuple[int, bool]]:
"""Return the schedule for Stan's warmup.
The schedule below is intended to be as close as possible to Stan's :cite:p:`stan_hmc_param`.
The warmup period is split into three stages:
1. An initial fast interval to reach the typical set. Only the step size is
adapted in this window.
2. "Slow" parameters that require global information (typically covariance)
are estimated in a series of expanding intervals with no memory; the step
size is re-initialized at the end of each window. Each window is twice the
size of the preceding window.
3. A final fast interval during which the step size is adapted using the
computed mass matrix.
Schematically:
```
+---------+---+------+------------+------------------------+------+
| fast | s | slow | slow | slow | fast |
+---------+---+------+------------+------------------------+------+
```
The distinction slow/fast comes from the speed at which the algorithms
converge to a stable value; in the common case, estimation of covariance
requires more steps than dual averaging to give an accurate value. See :cite:p:`stan_hmc_param`
for a more detailed explanation.
Fast intervals are given the label 0 and slow intervals the label 1.
Parameters
----------
num_steps: int
The number of warmup steps to perform.
initial_buffer_size: int
The width of the initial fast adaptation interval.
first_window_size: int
The width of the first slow adaptation interval.
final_buffer_size: int
The width of the final fast adaptation interval.
Returns
-------
A list of tuples (window_label, is_middle_window_end).
"""
schedule = []
# Give up on mass matrix adaptation when the number of warmup steps is too small.
if num_steps < 20:
schedule += [(0, False)] * num_steps
else:
# When the number of warmup steps is smaller than the sum of the provided (or default)
# window sizes we need to resize the different windows.
if initial_buffer_size + first_window_size + final_buffer_size > num_steps:
initial_buffer_size = int(0.15 * num_steps)
final_buffer_size = int(0.1 * num_steps)
first_window_size = num_steps - initial_buffer_size - final_buffer_size
# First stage: adaptation of fast parameters
schedule += [(0, False)] * (initial_buffer_size - 1)
schedule.append((0, False))
# Second stage: adaptation of slow parameters in successive windows
# doubling in size.
final_buffer_start = num_steps - final_buffer_size
next_window_size = first_window_size
next_window_start = initial_buffer_size
while next_window_start < final_buffer_start:
current_start, current_size = next_window_start, next_window_size
if 3 * current_size <= final_buffer_start - current_start:
next_window_size = 2 * current_size
else:
current_size = final_buffer_start - current_start
next_window_start = current_start + current_size
schedule += [(1, False)] * (next_window_start - 1 - current_start)
schedule.append((1, True))
# Last stage: adaptation of fast parameters
schedule += [(0, False)] * (num_steps - 1 - final_buffer_start)
schedule.append((0, False))
schedule = jnp.array(schedule)
return schedule
|
Return the schedule for Stan's warmup.
The schedule below is intended to be as close as possible to Stan's :cite:p:`stan_hmc_param`.
The warmup period is split into three stages:
1. An initial fast interval to reach the typical set. Only the step size is
adapted in this window.
2. "Slow" parameters that require global information (typically covariance)
are estimated in a series of expanding intervals with no memory; the step
size is re-initialized at the end of each window. Each window is twice the
size of the preceding window.
3. A final fast interval during which the step size is adapted using the
computed mass matrix.
Schematically:
```
+---------+---+------+------------+------------------------+------+
| fast | s | slow | slow | slow | fast |
+---------+---+------+------------+------------------------+------+
```
The distinction slow/fast comes from the speed at which the algorithms
converge to a stable value; in the common case, estimation of covariance
requires more steps than dual averaging to give an accurate value. See :cite:p:`stan_hmc_param`
for a more detailed explanation.
Fast intervals are given the label 0 and slow intervals the label 1.
Parameters
----------
num_steps: int
The number of warmup steps to perform.
initial_buffer_size: int
The width of the initial fast adaptation interval.
first_window_size: int
The width of the first slow adaptation interval.
final_buffer_size: int
The width of the final fast adaptation interval.
Returns
-------
A list of tuples (window_label, is_middle_window_end).
|
build_schedule
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/window_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/window_adaptation.py
|
Apache-2.0
|
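As a quick sanity check of the scheduling logic, the sketch below tallies the default windows for 1000 warmup steps, assuming `build_schedule` is imported from the module in this row:
```python
import jax.numpy as jnp
from blackjax.adaptation.window_adaptation import build_schedule

schedule = build_schedule(1000)           # (1000, 2) array of (window_label, is_middle_window_end)
labels, window_ends = schedule[:, 0], schedule[:, 1]

print(int(jnp.sum(labels == 0)))          # 125 fast steps: 75 initial + 50 final buffer
print(int(jnp.sum(labels == 1)))          # 875 slow steps
print(int(jnp.sum(window_ends)))          # 5 slow windows, of sizes 25, 50, 100, 200 and 500
```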
def build_kernel(
logdensity_fn: Callable,
integrator: Callable = integrators.isokinetic_mclachlan,
divergence_threshold: float = 1000,
inverse_mass_matrix=1.0,
):
"""Build an MHMCHMC kernel where the number of integration steps is chosen randomly.
Parameters
----------
integrator
The integrator to use to integrate the Hamiltonian dynamics.
divergence_threshold
Value of the difference in energy above which we consider that the transition is divergent.
next_random_arg_fn
Function that generates the next `random_generator_arg` from its previous value.
integration_steps_fn
Function that generates the next pseudo or quasi-random number of integration steps in the
sequence, given the current `random_generator_arg`. Needs to return an `int`.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
def kernel(
rng_key: PRNGKey,
state: HMCState,
step_size: float,
num_integration_steps: int,
L_proposal_factor: float = jnp.inf,
) -> tuple[HMCState, HMCInfo]:
"""Generate a new sample with the MHMCHMC kernel."""
key_momentum, key_integrator = jax.random.split(rng_key, 2)
momentum = generate_unit_vector(key_momentum, state.position)
proposal, info, _ = adjusted_mclmc_proposal(
integrator=integrators.with_isokinetic_maruyama(
integrator(
logdensity_fn=logdensity_fn, inverse_mass_matrix=inverse_mass_matrix
)
),
step_size=step_size,
L_proposal_factor=L_proposal_factor * (num_integration_steps * step_size),
num_integration_steps=num_integration_steps,
divergence_threshold=divergence_threshold,
)(
key_integrator,
integrators.IntegratorState(
state.position, momentum, state.logdensity, state.logdensity_grad
),
)
return (
HMCState(
proposal.position,
proposal.logdensity,
proposal.logdensity_grad,
),
info,
)
return kernel
|
Build an MHMCHMC kernel with a fixed number of integration steps.
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
integrator
The integrator to use to integrate the Hamiltonian dynamics.
divergence_threshold
Value of the difference in energy above which we consider that the transition is divergent.
inverse_mass_matrix
The inverse mass matrix to use for pre-conditioning.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/adjusted_mclmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/adjusted_mclmc.py
|
Apache-2.0
|
def kernel(
rng_key: PRNGKey,
state: HMCState,
step_size: float,
num_integration_steps: int,
L_proposal_factor: float = jnp.inf,
) -> tuple[HMCState, HMCInfo]:
"""Generate a new sample with the MHMCHMC kernel."""
key_momentum, key_integrator = jax.random.split(rng_key, 2)
momentum = generate_unit_vector(key_momentum, state.position)
proposal, info, _ = adjusted_mclmc_proposal(
integrator=integrators.with_isokinetic_maruyama(
integrator(
logdensity_fn=logdensity_fn, inverse_mass_matrix=inverse_mass_matrix
)
),
step_size=step_size,
L_proposal_factor=L_proposal_factor * (num_integration_steps * step_size),
num_integration_steps=num_integration_steps,
divergence_threshold=divergence_threshold,
)(
key_integrator,
integrators.IntegratorState(
state.position, momentum, state.logdensity, state.logdensity_grad
),
)
return (
HMCState(
proposal.position,
proposal.logdensity,
proposal.logdensity_grad,
),
info,
)
|
Generate a new sample with the MHMCHMC kernel.
|
kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/adjusted_mclmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/adjusted_mclmc.py
|
Apache-2.0
|
def as_top_level_api(
logdensity_fn: Callable,
step_size: float,
L_proposal_factor: float = jnp.inf,
inverse_mass_matrix=1.0,
*,
divergence_threshold: int = 1000,
integrator: Callable = integrators.isokinetic_mclachlan,
num_integration_steps,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the MHMCHMC kernel.
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
integrator
(algorithm parameter) The symplectic integrator to use to integrate the trajectory.
num_integration_steps
The number of integration steps to run at each step of the chain.
inverse_mass_matrix
The inverse mass matrix to use for pre-conditioning.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(
logdensity_fn=logdensity_fn,
integrator=integrator,
inverse_mass_matrix=inverse_mass_matrix,
divergence_threshold=divergence_threshold,
)
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position, logdensity_fn)
def update_fn(rng_key: PRNGKey, state):
return kernel(
rng_key=rng_key,
state=state,
step_size=step_size,
num_integration_steps=num_integration_steps,
L_proposal_factor=L_proposal_factor,
)
return SamplingAlgorithm(init_fn, update_fn) # type: ignore[arg-type]
|
Implements the (basic) user interface for the MHMCHMC kernel.
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
integrator
(algorithm parameter) The symplectic integrator to use to integrate the trajectory.
num_integration_steps
The number of integration steps to run at each step of the chain.
inverse_mass_matrix
The inverse mass matrix to use for pre-conditioning.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/adjusted_mclmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/adjusted_mclmc.py
|
Apache-2.0
|
def adjusted_mclmc_proposal(
integrator: Callable,
step_size: Union[float, ArrayLikeTree],
L_proposal_factor: float,
num_integration_steps: int = 1,
divergence_threshold: float = 1000,
*,
sample_proposal: Callable = static_binomial_sampling,
) -> Callable:
"""Vanilla MHMCHMC algorithm.
The algorithm integrates the trajectory, applying an integrator
`num_integration_steps` times in one direction to get a proposal and uses a
Metropolis-Hastings acceptance step to either reject or accept this
proposal. This is what people usually refer to when they talk about "the
HMC algorithm".
Parameters
----------
integrator
integrator used to build the trajectory step by step.
step_size
Size of the integration step.
num_integration_steps
Number of times we run the integrator to build the trajectory.
divergence_threshold
Threshold above which we say that there is a divergence.
Returns
-------
A kernel that generates a new chain state and information about the transition.
"""
def step(i, vars):
state, kinetic_energy, rng_key = vars
rng_key, next_rng_key = jax.random.split(rng_key)
next_state, next_kinetic_energy = integrator(
state, step_size, L_proposal_factor, rng_key
)
return next_state, kinetic_energy + next_kinetic_energy, next_rng_key
def build_trajectory(state, num_integration_steps, rng_key):
return jax.lax.fori_loop(
0 * num_integration_steps, num_integration_steps, step, (state, 0, rng_key)
)
def generate(
rng_key, state: integrators.IntegratorState
) -> tuple[integrators.IntegratorState, HMCInfo, ArrayTree]:
"""Generate a new chain state."""
end_state, kinetic_energy, rng_key = build_trajectory(
state, num_integration_steps, rng_key
)
new_energy = -end_state.logdensity
delta_energy = -state.logdensity + end_state.logdensity - kinetic_energy
delta_energy = jnp.where(jnp.isnan(delta_energy), -jnp.inf, delta_energy)
is_diverging = -delta_energy > divergence_threshold
sampled_state, info = sample_proposal(rng_key, delta_energy, state, end_state)
do_accept, p_accept, other_proposal_info = info
info = HMCInfo(
state.momentum,
p_accept,
do_accept,
is_diverging,
new_energy,
end_state,
num_integration_steps,
)
return sampled_state, info, other_proposal_info
return generate
|
Vanilla MHMCHMC algorithm.
The algorithm integrates the trajectory, applying an integrator
`num_integration_steps` times in one direction to get a proposal and uses a
Metropolis-Hastings acceptance step to either reject or accept this
proposal. This is what people usually refer to when they talk about "the
HMC algorithm".
Parameters
----------
integrator
integrator used to build the trajectory step by step.
step_size
Size of the integration step.
num_integration_steps
Number of times we run the integrator to build the trajectory.
divergence_threshold
Threshold above which we say that there is a divergence.
Returns
-------
A kernel that generates a new chain state and information about the transition.
|
adjusted_mclmc_proposal
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/adjusted_mclmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/adjusted_mclmc.py
|
Apache-2.0
|
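The acceptance step in `generate` is delegated to `sample_proposal` (`static_binomial_sampling` by default). A hypothetical, simplified stand-in that treats the `delta_energy` computed above as the log acceptance ratio could look like this; the function name and return layout are assumptions for illustration:
```python
import jax
import jax.numpy as jnp

def metropolis_accept(rng_key, delta_energy, current_state, proposed_state):
    # Accept the proposal with probability min(1, exp(delta_energy)).
    p_accept = jnp.minimum(jnp.exp(delta_energy), 1.0)
    do_accept = jax.random.bernoulli(rng_key, p_accept)
    sampled = jax.lax.cond(do_accept, lambda: proposed_state, lambda: current_state)
    return sampled, (do_accept, p_accept)
```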
def build_kernel(
integration_steps_fn,
integrator: Callable = integrators.isokinetic_mclachlan,
divergence_threshold: float = 1000,
next_random_arg_fn: Callable = lambda key: jax.random.split(key)[1],
inverse_mass_matrix=1.0,
):
"""Build a Dynamic MHMCHMC kernel where the number of integration steps is chosen randomly.
Parameters
----------
integrator
The integrator to use to integrate the Hamiltonian dynamics.
divergence_threshold
Value of the difference in energy above which we consider that the transition is divergent.
next_random_arg_fn
Function that generates the next `random_generator_arg` from its previous value.
integration_steps_fn
Function that generates the next pseudo or quasi-random number of integration steps in the
sequence, given the current `random_generator_arg`. Needs to return an `int`.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
def kernel(
rng_key: PRNGKey,
state: DynamicHMCState,
logdensity_fn: Callable,
step_size: float,
L_proposal_factor: float = jnp.inf,
) -> tuple[DynamicHMCState, HMCInfo]:
"""Generate a new sample with the MHMCHMC kernel."""
num_integration_steps = integration_steps_fn(state.random_generator_arg)
key_momentum, key_integrator = jax.random.split(rng_key, 2)
momentum = generate_unit_vector(key_momentum, state.position)
proposal, info, _ = adjusted_mclmc_proposal(
integrator=integrators.with_isokinetic_maruyama(
integrator(
logdensity_fn=logdensity_fn, inverse_mass_matrix=inverse_mass_matrix
)
),
step_size=step_size,
L_proposal_factor=L_proposal_factor * (num_integration_steps * step_size),
num_integration_steps=num_integration_steps,
divergence_threshold=divergence_threshold,
)(
key_integrator,
integrators.IntegratorState(
state.position, momentum, state.logdensity, state.logdensity_grad
),
)
return (
DynamicHMCState(
proposal.position,
proposal.logdensity,
proposal.logdensity_grad,
next_random_arg_fn(state.random_generator_arg),
),
info,
)
return kernel
|
Build a Dynamic MHMCHMC kernel where the number of integration steps is chosen randomly.
Parameters
----------
integrator
The integrator to use to integrate the Hamiltonian dynamics.
divergence_threshold
Value of the difference in energy above which we consider that the transition is divergent.
next_random_arg_fn
Function that generates the next `random_generator_arg` from its previous value.
integration_steps_fn
Function that generates the next pseudo or quasi-random number of integration steps in the
sequence, given the current `random_generator_arg`. Needs to return an `int`.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/adjusted_mclmc_dynamic.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/adjusted_mclmc_dynamic.py
|
Apache-2.0
|
def kernel(
rng_key: PRNGKey,
state: DynamicHMCState,
logdensity_fn: Callable,
step_size: float,
L_proposal_factor: float = jnp.inf,
) -> tuple[DynamicHMCState, HMCInfo]:
"""Generate a new sample with the MHMCHMC kernel."""
num_integration_steps = integration_steps_fn(state.random_generator_arg)
key_momentum, key_integrator = jax.random.split(rng_key, 2)
momentum = generate_unit_vector(key_momentum, state.position)
proposal, info, _ = adjusted_mclmc_proposal(
integrator=integrators.with_isokinetic_maruyama(
integrator(
logdensity_fn=logdensity_fn, inverse_mass_matrix=inverse_mass_matrix
)
),
step_size=step_size,
L_proposal_factor=L_proposal_factor * (num_integration_steps * step_size),
num_integration_steps=num_integration_steps,
divergence_threshold=divergence_threshold,
)(
key_integrator,
integrators.IntegratorState(
state.position, momentum, state.logdensity, state.logdensity_grad
),
)
return (
DynamicHMCState(
proposal.position,
proposal.logdensity,
proposal.logdensity_grad,
next_random_arg_fn(state.random_generator_arg),
),
info,
)
|
Generate a new sample with the MHMCHMC kernel.
|
kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/adjusted_mclmc_dynamic.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/adjusted_mclmc_dynamic.py
|
Apache-2.0
|
def as_top_level_api(
logdensity_fn: Callable,
step_size: float,
L_proposal_factor: float = jnp.inf,
inverse_mass_matrix=1.0,
*,
divergence_threshold: int = 1000,
integrator: Callable = integrators.isokinetic_mclachlan,
next_random_arg_fn: Callable = lambda key: jax.random.split(key)[1],
integration_steps_fn: Callable = lambda key: jax.random.randint(key, (), 1, 10),
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the dynamic MHMCHMC kernel.
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
integrator
(algorithm parameter) The symplectic integrator to use to integrate the trajectory.
next_random_arg_fn
Function that generates the next `random_generator_arg` from its previous value.
integration_steps_fn
Function that generates the next pseudo or quasi-random number of integration steps in the
sequence, given the current `random_generator_arg`.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(
integration_steps_fn=integration_steps_fn,
integrator=integrator,
next_random_arg_fn=next_random_arg_fn,
inverse_mass_matrix=inverse_mass_matrix,
divergence_threshold=divergence_threshold,
)
def init_fn(position: ArrayLikeTree, rng_key: Array):
return init(position, logdensity_fn, rng_key)
def update_fn(rng_key: PRNGKey, state):
return kernel(
rng_key,
state,
logdensity_fn,
step_size,
L_proposal_factor,
)
return SamplingAlgorithm(init_fn, update_fn) # type: ignore[arg-type]
|
Implements the (basic) user interface for the dynamic MHMCHMC kernel.
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
integrator
(algorithm parameter) The symplectic integrator to use to integrate the trajectory.
next_random_arg_fn
Function that generates the next `random_generator_arg` from its previous value.
integration_steps_fn
Function that generates the next pseudo or quasi-random number of integration steps in the
sequence, given the current `random_generator_arg`.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/adjusted_mclmc_dynamic.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/adjusted_mclmc_dynamic.py
|
Apache-2.0
|
def adjusted_mclmc_proposal(
integrator: Callable,
step_size: Union[float, ArrayLikeTree],
L_proposal_factor: float,
num_integration_steps: int = 1,
divergence_threshold: float = 1000,
*,
sample_proposal: Callable = static_binomial_sampling,
) -> Callable:
"""Vanilla MHMCHMC algorithm.
The algorithm integrates the trajectory, applying an integrator
`num_integration_steps` times in one direction to get a proposal and uses a
Metropolis-Hastings acceptance step to either reject or accept this
proposal. This is what people usually refer to when they talk about "the
HMC algorithm".
Parameters
----------
integrator
integrator used to build the trajectory step by step.
step_size
Size of the integration step.
num_integration_steps
Number of times we run the integrator to build the trajectory.
divergence_threshold
Threshold above which we say that there is a divergence.
Returns
-------
A kernel that generates a new chain state and information about the transition.
"""
def step(i, vars):
state, kinetic_energy, rng_key = vars
rng_key, next_rng_key = jax.random.split(rng_key)
next_state, next_kinetic_energy = integrator(
state, step_size, L_proposal_factor, rng_key
)
return next_state, kinetic_energy + next_kinetic_energy, next_rng_key
def build_trajectory(state, num_integration_steps, rng_key):
return jax.lax.fori_loop(
0 * num_integration_steps, num_integration_steps, step, (state, 0, rng_key)
)
def generate(
rng_key, state: integrators.IntegratorState
) -> tuple[integrators.IntegratorState, HMCInfo, ArrayTree]:
"""Generate a new chain state."""
end_state, kinetic_energy, rng_key = build_trajectory(
state, num_integration_steps, rng_key
)
new_energy = -end_state.logdensity
delta_energy = -state.logdensity + end_state.logdensity - kinetic_energy
delta_energy = jnp.where(jnp.isnan(delta_energy), -jnp.inf, delta_energy)
is_diverging = -delta_energy > divergence_threshold
sampled_state, info = sample_proposal(rng_key, delta_energy, state, end_state)
do_accept, p_accept, other_proposal_info = info
info = HMCInfo(
state.momentum,
p_accept,
do_accept,
is_diverging,
new_energy,
end_state,
num_integration_steps,
)
return sampled_state, info, other_proposal_info
return generate
|
Vanilla MHMCHMC algorithm.
The algorithm integrates the trajectory, applying an integrator
`num_integration_steps` times in one direction to get a proposal and uses a
Metropolis-Hastings acceptance step to either reject or accept this
proposal. This is what people usually refer to when they talk about "the
HMC algorithm".
Parameters
----------
integrator
integrator used to build the trajectory step by step.
step_size
Size of the integration step.
num_integration_steps
Number of times we run the integrator to build the trajectory.
divergence_threshold
Threshold above which we say that there is a divergence.
Returns
-------
A kernel that generates a new chain state and information about the transition.
|
adjusted_mclmc_proposal
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/adjusted_mclmc_dynamic.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/adjusted_mclmc_dynamic.py
|
Apache-2.0
|
def rescale(mu):
"""returns s, such that
round(U(0, 1) * s + 0.5)
has expected value mu.
"""
k = jnp.floor(2 * mu - 1)
x = k * (mu - 0.5 * (k + 1)) / (k + 1 - mu)
return k + x
|
Return s such that
round(U(0, 1) * s + 0.5)
has expected value mu.
|
rescale
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/adjusted_mclmc_dynamic.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/adjusted_mclmc_dynamic.py
|
Apache-2.0
|
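To see why `rescale` works: write s = k + x with k an integer and 0 ≤ x < 1. Then round(U(0, 1) * s + 0.5) takes each value 1, …, k with probability 1/s and the value k + 1 with probability x/s, and solving for the mean to equal mu yields the expression in the code. A Monte Carlo sanity check (mu = 7.3 is an arbitrary assumption):
```python
import jax
import jax.numpy as jnp
from blackjax.mcmc.adjusted_mclmc_dynamic import rescale

mu = 7.3
s = rescale(mu)
u = jax.random.uniform(jax.random.PRNGKey(0), (100_000,))
steps = jnp.rint(u * s + 0.5)   # integer numbers of steps with mean ~mu
print(steps.mean())             # approximately 7.3
```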
def build_kernel():
"""Build a Barker's proposal kernel.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
def _compute_acceptance_probability(
state: BarkerState, proposal: BarkerState, metric: Metric
) -> Numeric:
"""Compute the acceptance probability of the Barker's proposal kernel."""
x = state.position
y = proposal.position
log_x = state.logdensity_grad
log_y = proposal.logdensity_grad
y_minus_x = jax.tree_util.tree_map(lambda a, b: a - b, y, x)
x_minus_y = jax.tree_util.tree_map(lambda a: -a, y_minus_x)
z_tilde_x_to_y = metric.scale(x, y_minus_x, inv=True, trans=True)
z_tilde_y_to_x = metric.scale(y, x_minus_y, inv=True, trans=True)
c_x_to_y = metric.scale(x, log_x, inv=False, trans=True)
c_y_to_x = metric.scale(y, log_y, inv=False, trans=True)
z_tilde_x_to_y_flat, _ = ravel_pytree(z_tilde_x_to_y)
z_tilde_y_to_x_flat, _ = ravel_pytree(z_tilde_y_to_x)
c_x_to_y_flat, _ = ravel_pytree(c_x_to_y)
c_y_to_x_flat, _ = ravel_pytree(c_y_to_x)
num = metric.kinetic_energy(x_minus_y, y) - _log1pexp(
-z_tilde_y_to_x_flat * c_y_to_x_flat
)
denom = metric.kinetic_energy(y_minus_x, x) - _log1pexp(
-z_tilde_x_to_y_flat * c_x_to_y_flat
)
ratio_proposal = jnp.sum(num - denom)
return proposal.logdensity - state.logdensity + ratio_proposal
def kernel(
rng_key: PRNGKey,
state: BarkerState,
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: metrics.MetricTypes | None = None,
) -> tuple[BarkerState, BarkerInfo]:
"""Generate a new sample with the Barker kernel."""
if inverse_mass_matrix is None:
p, _ = ravel_pytree(state.position)
(m,) = p.shape
inverse_mass_matrix = jnp.ones((m,))
metric = metrics.default_metric(inverse_mass_matrix)
grad_fn = jax.value_and_grad(logdensity_fn)
key_sample, key_rmh = jax.random.split(rng_key)
proposed_pos = _barker_sample(
key_sample,
state.position,
state.logdensity_grad,
step_size,
metric,
)
proposed_logdensity, proposed_logdensity_grad = grad_fn(proposed_pos)
proposed_state = BarkerState(
proposed_pos, proposed_logdensity, proposed_logdensity_grad
)
log_p_accept = _compute_acceptance_probability(state, proposed_state, metric)
accepted_state, info = static_binomial_sampling(
key_rmh, log_p_accept, state, proposed_state
)
do_accept, p_accept, _ = info
return accepted_state, BarkerInfo(p_accept, do_accept, proposed_state)
return kernel
|
Build a Barker's proposal kernel.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/barker.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/barker.py
|
Apache-2.0
|
def _compute_acceptance_probability(
state: BarkerState, proposal: BarkerState, metric: Metric
) -> Numeric:
"""Compute the acceptance probability of the Barker's proposal kernel."""
x = state.position
y = proposal.position
log_x = state.logdensity_grad
log_y = proposal.logdensity_grad
y_minus_x = jax.tree_util.tree_map(lambda a, b: a - b, y, x)
x_minus_y = jax.tree_util.tree_map(lambda a: -a, y_minus_x)
z_tilde_x_to_y = metric.scale(x, y_minus_x, inv=True, trans=True)
z_tilde_y_to_x = metric.scale(y, x_minus_y, inv=True, trans=True)
c_x_to_y = metric.scale(x, log_x, inv=False, trans=True)
c_y_to_x = metric.scale(y, log_y, inv=False, trans=True)
z_tilde_x_to_y_flat, _ = ravel_pytree(z_tilde_x_to_y)
z_tilde_y_to_x_flat, _ = ravel_pytree(z_tilde_y_to_x)
c_x_to_y_flat, _ = ravel_pytree(c_x_to_y)
c_y_to_x_flat, _ = ravel_pytree(c_y_to_x)
num = metric.kinetic_energy(x_minus_y, y) - _log1pexp(
-z_tilde_y_to_x_flat * c_y_to_x_flat
)
denom = metric.kinetic_energy(y_minus_x, x) - _log1pexp(
-z_tilde_x_to_y_flat * c_x_to_y_flat
)
ratio_proposal = jnp.sum(num - denom)
return proposal.logdensity - state.logdensity + ratio_proposal
|
Compute the acceptance probability of the Barker's proposal kernel.
|
_compute_acceptance_probability
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/barker.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/barker.py
|
Apache-2.0
|
def kernel(
rng_key: PRNGKey,
state: BarkerState,
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: metrics.MetricTypes | None = None,
) -> tuple[BarkerState, BarkerInfo]:
"""Generate a new sample with the Barker kernel."""
if inverse_mass_matrix is None:
p, _ = ravel_pytree(state.position)
(m,) = p.shape
inverse_mass_matrix = jnp.ones((m,))
metric = metrics.default_metric(inverse_mass_matrix)
grad_fn = jax.value_and_grad(logdensity_fn)
key_sample, key_rmh = jax.random.split(rng_key)
proposed_pos = _barker_sample(
key_sample,
state.position,
state.logdensity_grad,
step_size,
metric,
)
proposed_logdensity, proposed_logdensity_grad = grad_fn(proposed_pos)
proposed_state = BarkerState(
proposed_pos, proposed_logdensity, proposed_logdensity_grad
)
log_p_accept = _compute_acceptance_probability(state, proposed_state, metric)
accepted_state, info = static_binomial_sampling(
key_rmh, log_p_accept, state, proposed_state
)
do_accept, p_accept, _ = info
return accepted_state, BarkerInfo(p_accept, do_accept, proposed_state)
|
Generate a new sample with the Barker kernel.
|
kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/barker.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/barker.py
|
Apache-2.0
|
def as_top_level_api(
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: metrics.MetricTypes | None = None,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the Barker's proposal :cite:p:`Livingstone2022Barker` kernel with a
Gaussian base kernel.
The general Barker kernel builder (:meth:`blackjax.mcmc.barker.build_kernel`, alias `blackjax.barker.build_kernel`) can be
cumbersome to manipulate. Since most users only need to specify the kernel
parameters at initialization time, we provide a helper function that
specializes the general kernel.
We also add the general kernel and state generator as an attribute to this class so
users only need to pass `blackjax.barker` to SMC, adaptation, etc. algorithms.
Examples
--------
A new Barker kernel can be initialized and used with the following code:
.. code::
barker = blackjax.barker(logdensity_fn, step_size)
state = barker.init(position)
new_state, info = barker.step(rng_key, state)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(barker.step)
new_state, info = step(rng_key, state)
Should you need to you can always use the base kernel directly:
.. code::
kernel = blackjax.barker.build_kernel(logdensity_fn)
state = blackjax.barker.init(position, logdensity_fn)
state, info = kernel(rng_key, state, logdensity_fn, step_size)
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value of the step_size corresponding to the global scale of the proposal distribution.
inverse_mass_matrix
The inverse mass matrix to use for pre-conditioning (see Appendix G of :cite:p:`Livingstone2022Barker`).
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel()
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position, logdensity_fn)
def step_fn(rng_key: PRNGKey, state):
return kernel(rng_key, state, logdensity_fn, step_size, inverse_mass_matrix)
return SamplingAlgorithm(init_fn, step_fn)
|
Implements the (basic) user interface for the Barker's proposal :cite:p:`Livingstone2022Barker` kernel with a
Gaussian base kernel.
The general Barker kernel builder (:meth:`blackjax.mcmc.barker.build_kernel`, alias `blackjax.barker.build_kernel`) can be
cumbersome to manipulate. Since most users only need to specify the kernel
parameters at initialization time, we provide a helper function that
specializes the general kernel.
We also add the general kernel and state generator as an attribute to this class so
users only need to pass `blackjax.barker` to SMC, adaptation, etc. algorithms.
Examples
--------
A new Barker kernel can be initialized and used with the following code:
.. code::
barker = blackjax.barker(logdensity_fn, step_size)
state = barker.init(position)
new_state, info = barker.step(rng_key, state)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(barker.step)
new_state, info = step(rng_key, state)
Should you need to you can always use the base kernel directly:
.. code::
kernel = blackjax.barker.build_kernel(logdensity_fn)
state = blackjax.barker.init(position, logdensity_fn)
state, info = kernel(rng_key, state, logdensity_fn, step_size)
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value of the step_size corresponding to the global scale of the proposal distribution.
inverse_mass_matrix
The inverse mass matrix to use for pre-conditioning (see Appendix G of :cite:p:`Livingstone2022Barker`).
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/barker.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/barker.py
|
Apache-2.0
|
def _barker_sample(key, mean, a, scale, metric):
r"""
Sample from a multivariate Barker's proposal distribution for PyTrees.
Parameters
----------
key
A PRNG key.
mean
The mean of the normal distribution, a PyTree. This corresponds to :math:`\mu` in the equation above.
a
The parameter :math:`a` in the equation above, the same PyTree as `mean`. This is a skewness parameter.
scale
The global scale, a scalar. This corresponds to :math:`\sigma` in the equation above.
It encodes the step size of the proposal.
metric
A `metrics.MetricTypes` object encoding the mass matrix information.
"""
key1, key2 = jax.random.split(key)
z = generate_gaussian_noise(key1, mean, sigma=scale)
c = metric.scale(mean, a, inv=False, trans=True)
# Sample b=1 with probability p and 0 with probability 1 - p where
# p = 1 / (1 + exp(-a * (z - mean)))
log_p = jax.tree_util.tree_map(lambda x, y: -_log1pexp(-x * y), c, z)
p = jax.tree_util.tree_map(lambda x: jnp.exp(x), log_p)
b = _generate_bernoulli(key2, mean, p=p)
bz = jax.tree_util.tree_map(lambda x, y: x * y - (1 - x) * y, b, z)
return jax.tree_util.tree_map(
lambda a, b: a + b, mean, metric.scale(mean, bz, inv=False, trans=False)
)
|
Sample from a multivariate Barker's proposal distribution for PyTrees.
Parameters
----------
key
A PRNG key.
mean
The mean of the normal distribution, a PyTree. This corresponds to :math:`\mu` in the equation above.
a
The parameter :math:`a` in the equation above, the same PyTree as `mean`. This is a skewness parameter.
scale
The global scale, a scalar. This corresponds to :math:`\sigma` in the equation above.
It encodes the step size of the proposal.
metric
A `metrics.MetricTypes` object encoding the mass matrix information.
|
_barker_sample
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/barker.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/barker.py
|
Apache-2.0
|
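Stripped of the PyTree and metric machinery, the skewing mechanism in `_barker_sample` reduces to the following hypothetical one-dimensional sketch: draw a Gaussian step, then keep its sign with probability sigmoid(gradient × step):
```python
import jax
import jax.numpy as jnp

def barker_proposal_1d(rng_key, position, grad_logdensity, step_size):
    # Hypothetical 1-D sketch of the Barker proposal, without preconditioning.
    key_z, key_b = jax.random.split(rng_key)
    z = step_size * jax.random.normal(key_z)
    # b = 1 with probability p = 1 / (1 + exp(-grad * z)), as in the kernel above.
    p = jax.nn.sigmoid(grad_logdensity * z)
    b = jax.random.bernoulli(key_b, p)
    return position + jnp.where(b, z, -z)
```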
def overdamped_langevin(logdensity_grad_fn):
"""Euler solver for overdamped Langevin diffusion."""
def one_step(rng_key, state: DiffusionState, step_size: float, batch: tuple = ()):
position, _, logdensity_grad = state
noise = generate_gaussian_noise(rng_key, position)
position = jax.tree_util.tree_map(
lambda p, g, n: p + step_size * g + jnp.sqrt(2 * step_size) * n,
position,
logdensity_grad,
noise,
)
logdensity, logdensity_grad = logdensity_grad_fn(position, *batch)
return DiffusionState(position, logdensity, logdensity_grad)
return one_step
|
Euler solver for overdamped Langevin diffusion.
|
overdamped_langevin
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/diffusions.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/diffusions.py
|
Apache-2.0
|
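The `one_step` above is the Euler–Maruyama discretisation x' = x + h ∇log π(x) + √(2h) ξ. A self-contained sketch of the same update on a standard normal target (the target and step size are illustrative assumptions):
```python
import jax
import jax.numpy as jnp

grad_logdensity = lambda x: -x   # standard normal: grad log pi(x) = -x
step_size = 0.1

def one_step(x, rng_key):
    noise = jax.random.normal(rng_key, x.shape)
    x = x + step_size * grad_logdensity(x) + jnp.sqrt(2 * step_size) * noise
    return x, x

keys = jax.random.split(jax.random.PRNGKey(0), 1000)
_, samples = jax.lax.scan(one_step, jnp.zeros(2), keys)
```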
def build_kernel(
integrator: Callable = integrators.velocity_verlet,
divergence_threshold: float = 1000,
next_random_arg_fn: Callable = lambda key: jax.random.split(key)[1],
integration_steps_fn: Callable = lambda key: jax.random.randint(key, (), 1, 10),
):
"""Build a Dynamic HMC kernel where the number of integration steps is chosen randomly.
Parameters
----------
integrator
The symplectic integrator to use to integrate the Hamiltonian dynamics.
divergence_threshold
Value of the difference in energy above which we consider that the transition is divergent.
next_random_arg_fn
Function that generates the next `random_generator_arg` from its previous value.
integration_steps_fn
Function that generates the next pseudo or quasi-random number of integration steps in the
sequence, given the current `random_generator_arg`. Needs to return an `int`.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
hmc_base = build_static_hmc_kernel(integrator, divergence_threshold)
def kernel(
rng_key: PRNGKey,
state: DynamicHMCState,
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: Array,
**integration_steps_kwargs,
) -> tuple[DynamicHMCState, HMCInfo]:
"""Generate a new sample with the HMC kernel."""
num_integration_steps = integration_steps_fn(
state.random_generator_arg, **integration_steps_kwargs
)
hmc_state = HMCState(state.position, state.logdensity, state.logdensity_grad)
hmc_proposal, info = hmc_base(
rng_key,
hmc_state,
logdensity_fn,
step_size,
inverse_mass_matrix,
num_integration_steps,
)
next_random_arg = next_random_arg_fn(state.random_generator_arg)
return (
DynamicHMCState(
hmc_proposal.position,
hmc_proposal.logdensity,
hmc_proposal.logdensity_grad,
next_random_arg,
),
info,
)
return kernel
|
Build a Dynamic HMC kernel where the number of integration steps is chosen randomly.
Parameters
----------
integrator
The symplectic integrator to use to integrate the Hamiltonian dynamics.
divergence_threshold
Value of the difference in energy above which we consider that the transition is divergent.
next_random_arg_fn
Function that generates the next `random_generator_arg` from its previous value.
integration_steps_fn
Function that generates the next pseudo or quasi-random number of integration steps in the
sequence, given the current `random_generator_arg`. Needs to return an `int`.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/dynamic_hmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/dynamic_hmc.py
|
Apache-2.0
|
def kernel(
rng_key: PRNGKey,
state: DynamicHMCState,
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: Array,
**integration_steps_kwargs,
) -> tuple[DynamicHMCState, HMCInfo]:
"""Generate a new sample with the HMC kernel."""
num_integration_steps = integration_steps_fn(
state.random_generator_arg, **integration_steps_kwargs
)
hmc_state = HMCState(state.position, state.logdensity, state.logdensity_grad)
hmc_proposal, info = hmc_base(
rng_key,
hmc_state,
logdensity_fn,
step_size,
inverse_mass_matrix,
num_integration_steps,
)
next_random_arg = next_random_arg_fn(state.random_generator_arg)
return (
DynamicHMCState(
hmc_proposal.position,
hmc_proposal.logdensity,
hmc_proposal.logdensity_grad,
next_random_arg,
),
info,
)
|
Generate a new sample with the HMC kernel.
|
kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/dynamic_hmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/dynamic_hmc.py
|
Apache-2.0
|
def as_top_level_api(
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: Array,
*,
divergence_threshold: int = 1000,
integrator: Callable = integrators.velocity_verlet,
next_random_arg_fn: Callable = lambda key: jax.random.split(key)[1],
integration_steps_fn: Callable = lambda key: jax.random.randint(key, (), 1, 10),
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the dynamic HMC kernel.
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
inverse_mass_matrix
The value to use for the inverse mass matrix when drawing a value for
the momentum and computing the kinetic energy.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
integrator
(algorithm parameter) The symplectic integrator to use to integrate the trajectory.
next_random_arg_fn
Function that generates the next `random_generator_arg` from its previous value.
integration_steps_fn
Function that generates the next pseudo or quasi-random number of integration steps in the
sequence, given the current `random_generator_arg`.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(
integrator, divergence_threshold, next_random_arg_fn, integration_steps_fn
)
def init_fn(position: ArrayLikeTree, rng_key: Array):
# Note that rng_key here is not necessarily a PRNGKey: it can be any Array
# used to generate a sequence of pseudo- or quasi-random numbers (previously
# named `random_generator_arg`).
return init(position, logdensity_fn, rng_key)
def step_fn(rng_key: PRNGKey, state):
return kernel(
rng_key,
state,
logdensity_fn,
step_size,
inverse_mass_matrix,
)
return SamplingAlgorithm(init_fn, step_fn)
|
Implements the (basic) user interface for the dynamic HMC kernel.
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
inverse_mass_matrix
The value to use for the inverse mass matrix when drawing a value for
the momentum and computing the kinetic energy.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
integrator
(algorithm parameter) The symplectic integrator to use to integrate the trajectory.
next_random_arg_fn
Function that generates the next `random_generator_arg` from its previous value.
integration_steps_fn
Function that generates the next pseudo or quasi-random number of integration steps in the
sequence, given the current `random_generator_arg`.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/dynamic_hmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/dynamic_hmc.py
|
Apache-2.0
|
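A minimal usage sketch for this interface, following the `init_fn`/`step_fn` signatures above (the toy target, step size and mass matrix are assumptions):
```python
import jax
import jax.numpy as jnp
import blackjax

logdensity_fn = lambda x: -0.5 * jnp.sum(jnp.square(x))
dynamic_hmc = blackjax.dynamic_hmc(
    logdensity_fn, step_size=0.3, inverse_mass_matrix=jnp.ones(2)
)

# `init` takes a key-like Array that seeds the integration-steps sequence.
state = dynamic_hmc.init(jnp.zeros(2), jax.random.PRNGKey(0))
step = jax.jit(dynamic_hmc.step)
state, info = step(jax.random.PRNGKey(1), state)
```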
def halton_trajectory_length(
i: Array, trajectory_length_adjustment: float, max_bits: int = 10
) -> int:
"""Generate a quasi-random number of integration steps."""
s = rescale(trajectory_length_adjustment)
return jnp.asarray(jnp.rint(0.5 + halton_sequence(i, max_bits) * s), dtype=int)
|
Generate a quasi-random number of integration steps.
|
halton_trajectory_length
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/dynamic_hmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/dynamic_hmc.py
|
Apache-2.0
|
def build_kernel(cov_matrix: Array, mean: Array):
"""Build an Elliptical Slice sampling kernel :cite:p:`murray2010elliptical`.
Parameters
----------
cov_matrix
The value of the covariance matrix of the Gaussian prior distribution of
the posterior we wish to sample from.
mean
The mean of the Gaussian prior distribution.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
ndim = jnp.ndim(cov_matrix) # type: ignore[arg-type]
if ndim == 1: # diagonal covariance matrix
cov_matrix_sqrt = jnp.sqrt(cov_matrix)
elif ndim == 2:
cov_matrix_sqrt = jax.lax.linalg.cholesky(cov_matrix)
else:
raise ValueError(
"The mass matrix has the wrong number of dimensions:"
f" expected 1 or 2, got {jnp.ndim(cov_matrix)}." # type: ignore[arg-type]
)
def momentum_generator(rng_key, position):
return generate_gaussian_noise(rng_key, position, mean, cov_matrix_sqrt)
def kernel(
rng_key: PRNGKey,
state: EllipSliceState,
logdensity_fn: Callable,
) -> tuple[EllipSliceState, EllipSliceInfo]:
proposal_generator = elliptical_proposal(
logdensity_fn, momentum_generator, mean
)
return proposal_generator(rng_key, state)
return kernel
|
Build an Elliptical Slice sampling kernel :cite:p:`murray2010elliptical`.
Parameters
----------
cov_matrix
The value of the covariance matrix of the Gaussian prior distribution of
the posterior we wish to sample from.
mean
The mean of the Gaussian prior distribution.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/elliptical_slice.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/elliptical_slice.py
|
Apache-2.0
|
def as_top_level_api(
loglikelihood_fn: Callable,
*,
mean: Array,
cov: Array,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the Elliptical Slice sampling kernel.
Examples
--------
A new Elliptical Slice sampling kernel can be initialized and used with the following code:
.. code::
ellip_slice = blackjax.elliptical_slice(loglikelihood_fn, mean=mean, cov=cov_matrix)
state = ellip_slice.init(position)
new_state, info = ellip_slice.step(rng_key, state)
We can JIT-compile the step function for better performance
.. code::
step = jax.jit(ellip_slice.step)
new_state, info = step(rng_key, state)
Parameters
----------
loglikelihood_fn
Only the log-likelihood function of the posterior distribution we wish to sample from.
mean
The mean of the Gaussian prior distribution of the posterior we wish to sample from.
cov
The value of the covariance matrix of the Gaussian prior distribution of the posterior we wish to sample from.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(cov, mean)
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position, loglikelihood_fn)
def step_fn(rng_key: PRNGKey, state):
return kernel(
rng_key,
state,
loglikelihood_fn,
)
return SamplingAlgorithm(init_fn, step_fn)
|
Implements the (basic) user interface for the Elliptical Slice sampling kernel.
Examples
--------
A new Elliptical Slice sampling kernel can be initialized and used with the following code:
.. code::
ellip_slice = blackjax.elliptical_slice(loglikelihood_fn, mean=mean, cov=cov_matrix)
state = ellip_slice.init(position)
new_state, info = ellip_slice.step(rng_key, state)
We can JIT-compile the step function for better performance
.. code::
step = jax.jit(ellip_slice.step)
new_state, info = step(rng_key, state)
Parameters
----------
loglikelihood_fn
Only the log-likelihood function of the posterior distribution we wish to sample from.
mean
The mean of the Gaussian prior distribution of the posterior we wish to sample from.
cov
The value of the covariance matrix of the Gaussian prior distribution of the posterior we wish to sample from.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/elliptical_slice.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/elliptical_slice.py
|
Apache-2.0
|
def elliptical_proposal(
logdensity_fn: Callable,
momentum_generator: Callable,
mean: Array,
) -> Callable:
"""Build an Ellitpical slice sampling kernel.
The algorithm samples a latent parameter, traces an ellipse connecting the
initial position and the latent parameter and does slice sampling on this
ellipse to output a new sample from the posterior distribution.
Parameters
----------
logdensity_fn
A function that returns the log-likelihood at a given position.
momentum_generator
A function that generates a new latent momentum variable.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
def generate(
rng_key: PRNGKey, state: EllipSliceState
) -> tuple[EllipSliceState, EllipSliceInfo]:
position, logdensity = state
key_slice, key_momentum, key_uniform, key_theta = jax.random.split(rng_key, 4)
# step 1: sample momentum
momentum = momentum_generator(key_momentum, position)
# step 2: get slice (y)
logy = logdensity + jnp.log(jax.random.uniform(key_uniform))
# step 3: get theta (ellipse move), set initial interval
theta = 2 * jnp.pi * jax.random.uniform(key_theta)
theta_min = theta - 2 * jnp.pi
theta_max = theta
# step 4: proposal
p, m = ellipsis(position, momentum, theta, mean)
# step 5: acceptance
logdensity = logdensity_fn(p)
def slice_fn(vals):
"""Perform slice sampling around the ellipsis.
Checks if the proposed position's likelihood is larger than the slice
variable. Returns the position if True, shrinks the bracket for sampling
`theta` and samples a new proposal if False.
As the bracket `[theta_min, theta_max]` shrinks, the proposal gets closer
to the original position, which has likelihood larger than the slice variable.
It is guaranteed to stop in a finite number of iterations as long as the
likelihood is continuous with respect to the parameter being sampled.
"""
_, subiter, theta, theta_min, theta_max, *_ = vals
thetak = jax.random.fold_in(key_slice, subiter)
theta = jax.random.uniform(thetak, minval=theta_min, maxval=theta_max)
p, m = ellipsis(position, momentum, theta, mean)
logdensity = logdensity_fn(p)
theta_min = jnp.where(theta < 0, theta, theta_min)
theta_max = jnp.where(theta > 0, theta, theta_max)
subiter += 1
return logdensity, subiter, theta, theta_min, theta_max, p, m
logdensity, subiter, theta, *_, position, momentum = jax.lax.while_loop(
lambda vals: vals[0] <= logy,
slice_fn,
(logdensity, 1, theta, theta_min, theta_max, p, m),
)
return (
EllipSliceState(position, logdensity),
EllipSliceInfo(momentum, theta, subiter),
)
return generate
|
Build an Elliptical slice sampling kernel.
The algorithm samples a latent parameter, traces an ellipse connecting the
initial position and the latent parameter and does slice sampling on this
ellipse to output a new sample from the posterior distribution.
Parameters
----------
logdensity_fn
A function that returns the log-likelihood at a given position.
momentum_generator
A function that generates a new latent momentum variable.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
elliptical_proposal
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/elliptical_slice.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/elliptical_slice.py
|
Apache-2.0
|
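The bracket-shrinking step above can be illustrated in isolation; a hedged sketch (plain Python loop, pretending every proposal is rejected) showing how `[theta_min, theta_max]` tightens around 0, i.e. around the original position:
.. code::
    import jax
    import jax.numpy as jnp
    key = jax.random.PRNGKey(0)
    theta = 2 * jnp.pi * jax.random.uniform(key)
    theta_min, theta_max = theta - 2 * jnp.pi, theta
    for subiter in range(1, 5):
        subkey = jax.random.fold_in(key, subiter)
        theta = jax.random.uniform(subkey, minval=theta_min, maxval=theta_max)
        # shrink the side of the bracket the rejected angle fell on
        theta_min = jnp.where(theta < 0, theta, theta_min)
        theta_max = jnp.where(theta > 0, theta, theta_max)
        print(float(theta_min), float(theta_max))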
def slice_fn(vals):
"""Perform slice sampling around the ellipsis.
Checks if the proposed position's likelihood is larger than the slice
variable. Returns the position if True, shrinks the bracket for sampling
`theta` and samples a new proposal if False.
As the bracket `[theta_min, theta_max]` shrinks, the proposal gets closer
to the original position, which has likelihood larger than the slice variable.
It is guaranteed to stop in a finite number of iterations as long as the
likelihood is continuous with respect to the parameter being sampled.
"""
_, subiter, theta, theta_min, theta_max, *_ = vals
thetak = jax.random.fold_in(key_slice, subiter)
theta = jax.random.uniform(thetak, minval=theta_min, maxval=theta_max)
p, m = ellipsis(position, momentum, theta, mean)
logdensity = logdensity_fn(p)
theta_min = jnp.where(theta < 0, theta, theta_min)
theta_max = jnp.where(theta > 0, theta, theta_max)
subiter += 1
return logdensity, subiter, theta, theta_min, theta_max, p, m
|
Perform slice sampling around the ellipse.
Checks if the proposed position's likelihood is larger than the slice
variable. Returns the position if True, shrinks the bracket for sampling
`theta` and samples a new proposal if False.
As the bracket `[theta_min, theta_max]` shrinks, the proposal gets closer
to the original position, which has likelihood larger than the slice variable.
It is guaranteed to stop in a finite number of iterations as long as the
likelihood is continuous with respect to the parameter being sampled.
|
slice_fn
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/elliptical_slice.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/elliptical_slice.py
|
Apache-2.0
|
def ellipsis(position, momentum, theta, mean):
"""Generate proposal from the ellipsis.
Given a scalar theta indicating a point on the circumference of the ellipsis
and the shared mean vector for both position and momentum variables,
generate proposed position and momentum to later accept or reject
depending on the slice variable.
"""
position, unravel_fn = jax.flatten_util.ravel_pytree(position)
momentum, _ = jax.flatten_util.ravel_pytree(momentum)
position_centered = position - mean
momentum_centered = momentum - mean
return (
unravel_fn(
position_centered * jnp.cos(theta)
+ momentum_centered * jnp.sin(theta)
+ mean
),
unravel_fn(
momentum_centered * jnp.cos(theta)
- position_centered * jnp.sin(theta)
+ mean
),
)
|
Generate proposal from the ellipse.
Given a scalar theta indicating a point on the circumference of the ellipse
and the shared mean vector for both position and momentum variables,
generate proposed position and momentum to later accept or reject
depending on the slice variable.
|
ellipsis
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/elliptical_slice.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/elliptical_slice.py
|
Apache-2.0
|
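As a quick sanity check of `ellipsis` (a hedged sketch with toy values and a zero mean): at `theta = 0` the proposal equals the current state, and at `theta = pi/2` position and momentum are exchanged up to a sign:
.. code::
    import jax.numpy as jnp
    from blackjax.mcmc.elliptical_slice import ellipsis
    position, momentum = jnp.array([1.0, 2.0]), jnp.array([3.0, -1.0])
    mean = jnp.zeros(2)
    p0, m0 = ellipsis(position, momentum, 0.0, mean)
    assert jnp.allclose(p0, position) and jnp.allclose(m0, momentum)
    p1, m1 = ellipsis(position, momentum, jnp.pi / 2, mean)
    assert jnp.allclose(p1, momentum) and jnp.allclose(m1, -position)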
def build_kernel(
noise_fn: Callable = lambda _: 0.0,
divergence_threshold: float = 1000,
):
"""Build a Generalized HMC kernel.
The Generalized HMC kernel performs a similar procedure to the standard HMC
kernel with the difference of a persistent momentum variable and a non-reversible
Metropolis-Hastings step instead of the standard Metropolis-Hastings acceptance
step. This means that, apart from the momentum and slice variables depending on
their previous values, and the Metropolis-Hastings step being performed
(equivalently) as slice sampling, the standard HMC implementation can be
re-used to perform Generalized HMC sampling.
Parameters
----------
noise_fn
A function that takes as input the slice variable and outputs a random
variable used as a noise correction of the persistent slice update.
The parameter defaults to a random variable with a single atom at 0.
divergence_threshold
Value of the difference in energy above which we consider that the
transition is divergent.
Returns
-------
A kernel that takes a rng_key, a Pytree that contains the current state
of the chain, and free parameters of the sampling mechanism; and that
returns a new state of the chain along with information about the transition.
"""
def kernel(
rng_key: PRNGKey,
state: GHMCState,
logdensity_fn: Callable,
step_size: float,
momentum_inverse_scale: ArrayLikeTree,
alpha: float,
delta: float,
) -> tuple[GHMCState, hmc.HMCInfo]:
"""Generate new sample with the Generalized HMC kernel.
Parameters
----------
rng_key
JAX's pseudo random number generating key.
state
Current state of the chain.
logdensity_fn
(Unnormalized) Log density function being targeted.
step_size
Variable specifying the size of the integration step.
momentum_inverse_scale
Pytree with the same structure as the targeted position variable
specifying the per dimension inverse scaling transformation applied
to the persistent momentum variable prior to the integration step.
alpha
Variable specifying the degree of persistent momentum, complementary
to independent new momentum.
delta
Fixed (non-random) amount of translation added at each new iteration
to the slice variable for non-reversible slice sampling.
"""
flat_inverse_scale = ravel_pytree(momentum_inverse_scale)[0]
momentum_generator, kinetic_energy_fn, *_ = metrics.gaussian_euclidean(
flat_inverse_scale**2
)
symplectic_integrator = integrators.velocity_verlet(
logdensity_fn, kinetic_energy_fn
)
proposal_generator = hmc.hmc_proposal(
symplectic_integrator,
kinetic_energy_fn,
step_size,
divergence_threshold=divergence_threshold,
sample_proposal=nonreversible_slice_sampling,
)
key_momentum, key_noise = jax.random.split(rng_key)
position, momentum, logdensity, logdensity_grad, slice = state
# New momentum is persistent
momentum = update_momentum(key_momentum, state, alpha, momentum_generator)
# Slice is non-reversible
slice = ((slice + 1.0 + delta + noise_fn(key_noise)) % 2) - 1.0
integrator_state = integrators.IntegratorState(
position, momentum, logdensity, logdensity_grad
)
# Note that ghmc uses nonreversible_slice_sampling, which overloads the pattern
# of SampleProposal and does not actually return the acceptance rate.
proposal, info, slice_next = proposal_generator(slice, integrator_state)
proposal = hmc.flip_momentum(proposal)
state = GHMCState(
position=proposal.position,
momentum=proposal.momentum,
logdensity=proposal.logdensity,
logdensity_grad=proposal.logdensity_grad,
slice=slice_next,
)
return state, info
return kernel
|
Build a Generalized HMC kernel.
The Generalized HMC kernel performs a similar procedure to the standard HMC
kernel with the difference of a persistent momentum variable and a non-reversible
Metropolis-Hastings step instead of the standard Metropolis-Hastings acceptance
step. This means that, apart from the momentum and slice variables depending on
their previous values, and the Metropolis-Hastings step being performed
(equivalently) as slice sampling, the standard HMC implementation can be
re-used to perform Generalized HMC sampling.
Parameters
----------
noise_fn
A function that takes as input the slice variable and outputs a random
variable used as a noise correction of the persistent slice update.
The parameter defaults to a random variable with a single atom at 0.
divergence_threshold
Value of the difference in energy above which we consider that the
transition is divergent.
Returns
-------
A kernel that takes a rng_key, a Pytree that contains the current state
of the chain, and free parameters of the sampling mechanism; and that
returns a new state of the chain along with information about the transition.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/ghmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/ghmc.py
|
Apache-2.0
|
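The non-reversible slice update `((slice + 1 + delta + noise) % 2) - 1` wraps the slice variable back onto `[-1, 1)`; a hedged sketch with an illustrative `delta` and the noise term omitted:
.. code::
    delta = 0.35  # illustrative value
    slice_var = 0.9
    for _ in range(5):
        slice_var = ((slice_var + 1.0 + delta) % 2) - 1.0
        assert -1.0 <= slice_var < 1.0
        print(round(slice_var, 3))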
def kernel(
rng_key: PRNGKey,
state: GHMCState,
logdensity_fn: Callable,
step_size: float,
momentum_inverse_scale: ArrayLikeTree,
alpha: float,
delta: float,
) -> tuple[GHMCState, hmc.HMCInfo]:
"""Generate new sample with the Generalized HMC kernel.
Parameters
----------
rng_key
JAX's pseudo random number generating key.
state
Current state of the chain.
logdensity_fn
(Unnormalized) Log density function being targeted.
step_size
Variable specifying the size of the integration step.
momentum_inverse_scale
Pytree with the same structure as the targeted position variable
specifying the per dimension inverse scaling transformation applied
to the persistent momentum variable prior to the integration step.
alpha
Variable specifying the degree of persistent momentum, complementary
to independent new momentum.
delta
Fixed (non-random) amount of translation added at each new iteration
to the slice variable for non-reversible slice sampling.
"""
flat_inverse_scale = ravel_pytree(momentum_inverse_scale)[0]
momentum_generator, kinetic_energy_fn, *_ = metrics.gaussian_euclidean(
flat_inverse_scale**2
)
symplectic_integrator = integrators.velocity_verlet(
logdensity_fn, kinetic_energy_fn
)
proposal_generator = hmc.hmc_proposal(
symplectic_integrator,
kinetic_energy_fn,
step_size,
divergence_threshold=divergence_threshold,
sample_proposal=nonreversible_slice_sampling,
)
key_momentum, key_noise = jax.random.split(rng_key)
position, momentum, logdensity, logdensity_grad, slice = state
# New momentum is persistent
momentum = update_momentum(key_momentum, state, alpha, momentum_generator)
# Slice is non-reversible
slice = ((slice + 1.0 + delta + noise_fn(key_noise)) % 2) - 1.0
integrator_state = integrators.IntegratorState(
position, momentum, logdensity, logdensity_grad
)
# Note that ghmc uses nonreversible_slice_sampling, which overloads the pattern
# of SampleProposal and does not actually return the acceptance rate.
proposal, info, slice_next = proposal_generator(slice, integrator_state)
proposal = hmc.flip_momentum(proposal)
state = GHMCState(
position=proposal.position,
momentum=proposal.momentum,
logdensity=proposal.logdensity,
logdensity_grad=proposal.logdensity_grad,
slice=slice_next,
)
return state, info
|
Generate new sample with the Generalized HMC kernel.
Parameters
----------
rng_key
JAX's pseudo random number generating key.
state
Current state of the chain.
logdensity_fn
(Unnormalized) Log density function being targeted.
step_size
Variable specifying the size of the integration step.
momentum_inverse_scale
Pytree with the same structure as the targeted position variable
specifying the per dimension inverse scaling transformation applied
to the persistent momentum variable prior to the integration step.
alpha
Variable specifying the degree of persistent momentum, complementary
to independent new momentum.
delta
Fixed (non-random) amount of translation added at each new iteration
to the slice variable for non-reversible slice sampling.
|
kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/ghmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/ghmc.py
|
Apache-2.0
|
def update_momentum(rng_key, state, alpha, momentum_generator):
"""Persistent update of the momentum variable.
Performs a persistent update of the momentum, taking as input the previous
momentum, a random number generating key, the parameter alpha and the
momentum generator function. Outputs
an updated momentum that is a mixture of the previous momentum and a new sample
from a Gaussian density (dependent on alpha). The weights of the mixture of
these two components are a function of alpha.
"""
position, momentum, *_ = state
momentum = jax.tree.map(
lambda prev_momentum, shifted_momentum: prev_momentum * jnp.sqrt(1.0 - alpha)
+ jnp.sqrt(alpha) * shifted_momentum,
momentum,
momentum_generator(rng_key, position),
)
return momentum
|
Persistent update of the momentum variable.
Performs a persistent update of the momentum, taking as input the previous
momentum, a random number generating key, the parameter alpha and the
momentum generator function. Outputs
an updated momentum that is a mixture of the previous momentum and a new sample
from a Gaussian density (dependent on alpha). The weights of the mixture of
these two components are a function of alpha.
|
update_momentum
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/ghmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/ghmc.py
|
Apache-2.0
|
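Because the mixture weights are `sqrt(1 - alpha)` and `sqrt(alpha)`, a standard Gaussian momentum remains standard Gaussian after the update; a hedged numerical check with illustrative values:
.. code::
    import jax
    import jax.numpy as jnp
    key1, key2 = jax.random.split(jax.random.PRNGKey(0))
    alpha = 0.3
    prev = jax.random.normal(key1, (100_000,))   # previous momentum ~ N(0, 1)
    fresh = jax.random.normal(key2, (100_000,))  # fresh draw ~ N(0, 1)
    mixed = jnp.sqrt(1.0 - alpha) * prev + jnp.sqrt(alpha) * fresh
    print(mixed.var())  # close to 1.0: stationarity is preserved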
def as_top_level_api(
logdensity_fn: Callable,
step_size: float,
momentum_inverse_scale: ArrayLikeTree,
alpha: float,
delta: float,
*,
divergence_threshold: int = 1000,
noise_gn: Callable = lambda _: 0.0,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the Generalized HMC kernel.
The Generalized HMC kernel performs a similar procedure to the standard HMC
kernel with the difference of a persistent momentum variable and a non-reversible
Metropolis-Hastings step instead of the standard Metropolis-Hastings acceptance
step.
This means that the sampling of the momentum variable depends on the previous
momentum, the rate of persistence depends on the alpha parameter, and the
Metropolis-Hastings accept/reject step is done through slice sampling with a
non-reversible slice variable that also depends on the previous slice; the
deterministic transformation is defined by the delta parameter.
The Generalized HMC does not have a trajectory length parameter; it always performs
one iteration of the velocity verlet integrator with a given step size, making
the algorithm a good candidate for running many chains in parallel.
Examples
--------
A new Generalized HMC kernel can be initialized and used with the following code:
.. code::
ghmc_kernel = blackjax.ghmc(logdensity_fn, step_size, alpha, delta)
state = ghmc_kernel.init(rng_key, position)
new_state, info = ghmc_kernel.step(rng_key, state)
We can JIT-compile the step function for better performance
.. code::
step = jax.jit(ghmc_kernel.step)
new_state, info = step(rng_key, state)
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
A PyTree of the same structure as the target PyTree (position) with the
values used as the step size for each dimension of the target space in
the velocity verlet integrator.
momentum_inverse_scale
Pytree with the same structure as the targeted position variable
specifying the per dimension inverse scaling transformation applied
to the persistent momentum variable prior to the integration step.
alpha
The value defining the persistence of the momentum variable.
delta
The value defining the deterministic translation of the slice variable.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
noise_gn
A function that takes as input the slice variable and outputs a random
variable used as a noise correction of the persistent slice update.
The parameter defaults to a random variable with a single atom at 0.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(noise_gn, divergence_threshold)
def init_fn(position: ArrayLikeTree, rng_key: PRNGKey):
return init(position, rng_key, logdensity_fn)
def step_fn(rng_key: PRNGKey, state):
return kernel(
rng_key,
state,
logdensity_fn,
step_size,
momentum_inverse_scale,
alpha,
delta,
)
return SamplingAlgorithm(init_fn, step_fn)
|
Implements the (basic) user interface for the Generalized HMC kernel.
The Generalized HMC kernel performs a similar procedure to the standard HMC
kernel with the difference of a persistent momentum variable and a non-reversible
Metropolis-Hastings step instead of the standard Metropolis-Hastings acceptance
step.
This means that the sampling of the momentum variable depends on the previous
momentum, the rate of persistence depends on the alpha parameter, and the
Metropolis-Hastings accept/reject step is done through slice sampling with a
non-reversible slice variable that also depends on the previous slice; the
deterministic transformation is defined by the delta parameter.
The Generalized HMC does not have a trajectory length parameter; it always performs
one iteration of the velocity verlet integrator with a given step size, making
the algorithm a good candidate for running many chains in parallel.
Examples
--------
A new Generalized HMC kernel can be initialized and used with the following code:
.. code::
ghmc_kernel = blackjax.ghmc(logdensity_fn, step_size, alpha, delta)
state = ghmc_kernel.init(rng_key, position)
new_state, info = ghmc_kernel.step(rng_key, state)
We can JIT-compile the step function for better performance
.. code::
step = jax.jit(ghmc_kernel.step)
new_state, info = step(rng_key, state)
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
A PyTree of the same structure as the target PyTree (position) with the
values used as the step size for each dimension of the target space in
the velocity verlet integrator.
momentum_inverse_scale
Pytree with the same structure as the targeted position variable
specifying the per dimension inverse scaling transformation applied
to the persistent momentum variable prior to the integration step.
alpha
The value defining the persistence of the momentum variable.
delta
The value defining the deterministic translation of the slice variable.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
noise_gn
A function that takes as input the slice variable and outputs a random
variable used as a noise correction of the persistent slice update.
The parameter defaults to a random variable with a single atom at 0.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/ghmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/ghmc.py
|
Apache-2.0
|
def build_kernel(
integrator: Callable = integrators.velocity_verlet,
divergence_threshold: float = 1000,
):
"""Build a HMC kernel.
Parameters
----------
integrator
The symplectic integrator to use to integrate the Hamiltonian dynamics.
divergence_threshold
Value of the difference in energy above which we consider that the transition is
divergent.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
def kernel(
rng_key: PRNGKey,
state: HMCState,
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: metrics.MetricTypes,
num_integration_steps: int,
) -> tuple[HMCState, HMCInfo]:
"""Generate a new sample with the HMC kernel."""
metric = metrics.default_metric(inverse_mass_matrix)
symplectic_integrator = integrator(logdensity_fn, metric.kinetic_energy)
proposal_generator = hmc_proposal(
symplectic_integrator,
metric.kinetic_energy,
step_size,
num_integration_steps,
divergence_threshold,
)
key_momentum, key_integrator = jax.random.split(rng_key, 2)
position, logdensity, logdensity_grad = state
momentum = metric.sample_momentum(key_momentum, position)
integrator_state = integrators.IntegratorState(
position, momentum, logdensity, logdensity_grad
)
proposal, info, _ = proposal_generator(key_integrator, integrator_state)
proposal = HMCState(
proposal.position, proposal.logdensity, proposal.logdensity_grad
)
return proposal, info
return kernel
|
Build an HMC kernel.
Parameters
----------
integrator
The symplectic integrator to use to integrate the Hamiltonian dynamics.
divergence_threshold
Value of the difference in energy above which we consider that the transition is
divergent.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/hmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/hmc.py
|
Apache-2.0
|
def kernel(
rng_key: PRNGKey,
state: HMCState,
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: metrics.MetricTypes,
num_integration_steps: int,
) -> tuple[HMCState, HMCInfo]:
"""Generate a new sample with the HMC kernel."""
metric = metrics.default_metric(inverse_mass_matrix)
symplectic_integrator = integrator(logdensity_fn, metric.kinetic_energy)
proposal_generator = hmc_proposal(
symplectic_integrator,
metric.kinetic_energy,
step_size,
num_integration_steps,
divergence_threshold,
)
key_momentum, key_integrator = jax.random.split(rng_key, 2)
position, logdensity, logdensity_grad = state
momentum = metric.sample_momentum(key_momentum, position)
integrator_state = integrators.IntegratorState(
position, momentum, logdensity, logdensity_grad
)
proposal, info, _ = proposal_generator(key_integrator, integrator_state)
proposal = HMCState(
proposal.position, proposal.logdensity, proposal.logdensity_grad
)
return proposal, info
|
Generate a new sample with the HMC kernel.
|
kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/hmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/hmc.py
|
Apache-2.0
|
def as_top_level_api(
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: metrics.MetricTypes,
num_integration_steps: int,
*,
divergence_threshold: int = 1000,
integrator: Callable = integrators.velocity_verlet,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the HMC kernel.
The general hmc kernel builder (:meth:`blackjax.mcmc.hmc.build_kernel`, alias
`blackjax.hmc.build_kernel`) can be cumbersome to manipulate. Since most users only
need to specify the kernel parameters at initialization time, we provide a helper
function that specializes the general kernel.
We also add the general kernel and state generator as an attribute to this class so
users only need to pass `blackjax.hmc` to SMC, adaptation, etc. algorithms.
Examples
--------
A new HMC kernel can be initialized and used with the following code:
.. code::
hmc = blackjax.hmc(
logdensity_fn, step_size, inverse_mass_matrix, num_integration_steps
)
state = hmc.init(position)
new_state, info = hmc.step(rng_key, state)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(hmc.step)
new_state, info = step(rng_key, state)
Should you need to you can always use the base kernel directly:
.. code::
import blackjax.mcmc.integrators as integrators
kernel = blackjax.hmc.build_kernel(integrators.mclachlan)
state = blackjax.hmc.init(position, logdensity_fn)
state, info = kernel(
rng_key,
state,
logdensity_fn,
step_size,
inverse_mass_matrix,
num_integration_steps,
)
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
inverse_mass_matrix
The value to use for the inverse mass matrix when drawing a value for
the momentum and computing the kinetic energy. This argument will be
passed to the ``metrics.default_metric`` function so it supports the
full interface presented there.
num_integration_steps
The number of steps we take with the symplectic integrator at each
sample step before returning a sample.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
integrator
(algorithm parameter) The symplectic integrator to use to integrate the
trajectory.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(integrator, divergence_threshold)
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position, logdensity_fn)
def step_fn(rng_key: PRNGKey, state):
return kernel(
rng_key,
state,
logdensity_fn,
step_size,
inverse_mass_matrix,
num_integration_steps,
)
return SamplingAlgorithm(init_fn, step_fn)
|
Implements the (basic) user interface for the HMC kernel.
The general hmc kernel builder (:meth:`blackjax.mcmc.hmc.build_kernel`, alias
`blackjax.hmc.build_kernel`) can be cumbersome to manipulate. Since most users only
need to specify the kernel parameters at initialization time, we provide a helper
function that specializes the general kernel.
We also add the general kernel and state generator as an attribute to this class so
users only need to pass `blackjax.hmc` to SMC, adaptation, etc. algorithms.
Examples
--------
A new HMC kernel can be initialized and used with the following code:
.. code::
hmc = blackjax.hmc(
logdensity_fn, step_size, inverse_mass_matrix, num_integration_steps
)
state = hmc.init(position)
new_state, info = hmc.step(rng_key, state)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(hmc.step)
new_state, info = step(rng_key, state)
Should you need to you can always use the base kernel directly:
.. code::
import blackjax.mcmc.integrators as integrators
kernel = blackjax.hmc.build_kernel(integrators.mclachlan)
state = blackjax.hmc.init(position, logdensity_fn)
state, info = kernel(
rng_key,
state,
logdensity_fn,
step_size,
inverse_mass_matrix,
num_integration_steps,
)
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
inverse_mass_matrix
The value to use for the inverse mass matrix when drawing a value for
the momentum and computing the kinetic energy. This argument will be
passed to the ``metrics.default_metric`` function so it supports the
full interface presented there.
num_integration_steps
The number of steps we take with the symplectic integrator at each
sample step before returning a sample.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
integrator
(algorithm parameter) The symplectic integrator to use to integrate the
trajectory.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/hmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/hmc.py
|
Apache-2.0
|
def hmc_proposal(
integrator: Callable,
kinetic_energy: metrics.KineticEnergy,
step_size: Union[float, ArrayLikeTree],
num_integration_steps: int = 1,
divergence_threshold: float = 1000,
*,
sample_proposal: Callable = static_binomial_sampling,
) -> Callable:
"""Vanilla HMC algorithm.
The algorithm integrates the trajectory applying a symplectic integrator
`num_integration_steps` times in one direction to get a proposal and uses a
Metropolis-Hastings acceptance step to either reject or accept this
proposal. This is what people usually refer to when they talk about "the
HMC algorithm".
Parameters
----------
integrator
Symplectic integrator used to build the trajectory step by step.
kinetic_energy
Function that computes the kinetic energy.
step_size
Size of the integration step.
num_integration_steps
Number of times we run the symplectic integrator to build the trajectory.
divergence_threshold
Threshold above which we say that there is a divergence.
Returns
-------
A kernel that generates a new chain state and information about the transition.
"""
build_trajectory = trajectory.static_integration(integrator)
hmc_energy_fn = hmc_energy(kinetic_energy)
def generate(
rng_key, state: integrators.IntegratorState
) -> tuple[integrators.IntegratorState, HMCInfo, ArrayTree]:
"""Generate a new chain state."""
end_state = build_trajectory(state, step_size, num_integration_steps)
end_state = flip_momentum(end_state)
proposal_energy = hmc_energy_fn(state)
new_energy = hmc_energy_fn(end_state)
delta_energy = safe_energy_diff(proposal_energy, new_energy)
is_diverging = -delta_energy > divergence_threshold
sampled_state, info = sample_proposal(rng_key, delta_energy, state, end_state)
do_accept, p_accept, other_proposal_info = info
info = HMCInfo(
state.momentum,
p_accept,
do_accept,
is_diverging,
new_energy,
end_state,
num_integration_steps,
)
return sampled_state, info, other_proposal_info
return generate
|
Vanilla HMC algorithm.
The algorithm integrates the trajectory applying a symplectic integrator
`num_integration_steps` times in one direction to get a proposal and uses a
Metropolis-Hastings acceptance step to either reject or accept this
proposal. This is what people usually refer to when they talk about "the
HMC algorithm".
Parameters
----------
integrator
Symplectic integrator used to build the trajectory step by step.
kinetic_energy
Function that computes the kinetic energy.
step_size
Size of the integration step.
num_integration_steps
Number of times we run the symplectic integrator to build the trajectory.
divergence_threshold
Threshold above which we say that there is a divergence.
Returns
-------
A kernel that generates a new chain state and information about the transition.
|
hmc_proposal
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/hmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/hmc.py
|
Apache-2.0
|
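The accept/reject decision depends only on the energy difference; a hedged sketch of the rule applied by the default `static_binomial_sampling` proposal (illustrative energies):
.. code::
    import jax.numpy as jnp
    initial_energy, proposed_energy = 10.0, 10.7  # illustrative values
    delta_energy = initial_energy - proposed_energy
    p_accept = jnp.minimum(1.0, jnp.exp(delta_energy))
    print(p_accept)  # ~0.497: the proposal is accepted with this probability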
def flip_momentum(
state: integrators.IntegratorState,
) -> integrators.IntegratorState:
"""Flip the momentum at the end of the trajectory.
To guarantee time-reversibility (hence detailed balance) we
need to flip the last state's momentum. If we run the hamiltonian
dynamics starting from the last state with flipped momentum we
should indeed retrieve the initial state (with flipped momentum).
"""
flipped_momentum = jax.tree_util.tree_map(lambda m: -1.0 * m, state.momentum)
return integrators.IntegratorState(
state.position,
flipped_momentum,
state.logdensity,
state.logdensity_grad,
)
|
Flip the momentum at the end of the trajectory.
To guarantee time-reversibility (hence detailed balance) we
need to flip the last state's momentum. If we run the hamiltonian
dynamics starting from the last state with flipped momentum we
should indeed retrieve the initial state (with flipped momentum).
|
flip_momentum
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/hmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/hmc.py
|
Apache-2.0
|
def generalized_two_stage_integrator(
operator1: Callable,
operator2: Callable,
coefficients: list[float],
format_output_fn: Callable = lambda x: x,
):
"""Generalized numerical integrator for solving ODEs.
The generalized integrator performs numerical integration of an ODE system by
alternating between stage 1 and stage 2 updates.
The update scheme is decided by the coefficients. The scheme should be palindromic,
i.e. the coefficients of the update scheme should be symmetric with respect to the
middle of the scheme.
For instance, for *any* differential equation of the form:
.. math:: \\frac{d}{dt}f = (O_1+O_2)f
The velocity_verlet operator can be seen as approximating :math:`e^{\\epsilon(O_1 + O_2)}`
by :math:`e^{\\epsilon O_1/2}e^{\\epsilon O_2}e^{\\epsilon O_1/2}`.
In a standard Hamiltonian, the forms of :math:`e^{\\epsilon O_2}` and
:math:`e^{\\epsilon O_1}` are simple, but for other differential equations,
they may be more complex.
Parameters
----------
operator1
Stage 1 operator, a function that updates the momentum.
operator2
Stage 2 operator, a function that updates the position.
coefficients
Coefficients of the integrator.
format_output_fn
Function that formats the output of the integrator.
Returns
-------
integrator
Integrator function.
"""
def one_step(state: IntegratorState, step_size: float):
position, momentum, _, logdensity_grad = state
# auxiliary information generated during integration for diagnostics. It is
# updated by operator1 and operator2 at each call.
momentum_update_info = None
position_update_info = None
for i, coef in enumerate(coefficients[:-1]):
if i % 2 == 0:
momentum, kinetic_grad, momentum_update_info = operator1(
momentum,
logdensity_grad,
step_size,
coef,
momentum_update_info,
is_last_call=False,
)
else:
(
position,
logdensity,
logdensity_grad,
position_update_info,
) = operator2(
position,
kinetic_grad,
step_size,
coef,
position_update_info,
)
# Separate the last steps to short circuit the computation of the kinetic_grad.
momentum, kinetic_grad, momentum_update_info = operator1(
momentum,
logdensity_grad,
step_size,
coefficients[-1],
momentum_update_info,
is_last_call=True,
)
return format_output_fn(
position,
momentum,
logdensity,
logdensity_grad,
kinetic_grad,
position_update_info,
momentum_update_info,
)
return one_step
|
Generalized numerical integrator for solving ODEs.
The generalized integrator performs numerical integration of an ODE system by
alternating between stage 1 and stage 2 updates.
The update scheme is decided by the coefficients. The scheme should be palindromic,
i.e. the coefficients of the update scheme should be symmetric with respect to the
middle of the scheme.
For instance, for *any* differential equation of the form:
.. math:: \frac{d}{dt}f = (O_1+O_2)f
The velocity_verlet operator can be seen as approximating :math:`e^{\epsilon(O_1 + O_2)}`
by :math:`e^{\epsilon O_1/2}e^{\epsilon O_2}e^{\epsilon O_1/2}`.
In a standard Hamiltonian, the forms of :math:`e^{\epsilon O_2}` and
:math:`e^{\epsilon O_1}` are simple, but for other differential equations,
they may be more complex.
Parameters
----------
operator1
Stage 1 operator, a function that updates the momentum.
operator2
Stage 2 operator, a function that updates the position.
coefficients
Coefficients of the integrator.
format_output_fn
Function that formats the output of the integrator.
Returns
-------
integrator
Integrator function.
|
generalized_two_stage_integrator
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/integrators.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/integrators.py
|
Apache-2.0
|
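For example, velocity verlet corresponds to the palindromic coefficient list `[0.5, 1.0, 0.5]` (a half momentum step, a full position step, a closing half momentum step); a hedged sketch of how the loop above dispatches them:
.. code::
    coefficients = [0.5, 1.0, 0.5]  # velocity verlet scheme
    assert coefficients == coefficients[::-1]  # palindromic
    # even indices -> operator1 (momentum), odd indices -> operator2 (position)
    for i, coef in enumerate(coefficients[:-1]):
        print("momentum" if i % 2 == 0 else "position", coef)
    print("momentum", coefficients[-1])  # final, short-circuited momentum update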
def generate_euclidean_integrator(coefficients):
"""Generate symplectic integrator for solving a Hamiltonian system.
The resulting integrator is volume-preserving and preserves the symplectic structure
of phase space.
"""
def euclidean_integrator(
logdensity_fn: Callable, kinetic_energy_fn: KineticEnergy
) -> Integrator:
position_update_fn = euclidean_position_update_fn(logdensity_fn)
momentum_update_fn = euclidean_momentum_update_fn(kinetic_energy_fn)
one_step = generalized_two_stage_integrator(
momentum_update_fn,
position_update_fn,
coefficients,
format_output_fn=format_euclidean_state_output,
)
return one_step
return euclidean_integrator
|
Generate symplectic integrator for solving a Hamiltonian system.
The resulting integrator is volume-preserving and preserves the symplectic structure
of phase space.
|
generate_euclidean_integrator
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/integrators.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/integrators.py
|
Apache-2.0
|
def update(
momentum: ArrayTree,
logdensity_grad: ArrayTree,
step_size: float,
coef: float,
previous_kinetic_energy_change=None,
is_last_call=False,
):
"""Momentum update based on Esh dynamics.
The momentum updating map of the esh dynamics as derived in :cite:p:`steeg2021hamiltonian`
There are no exponentials e^delta, which prevents overflows when the gradient norm
is large.
"""
del is_last_call
flatten_grads, unravel_fn = ravel_pytree(logdensity_grad)
flatten_grads = flatten_grads * sqrt_inverse_mass_matrix
flatten_momentum, _ = ravel_pytree(momentum)
dims = flatten_momentum.shape[0]
normalized_gradient, gradient_norm = _normalized_flatten_array(flatten_grads)
momentum_proj = jnp.dot(flatten_momentum, normalized_gradient)
delta = step_size * coef * gradient_norm / (dims - 1)
zeta = jnp.exp(-delta)
new_momentum_raw = (
normalized_gradient * (1 - zeta) * (1 + zeta + momentum_proj * (1 - zeta))
+ 2 * zeta * flatten_momentum
)
new_momentum_normalized, _ = _normalized_flatten_array(new_momentum_raw)
gr = unravel_fn(new_momentum_normalized * sqrt_inverse_mass_matrix)
next_momentum = unravel_fn(new_momentum_normalized)
kinetic_energy_change = (
delta
- jnp.log(2)
+ jnp.log(1 + momentum_proj + (1 - momentum_proj) * zeta**2)
) * (dims - 1)
if previous_kinetic_energy_change is not None:
kinetic_energy_change += previous_kinetic_energy_change
return next_momentum, gr, kinetic_energy_change
|
Momentum update based on ESH dynamics.
The momentum updating map of the ESH dynamics as derived in :cite:p:`steeg2021hamiltonian`.
There are no exponentials e^delta, which prevents overflows when the gradient norm
is large.
|
update
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/integrators.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/integrators.py
|
Apache-2.0
|
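The update keeps the momentum on the unit sphere by construction; a hedged standalone re-derivation of the map (unit mass matrix assumed, illustrative inputs; `esh_momentum_update` is a hypothetical helper mirroring the code above):
.. code::
    import jax.numpy as jnp
    def esh_momentum_update(momentum, grad, delta):
        e = grad / jnp.linalg.norm(grad)  # normalized gradient direction
        proj = jnp.dot(momentum, e)       # momentum component along the gradient
        zeta = jnp.exp(-delta)            # only e^{-delta}: overflow-safe
        new = e * (1 - zeta) * (1 + zeta + proj * (1 - zeta)) + 2 * zeta * momentum
        return new / jnp.linalg.norm(new)
    m = jnp.array([0.6, 0.8])  # unit-norm momentum
    out = esh_momentum_update(m, jnp.array([3.0, -1.0]), delta=0.1)
    print(jnp.linalg.norm(out))  # 1.0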
def partially_refresh_momentum(momentum, rng_key, step_size, L):
"""Adds a small noise to momentum and normalizes.
Parameters
----------
rng_key
The pseudo-random number generator key used to generate random numbers.
momentum
PyTree that the structure the output should to match.
step_size
Step size
L
controls rate of momentum change
Returns
-------
momentum with random change in angle
"""
m, unravel_fn = ravel_pytree(momentum)
dim = m.shape[0]
nu = jnp.sqrt((jnp.exp(2 * step_size / L) - 1.0) / dim)
z = nu * normal(rng_key, shape=m.shape, dtype=m.dtype)
new_momentum = unravel_fn((m + z) / jnp.linalg.norm(m + z))
return jax.lax.cond(
jnp.isinf(L),
lambda _: momentum,
lambda _: new_momentum,
operand=None,
)
|
Adds small noise to the momentum and normalizes it.
Parameters
----------
rng_key
The pseudo-random number generator key used to generate random numbers.
momentum
PyTree whose structure the output should match.
step_size
Step size of the integrator.
L
Controls the rate of momentum change (the momentum decoherence rate).
Returns
-------
Momentum with a random change in angle.
|
partially_refresh_momentum
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/integrators.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/integrators.py
|
Apache-2.0
|
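A hedged sketch checking two properties of the refresh: the output stays unit-norm, and `L = inf` leaves the momentum untouched (assuming the function is importable from this module as shown):
.. code::
    import jax
    import jax.numpy as jnp
    from blackjax.mcmc.integrators import partially_refresh_momentum
    key = jax.random.PRNGKey(0)
    m = jnp.array([0.6, 0.8])  # unit-norm momentum
    refreshed = partially_refresh_momentum(m, key, step_size=0.1, L=1.0)
    print(jnp.linalg.norm(refreshed))  # 1.0
    frozen = partially_refresh_momentum(m, key, step_size=0.1, L=jnp.inf)
    print(jnp.allclose(frozen, m))  # True: infinite L disables the refresh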
def solve_fixed_point_iteration(
func: Callable[[ArrayTree], Tuple[ArrayTree, ArrayTree]],
x0: ArrayTree,
*,
convergence_tol: float = 1e-6,
divergence_tol: float = 1e10,
max_iters: int = 100,
norm_fn: Callable[[ArrayTree], float] = lambda x: jnp.max(jnp.abs(x)),
) -> Tuple[ArrayTree, ArrayTree, FixedPointIterationInfo]:
"""Solve for x = func(x) using a fixed point iteration"""
def compute_norm(x: ArrayTree, xp: ArrayTree) -> float:
return norm_fn(ravel_pytree(jax.tree_util.tree_map(jnp.subtract, x, xp))[0])
def cond_fn(args: Tuple[int, ArrayTree, ArrayTree, float]) -> bool:
n, _, _, norm = args
return (
(n < max_iters)
& jnp.isfinite(norm)
& (norm < divergence_tol)
& (norm > convergence_tol)
)
def body_fn(
args: Tuple[int, ArrayTree, ArrayTree, float]
) -> Tuple[int, ArrayTree, ArrayTree, float]:
n, x, _, _ = args
xn, aux = func(x)
norm = compute_norm(xn, x)
return n + 1, xn, aux, norm
x, aux = func(x0)
iters, x, aux, norm = jax.lax.while_loop(
cond_fn, body_fn, (0, x, aux, compute_norm(x, x0))
)
success = jnp.isfinite(norm) & (norm <= convergence_tol)
return x, aux, FixedPointIterationInfo(success, norm, iters)
|
Solve for x = func(x) using a fixed point iteration.
|
solve_fixed_point_iteration
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/integrators.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/integrators.py
|
Apache-2.0
|
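A hedged sketch: solving the classic fixed point `x = cos(x)` (the auxiliary output is not needed here, so the function returns `None` for it):
.. code::
    import jax.numpy as jnp
    from blackjax.mcmc.integrators import solve_fixed_point_iteration
    def func(x):
        return jnp.cos(x), None  # (next iterate, auxiliary output)
    x, aux, info = solve_fixed_point_iteration(func, jnp.array(1.0))
    print(x)  # ~0.739085, the Dottie number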
def implicit_midpoint(
logdensity_fn: Callable,
kinetic_energy_fn: KineticEnergy,
*,
solver: FixedPointSolver = solve_fixed_point_iteration,
**solver_kwargs: Any,
) -> Integrator:
"""The implicit midpoint integrator with support for non-stationary kinetic energy
This is an integrator based on :cite:t:`brofos2021evaluating`, which provides
support for kinetic energies that depend on position. This integrator requires that
the kinetic energy function takes two arguments: position and momentum.
The ``solver`` parameter allows overloading of the fixed point solver. By default, a
simple fixed point iteration is used, but more advanced solvers could be implemented
in the future.
"""
logdensity_and_grad_fn = jax.value_and_grad(logdensity_fn)
kinetic_energy_grad_fn = jax.grad(
lambda q, p: kinetic_energy_fn(p, position=q), argnums=(0, 1)
)
def one_step(state: IntegratorState, step_size: float) -> IntegratorState:
position, momentum, _, _ = state
def _update(
q: ArrayTree,
p: ArrayTree,
dUdq: ArrayTree,
initial: Tuple[ArrayTree, ArrayTree] = (position, momentum),
) -> Tuple[ArrayTree, ArrayTree]:
dTdq, dHdp = kinetic_energy_grad_fn(q, p)
dHdq = jax.tree_util.tree_map(jnp.subtract, dTdq, dUdq)
# Take a step from the _initial coordinates_ using the gradients of the
# Hamiltonian evaluated at the current guess for the midpoint
q = jax.tree_util.tree_map(
lambda q_, d_: q_ + 0.5 * step_size * d_, initial[0], dHdp
)
p = jax.tree_util.tree_map(
lambda p_, d_: p_ - 0.5 * step_size * d_, initial[1], dHdq
)
return q, p
# Solve for the midpoint numerically
def _step(args: ArrayTree) -> Tuple[ArrayTree, ArrayTree]:
q, p = args
_, dLdq = logdensity_and_grad_fn(q)
return _update(q, p, dLdq), dLdq
(q, p), dLdq, info = solver(_step, (position, momentum), **solver_kwargs)
del info # TODO: Track the returned info
# Take an explicit update as recommended by Brofos & Lederman
_, dLdq = logdensity_and_grad_fn(q)
q, p = _update(q, p, dLdq, initial=(q, p))
return IntegratorState(q, p, *logdensity_and_grad_fn(q))
return one_step
|
The implicit midpoint integrator with support for non-stationary kinetic energy.
This is an integrator based on :cite:t:`brofos2021evaluating`, which provides
support for kinetic energies that depend on position. This integrator requires that
the kinetic energy function takes two arguments: position and momentum.
The ``solver`` parameter allows overloading of the fixed point solver. By default, a
simple fixed point iteration is used, but more advanced solvers could be implemented
in the future.
|
implicit_midpoint
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/integrators.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/integrators.py
|
Apache-2.0
|
def build_kernel():
"""Build a MALA kernel.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
def transition_energy(state, new_state, step_size):
"""Transition energy to go from `state` to `new_state`"""
theta = jax.tree_util.tree_map(
lambda x, new_x, g: x - new_x - step_size * g,
state.position,
new_state.position,
new_state.logdensity_grad,
)
theta_dot = jax.tree_util.tree_reduce(
operator.add, jax.tree_util.tree_map(lambda x: jnp.sum(x * x), theta)
)
return -new_state.logdensity + 0.25 * (1.0 / step_size) * theta_dot
compute_acceptance_ratio = proposal.compute_asymmetric_acceptance_ratio(
transition_energy
)
sample_proposal = proposal.static_binomial_sampling
def kernel(
rng_key: PRNGKey, state: MALAState, logdensity_fn: Callable, step_size: float
) -> tuple[MALAState, MALAInfo]:
"""Generate a new sample with the MALA kernel."""
grad_fn = jax.value_and_grad(logdensity_fn)
integrator = diffusions.overdamped_langevin(grad_fn)
key_integrator, key_rmh = jax.random.split(rng_key)
new_state = integrator(key_integrator, state, step_size)
new_state = MALAState(*new_state)
log_p_accept = compute_acceptance_ratio(state, new_state, step_size=step_size)
accepted_state, info = sample_proposal(key_rmh, log_p_accept, state, new_state)
do_accept, p_accept, _ = info
info = MALAInfo(p_accept, do_accept)
return accepted_state, info
return kernel
|
Build a MALA kernel.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/mala.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/mala.py
|
Apache-2.0
|
def transition_energy(state, new_state, step_size):
"""Transition energy to go from `state` to `new_state`"""
theta = jax.tree_util.tree_map(
lambda x, new_x, g: x - new_x - step_size * g,
state.position,
new_state.position,
new_state.logdensity_grad,
)
theta_dot = jax.tree_util.tree_reduce(
operator.add, jax.tree_util.tree_map(lambda x: jnp.sum(x * x), theta)
)
return -new_state.logdensity + 0.25 * (1.0 / step_size) * theta_dot
|
Transition energy to go from `state` to `new_state`.
|
transition_energy
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/mala.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/mala.py
|
Apache-2.0
|
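Up to constants, this is `-log pi(new_state) - log q(state | new_state)` for the overdamped Langevin proposal `q`, so the log acceptance ratio is the difference of the two directions; a hedged 1-D sketch with a toy target (the `transition_energy` helper below just re-states the formula above):
.. code::
    import jax
    import jax.numpy as jnp
    logdensity_fn = lambda x: -0.5 * jnp.sum(x**2)  # toy standard-normal target
    grad_fn = jax.grad(logdensity_fn)
    step_size = 0.1
    def transition_energy(x, y):  # energy to go from x to y
        theta = x - y - step_size * grad_fn(y)
        return -logdensity_fn(y) + 0.25 / step_size * jnp.sum(theta**2)
    x, y = jnp.array([1.0]), jnp.array([0.8])
    log_p_accept = transition_energy(y, x) - transition_energy(x, y)
    print(log_p_accept)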
def kernel(
rng_key: PRNGKey, state: MALAState, logdensity_fn: Callable, step_size: float
) -> tuple[MALAState, MALAInfo]:
"""Generate a new sample with the MALA kernel."""
grad_fn = jax.value_and_grad(logdensity_fn)
integrator = diffusions.overdamped_langevin(grad_fn)
key_integrator, key_rmh = jax.random.split(rng_key)
new_state = integrator(key_integrator, state, step_size)
new_state = MALAState(*new_state)
log_p_accept = compute_acceptance_ratio(state, new_state, step_size=step_size)
accepted_state, info = sample_proposal(key_rmh, log_p_accept, state, new_state)
do_accept, p_accept, _ = info
info = MALAInfo(p_accept, do_accept)
return accepted_state, info
|
Generate a new sample with the MALA kernel.
|
kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/mala.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/mala.py
|
Apache-2.0
|
def as_top_level_api(
logdensity_fn: Callable,
step_size: float,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the MALA kernel.
The general mala kernel builder (:meth:`blackjax.mcmc.mala.build_kernel`, alias `blackjax.mala.build_kernel`) can be
cumbersome to manipulate. Since most users only need to specify the kernel
parameters at initialization time, we provide a helper function that
specializes the general kernel.
We also add the general kernel and state generator as an attribute to this class so
users only need to pass `blackjax.mala` to SMC, adaptation, etc. algorithms.
Examples
--------
A new MALA kernel can be initialized and used with the following code:
.. code::
mala = blackjax.mala(logdensity_fn, step_size)
state = mala.init(position)
new_state, info = mala.step(rng_key, state)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(mala.step)
new_state, info = step(rng_key, state)
Should you need to you can always use the base kernel directly:
.. code::
kernel = blackjax.mala.build_kernel(logdensity_fn)
state = blackjax.mala.init(position, logdensity_fn)
state, info = kernel(rng_key, state, logdensity_fn, step_size)
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel()
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position, logdensity_fn)
def step_fn(rng_key: PRNGKey, state):
return kernel(rng_key, state, logdensity_fn, step_size)
return SamplingAlgorithm(init_fn, step_fn)
|
Implements the (basic) user interface for the MALA kernel.
The general mala kernel builder (:meth:`blackjax.mcmc.mala.build_kernel`, alias `blackjax.mala.build_kernel`) can be
cumbersome to manipulate. Since most users only need to specify the kernel
parameters at initialization time, we provide a helper function that
specializes the general kernel.
We also add the general kernel and state generator as an attribute to this class so
users only need to pass `blackjax.mala` to SMC, adaptation, etc. algorithms.
Examples
--------
A new MALA kernel can be initialized and used with the following code:
.. code::
mala = blackjax.mala(logdensity_fn, step_size)
state = mala.init(position)
new_state, info = mala.step(rng_key, state)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(mala.step)
new_state, info = step(rng_key, state)
Should you need to you can always use the base kernel directly:
.. code::
kernel = blackjax.mala.build_kernel(logdensity_fn)
state = blackjax.mala.init(position, logdensity_fn)
state, info = kernel(rng_key, state, logdensity_fn, step_size)
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/mala.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/mala.py
|
Apache-2.0
|
def svd_from_covariance(covariance: Array) -> CovarianceSVD:
"""Compute the singular value decomposition of the covariance matrix.
Parameters
----------
covariance
The covariance matrix.
Returns
-------
A ``CovarianceSVD`` object.
"""
U, Gamma, U_t = jnp.linalg.svd(covariance, hermitian=True)
return CovarianceSVD(U, Gamma, U_t)
|
Compute the singular value decomposition of the covariance matrix.
Parameters
----------
covariance
The covariance matrix.
Returns
-------
A ``CovarianceSVD`` object.
|
svd_from_covariance
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/marginal_latent_gaussian.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/marginal_latent_gaussian.py
|
Apache-2.0
|
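A hedged sketch with a toy 2x2 covariance; for a symmetric positive-definite matrix the singular values coincide with the eigenvalues and the decomposition reconstructs the input:
.. code::
    import jax.numpy as jnp
    from blackjax.mcmc.marginal_latent_gaussian import svd_from_covariance
    covariance = jnp.array([[2.0, 0.5], [0.5, 1.0]])
    U, Gamma, U_t = svd_from_covariance(covariance)
    print(jnp.allclose(U @ jnp.diag(Gamma) @ U_t, covariance))  # True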
def generate_mean_shifted_logprob(logdensity_fn, mean, covariance):
"""Generate a log-density function that is shifted by a constant
Parameters
----------
logdensity_fn
The original log-density function
mean
The mean of the prior Gaussian density
covariance
The covariance of the prior Gaussian density.
Returns
-------
A log-density function that is shifted by a constant
"""
shift = linalg.solve(covariance, mean, assume_a="pos")
def shifted_logdensity_fn(x):
return logdensity_fn(x) + jnp.dot(x, shift)
return shifted_logdensity_fn
|
Generate a log-density function that is shifted by a constant.
Parameters
----------
logdensity_fn
The original log-density function
mean
The mean of the prior Gaussian density
covariance
The covariance of the prior Gaussian density.
Returns
-------
A log-density function that is shifted by a constant
|
generate_mean_shifted_logprob
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/marginal_latent_gaussian.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/marginal_latent_gaussian.py
|
Apache-2.0
|
def init(position, logdensity_fn, U_t):
"""Initialize the marginal version of the auxiliary gradient-based sampler.
Parameters
----------
position
The initial position of the chain.
logdensity_fn
The logarithm of the likelihood function for the latent Gaussian model.
U_t
The unitary array of the covariance matrix.
"""
logdensity, logdensity_grad = jax.value_and_grad(logdensity_fn)(position)
return MarginalState(
position, logdensity, logdensity_grad, U_t @ position, U_t @ logdensity_grad
)
|
Initialize the marginal version of the auxiliary gradient-based sampler.
Parameters
----------
position
The initial position of the chain.
logdensity_fn
The logarithm of the likelihood function for the latent Gaussian model.
U_t
The unitary array of the covariance matrix.
|
init
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/marginal_latent_gaussian.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/marginal_latent_gaussian.py
|
Apache-2.0
|
def build_kernel(cov_svd: CovarianceSVD):
"""Build the marginal version of the auxiliary gradient-based sampler.
Parameters
----------
cov_svd
The singular value decomposition of the covariance matrix.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
U, Gamma, U_t = cov_svd
def kernel(key: PRNGKey, state: MarginalState, logdensity_fn, delta):
y_key, u_key = jax.random.split(key, 2)
position, logdensity, logdensity_grad, U_x, U_grad_x = state
# Update Gamma(delta)
# TODO: Ideally, we could have a dichotomy, where we only update Gamma(delta) if delta changes,
# but this is hardly the most expensive part of the algorithm (the multiplication by U below is).
Gamma_1 = Gamma * delta / (delta + 2 * Gamma)
Gamma_3 = (delta + 2 * Gamma) / (delta + 4 * Gamma)
Gamma_2 = Gamma_1 / Gamma_3
# Propose a new y
temp = Gamma_1 * (U_x / (0.5 * delta) + U_grad_x)
temp = temp + jnp.sqrt(Gamma_2) * jax.random.normal(y_key, position.shape)
y = U @ temp
# Bookkeeping
log_p_y, grad_y = jax.value_and_grad(logdensity_fn)(y)
U_y = U_t @ y
U_grad_y = U_t @ grad_y
# Acceptance step
temp_x = Gamma_1 * (U_x / (0.5 * delta) + 0.5 * U_grad_x)
temp_y = Gamma_1 * (U_y / (0.5 * delta) + 0.5 * U_grad_y)
hxy = jnp.dot(U_x - temp_y, Gamma_3 * U_grad_y)
hyx = jnp.dot(U_y - temp_x, Gamma_3 * U_grad_x)
log_p_accept = log_p_y - logdensity + hxy - hyx
proposed_state = MarginalState(y, log_p_y, grad_y, U_y, U_grad_y)
accepted_state, info = static_binomial_sampling(
u_key, log_p_accept, state, proposed_state
)
do_accept, p_accept, _ = info
info = MarginalInfo(p_accept, do_accept, proposed_state)
return accepted_state, info
return kernel
|
Build the marginal version of the auxiliary gradient-based sampler.
Parameters
----------
cov_svd
The singular value decomposition of the covariance matrix.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/marginal_latent_gaussian.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/marginal_latent_gaussian.py
|
Apache-2.0
|
def build_kernel(
logdensity_fn,
inverse_mass_matrix,
integrator,
desired_energy_var_max_ratio=jnp.inf,
desired_energy_var=5e-4,
):
"""Build a HMC kernel.
Parameters
----------
integrator
The symplectic integrator to use to integrate the Hamiltonian dynamics.
L
the momentum decoherence rate.
step_size
step size of the integrator.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
step = with_isokinetic_maruyama(
integrator(logdensity_fn=logdensity_fn, inverse_mass_matrix=inverse_mass_matrix)
)
def kernel(
rng_key: PRNGKey, state: IntegratorState, L: float, step_size: float
) -> tuple[IntegratorState, MCLMCInfo]:
(position, momentum, logdensity, logdensitygrad), kinetic_change = step(
state, step_size, L, rng_key
)
energy_error = kinetic_change - logdensity + state.logdensity
eev_max_per_dim = desired_energy_var_max_ratio * desired_energy_var
ndims = pytree_size(position)
new_state, new_info = jax.lax.cond(
jnp.abs(energy_error) > jnp.sqrt(ndims * eev_max_per_dim),
lambda: (
state,
MCLMCInfo(
logdensity=state.logdensity,
energy_change=0.0,
kinetic_change=0.0,
),
),
lambda: (
IntegratorState(position, momentum, logdensity, logdensitygrad),
MCLMCInfo(
logdensity=logdensity,
energy_change=energy_error,
kinetic_change=kinetic_change,
),
),
)
return new_state, new_info
return kernel
|
Build an MCLMC kernel.
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
inverse_mass_matrix
The inverse mass matrix used by the integrator.
integrator
The symplectic integrator to use to integrate the Hamiltonian dynamics.
Returns
-------
A kernel that takes a rng_key, the current state of the chain, the momentum
decoherence rate ``L`` and the integrator step size, and returns a new state
of the chain along with information about the transition.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/mclmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/mclmc.py
|
Apache-2.0
|
def as_top_level_api(
logdensity_fn: Callable,
L,
step_size,
integrator=isokinetic_mclachlan,
inverse_mass_matrix=1.0,
desired_energy_var_max_ratio=jnp.inf,
) -> SamplingAlgorithm:
"""The general mclmc kernel builder (:meth:`blackjax.mcmc.mclmc.build_kernel`, alias `blackjax.mclmc.build_kernel`) can be
cumbersome to manipulate. Since most users only need to specify the kernel
parameters at initialization time, we provide a helper function that
specializes the general kernel.
We also add the general kernel and state generator as an attribute to this class so
users only need to pass `blackjax.mclmc` to SMC, adaptation, etc. algorithms.
Examples
--------
A new mclmc kernel can be initialized and used with the following code:
.. code::
mclmc = blackjax.mclmc(
logdensity_fn=logdensity_fn,
L=L,
step_size=step_size
)
state = mclmc.init(position, rng_key)
new_state, info = mclmc.step(rng_key, state)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(mclmc.step)
new_state, info = step(rng_key, state)
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
L
the momentum decoherence rate
step_size
step size of the integrator
integrator
an integrator. We recommend using the default here.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(
logdensity_fn,
inverse_mass_matrix,
integrator,
desired_energy_var_max_ratio=desired_energy_var_max_ratio,
)
def init_fn(position: ArrayLike, rng_key: PRNGKey):
return init(position, logdensity_fn, rng_key)
def update_fn(rng_key, state):
return kernel(rng_key, state, L, step_size)
return SamplingAlgorithm(init_fn, update_fn)
|
The general mclmc kernel builder (:meth:`blackjax.mcmc.mclmc.build_kernel`, alias `blackjax.mclmc.build_kernel`) can be
cumbersome to manipulate. Since most users only need to specify the kernel
parameters at initialization time, we provide a helper function that
specializes the general kernel.
We also add the general kernel and state generator as an attribute to this class so
users only need to pass `blackjax.mclmc` to SMC, adaptation, etc. algorithms.
Examples
--------
A new mclmc kernel can be initialized and used with the following code:
.. code::
mclmc = blackjax.mclmc(
logdensity_fn=logdensity_fn,
L=L,
step_size=step_size
)
state = mclmc.init(position, rng_key)
new_state, info = mclmc.step(rng_key, state)
Kernels are not jit-compiled by default so you will need to do it manually:
.. code::
step = jax.jit(mclmc.step)
new_state, info = step(rng_key, state)
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
L
the momentum decoherence rate
step_size
step size of the integrator
integrator
an integrator. We recommend using the default here.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/mclmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/mclmc.py
|
Apache-2.0
|
def default_metric(metric: MetricTypes) -> Metric:
"""Convert an input metric into a ``Metric`` object following sensible default rules
The metric can be specified in three different ways:
- A ``Metric`` object that implements the full interface
- An ``Array`` which is assumed to specify the inverse mass matrix of a static
metric
- A function that takes a coordinate position and returns the mass matrix at that
location
"""
if isinstance(metric, Metric):
return metric
# If the argument is a callable, we assume that it returns the mass matrix
# at the given position and return the corresponding Riemannian metric.
if callable(metric):
return gaussian_riemannian(metric)
# If we make it here then the argument should be an array, and we'll assume
# that it specifies a static inverse mass matrix.
return gaussian_euclidean(metric)
|
Convert an input metric into a ``Metric`` object following sensible default rules
The metric can be specified in three different ways:
- A ``Metric`` object that implements the full interface
- An ``Array`` which is assumed to specify the inverse mass matrix of a static
metric
- A function that takes a coordinate position and returns the mass matrix at that
location
|
default_metric
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/metrics.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/metrics.py
|
Apache-2.0
|
def gaussian_euclidean(
inverse_mass_matrix: Array,
) -> Metric:
r"""Hamiltonian dynamic on euclidean manifold with normally-distributed momentum
:cite:p:`betancourt2013general`.
The gaussian euclidean metric is a euclidean metric further characterized
by setting the conditional probability density :math:`\pi(momentum|position)`
to follow a standard gaussian distribution. A Newtonian hamiltonian
dynamics is assumed.
Parameters
----------
inverse_mass_matrix
One or two-dimensional array corresponding respectively to a diagonal
or dense mass matrix. The inverse mass matrix is multiplied to a
flattened version of the Pytree in which the chain position is stored
(the current value of the random variables). The order of the variables
should thus match JAX's tree flattening order, and more specifically
that of `ravel_pytree`.
In particular, JAX sorts dictionaries by key when flattening them. The
value of each variables will appear in the flattened Pytree following
the order given by `sort(keys)`.
Returns
-------
momentum_generator
A function that generates a value for the momentum at random.
kinetic_energy
A function that returns the kinetic energy given the momentum.
is_turning
A function that determines whether a trajectory is turning back on
itself given the values of the momentum along the trajectory.
"""
mass_matrix_sqrt, inv_mass_matrix_sqrt, diag = _format_covariance(
inverse_mass_matrix, is_inv=True
)
def momentum_generator(rng_key: PRNGKey, position: ArrayLikeTree) -> ArrayTree:
return generate_gaussian_noise(rng_key, position, sigma=mass_matrix_sqrt)
def kinetic_energy(
momentum: ArrayLikeTree, position: Optional[ArrayLikeTree] = None
) -> Numeric:
del position
momentum, _ = ravel_pytree(momentum)
velocity = linear_map(inverse_mass_matrix, momentum)
kinetic_energy_val = 0.5 * jnp.dot(velocity, momentum)
return kinetic_energy_val
def is_turning(
momentum_left: ArrayLikeTree,
momentum_right: ArrayLikeTree,
momentum_sum: ArrayLikeTree,
position_left: Optional[ArrayLikeTree] = None,
position_right: Optional[ArrayLikeTree] = None,
) -> bool:
"""Generalized U-turn criterion :cite:p:`betancourt2013generalizing,nuts_uturn`.
Parameters
----------
momentum_left
Momentum of the leftmost point of the trajectory.
momentum_right
Momentum of the rightmost point of the trajectory.
momentum_sum
Sum of the momenta along the trajectory.
"""
del position_left, position_right
m_left, _ = ravel_pytree(momentum_left)
m_right, _ = ravel_pytree(momentum_right)
m_sum, _ = ravel_pytree(momentum_sum)
velocity_left = linear_map(inverse_mass_matrix, m_left)
velocity_right = linear_map(inverse_mass_matrix, m_right)
# rho = m_sum
rho = m_sum - (m_right + m_left) / 2
turning_at_left = jnp.dot(velocity_left, rho) <= 0
turning_at_right = jnp.dot(velocity_right, rho) <= 0
return turning_at_left | turning_at_right
def scale(
position: ArrayLikeTree,
element: ArrayLikeTree,
*,
inv: bool,
trans: bool,
) -> ArrayLikeTree:
"""Scale elements by the mass matrix.
Parameters
----------
position
The current position. Not used in this metric.
element
The element to scale.
inv
Whether to scale the element by the inverse mass matrix or the mass matrix.
If True, the element is scaled by the inverse square root mass matrix, i.e., elem <- (M^{1/2})^{-1} elem.
trans
Whether to transpose the mass matrix when scaling.
Returns
-------
scaled_elements
The scaled elements.
"""
ravelled_element, unravel_fn = ravel_pytree(element)
if inv:
left_hand_side_matrix = inv_mass_matrix_sqrt
else:
left_hand_side_matrix = mass_matrix_sqrt
if trans:
left_hand_side_matrix = left_hand_side_matrix.T
scaled = linear_map(left_hand_side_matrix, ravelled_element)
return unravel_fn(scaled)
return Metric(momentum_generator, kinetic_energy, is_turning, scale)
|
Hamiltonian dynamics on a euclidean manifold with normally-distributed momentum
:cite:p:`betancourt2013general`.
The gaussian euclidean metric is a euclidean metric further characterized
by setting the conditional probability density :math:`\pi(momentum|position)`
to follow a standard gaussian distribution. Newtonian hamiltonian
dynamics are assumed.
Parameters
----------
inverse_mass_matrix
One or two-dimensional array corresponding respectively to a diagonal
or dense mass matrix. The inverse mass matrix is multiplied with a
flattened version of the Pytree in which the chain position is stored
(the current value of the random variables). The order of the variables
should thus match JAX's tree flattening order, and more specifically
that of `ravel_pytree`.
In particular, JAX sorts dictionaries by key when flattening them. The
value of each variable will appear in the flattened Pytree following
the order given by `sort(keys)`.
Returns
-------
momentum_generator
A function that generates a value for the momentum at random.
kinetic_energy
A function that returns the kinetic energy given the momentum.
is_turning
A function that determines whether a trajectory is turning back on
itself given the values of the momentum along the trajectory.
|
gaussian_euclidean
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/metrics.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/metrics.py
|
Apache-2.0
|
def is_turning(
momentum_left: ArrayLikeTree,
momentum_right: ArrayLikeTree,
momentum_sum: ArrayLikeTree,
position_left: Optional[ArrayLikeTree] = None,
position_right: Optional[ArrayLikeTree] = None,
) -> bool:
"""Generalized U-turn criterion :cite:p:`betancourt2013generalizing,nuts_uturn`.
Parameters
----------
momentum_left
Momentum of the leftmost point of the trajectory.
momentum_right
Momentum of the rightmost point of the trajectory.
momentum_sum
Sum of the momenta along the trajectory.
"""
del position_left, position_right
m_left, _ = ravel_pytree(momentum_left)
m_right, _ = ravel_pytree(momentum_right)
m_sum, _ = ravel_pytree(momentum_sum)
velocity_left = linear_map(inverse_mass_matrix, m_left)
velocity_right = linear_map(inverse_mass_matrix, m_right)
# rho = m_sum
rho = m_sum - (m_right + m_left) / 2
turning_at_left = jnp.dot(velocity_left, rho) <= 0
turning_at_right = jnp.dot(velocity_right, rho) <= 0
return turning_at_left | turning_at_right
|
Generalized U-turn criterion :cite:p:`betancourt2013generalizing,nuts_uturn`.
Parameters
----------
momentum_left
Momentum of the leftmost point of the trajectory.
momentum_right
Momentum of the rightmost point of the trajectory.
momentum_sum
Sum of the momenta along the trajectory.
|
is_turning
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/metrics.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/metrics.py
|
Apache-2.0
|
def scale(
position: ArrayLikeTree,
element: ArrayLikeTree,
*,
inv: bool,
trans: bool,
) -> ArrayLikeTree:
"""Scale elements by the mass matrix.
Parameters
----------
position
The current position. Not used in this metric.
element
The element to scale.
inv
Whether to scale the element by the inverse mass matrix or the mass matrix.
If True, the element is scaled by the inverse square root mass matrix, i.e., elem <- (M^{1/2})^{-1} elem.
trans
Whether to transpose the mass matrix when scaling.
Returns
-------
scaled_elements
The scaled elements.
"""
ravelled_element, unravel_fn = ravel_pytree(element)
if inv:
left_hand_side_matrix = inv_mass_matrix_sqrt
else:
left_hand_side_matrix = mass_matrix_sqrt
if trans:
left_hand_side_matrix = left_hand_side_matrix.T
scaled = linear_map(left_hand_side_matrix, ravelled_element)
return unravel_fn(scaled)
|
Scale elements by the mass matrix.
Parameters
----------
position
The current position. Not used in this metric.
element
The element to scale.
inv
Whether to scale the element by the inverse mass matrix or the mass matrix.
If True, the element is scaled by the inverse square root mass matrix, i.e., elem <- (M^{1/2})^{-1} elem.
trans
Whether to transpose the mass matrix when scaling.
Returns
-------
scaled_elements
The scaled elements.
|
scale
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/metrics.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/metrics.py
|
Apache-2.0
|
def scale(
position: ArrayLikeTree,
element: ArrayLikeTree,
*,
inv: bool,
trans: bool,
) -> ArrayLikeTree:
"""Scale elements by the mass matrix.
Parameters
----------
position
The current position, at which the mass matrix is evaluated.
element
The element to scale.
inv
Whether to scale the element by the inverse mass matrix or the mass matrix.
trans
Whether to transpose the mass matrix when scaling.
Returns
-------
scaled_elements
The scaled elements.
"""
mass_matrix = mass_matrix_fn(position)
mass_matrix_sqrt, inv_mass_matrix_sqrt, diag = _format_covariance(
mass_matrix, is_inv=False
)
ravelled_element, unravel_fn = ravel_pytree(element)
if inv:
left_hand_side_matrix = inv_mass_matrix_sqrt
else:
left_hand_side_matrix = mass_matrix_sqrt
if trans:
left_hand_side_matrix = left_hand_side_matrix.T
scaled = linear_map(left_hand_side_matrix, ravelled_element)
return unravel_fn(scaled)
|
Scale elements by the mass matrix.
Parameters
----------
position
The current position, at which the mass matrix is evaluated.
element
The element to scale.
inv
Whether to scale the element by the inverse mass matrix or the mass matrix.
trans
Whether to transpose the mass matrix when scaling.
Returns
-------
scaled_elements
The scaled elements.
|
scale
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/metrics.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/metrics.py
|
Apache-2.0
|
def build_kernel(
integrator: Callable = integrators.velocity_verlet,
divergence_threshold: int = 1000,
):
"""Build an iterative NUTS kernel.
This algorithm is an iteration on the original NUTS algorithm :cite:p:`hoffman2014no`
with two major differences:
- We do not use slice sampling but multinomial sampling for the proposal
:cite:p:`betancourt2017conceptual`;
- The trajectory expansion is not recursive but iterative :cite:p:`phan2019composable`,
:cite:p:`lao2020tfp`.
The implementation can seem unusual for those familiar with similar
algorithms. Indeed, we do not conceptualize the trajectory construction as
building a tree. We feel that the tree lingo, inherited from the recursive
version, is unnecessarily complicated and hides the more general concepts
upon which the NUTS algorithm is built.
NUTS, in essence, consists in sampling a trajectory by iteratively choosing
a direction at random and integrating in this direction a number of times
that doubles at every step. From this trajectory we continuously sample a
proposal. When the trajectory turns on itself or when we have reached the
maximum trajectory length we return the current proposal.
Parameters
----------
integrator
The symplectic integrator used to build trajectories.
divergence_threshold
The absolute difference in energy above which we consider
a transition "divergent".
"""
def kernel(
rng_key: PRNGKey,
state: hmc.HMCState,
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: metrics.MetricTypes,
max_num_doublings: int = 10,
) -> tuple[hmc.HMCState, NUTSInfo]:
"""Generate a new sample with the NUTS kernel."""
metric = metrics.default_metric(inverse_mass_matrix)
symplectic_integrator = integrator(logdensity_fn, metric.kinetic_energy)
proposal_generator = iterative_nuts_proposal(
symplectic_integrator,
metric.kinetic_energy,
metric.check_turning,
max_num_doublings,
divergence_threshold,
)
key_momentum, key_integrator = jax.random.split(rng_key, 2)
position, logdensity, logdensity_grad = state
momentum = metric.sample_momentum(key_momentum, position)
integrator_state = integrators.IntegratorState(
position, momentum, logdensity, logdensity_grad
)
proposal, info = proposal_generator(key_integrator, integrator_state, step_size)
proposal = hmc.HMCState(
proposal.position, proposal.logdensity, proposal.logdensity_grad
)
return proposal, info
return kernel
|
Build an iterative NUTS kernel.
This algorithm is an iteration on the original NUTS algorithm :cite:p:`hoffman2014no`
with two major differences:
- We do not use slice sampling but multinomial sampling for the proposal
:cite:p:`betancourt2017conceptual`;
- The trajectory expansion is not recursive but iterative :cite:p:`phan2019composable`,
:cite:p:`lao2020tfp`.
The implementation can seem unusual for those familiar with similar
algorithms. Indeed, we do not conceptualize the trajectory construction as
building a tree. We feel that the tree lingo, inherited from the recursive
version, is unnecessarily complicated and hides the more general concepts
upon which the NUTS algorithm is built.
NUTS, in essence, consists in sampling a trajectory by iteratively choosing
a direction at random and integrating in this direction a number of times
that doubles at every step. From this trajectory we continuously sample a
proposal. When the trajectory turns on itself or when we have reached the
maximum trajectory length we return the current proposal.
Parameters
----------
integrator
The symplectic integrator used to build trajectories.
divergence_threshold
The absolute difference in energy above which we consider
a transition "divergent".
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/nuts.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/nuts.py
|
Apache-2.0
|
def kernel(
rng_key: PRNGKey,
state: hmc.HMCState,
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: metrics.MetricTypes,
max_num_doublings: int = 10,
) -> tuple[hmc.HMCState, NUTSInfo]:
"""Generate a new sample with the NUTS kernel."""
metric = metrics.default_metric(inverse_mass_matrix)
symplectic_integrator = integrator(logdensity_fn, metric.kinetic_energy)
proposal_generator = iterative_nuts_proposal(
symplectic_integrator,
metric.kinetic_energy,
metric.check_turning,
max_num_doublings,
divergence_threshold,
)
key_momentum, key_integrator = jax.random.split(rng_key, 2)
position, logdensity, logdensity_grad = state
momentum = metric.sample_momentum(key_momentum, position)
integrator_state = integrators.IntegratorState(
position, momentum, logdensity, logdensity_grad
)
proposal, info = proposal_generator(key_integrator, integrator_state, step_size)
proposal = hmc.HMCState(
proposal.position, proposal.logdensity, proposal.logdensity_grad
)
return proposal, info
|
Generate a new sample with the NUTS kernel.
|
kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/nuts.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/nuts.py
|
Apache-2.0
|
def as_top_level_api(
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: metrics.MetricTypes,
*,
max_num_doublings: int = 10,
divergence_threshold: int = 1000,
integrator: Callable = integrators.velocity_verlet,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the nuts kernel.
Examples
--------
A new NUTS kernel can be initialized and used with the following code:
.. code::
nuts = blackjax.nuts(logdensity_fn, step_size, inverse_mass_matrix)
state = nuts.init(position)
new_state, info = nuts.step(rng_key, state)
We can JIT-compile the step function for more speed:
.. code::
step = jax.jit(nuts.step)
new_state, info = step(rng_key, state)
You can always use the base kernel should you need to:
.. code::
import blackjax.mcmc.integrators as integrators
kernel = blackjax.nuts.build_kernel(integrators.yoshida)
state = blackjax.nuts.init(position, logdensity_fn)
state, info = kernel(rng_key, state, logdensity_fn, step_size, inverse_mass_matrix)
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
inverse_mass_matrix
The value to use for the inverse mass matrix when drawing a value for
the momentum and computing the kinetic energy.
max_num_doublings
The maximum number of times we double the length of the trajectory before
returning if no U-turn has been observed or no divergence has occurred.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
integrator
(algorithm parameter) The symplectic integrator to use to integrate the trajectory.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(integrator, divergence_threshold)
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position, logdensity_fn)
def step_fn(rng_key: PRNGKey, state):
return kernel(
rng_key,
state,
logdensity_fn,
step_size,
inverse_mass_matrix,
max_num_doublings,
)
return SamplingAlgorithm(init_fn, step_fn)
|
Implements the (basic) user interface for the nuts kernel.
Examples
--------
A new NUTS kernel can be initialized and used with the following code:
.. code::
nuts = blackjax.nuts(logdensity_fn, step_size, inverse_mass_matrix)
state = nuts.init(position)
new_state, info = nuts.step(rng_key, state)
We can JIT-compile the step function for more speed:
.. code::
step = jax.jit(nuts.step)
new_state, info = step(rng_key, state)
You can always use the base kernel should you need to:
.. code::
import blackjax.mcmc.integrators as integrators
kernel = blackjax.nuts.build_kernel(integrators.yoshida)
state = blackjax.nuts.init(position, logdensity_fn)
state, info = kernel(rng_key, state, logdensity_fn, step_size, inverse_mass_matrix)
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
inverse_mass_matrix
The value to use for the inverse mass matrix when drawing a value for
the momentum and computing the kinetic energy.
max_num_doublings
The maximum number of times we double the length of the trajectory before
returning if no U-turn has been observed or no divergence has occurred.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
integrator
(algorithm parameter) The symplectic integrator to use to integrate the trajectory.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/nuts.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/nuts.py
|
Apache-2.0
|
def iterative_nuts_proposal(
integrator: Callable,
kinetic_energy: metrics.KineticEnergy,
uturn_check_fn: metrics.CheckTurning,
max_num_expansions: int = 10,
divergence_threshold: float = 1000,
) -> Callable:
"""Iterative NUTS proposal.
Parameters
----------
integrator
Symplectic integrator used to build the trajectory step by step.
kinetic_energy
Function that computes the kinetic energy.
uturn_check_fn
Function that determines whether the trajectory is turning on itself
(metric-dependent).
max_num_expansions
The maximum number of times the trajectory is expanded (doubled).
divergence_threshold
Threshold above which we say that there is a divergence.
Returns
-------
A kernel that generates a new chain state and information about the
transition.
"""
(
new_termination_state,
update_termination_state,
is_criterion_met,
) = termination.iterative_uturn_numpyro(uturn_check_fn)
trajectory_integrator = trajectory.dynamic_progressive_integration(
integrator,
kinetic_energy,
update_termination_state,
is_criterion_met,
divergence_threshold,
)
expand = trajectory.dynamic_multiplicative_expansion(
trajectory_integrator,
uturn_check_fn,
max_num_expansions,
)
def _compute_energy(state: integrators.IntegratorState) -> float:
energy = -state.logdensity + kinetic_energy(state.momentum)
return energy
def propose(rng_key, initial_state: integrators.IntegratorState, step_size):
initial_termination_state = new_termination_state(
initial_state, max_num_expansions
)
initial_energy = _compute_energy(initial_state) # H0 of the HMC step
initial_proposal = proposal.Proposal(
initial_state, initial_energy, 0.0, -np.inf
)
initial_trajectory = trajectory.Trajectory(
initial_state,
initial_state,
initial_state.momentum,
0,
)
initial_expansion_state = trajectory.DynamicExpansionState(
0, initial_proposal, initial_trajectory, initial_termination_state
)
expansion_state, info = expand(
rng_key, initial_expansion_state, initial_energy, step_size
)
is_diverging, is_turning = info
num_doublings, sampled_proposal, new_trajectory, _ = expansion_state
# Compute average acceptance probability across the entire trajectory,
# even over subtrees that may have been rejected
acceptance_rate = (
jnp.exp(sampled_proposal.sum_log_p_accept) / new_trajectory.num_states
)
info = NUTSInfo(
initial_state.momentum,
is_diverging,
is_turning,
sampled_proposal.energy,
new_trajectory.leftmost_state,
new_trajectory.rightmost_state,
num_doublings,
new_trajectory.num_states,
acceptance_rate,
)
return sampled_proposal.state, info
return propose
|
Iterative NUTS proposal.
Parameters
----------
integrator
Symplectic integrator used to build the trajectory step by step.
kinetic_energy
Function that computes the kinetic energy.
uturn_check_fn
Function that determines whether the trajectory is turning on itself
(metric-dependent).
max_num_expansions
The maximum number of times the trajectory is expanded (doubled).
divergence_threshold
Threshold above which we say that there is a divergence.
Returns
-------
A kernel that generates a new chain state and information about the
transition.
|
iterative_nuts_proposal
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/nuts.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/nuts.py
|
Apache-2.0
|
def init(
position: ArrayLikeTree, logdensity_fn: Callable, period: int
) -> PeriodicOrbitalState:
"""Create a periodic orbital state from a position.
Parameters
----------
position
the current values of the random variables whose posterior we want to
sample from. Can be anything from a list, a (named) tuple or a dict of
arrays. The arrays can either be Numpy or JAX arrays.
logdensity_fn
a function that returns the value of the log posterior when called
with a position.
period
the number of steps used to build the orbit
Returns
-------
A periodic orbital state that repeats the same position for `period` times,
sets equal weights to all positions, assigns to each position a direction from
0 to period-1, and calculates the log-density and its gradient for each
position.
"""
positions = jax.tree_util.tree_map(
lambda position: jnp.array([position for _ in range(period)]), position
)
weights = jnp.array([1 / period for _ in range(period)])
directions = jnp.arange(period)
logdensities, logdensities_grad = jax.vmap(jax.value_and_grad(logdensity_fn))(
positions
)
return PeriodicOrbitalState(
positions, weights, directions, logdensities, logdensities_grad
)
|
Create a periodic orbital state from a position.
Parameters
----------
position
the current values of the random variables whose posterior we want to
sample from. Can be anything from a list, a (named) tuple or a dict of
arrays. The arrays can either be Numpy or JAX arrays.
logdensity_fn
a function that returns the value of the log posterior when called
with a position.
period
the number of steps used to build the orbit
Returns
-------
A periodic orbital state that repeats the same position for `period` times,
sets equal weights to all positions, assigns to each position a direction from
0 to period-1, and calculates the log-density and its gradient for each
position.
|
init
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/periodic_orbital.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/periodic_orbital.py
|
Apache-2.0
|
def build_kernel(
bijection: Callable = integrators.velocity_verlet,
):
"""Build a Periodic Orbital kernel :cite:p:`neklyudov2022orbital`.
Parameters
----------
bijection
transformation used to build the orbit (given a step size).
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
def kernel(
rng_key: PRNGKey,
state: PeriodicOrbitalState,
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: Array,
period: int,
) -> tuple[PeriodicOrbitalState, PeriodicOrbitalInfo]:
"""Generate a new orbit with the Periodic Orbital kernel.
Choose a step from the orbit with probability proportional to its weights.
Then shift the direction (or alternatively sample a new direction randomly),
in order to make the algorithm irreversible, and compute a new orbit from
the selected step and its direction.
Parameters
----------
rng_key
pseudo random number generating key.
state
initial orbit.
logdensity_fn
log probability function we wish to sample from.
step_size
space between steps of the orbit.
inverse_mass_matrix
The inverse mass matrix, or a 1D array containing the elements of its diagonal.
period
total steps used to build the orbit.
Returns
-------
A kernel that chooses a step from the orbit and outputs a periodic orbital
state and information about the iteration.
"""
momentum_generator, kinetic_energy_fn, *_ = metrics.gaussian_euclidean(
inverse_mass_matrix
)
bijection_fn = bijection(logdensity_fn, kinetic_energy_fn)
proposal_generator = periodic_orbital_proposal(
bijection_fn, kinetic_energy_fn, period, step_size
)
key_choice, key_momentum = jax.random.split(rng_key, 2)
(
positions,
weights,
directions,
logdensities,
logdensities_grad,
) = state
choice_indx = jax.random.choice(key_choice, len(weights), p=weights)
position = jax.tree_util.tree_map(
lambda positions: positions[choice_indx], positions
)
direction = directions[choice_indx]
period = jnp.max(directions) + 1
direction = jnp.mod(direction + jnp.array(period / 2, int), period)
logdensity = logdensities[choice_indx]
logdensity_grad = jax.tree_util.tree_map(
lambda p_energy_grad: p_energy_grad[choice_indx], logdensities_grad
)
momentum = momentum_generator(key_momentum, position)
augmented_state = integrators.IntegratorState(
position,
momentum,
logdensity,
logdensity_grad,
)
proposal, info = proposal_generator(direction, augmented_state)
return proposal, info
return kernel
|
Build a Periodic Orbital kernel :cite:p:`neklyudov2022orbital`.
Parameters
----------
bijection
transformation used to build the orbit (given a step size).
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/periodic_orbital.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/periodic_orbital.py
|
Apache-2.0
|
def kernel(
rng_key: PRNGKey,
state: PeriodicOrbitalState,
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: Array,
period: int,
) -> tuple[PeriodicOrbitalState, PeriodicOrbitalInfo]:
"""Generate a new orbit with the Periodic Orbital kernel.
Choose a step from the orbit with probability proportional to its weights.
Then shift the direction (or alternatively sample a new direction randomly),
in order to make the algorithm irreversible, and compute a new orbit from
the selected step and its direction.
Parameters
----------
rng_key
pseudo random number generating key.
state
initial orbit.
logdensity_fn
log probability function we wish to sample from.
step_size
space between steps of the orbit.
inverse_mass_matrix
The inverse mass matrix, or a 1D array containing the elements of its diagonal.
period
total steps used to build the orbit.
Returns
-------
A kernel that chooses a step from the orbit and outputs a periodic orbital
state and information about the iteration.
"""
momentum_generator, kinetic_energy_fn, *_ = metrics.gaussian_euclidean(
inverse_mass_matrix
)
bijection_fn = bijection(logdensity_fn, kinetic_energy_fn)
proposal_generator = periodic_orbital_proposal(
bijection_fn, kinetic_energy_fn, period, step_size
)
key_choice, key_momentum = jax.random.split(rng_key, 2)
(
positions,
weights,
directions,
logdensities,
logdensities_grad,
) = state
choice_indx = jax.random.choice(key_choice, len(weights), p=weights)
position = jax.tree_util.tree_map(
lambda positions: positions[choice_indx], positions
)
direction = directions[choice_indx]
period = jnp.max(directions) + 1
direction = jnp.mod(direction + jnp.array(period / 2, int), period)
logdensity = logdensities[choice_indx]
logdensity_grad = jax.tree_util.tree_map(
lambda p_energy_grad: p_energy_grad[choice_indx], logdensities_grad
)
momentum = momentum_generator(key_momentum, position)
augmented_state = integrators.IntegratorState(
position,
momentum,
logdensity,
logdensity_grad,
)
proposal, info = proposal_generator(direction, augmented_state)
return proposal, info
|
Generate a new orbit with the Periodic Orbital kernel.
Choose a step from the orbit with probability proportional to its weights.
Then shift the direction (or alternatively sample a new direction randomly),
in order to make the algorithm irreversible, and compute a new orbit from
the selected step and its direction.
Parameters
----------
rng_key
pseudo random number generating key.
state
initial orbit.
logdensity_fn
log probability function we wish to sample from.
step_size
space between steps of the orbit.
inverse_mass_matrix
The inverse mass matrix, or a 1D array containing the elements of its diagonal.
period
total steps used to build the orbit.
Returns
-------
A kernel that chooses a step from the orbit and outputs a periodic orbital
state and information about the iteration.
|
kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/periodic_orbital.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/periodic_orbital.py
|
Apache-2.0
|
def as_top_level_api(
logdensity_fn: Callable,
step_size: float,
inverse_mass_matrix: Array, # assume momentum is always Gaussian
period: int,
*,
bijection: Callable = integrators.velocity_verlet,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the Periodic orbital MCMC kernel.
Each iteration of the periodic orbital MCMC outputs ``period`` weighted samples from
a single Hamiltonian orbit connecting the previous sample and momentum (latent) variable
with precision matrix ``inverse_mass_matrix``, evaluated using the ``bijection`` as an
integrator with discretization parameter ``step_size``.
Examples
--------
A new Periodic orbital MCMC kernel can be initialized and used with the following code:
.. code::
per_orbit = blackjax.orbital_hmc(logdensity_fn, step_size, inverse_mass_matrix, period)
state = per_orbit.init(position)
new_state, info = per_orbit.step(rng_key, state)
We can JIT-compile the step function for better performance
.. code::
step = jax.jit(per_orbit.step)
new_state, info = step(rng_key, state)
Parameters
----------
logdensity_fn
The logarithm of the probability density function we wish to draw samples from.
step_size
The value to use for the step size of the symplectic integrator used to build the orbit.
inverse_mass_matrix
The value to use for the inverse mass matrix when drawing a value for
the momentum and computing the kinetic energy.
period
The number of steps used to build the orbit.
bijection
(algorithm parameter) The symplectic integrator to use to build the orbit.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(bijection)
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position, logdensity_fn, period)
def step_fn(rng_key: PRNGKey, state):
return kernel(
rng_key,
state,
logdensity_fn,
step_size,
inverse_mass_matrix,
period,
)
return SamplingAlgorithm(init_fn, step_fn)
|
Implements the (basic) user interface for the Periodic orbital MCMC kernel.
Each iteration of the periodic orbital MCMC outputs ``period`` weighted samples from
a single Hamiltonian orbit connecting the previous sample and momentum (latent) variable
with precision matrix ``inverse_mass_matrix``, evaluated using the ``bijection`` as an
integrator with discretization parameter ``step_size``.
Examples
--------
A new Periodic orbital MCMC kernel can be initialized and used with the following code:
.. code::
per_orbit = blackjax.orbital_hmc(logdensity_fn, step_size, inverse_mass_matrix, period)
state = per_orbit.init(position)
new_state, info = per_orbit.step(rng_key, state)
We can JIT-compile the step function for better performance
.. code::
step = jax.jit(per_orbit.step)
new_state, info = step(rng_key, state)
Parameters
----------
logdensity_fn
The logarithm of the probability density function we wish to draw samples from.
step_size
The value to use for the step size of the symplectic integrator used to build the orbit.
inverse_mass_matrix
The value to use for the inverse mass matrix when drawing a value for
the momentum and computing the kinetic energy.
period
The number of steps used to build the orbit.
bijection
(algorithm parameter) The symplectic integrator to use to build the orbit.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/periodic_orbital.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/periodic_orbital.py
|
Apache-2.0
|
def periodic_orbital_proposal(
bijection: Callable,
kinetic_energy_fn: Callable,
period: int,
step_size: float,
) -> Callable:
"""Periodic Orbital algorithm.
The algorithm builds an orbit and computes the weights for each of its steps
by applying a bijection `period` times, both forwards and backwards depending
on the direction of the initial state.
Parameters
----------
bijection
continuous, differentiable and bijective transformation used to build
the orbit step by step.
kinetic_energy_fn
function that computes the kinetic energy.
period
total steps used to build the orbit.
step_size
size between each step of the orbit.
Returns
-------
A kernel that generates a new periodic orbital state and information
about the transition.
"""
def generate(
direction: int, init_state: integrators.IntegratorState
) -> tuple[PeriodicOrbitalState, PeriodicOrbitalInfo]:
"""Generate orbit by applying bijection forwards and backwards on period.
As described in algorithm 2 of :cite:p:`neklyudov2022orbital`, each iteration of the periodic orbital
MCMC takes a position and its direction, i.e. its step in the orbit, then
it runs the bijection backwards until it reaches the direction 0 and forwards
until it reaches the direction period-1. For each step it calculates its
weight using the target density, the auxiliary variable's density and the
bijection.
"""
index_steps = jnp.arange(period) - direction
def orbit_fn(state, i):
state = jax.lax.cond(
i != 0,
lambda _: bijection(state, jnp.sign(i) * step_size),
lambda _: init_state,
operand=None,
)
kinetic_energy = kinetic_energy_fn(state.momentum)
weight = state.logdensity - kinetic_energy
return state, (state, jnp.exp(weight))
_, (states, weights) = jax.lax.scan(orbit_fn, init_state, index_steps)
directions = jnp.where(
index_steps < 0, -(index_steps + 1), index_steps + direction
)
state = PeriodicOrbitalState(
states.position,
weights / jnp.sum(weights),
directions,
states.logdensity,
states.logdensity_grad,
)
info = PeriodicOrbitalInfo(
states.momentum,
jnp.mean(weights),
jnp.var(weights),
)
return state, info
return generate
|
Periodic Orbital algorithm.
The algorithm builds an orbit and computes the weights for each of its steps
by applying a bijection `period` times, both forwards and backwards depending
on the direction of the initial state.
Parameters
----------
bijection
continuous, differentiable and bijective transformation used to build
the orbit step by step.
kinetic_energy_fn
function that computes the kinetic energy.
period
total steps used to build the orbit.
step_size
size between each step of the orbit.
Returns
-------
A kernel that generates a new periodic orbital state and information
about the transition.
|
periodic_orbital_proposal
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/periodic_orbital.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/periodic_orbital.py
|
Apache-2.0
|
def generate(
direction: int, init_state: integrators.IntegratorState
) -> tuple[PeriodicOrbitalState, PeriodicOrbitalInfo]:
"""Generate orbit by applying bijection forwards and backwards on period.
As described in algorithm 2 of :cite:p:`neklyudov2022orbital`, each iteration of the periodic orbital
MCMC takes a position and its direction, i.e. its step in the orbit, then
it runs the bijection backwards until it reaches the direction 0 and forwards
until it reaches the direction period-1. For each step it calculates its
weight using the target density, the auxiliary variable's density and the
bijection.
"""
index_steps = jnp.arange(period) - direction
def orbit_fn(state, i):
state = jax.lax.cond(
i != 0,
lambda _: bijection(state, jnp.sign(i) * step_size),
lambda _: init_state,
operand=None,
)
kinetic_energy = kinetic_energy_fn(state.momentum)
weight = state.logdensity - kinetic_energy
return state, (state, jnp.exp(weight))
_, (states, weights) = jax.lax.scan(orbit_fn, init_state, index_steps)
directions = jnp.where(
index_steps < 0, -(index_steps + 1), index_steps + direction
)
state = PeriodicOrbitalState(
states.position,
weights / jnp.sum(weights),
directions,
states.logdensity,
states.logdensity_grad,
)
info = PeriodicOrbitalInfo(
states.momentum,
jnp.mean(weights),
jnp.var(weights),
)
return state, info
|
Generate orbit by applying bijection forwards and backwards on period.
As described in algorithm 2 of :cite:p:`neklyudov2022orbital`, each iteration of the periodic orbital
MCMC takes a position and its direction, i.e. its step in the orbit, then
it runs the bijection backwards until it reaches the direction 0 and forwards
until it reaches the direction period-1. For each step it calculates its
weight using the target density, the auxiliary variable's density and the
bijection.
|
generate
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/periodic_orbital.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/periodic_orbital.py
|
Apache-2.0
|
def proposal_generator(energy_fn: Callable) -> tuple[Callable, Callable]:
"""
Parameters
----------
energy_fn
A function that computes the energy associated to a given state
Returns
-------
Two functions, one to generate an initial proposal when no step has been taken,
another to generate proposals after each step.
"""
def new(state: TrajectoryState) -> Proposal:
return Proposal(state, energy_fn(state), 0.0, -jnp.inf)
def update(initial_energy: float, new_state: TrajectoryState) -> Proposal:
"""Generate a new proposal from a trajectory state.
The trajectory state records information about the position in the state
space and corresponding logdensity. A proposal also carries a
weight that is equal to the difference between the current energy and
the previous one. It thus carries information about the previous states
as well as the current state.
Parameters
----------
initial_energy:
The initial energy.
new_state:
The new state.
Returns
-------
A proposal
"""
new_energy = energy_fn(new_state)
delta_energy = safe_energy_diff(initial_energy, new_energy)
# The weight of the new proposal is equal to H0 - H(z_new)
weight = delta_energy
# Acceptance statistic min(e^{H0 - H(z_new)}, 1)
sum_log_p_accept = jnp.minimum(delta_energy, 0.0)
return Proposal(
new_state,
new_energy,
weight,
sum_log_p_accept,
)
return new, update
|
Parameters
----------
energy_fn
A function that computes the energy associated to a given state
Returns
-------
Two functions, one to generate an initial proposal when no step has been taken,
another to generate proposals after each step.
|
proposal_generator
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/proposal.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/proposal.py
|
Apache-2.0
|
def update(initial_energy: float, new_state: TrajectoryState) -> Proposal:
"""Generate a new proposal from a trajectory state.
The trajectory state records information about the position in the state
space and corresponding logdensity. A proposal also carries a
weight that is equal to the difference between the current energy and
the previous one. It thus carries information about the previous states
as well as the current state.
Parameters
----------
initial_energy:
The initial energy.
new_state:
The new state.
Returns
-------
A proposal
"""
new_energy = energy_fn(new_state)
delta_energy = safe_energy_diff(initial_energy, new_energy)
# The weight of the new proposal is equal to H0 - H(z_new)
weight = delta_energy
# Acceptance statistic min(e^{H0 - H(z_new)}, 1)
sum_log_p_accept = jnp.minimum(delta_energy, 0.0)
return Proposal(
new_state,
new_energy,
weight,
sum_log_p_accept,
)
|
Generate a new proposal from a trajectory state.
The trajectory state records information about the position in the state
space and corresponding logdensity. A proposal also carries a
weight that is equal to the difference between the current energy and
the previous one. It thus carries information about the previous states
as well as the current state.
Parameters
----------
initial_energy:
The initial energy.
new_state:
The new state.
Returns
-------
A proposal
|
update
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/proposal.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/proposal.py
|
Apache-2.0
|
def progressive_biased_sampling(
rng_key: PRNGKey, proposal: Proposal, new_proposal: Proposal
) -> Proposal:
"""Baised proposal sampling :cite:p:`betancourt2017conceptual`.
Unlike uniform sampling, biased sampling favors new proposals. It thus
biases the transition away from the trajectory's initial state.
"""
p_accept = jnp.clip(jnp.exp(new_proposal.weight - proposal.weight), max=1)
do_accept = jax.random.bernoulli(rng_key, p_accept)
new_weight = jnp.logaddexp(proposal.weight, new_proposal.weight)
new_sum_log_p_accept = jnp.logaddexp(
proposal.sum_log_p_accept, new_proposal.sum_log_p_accept
)
return jax.lax.cond(
do_accept,
lambda _: Proposal(
new_proposal.state,
new_proposal.energy,
new_weight,
new_sum_log_p_accept,
),
lambda _: Proposal(
proposal.state,
proposal.energy,
new_weight,
new_sum_log_p_accept,
),
operand=None,
)
|
Biased proposal sampling :cite:p:`betancourt2017conceptual`.
Unlike uniform sampling, biased sampling favors new proposals. It thus
biases the transition away from the trajectory's initial state.
|
progressive_biased_sampling
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/proposal.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/proposal.py
|
Apache-2.0
|
def compute_asymmetric_acceptance_ratio(transition_energy_fn: Callable) -> Callable:
"""Generate a meta function to compute the transition between two states.
In particular, both states are used to compute the energies to consider in weighting
the proposal, to account for asymmetries.
Parameters
----------
transition_energy_fn
A function that computes the energy of a transition from an initial state
to a new state, given some optional keyword arguments.
Returns
-------
A function to compute the acceptance ratio.
"""
def compute_acceptance_ratio(
initial_state: TrajectoryState,
state: TrajectoryState,
**energy_params,
) -> float:
new_energy = transition_energy_fn(initial_state, state, **energy_params)
prev_energy = transition_energy_fn(state, initial_state, **energy_params)
log_p_accept = safe_energy_diff(prev_energy, new_energy)
return log_p_accept
return compute_acceptance_ratio
|
Generate a meta function to compute the transition between two states.
In particular, both states are used to compute the energies to consider in weighting
the proposal, to account for asymmetries.
Parameters
----------
transition_energy_fn
A function that computes the energy of a transition from an initial state
to a new state, given some optional keyword arguments.
Returns
-------
A function to compute the acceptance ratio.
|
compute_asymmetric_acceptance_ratio
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/proposal.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/proposal.py
|
Apache-2.0
|
def static_binomial_sampling(
rng_key: PRNGKey, log_p_accept: float, proposal, new_proposal
):
"""Accept or reject a proposal.
In the static setting, the probability with which the new proposal is
accepted is a function of the difference in energy between the previous and
the current states. If the current energy is lower than the previous one
then the new proposal is accepted with probability 1.
"""
p_accept = jnp.clip(jnp.exp(log_p_accept), max=1)
do_accept = jax.random.bernoulli(rng_key, p_accept)
info = do_accept, p_accept, None
return (
jax.lax.cond(
do_accept,
lambda _: new_proposal,
lambda _: proposal,
operand=None,
),
info,
)
|
Accept or reject a proposal.
In the static setting, the probability with which the new proposal is
accepted is a function of the difference in energy between the previous and
the current states. If the current energy is lower than the previous one
then the new proposal is accepted with probability 1.
|
static_binomial_sampling
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/proposal.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/proposal.py
|
Apache-2.0
|
def nonreversible_slice_sampling(
slice: Array, delta_energy: float, proposal, new_proposal
):
"""Slice sampling for non-reversible Metropolis-Hasting update.
Performs a non-reversible update of a uniform [0, 1] value
for Metropolis-Hastings accept/reject decisions :cite:p:`neal2020non`, in addition
to the accept/reject step of a current state and new proposal.
"""
p_accept = jnp.clip(jnp.exp(delta_energy), max=1)
do_accept = jnp.log(jnp.abs(slice)) <= delta_energy
slice_next = slice * (jnp.exp(-delta_energy) * do_accept + (1 - do_accept))
info = do_accept, p_accept, slice_next
return (
jax.lax.cond(
do_accept,
lambda _: new_proposal,
lambda _: proposal,
operand=None,
),
info,
)
|
Slice sampling for non-reversible Metropolis-Hastings update.
Performs a non-reversible update of a uniform [0, 1] value
for Metropolis-Hastings accept/reject decisions :cite:p:`neal2020non`, in addition
to the accept/reject step of a current state and new proposal.
|
nonreversible_slice_sampling
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/proposal.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/proposal.py
|
Apache-2.0
|
def normal(sigma: Array) -> Callable:
"""Normal Random Walk proposal.
Propose a new position such that its distance to the current position is
normally distributed. Suitable for continuous variables.
Parameters
----------
sigma:
vector or matrix that contains the standard deviation of the centered
normal distribution from which we draw the move proposals.
"""
if jnp.ndim(sigma) > 2:
raise ValueError("sigma must be a vector or a matrix.")
def propose(rng_key: PRNGKey, position: ArrayLikeTree) -> ArrayTree:
return generate_gaussian_noise(rng_key, position, sigma=sigma)
return propose
|
Normal Random Walk proposal.
Propose a new position such that its distance to the current position is
normally distributed. Suitable for continuous variables.
Parameters
----------
sigma:
vector or matrix that contains the standard deviation of the centered
normal distribution from which we draw the move proposals.
|
normal
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/random_walk.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/random_walk.py
|
Apache-2.0
|
def build_additive_step():
"""Build a Random Walk Rosenbluth-Metropolis-Hastings kernel
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
def kernel(
rng_key: PRNGKey, state: RWState, logdensity_fn: Callable, random_step: Callable
) -> tuple[RWState, RWInfo]:
def proposal_generator(key_proposal, position):
move_proposal = random_step(key_proposal, position)
new_position = jax.tree_util.tree_map(jnp.add, position, move_proposal)
return new_position
inner_kernel = build_rmh()
return inner_kernel(rng_key, state, logdensity_fn, proposal_generator)
return kernel
|
Build a Random Walk Rosenbluth-Metropolis-Hastings kernel
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_additive_step
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/random_walk.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/random_walk.py
|
Apache-2.0
|
def additive_step_random_walk(
logdensity_fn: Callable, random_step: Callable
) -> SamplingAlgorithm:
"""Implements the user interface for the Additive Step RMH
Examples
--------
A new kernel can be initialized and used with the following code:
.. code::
rw = blackjax.additive_step_random_walk(logdensity_fn, random_step)
state = rw.init(position)
new_state, info = rw.step(rng_key, state)
The specific case of a Gaussian `random_step` is already implemented, either with independent components
when `covariance_matrix` is a one dimensional array or with dependent components if a two dimensional array:
.. code::
rw_gaussian = blackjax.additive_step_random_walk.normal_random_walk(logdensity_fn, covariance_matrix)
state = rw_gaussian.init(position)
new_state, info = rw_gaussian.step(rng_key, state)
Parameters
----------
logdensity_fn
The logarithm of the probability density function from which we wish to sample.
random_step
A Callable that takes a random number generator and the current state and produces a step,
which will be added to the current position to obtain a new position. Must be symmetric
to maintain detailed balance. This means that P(step|position) = P(-step | position+step)
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_additive_step()
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position, logdensity_fn)
def step_fn(rng_key: PRNGKey, state):
return kernel(rng_key, state, logdensity_fn, random_step)
return SamplingAlgorithm(init_fn, step_fn)
|
Implements the user interface for the Additive Step RMH
Examples
--------
A new kernel can be initialized and used with the following code:
.. code::
rw = blackjax.additive_step_random_walk(logdensity_fn, random_step)
state = rw.init(position)
new_state, info = rw.step(rng_key, state)
The specific case of a Gaussian `random_step` is already implemented, either with independent components
when `covariance_matrix` is a one dimensional array or with dependent components if a two dimensional array:
.. code::
rw_gaussian = blackjax.additive_step_random_walk.normal_random_walk(logdensity_fn, covariance_matrix)
state = rw_gaussian.init(position)
new_state, info = rw_gaussian.step(rng_key, state)
Parameters
----------
logdensity_fn
The logarithm of the probability density function from which we wish to sample.
random_step
A Callable that takes a random number generator and the current state and produces a step,
which will be added to the current position to obtain a new position. Must be symmetric
to maintain detailed balance. This means that P(step|position) = P(-step | position+step)
Returns
-------
A ``SamplingAlgorithm``.
|
additive_step_random_walk
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/random_walk.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/random_walk.py
|
Apache-2.0
|
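A minimal usage sketch of the interface documented above. This is a hedged illustration rather than library source: the standard-normal target and the 0.1 step scale are invented for the example.

import jax
import jax.numpy as jnp
import blackjax

logdensity_fn = lambda x: -0.5 * jnp.sum(x**2)  # standard normal target

def random_step(rng_key, position):
    # Symmetric Gaussian increment: P(step|position) = P(-step|position+step).
    return 0.1 * jax.random.normal(rng_key, shape=jnp.shape(position))

rw = blackjax.additive_step_random_walk(logdensity_fn, random_step)
state = rw.init(jnp.zeros(2))
step = jax.jit(rw.step)
rng_key = jax.random.PRNGKey(0)
for i in range(1_000):
    state, info = step(jax.random.fold_in(rng_key, i), state)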
def build_irmh() -> Callable:
"""
Build an Independent Random Walk Rosenbluth-Metropolis-Hastings kernel. This implies
that the proposal distribution does not depend on the particle being mutated :cite:p:`wang2022exact`.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
def kernel(
rng_key: PRNGKey,
state: RWState,
logdensity_fn: Callable,
proposal_distribution: Callable,
proposal_logdensity_fn: Optional[Callable] = None,
) -> tuple[RWState, RWInfo]:
"""
Parameters
----------
proposal_distribution
A function that, given a PRNGKey, produces a sample on the same
domain as the target distribution.
proposal_logdensity_fn:
For non-symmetric proposals, a function that returns the log-density
to obtain a given proposal knowing the current state. If it is not
provided we assume the proposal is symmetric.
"""
def proposal_generator(rng_key: PRNGKey, position: ArrayTree):
del position
return proposal_distribution(rng_key)
inner_kernel = build_rmh()
return inner_kernel(
rng_key, state, logdensity_fn, proposal_generator, proposal_logdensity_fn
)
return kernel
|
Build an Independent Random Walk Rosenbluth-Metropolis-Hastings kernel. This implies
that the proposal distribution does not depend on the particle being mutated :cite:p:`wang2022exact`.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_irmh
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/random_walk.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/random_walk.py
|
Apache-2.0
|
def kernel(
rng_key: PRNGKey,
state: RWState,
logdensity_fn: Callable,
proposal_distribution: Callable,
proposal_logdensity_fn: Optional[Callable] = None,
) -> tuple[RWState, RWInfo]:
"""
Parameters
----------
proposal_distribution
A function that, given a PRNGKey, produces a sample on the same
domain as the target distribution.
proposal_logdensity_fn:
For non-symmetric proposals, a function that returns the log-density
to obtain a given proposal knowing the current state. If it is not
provided we assume the proposal is symmetric.
"""
def proposal_generator(rng_key: PRNGKey, position: ArrayTree):
del position
return proposal_distribution(rng_key)
inner_kernel = build_rmh()
return inner_kernel(
rng_key, state, logdensity_fn, proposal_generator, proposal_logdensity_fn
)
|
Parameters
----------
proposal_distribution
A function that, given a PRNGKey, produces a sample on the same
domain as the target distribution.
proposal_logdensity_fn:
For non-symmetric proposals, a function that returns the log-density
to obtain a given proposal knowing the current state. If it is not
provided we assume the proposal is symmetric.
|
kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/random_walk.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/random_walk.py
|
Apache-2.0
|
def irmh_as_top_level_api(
logdensity_fn: Callable,
proposal_distribution: Callable,
proposal_logdensity_fn: Optional[Callable] = None,
) -> SamplingAlgorithm:
"""Implements the (basic) user interface for the independent RMH.
Examples
--------
A new kernel can be initialized and used with the following code:
.. code::
rmh = blackjax.irmh(logdensity_fn, proposal_distribution)
state = rmh.init(position)
new_state, info = rmh.step(rng_key, state)
We can JIT-compile the step function for better performance
.. code::
step = jax.jit(rmh.step)
new_state, info = step(rng_key, state)
Parameters
----------
logdensity_fn
The log density probability density function from which we wish to sample.
proposal_distribution
A Callable that takes a random number generator and produces a new proposal. The
proposal is independent of the sampler's current state.
proposal_logdensity_fn:
For non-symmetric proposals, a function that returns the log-density
to obtain a given proposal knowing the current state. If it is not
provided we assume the proposal is symmetric.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_irmh()
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position, logdensity_fn)
def step_fn(rng_key: PRNGKey, state):
return kernel(
rng_key,
state,
logdensity_fn,
proposal_distribution,
proposal_logdensity_fn,
)
return SamplingAlgorithm(init_fn, step_fn)
|
Implements the (basic) user interface for the independent RMH.
Examples
--------
A new kernel can be initialized and used with the following code:
.. code::
rmh = blackjax.irmh(logdensity_fn, proposal_distribution)
state = rmh.init(position)
new_state, info = rmh.step(rng_key, state)
We can JIT-compile the step function for better performance
.. code::
step = jax.jit(rmh.step)
new_state, info = step(rng_key, state)
Parameters
----------
logdensity_fn
The log density probability density function from which we wish to sample.
proposal_distribution
A Callable that takes a random number generator and produces a new proposal. The
proposal is independent of the sampler's current state.
proposal_logdensity_fn:
For non-symmetric proposals, a function that returns the log-density
to obtain a given proposal knowing the current state. If it is not
provided we assume the proposal is symmetric.
Returns
-------
A ``SamplingAlgorithm``.
|
irmh_as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/random_walk.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/random_walk.py
|
Apache-2.0
|
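A hedged usage sketch for the independent RMH interface above. The truncated-normal target and uniform proposal are invented for this example; because the uniform proposal has constant density over the box, it is symmetric in the MH sense and no proposal_logdensity_fn correction is needed.

import jax
import jax.numpy as jnp
import blackjax

# Target: standard normal restricted to the box [-3, 3]^2, up to a constant.
def logdensity_fn(x):
    in_box = jnp.all(jnp.abs(x) <= 3.0)
    return jnp.where(in_box, -0.5 * jnp.sum(x**2), -jnp.inf)

# Independent proposal: uniform over the same box, ignoring the current state.
def proposal_distribution(rng_key):
    return jax.random.uniform(rng_key, (2,), minval=-3.0, maxval=3.0)

rmh = blackjax.irmh(logdensity_fn, proposal_distribution)
state = rmh.init(jnp.zeros(2))
step = jax.jit(rmh.step)
new_state, info = step(jax.random.PRNGKey(0), state)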
def build_rmh():
"""Build a Rosenbluth-Metropolis-Hastings kernel.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
"""
def kernel(
rng_key: PRNGKey,
state: RWState,
logdensity_fn: Callable,
transition_generator: Callable,
proposal_logdensity_fn: Optional[Callable] = None,
) -> tuple[RWState, RWInfo]:
"""Move the chain by one step using the Rosenbluth Metropolis Hastings
algorithm.
Parameters
----------
rng_key:
The pseudo-random number generator key used to generate random
numbers.
logdensity_fn:
A function that returns the log-probability at a given position.
transition_generator:
A function that generates a candidate transition for the Markov chain.
proposal_logdensity_fn:
For non-symmetric proposals, a function that returns the log-density
to obtain a given proposal knowing the current state. If it is not
provided we assume the proposal is symmetric.
state:
The current state of the chain.
Returns
-------
The next state of the chain and additional information about the current
step.
"""
transition_energy = build_rmh_transition_energy(proposal_logdensity_fn)
compute_acceptance_ratio = proposal.compute_asymmetric_acceptance_ratio(
transition_energy
)
proposal_generator = rmh_proposal(
logdensity_fn, transition_generator, compute_acceptance_ratio
)
new_state, do_accept, p_accept = proposal_generator(rng_key, state)
return new_state, RWInfo(p_accept, do_accept, new_state)
return kernel
|
Build a Rosenbluth-Metropolis-Hastings kernel.
Returns
-------
A kernel that takes a rng_key and a Pytree that contains the current state
of the chain and that returns a new state of the chain along with
information about the transition.
|
build_rmh
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/random_walk.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/random_walk.py
|
Apache-2.0
|
def kernel(
rng_key: PRNGKey,
state: RWState,
logdensity_fn: Callable,
transition_generator: Callable,
proposal_logdensity_fn: Optional[Callable] = None,
) -> tuple[RWState, RWInfo]:
"""Move the chain by one step using the Rosenbluth Metropolis Hastings
algorithm.
Parameters
----------
rng_key:
The pseudo-random number generator key used to generate random
numbers.
logdensity_fn:
A function that returns the log-probability at a given position.
transition_generator:
A function that generates a candidate transition for the Markov chain.
proposal_logdensity_fn:
For non-symmetric proposals, a function that returns the log-density
to obtain a given proposal knowing the current state. If it is not
provided we assume the proposal is symmetric.
state:
The current state of the chain.
Returns
-------
The next state of the chain and additional information about the current
step.
"""
transition_energy = build_rmh_transition_energy(proposal_logdensity_fn)
compute_acceptance_ratio = proposal.compute_asymmetric_acceptance_ratio(
transition_energy
)
proposal_generator = rmh_proposal(
logdensity_fn, transition_generator, compute_acceptance_ratio
)
new_state, do_accept, p_accept = proposal_generator(rng_key, state)
return new_state, RWInfo(p_accept, do_accept, new_state)
|
Move the chain by one step using the Rosenbluth Metropolis Hastings
algorithm.
Parameters
----------
rng_key:
The pseudo-random number generator key used to generate random
numbers.
logdensity_fn:
A function that returns the log-probability at a given position.
transition_generator:
A function that generates a candidate transition for the Markov chain.
proposal_logdensity_fn:
For non-symmetric proposals, a function that returns the log-density
to obtain a given proposal knowing the current state. If it is not
provided we assume the proposal is symmetric.
state:
The current state of the chain.
Returns
-------
The next state of the chain and additional information about the current
step.
|
kernel
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/random_walk.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/random_walk.py
|
Apache-2.0
|
def rmh_as_top_level_api(
logdensity_fn: Callable,
proposal_generator: Callable[[PRNGKey, ArrayLikeTree], ArrayTree],
proposal_logdensity_fn: Optional[Callable[[ArrayLikeTree], ArrayTree]] = None,
) -> SamplingAlgorithm:
"""Implements the user interface for the RMH.
Examples
--------
A new kernel can be initialized and used with the following code:
.. code::
rmh = blackjax.rmh(logdensity_fn, proposal_generator)
state = rmh.init(position)
new_state, info = rmh.step(rng_key, state)
We can JIT-compile the step function for better performance
.. code::
step = jax.jit(rmh.step)
new_state, info = step(rng_key, state)
Parameters
----------
logdensity_fn
The log density probability density function from which we wish to sample.
proposal_generator
A Callable that takes a random number generator and the current state and produces a new proposal.
proposal_logdensity_fn
The logdensity function associated with the proposal_generator. If the generator is non-symmetric,
i.e. P(x_t|x_{t-1}) is not equal to P(x_{t-1}|x_t), then this parameter must not be None in order to apply
the Metropolis-Hastings correction for detailed balance.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_rmh()
def init_fn(position: ArrayLikeTree, rng_key=None):
del rng_key
return init(position, logdensity_fn)
def step_fn(rng_key: PRNGKey, state):
return kernel(
rng_key,
state,
logdensity_fn,
proposal_generator,
proposal_logdensity_fn,
)
return SamplingAlgorithm(init_fn, step_fn)
|
Implements the user interface for the RMH.
Examples
--------
A new kernel can be initialized and used with the following code:
.. code::
rmh = blackjax.rmh(logdensity_fn, proposal_generator)
state = rmh.init(position)
new_state, info = rmh.step(rng_key, state)
We can JIT-compile the step function for better performance
.. code::
step = jax.jit(rmh.step)
new_state, info = step(rng_key, state)
Parameters
----------
logdensity_fn
The log density probability density function from which we wish to sample.
proposal_generator
A Callable that takes a random number generator and the current state and produces a new proposal.
proposal_logdensity_fn
The logdensity function associated with the proposal_generator. If the generator is non-symmetric,
i.e. P(x_t|x_{t-1}) is not equal to P(x_{t-1}|x_t), then this parameter must not be None in order to apply
the Metropolis-Hastings correction for detailed balance.
Returns
-------
A ``SamplingAlgorithm``.
|
rmh_as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/random_walk.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/random_walk.py
|
Apache-2.0
|
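A hedged usage sketch for the RMH interface above, with a symmetric Gaussian proposal so that no proposal_logdensity_fn is required; the target and the 0.25 proposal scale are invented for the example.

import jax
import jax.numpy as jnp
import blackjax

logdensity_fn = lambda x: -0.5 * jnp.sum(x**2)

# Symmetric proposal: current position plus Gaussian noise, so
# P(x_t|x_{t-1}) = P(x_{t-1}|x_t) and the correction term can be omitted.
def proposal_generator(rng_key, position):
    return position + 0.25 * jax.random.normal(rng_key, jnp.shape(position))

rmh = blackjax.rmh(logdensity_fn, proposal_generator)
state = rmh.init(jnp.ones(3))
step = jax.jit(rmh.step)
new_state, info = step(jax.random.PRNGKey(42), state)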
def as_top_level_api(
logdensity_fn: Callable,
step_size: float,
mass_matrix: Union[metrics.Metric, Callable],
num_integration_steps: int,
*,
divergence_threshold: int = 1000,
integrator: Callable = integrators.implicit_midpoint,
) -> SamplingAlgorithm:
"""A Riemannian Manifold Hamiltonian Monte Carlo kernel
Of note, this kernel is simply an alias of the ``hmc`` kernel with a
different choice of default integrator (``implicit_midpoint`` instead of
``velocity_verlet``) since RMHMC is typically used for Hamiltonian systems
that are not separable.
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
mass_matrix
A function which computes the mass matrix (not inverse) at a given
position when drawing a value for the momentum and computing the kinetic
energy. In practice, this argument will be passed to the
``metrics.default_metric`` function so it supports all the options
discussed there.
num_integration_steps
The number of steps we take with the symplectic integrator at each
sample step before returning a sample.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
integrator
(algorithm parameter) The symplectic integrator to use to integrate the
trajectory.
Returns
-------
A ``SamplingAlgorithm``.
"""
kernel = build_kernel(integrator, divergence_threshold)
def init_fn(position: ArrayTree, rng_key=None):
del rng_key
return init(position, logdensity_fn)
def step_fn(rng_key: PRNGKey, state):
return kernel(
rng_key,
state,
logdensity_fn,
step_size,
mass_matrix,
num_integration_steps,
)
return SamplingAlgorithm(init_fn, step_fn)
|
A Riemannian Manifold Hamiltonian Monte Carlo kernel
Of note, this kernel is simply an alias of the ``hmc`` kernel with a
different choice of default integrator (``implicit_midpoint`` instead of
``velocity_verlet``) since RMHMC is typically used for Hamiltonian systems
that are not separable.
Parameters
----------
logdensity_fn
The log-density function we wish to draw samples from.
step_size
The value to use for the step size in the symplectic integrator.
mass_matrix
A function which computes the mass matrix (not inverse) at a given
position when drawing a value for the momentum and computing the kinetic
energy. In practice, this argument will be passed to the
``metrics.default_metric`` function so it supports all the options
discussed there.
num_integration_steps
The number of steps we take with the symplectic integrator at each
sample step before returning a sample.
divergence_threshold
The absolute value of the difference in energy between two states above
which we say that the transition is divergent. The default value is
commonly found in other libraries, and yet is arbitrary.
integrator
(algorithm parameter) The symplectic integrator to use to integrate the
trajectory.
Returns
-------
A ``SamplingAlgorithm``.
|
as_top_level_api
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/rmhmc.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/rmhmc.py
|
Apache-2.0
|
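A hedged sketch of the RMHMC interface above, assuming the usual top-level alias blackjax.rmhmc. The identity mass matrix makes this equivalent to plain HMC and is purely illustrative; a genuinely position-dependent metric would replace it.

import jax
import jax.numpy as jnp
import blackjax

logdensity_fn = lambda x: -0.5 * jnp.sum(x**2)

# Constant metric for illustration only; RMHMC is intended for the case
# where this callable actually depends on the position.
mass_matrix = lambda position: jnp.eye(position.shape[0])

rmhmc = blackjax.rmhmc(
    logdensity_fn,
    step_size=0.1,
    mass_matrix=mass_matrix,
    num_integration_steps=20,
)
state = rmhmc.init(jnp.zeros(2))
new_state, info = jax.jit(rmhmc.step)(jax.random.PRNGKey(0), state)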
def _leaf_idx_to_ckpt_idxs(n):
"""Find the checkpoint id from a step number."""
# computes the number of non-zero bits except the last bit
# e.g. 6 -> 2, 7 -> 2, 13 -> 2
idx_max = jnp.bitwise_count(n >> 1).astype(jnp.int32)
# computes the number of contiguous last non-zero bits
# e.g. 6 -> 0, 7 -> 3, 13 -> 1
num_subtrees = jnp.bitwise_count((~n & (n + 1)) - 1).astype(jnp.int32)
idx_min = idx_max - num_subtrees + 1
return idx_min, idx_max
|
Find the checkpoint id from a step number.
|
_leaf_idx_to_ckpt_idxs
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/termination.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/termination.py
|
Apache-2.0
|
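A quick numeric check of the bit arithmetic above, reproducing the values quoted in the comments. The loop mirrors the function body with plain Python integers rather than calling the private helper, and assumes a JAX version that provides jnp.bitwise_count (as the function itself does).

import jax.numpy as jnp

for n in (6, 7, 13):
    idx_max = jnp.bitwise_count(n >> 1).astype(jnp.int32)                    # 2, 2, 2
    num_subtrees = jnp.bitwise_count((~n & (n + 1)) - 1).astype(jnp.int32)   # 0, 3, 1
    print(n, idx_max - num_subtrees + 1, idx_max)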
def _is_iterative_turning(checkpoints, momentum_sum, momentum):
"""Checks whether there is a U-turn in the iteratively built expanded trajectory.
These checks only need to be performed as specific points.
"""
r, _ = jax.flatten_util.ravel_pytree(momentum)
r_sum, _ = jax.flatten_util.ravel_pytree(momentum_sum)
r_ckpts, r_sum_ckpts, idx_min, idx_max = checkpoints
def _body_fn(state):
i, _ = state
subtree_r_sum = r_sum - r_sum_ckpts[i] + r_ckpts[i]
return i - 1, is_turning(r_ckpts[i], r, subtree_r_sum)
_, turning = jax.lax.while_loop(
lambda it: (it[0] >= idx_min) & ~it[1], _body_fn, (idx_max, False)
)
return turning
|
Checks whether there is a U-turn in the iteratively built expanded trajectory.
These checks only need to be performed at specific points.
|
_is_iterative_turning
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/termination.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/termination.py
|
Apache-2.0
|
def append_to_trajectory(trajectory: Trajectory, state: IntegratorState) -> Trajectory:
"""Append a state to the (right of the) trajectory to form a new trajectory."""
momentum_sum = jax.tree_util.tree_map(
jnp.add, trajectory.momentum_sum, state.momentum
)
return Trajectory(
trajectory.leftmost_state, state, momentum_sum, trajectory.num_states + 1
)
|
Append a state to the (right of the) trajectory to form a new trajectory.
|
append_to_trajectory
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/trajectory.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/trajectory.py
|
Apache-2.0
|
def reorder_trajectories(
direction: int, trajectory: Trajectory, new_trajectory: Trajectory
) -> tuple[Trajectory, Trajectory]:
"""Order the two trajectories depending on the direction."""
return jax.lax.cond(
direction > 0,
lambda _: (
trajectory,
new_trajectory,
),
lambda _: (
new_trajectory,
trajectory,
),
operand=None,
)
|
Order the two trajectories depending on the direction.
|
reorder_trajectories
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/trajectory.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/trajectory.py
|
Apache-2.0
|
def static_integration(
integrator: Callable,
direction: int = 1,
) -> Callable:
"""Generate a trajectory by integrating several times in one direction."""
def integrate(
initial_state: IntegratorState, step_size, num_integration_steps
) -> IntegratorState:
directed_step_size = jax.tree_util.tree_map(
lambda step_size: direction * step_size, step_size
)
def one_step(_, state):
return integrator(state, directed_step_size)
return jax.lax.fori_loop(0, num_integration_steps, one_step, initial_state)
return integrate
|
Generate a trajectory by integrating several times in one direction.
|
static_integration
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/trajectory.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/trajectory.py
|
Apache-2.0
|
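A toy illustration of the factory above: any integrator(state, step_size) callable works, so a scalar Euler-style update (a stand-in for this sketch, not a blackjax integrator) makes the direction handling easy to see.

from blackjax.mcmc.trajectory import static_integration

# Ten steps of size 0.1 taken backwards from 0.0 give -1.0.
integrate = static_integration(lambda state, dt: state + dt, direction=-1)
final_state = integrate(0.0, 0.1, 10)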
def dynamic_progressive_integration(
integrator: Callable,
kinetic_energy: Callable,
update_termination_state: Callable,
is_criterion_met: Callable,
divergence_threshold: float,
):
"""Integrate a trajectory and update the proposal sequentially in one direction
until the termination criterion is met.
Parameters
----------
integrator
The symplectic integrator used to integrate the hamiltonian trajectory.
kinetic_energy
Function to compute the current value of the kinetic energy.
update_termination_state
Updates the state of the termination mechanism.
is_criterion_met
Determines whether the termination criterion has been met.
divergence_threshold
Value of the difference of energy between two consecutive states above
which we say a transition is divergent.
"""
_, generate_proposal = proposal_generator(hmc_energy(kinetic_energy))
sample_proposal = progressive_uniform_sampling
def integrate(
rng_key: PRNGKey,
initial_state: IntegratorState,
direction: int,
termination_state,
max_num_steps: int,
step_size,
initial_energy,
):
"""Integrate the trajectory starting from `initial_state` and update the
proposal sequentially (hence progressive) until the termination
criterion is met (hence dynamic).
Parameters
----------
rng_key
Key used by JAX's random number generator.
initial_state
The initial state from which we start expanding the trajectory.
direction int in {-1, 1}
The direction in which to expand the trajectory.
termination_state
The state that keeps track of the information needed for the
termination criterion.
max_num_steps
The maximum number of integration steps. The expansion will stop
when this number is reached if the termination criterion has not
been met.
step_size
The step size of the symplectic integrator.
initial_energy
Initial energy H0 of the HMC step (not to be confused with the initial
energy of the subtree)
"""
def do_keep_integrating(loop_state):
"""Decide whether we should continue integrating the trajectory"""
integration_state, (is_diverging, has_terminated) = loop_state
return (
(integration_state.step < max_num_steps)
& ~has_terminated
& ~is_diverging
)
def add_one_state(loop_state):
integration_state, _ = loop_state
step, proposal, trajectory, termination_state = integration_state
proposal_key = jax.random.fold_in(rng_key, step)
new_state = integrator(trajectory.rightmost_state, direction * step_size)
new_proposal = generate_proposal(initial_energy, new_state)
is_diverging = -new_proposal.weight > divergence_threshold
# At step 0, we always accept the proposal, since we
# take one step to get the leftmost state of the tree.
(new_trajectory, sampled_proposal) = jax.lax.cond(
step == 0,
lambda _: (
Trajectory(new_state, new_state, new_state.momentum, 1),
new_proposal,
),
lambda _: (
append_to_trajectory(trajectory, new_state),
sample_proposal(proposal_key, proposal, new_proposal),
),
operand=None,
)
new_termination_state = update_termination_state(
termination_state, new_trajectory.momentum_sum, new_state.momentum, step
)
has_terminated = is_criterion_met(
new_termination_state, new_trajectory.momentum_sum, new_state.momentum
)
new_integration_state = DynamicIntegrationState(
step + 1,
sampled_proposal,
new_trajectory,
new_termination_state,
)
return (new_integration_state, (is_diverging, has_terminated))
proposal_placeholder = generate_proposal(initial_energy, initial_state)
trajectory_placeholder = Trajectory(
initial_state, initial_state, initial_state.momentum, 0
)
integration_state_placeholder = DynamicIntegrationState(
0,
proposal_placeholder,
trajectory_placeholder,
termination_state,
)
new_integration_state, (is_diverging, has_terminated) = jax.lax.while_loop(
do_keep_integrating,
add_one_state,
(integration_state_placeholder, (False, False)),
)
_, proposal, trajectory, termination_state = new_integration_state
# In the while_loop we always extend on the right most direction.
new_trajectory = jax.lax.cond(
direction > 0,
lambda _: trajectory,
lambda _: Trajectory(
trajectory.rightmost_state,
trajectory.leftmost_state,
trajectory.momentum_sum,
trajectory.num_states,
),
operand=None,
)
return (
proposal,
new_trajectory,
termination_state,
is_diverging,
has_terminated,
)
return integrate
|
Integrate a trajectory and update the proposal sequentially in one direction
until the termination criterion is met.
Parameters
----------
integrator
The symplectic integrator used to integrate the hamiltonian trajectory.
kinetic_energy
Function to compute the current value of the kinetic energy.
update_termination_state
Updates the state of the termination mechanism.
is_criterion_met
Determines whether the termination criterion has been met.
divergence_threshold
Value of the difference of energy between two consecutive states above
which we say a transition is divergent.
|
dynamic_progressive_integration
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/trajectory.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/trajectory.py
|
Apache-2.0
|
def integrate(
rng_key: PRNGKey,
initial_state: IntegratorState,
direction: int,
termination_state,
max_num_steps: int,
step_size,
initial_energy,
):
"""Integrate the trajectory starting from `initial_state` and update the
proposal sequentially (hence progressive) until the termination
criterion is met (hence dynamic).
Parameters
----------
rng_key
Key used by JAX's random number generator.
initial_state
The initial state from which we start expanding the trajectory.
direction int in {-1, 1}
The direction in which to expand the trajectory.
termination_state
The state that keeps track of the information needed for the
termination criterion.
max_num_steps
The maximum number of integration steps. The expansion will stop
when this number is reached if the termination criterion has not
been met.
step_size
The step size of the symplectic integrator.
initial_energy
Initial energy H0 of the HMC step (not to be confused with the initial
energy of the subtree)
"""
def do_keep_integrating(loop_state):
"""Decide whether we should continue integrating the trajectory"""
integration_state, (is_diverging, has_terminated) = loop_state
return (
(integration_state.step < max_num_steps)
& ~has_terminated
& ~is_diverging
)
def add_one_state(loop_state):
integration_state, _ = loop_state
step, proposal, trajectory, termination_state = integration_state
proposal_key = jax.random.fold_in(rng_key, step)
new_state = integrator(trajectory.rightmost_state, direction * step_size)
new_proposal = generate_proposal(initial_energy, new_state)
is_diverging = -new_proposal.weight > divergence_threshold
# At step 0, we always accept the proposal, since we
# take one step to get the leftmost state of the tree.
(new_trajectory, sampled_proposal) = jax.lax.cond(
step == 0,
lambda _: (
Trajectory(new_state, new_state, new_state.momentum, 1),
new_proposal,
),
lambda _: (
append_to_trajectory(trajectory, new_state),
sample_proposal(proposal_key, proposal, new_proposal),
),
operand=None,
)
new_termination_state = update_termination_state(
termination_state, new_trajectory.momentum_sum, new_state.momentum, step
)
has_terminated = is_criterion_met(
new_termination_state, new_trajectory.momentum_sum, new_state.momentum
)
new_integration_state = DynamicIntegrationState(
step + 1,
sampled_proposal,
new_trajectory,
new_termination_state,
)
return (new_integration_state, (is_diverging, has_terminated))
proposal_placeholder = generate_proposal(initial_energy, initial_state)
trajectory_placeholder = Trajectory(
initial_state, initial_state, initial_state.momentum, 0
)
integration_state_placeholder = DynamicIntegrationState(
0,
proposal_placeholder,
trajectory_placeholder,
termination_state,
)
new_integration_state, (is_diverging, has_terminated) = jax.lax.while_loop(
do_keep_integrating,
add_one_state,
(integration_state_placeholder, (False, False)),
)
_, proposal, trajectory, termination_state = new_integration_state
# In the while_loop we always extend on the right most direction.
new_trajectory = jax.lax.cond(
direction > 0,
lambda _: trajectory,
lambda _: Trajectory(
trajectory.rightmost_state,
trajectory.leftmost_state,
trajectory.momentum_sum,
trajectory.num_states,
),
operand=None,
)
return (
proposal,
new_trajectory,
termination_state,
is_diverging,
has_terminated,
)
|
Integrate the trajectory starting from `initial_state` and update the
proposal sequentially (hence progressive) until the termination
criterion is met (hence dynamic).
Parameters
----------
rng_key
Key used by JAX's random number generator.
initial_state
The initial state from which we start expanding the trajectory.
direction int in {-1, 1}
The direction in which to expand the trajectory.
termination_state
The state that keeps track of the information needed for the
termination criterion.
max_num_steps
The maximum number of integration steps. The expansion will stop
when this number is reached if the termination criterion has not
been met.
step_size
The step size of the symplectic integrator.
initial_energy
Initial energy H0 of the HMC step (not to be confused with the initial
energy of the subtree)
|
integrate
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/trajectory.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/trajectory.py
|
Apache-2.0
|
def do_keep_integrating(loop_state):
"""Decide whether we should continue integrating the trajectory"""
integration_state, (is_diverging, has_terminated) = loop_state
return (
(integration_state.step < max_num_steps)
& ~has_terminated
& ~is_diverging
)
|
Decide whether we should continue integrating the trajectory
|
do_keep_integrating
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/trajectory.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/trajectory.py
|
Apache-2.0
|
def dynamic_recursive_integration(
integrator: Callable,
kinetic_energy: Callable,
uturn_check_fn: Callable,
divergence_threshold: float,
use_robust_uturn_check: bool = False,
):
"""Integrate a trajectory and update the proposal recursively in Python
until the termination criterion is met.
This is the implementation of Algorithm 6 from :cite:p:`hoffman2014no` with
multinomial sampling. The implementation here is mostly for validating the
progressive implementation and making sure the two are equivalent. The recursive
implementation should not be used for actual sampling, as it cannot be jitted
and is thus likely to be slow.
Parameters
----------
integrator
The symplectic integrator used to integrate the hamiltonian trajectory.
kinetic_energy
Function to compute the current value of the kinetic energy.
uturn_check_fn
Determines whether the termination criterion has been met.
divergence_threshold
Value of the difference of energy between two consecutive states above which we
say a transition is divergent.
use_robust_uturn_check
Bool indicating whether to perform an additional U-turn check between the two
trajectories.
"""
_, generate_proposal = proposal_generator(hmc_energy(kinetic_energy))
sample_proposal = progressive_uniform_sampling
def buildtree_integrate(
rng_key: PRNGKey,
initial_state: IntegratorState,
direction: int,
tree_depth: int,
step_size,
initial_energy: float,
):
"""Integrate the trajectory starting from `initial_state` and update
the proposal recursively with tree doubling until the termination criterion is met.
The function `buildtree_integrate` calls itself for tree_depth > 0, invoking
the recursive scheme that builds a trajectory by doubling a binary tree.
Parameters
----------
rng_key
Key used by JAX's random number generator.
initial_state
The initial state from which we start expanding the trajectory.
direction int in {-1, 1}
The direction in which to expand the trajectory.
tree_depth
The depth of the binary tree doubling.
step_size
The step size of the symplectic integrator.
initial_energy
Initial energy H0 of the HMC step (not to be confused with the initial energy
of the subtree)
"""
if tree_depth == 0:
# Base case - take one velocity_verlet step in the direction v.
next_state = integrator(initial_state, direction * step_size)
new_proposal = generate_proposal(initial_energy, next_state)
is_diverging = -new_proposal.weight > divergence_threshold
trajectory = Trajectory(next_state, next_state, next_state.momentum, 1)
return (
rng_key,
new_proposal,
trajectory,
is_diverging,
False,
)
else:
(
rng_key,
proposal,
trajectory,
is_diverging,
is_turning,
) = buildtree_integrate(
rng_key,
initial_state,
direction,
tree_depth - 1,
step_size,
initial_energy,
)
# Note that is_diverging and is_turning are updated in place
if (not is_diverging) & (not is_turning):
start_state = jax.lax.cond(
direction > 0,
lambda _: trajectory.rightmost_state,
lambda _: trajectory.leftmost_state,
operand=None,
)
(
rng_key,
new_proposal,
new_trajectory,
is_diverging,
is_turning,
) = buildtree_integrate(
rng_key,
start_state,
direction,
tree_depth - 1,
step_size,
initial_energy,
)
left_trajectory, right_trajectory = reorder_trajectories(
direction, trajectory, new_trajectory
)
trajectory = merge_trajectories(left_trajectory, right_trajectory)
if not is_turning:
is_turning = uturn_check_fn(
trajectory.leftmost_state.momentum,
trajectory.rightmost_state.momentum,
trajectory.momentum_sum,
)
if use_robust_uturn_check & (tree_depth - 1 > 0):
momentum_sum_left = jax.tree_util.tree_map(
jnp.add,
left_trajectory.momentum_sum,
right_trajectory.leftmost_state.momentum,
)
is_turning_left = uturn_check_fn(
left_trajectory.leftmost_state.momentum,
right_trajectory.leftmost_state.momentum,
momentum_sum_left,
)
momentum_sum_right = jax.tree_util.tree_map(
jnp.add,
left_trajectory.rightmost_state.momentum,
right_trajectory.momentum_sum,
)
is_turning_right = uturn_check_fn(
left_trajectory.rightmost_state.momentum,
right_trajectory.rightmost_state.momentum,
momentum_sum_right,
)
is_turning = is_turning | is_turning_left | is_turning_right
rng_key, proposal_key = jax.random.split(rng_key)
proposal = sample_proposal(proposal_key, proposal, new_proposal)
return (
rng_key,
proposal,
trajectory,
is_diverging,
is_turning,
)
return buildtree_integrate
|
Integrate a trajectory and update the proposal recursively in Python
until the termination criterion is met.
This is the implementation of Algorithm 6 from :cite:p:`hoffman2014no` with
multinomial sampling. The implementation here is mostly for validating the
progressive implementation and making sure the two are equivalent. The recursive
implementation should not be used for actual sampling, as it cannot be jitted
and is thus likely to be slow.
Parameters
----------
integrator
The symplectic integrator used to integrate the hamiltonian trajectory.
kinetic_energy
Function to compute the current value of the kinetic energy.
uturn_check_fn
Determines whether the termination criterion has been met.
divergence_threshold
Value of the difference of energy between two consecutive states above which we
say a transition is divergent.
use_robust_uturn_check
Bool indicating whether to perform an additional U-turn check between the two
trajectories.
|
dynamic_recursive_integration
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/trajectory.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/trajectory.py
|
Apache-2.0
|
def buildtree_integrate(
rng_key: PRNGKey,
initial_state: IntegratorState,
direction: int,
tree_depth: int,
step_size,
initial_energy: float,
):
"""Integrate the trajectory starting from `initial_state` and update
the proposal recursively with tree doubling until the termination criterion is met.
The function `buildtree_integrate` calls itself for tree_depth > 0, invoking
the recursive scheme that builds a trajectory by doubling a binary tree.
Parameters
----------
rng_key
Key used by JAX's random number generator.
initial_state
The initial state from which we start expanding the trajectory.
direction int in {-1, 1}
The direction in which to expand the trajectory.
tree_depth
The depth of the binary tree doubling.
step_size
The step size of the symplectic integrator.
initial_energy
Initial energy H0 of the HMC step (not to be confused with the initial energy
of the subtree)
"""
if tree_depth == 0:
# Base case - take one velocity_verlet step in the direction v.
next_state = integrator(initial_state, direction * step_size)
new_proposal = generate_proposal(initial_energy, next_state)
is_diverging = -new_proposal.weight > divergence_threshold
trajectory = Trajectory(next_state, next_state, next_state.momentum, 1)
return (
rng_key,
new_proposal,
trajectory,
is_diverging,
False,
)
else:
(
rng_key,
proposal,
trajectory,
is_diverging,
is_turning,
) = buildtree_integrate(
rng_key,
initial_state,
direction,
tree_depth - 1,
step_size,
initial_energy,
)
# Note that is_diverging and is_turning are updated in place
if (not is_diverging) & (not is_turning):
start_state = jax.lax.cond(
direction > 0,
lambda _: trajectory.rightmost_state,
lambda _: trajectory.leftmost_state,
operand=None,
)
(
rng_key,
new_proposal,
new_trajectory,
is_diverging,
is_turning,
) = buildtree_integrate(
rng_key,
start_state,
direction,
tree_depth - 1,
step_size,
initial_energy,
)
left_trajectory, right_trajectory = reorder_trajectories(
direction, trajectory, new_trajectory
)
trajectory = merge_trajectories(left_trajectory, right_trajectory)
if not is_turning:
is_turning = uturn_check_fn(
trajectory.leftmost_state.momentum,
trajectory.rightmost_state.momentum,
trajectory.momentum_sum,
)
if use_robust_uturn_check & (tree_depth - 1 > 0):
momentum_sum_left = jax.tree_util.tree_map(
jnp.add,
left_trajectory.momentum_sum,
right_trajectory.leftmost_state.momentum,
)
is_turning_left = uturn_check_fn(
left_trajectory.leftmost_state.momentum,
right_trajectory.leftmost_state.momentum,
momentum_sum_left,
)
momentum_sum_right = jax.tree_util.tree_map(
jnp.add,
left_trajectory.rightmost_state.momentum,
right_trajectory.momentum_sum,
)
is_turning_right = uturn_check_fn(
left_trajectory.rightmost_state.momentum,
right_trajectory.rightmost_state.momentum,
momentum_sum_right,
)
is_turning = is_turning | is_turning_left | is_turning_right
rng_key, proposal_key = jax.random.split(rng_key)
proposal = sample_proposal(proposal_key, proposal, new_proposal)
return (
rng_key,
proposal,
trajectory,
is_diverging,
is_turning,
)
|
Integrate the trajectory starting from `initial_state` and update
the proposal recursively with tree doubling until the termination criterion is met.
The function `buildtree_integrate` calls itself for tree_depth > 0, invoking
the recursive scheme that builds a trajectory by doubling a binary tree.
Parameters
----------
rng_key
Key used by JAX's random number generator.
initial_state
The initial state from which we start expanding the trajectory.
direction int in {-1, 1}
The direction in which to expand the trajectory.
tree_depth
The depth of the binary tree doubling.
step_size
The step size of the symplectic integrator.
initial_energy
Initial energy H0 of the HMC step (not to be confused with the initial energy
of the subtree)
|
buildtree_integrate
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/trajectory.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/trajectory.py
|
Apache-2.0
|
def dynamic_multiplicative_expansion(
trajectory_integrator: Callable,
uturn_check_fn: Callable,
max_num_expansions: int = 10,
rate: int = 2,
) -> Callable:
"""Sample a trajectory and update the proposal sequentially
until the termination criterion is met.
The trajectory is sampled with the following procedure:
1. Pick a direction at random;
2. Integrate `num_steps` steps in this direction;
3. If the integration has stopped prematurely, do not update the proposal;
4. Else if the trajectory is performing a U-turn, return the current proposal;
5. Else update the proposal, grow `num_steps` geometrically (the integrator is asked for `rate ** step` new steps at expansion `step`) and repeat from (1).
Parameters
----------
trajectory_integrator
A function that runs the symplectic integrators and returns a new proposal
and the integrated trajectory.
uturn_check_fn
Function used to check the U-Turn criterion.
step_size
The step size used by the symplectic integrator (an argument of the returned function rather than of this factory).
max_num_expansions
The maximum number of trajectory expansions until the proposal is returned.
rate
The rate of the geometrical expansion. Typically 2 in NUTS, which is why
the literature often refers to "tree doubling".
"""
proposal_sampler = progressive_biased_sampling
def expand(
rng_key: PRNGKey,
initial_expansion_state: DynamicExpansionState,
initial_energy: float,
step_size: float,
):
def do_keep_expanding(loop_state) -> bool:
"""Determine whether we need to keep expanding the trajectory."""
expansion_state, (is_diverging, is_turning) = loop_state
return (
(expansion_state.step < max_num_expansions)
& ~is_diverging
& ~is_turning
)
def expand_once(loop_state):
"""Expand the current trajectory.
At each step we draw a direction at random and build a subtrajectory,
starting from the leftmost or rightmost point of the current trajectory,
so that the merged trajectory is twice as long as the current one.
Once that is done, possibly update the current proposal with that of
the subtrajectory.
"""
expansion_state, _ = loop_state
step, proposal, trajectory, termination_state = expansion_state
subkey = jax.random.fold_in(rng_key, step)
direction_key, trajectory_key, proposal_key = jax.random.split(subkey, 3)
# Create a new subtrajectory so that, once merged, the trajectory
# is twice as long as the current one.
direction = jnp.where(jax.random.bernoulli(direction_key), 1, -1)
start_state = jax.lax.cond(
direction > 0,
lambda _: trajectory.rightmost_state,
lambda _: trajectory.leftmost_state,
operand=None,
)
(
new_proposal,
new_trajectory,
termination_state,
is_diverging,
is_turning_subtree,
) = trajectory_integrator(
trajectory_key,
start_state,
direction,
termination_state,
rate**step,
step_size,
initial_energy,
)
# Update the proposal
#
# We do not accept proposals that come from diverging or turning
# subtrajectories. However, the acceptance probability is defined so
# that it must be accumulated across the entire trajectory.
def update_sum_log_p_accept(inputs):
_, proposal, new_proposal = inputs
return Proposal(
proposal.state,
proposal.energy,
proposal.weight,
jnp.logaddexp(
proposal.sum_log_p_accept, new_proposal.sum_log_p_accept
),
)
updated_proposal = jax.lax.cond(
is_diverging | is_turning_subtree,
update_sum_log_p_accept,
lambda x: proposal_sampler(*x),
operand=(proposal_key, proposal, new_proposal),
)
# Is the full trajectory making a U-Turn?
#
# We first merge the subtrajectory that was just generated with the
# trajectory and check the U-turn criterion on the whole trajectory.
left_trajectory, right_trajectory = reorder_trajectories(
direction, trajectory, new_trajectory
)
merged_trajectory = merge_trajectories(left_trajectory, right_trajectory)
is_turning = uturn_check_fn(
merged_trajectory.leftmost_state.momentum,
merged_trajectory.rightmost_state.momentum,
merged_trajectory.momentum_sum,
)
new_state = DynamicExpansionState(
step + 1, updated_proposal, merged_trajectory, termination_state
)
info = (is_diverging, is_turning_subtree | is_turning)
return (new_state, info)
expansion_state, (is_diverging, is_turning) = jax.lax.while_loop(
do_keep_expanding,
expand_once,
(initial_expansion_state, (False, False)),
)
return expansion_state, (is_diverging, is_turning)
return expand
|
Sample a trajectory and update the proposal sequentially
until the termination criterion is met.
The trajectory is sampled with the following procedure:
1. Pick a direction at random;
2. Integrate `num_steps` steps in this direction;
3. If the integration has stopped prematurely, do not update the proposal;
4. Else if the trajectory is performing a U-turn, return the current proposal;
5. Else update the proposal, grow `num_steps` geometrically (the integrator is asked for `rate ** step` new steps at expansion `step`) and repeat from (1).
Parameters
----------
trajectory_integrator
A function that runs the symplectic integrators and returns a new proposal
and the integrated trajectory.
uturn_check_fn
Function used to check the U-Turn criterion.
step_size
The step size used by the symplectic integrator (an argument of the returned function rather than of this factory).
max_num_expansions
The maximum number of trajectory expansions until the proposal is returned.
rate
The rate of the geometrical expansion. Typically 2 in NUTS, which is why
the literature often refers to "tree doubling".
|
dynamic_multiplicative_expansion
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/trajectory.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/trajectory.py
|
Apache-2.0
|
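A small arithmetic check of the expansion schedule described above: at expansion `step` the integrator is asked for rate ** step new steps, so with rate = 2 the total trajectory length roughly doubles each round.

rate = 2
total = 0
for step in range(5):
    total += rate**step
    print(step, rate**step, total)
# output: 0 1 1 / 1 2 3 / 2 4 7 / 3 8 15 / 4 16 31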
def do_keep_expanding(loop_state) -> bool:
"""Determine whether we need to keep expanding the trajectory."""
expansion_state, (is_diverging, is_turning) = loop_state
return (
(expansion_state.step < max_num_expansions)
& ~is_diverging
& ~is_turning
)
|
Determine whether we need to keep expanding the trajectory.
|
do_keep_expanding
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/trajectory.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/trajectory.py
|
Apache-2.0
|
def expand_once(loop_state):
"""Expand the current trajectory.
At each step we draw a direction at random and build a subtrajectory,
starting from the leftmost or rightmost point of the current trajectory,
so that the merged trajectory is twice as long as the current one.
Once that is done, possibly update the current proposal with that of
the subtrajectory.
"""
expansion_state, _ = loop_state
step, proposal, trajectory, termination_state = expansion_state
subkey = jax.random.fold_in(rng_key, step)
direction_key, trajectory_key, proposal_key = jax.random.split(subkey, 3)
# Create a new subtrajectory so that, once merged, the trajectory
# is twice as long as the current one.
direction = jnp.where(jax.random.bernoulli(direction_key), 1, -1)
start_state = jax.lax.cond(
direction > 0,
lambda _: trajectory.rightmost_state,
lambda _: trajectory.leftmost_state,
operand=None,
)
(
new_proposal,
new_trajectory,
termination_state,
is_diverging,
is_turning_subtree,
) = trajectory_integrator(
trajectory_key,
start_state,
direction,
termination_state,
rate**step,
step_size,
initial_energy,
)
# Update the proposal
#
# We do not accept proposals that come from diverging or turning
# subtrajectories. However, the acceptance probability is defined so
# that it must be accumulated across the entire trajectory.
def update_sum_log_p_accept(inputs):
_, proposal, new_proposal = inputs
return Proposal(
proposal.state,
proposal.energy,
proposal.weight,
jnp.logaddexp(
proposal.sum_log_p_accept, new_proposal.sum_log_p_accept
),
)
updated_proposal = jax.lax.cond(
is_diverging | is_turning_subtree,
update_sum_log_p_accept,
lambda x: proposal_sampler(*x),
operand=(proposal_key, proposal, new_proposal),
)
# Is the full trajectory making a U-Turn?
#
# We first merge the subtrajectory that was just generated with the
# trajectory and check the U-turn criterion on the whole trajectory.
left_trajectory, right_trajectory = reorder_trajectories(
direction, trajectory, new_trajectory
)
merged_trajectory = merge_trajectories(left_trajectory, right_trajectory)
is_turning = uturn_check_fn(
merged_trajectory.leftmost_state.momentum,
merged_trajectory.rightmost_state.momentum,
merged_trajectory.momentum_sum,
)
new_state = DynamicExpansionState(
step + 1, updated_proposal, merged_trajectory, termination_state
)
info = (is_diverging, is_turning_subtree | is_turning)
return (new_state, info)
|
Expand the current trajectory.
At each step we draw a direction at random and build a subtrajectory,
starting from the leftmost or rightmost point of the current trajectory,
so that the merged trajectory is twice as long as the current one.
Once that is done, possibly update the current proposal with that of
the subtrajectory.
|
expand_once
|
python
|
blackjax-devs/blackjax
|
blackjax/mcmc/trajectory.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/mcmc/trajectory.py
|
Apache-2.0
|
def dual_averaging(
t0: int = 10, gamma: float = 0.05, kappa: float = 0.75
) -> tuple[Callable, Callable, Callable]:
"""Find the state that minimizes an objective function using a primal-dual
subgradient method.
See :cite:p:`nesterov2009primal` for a detailed explanation of the algorithm and its mathematical
properties.
Parameters
----------
t0: float >= 0
Free parameter that stabilizes the initial iterations of the algorithm.
Large values may slow down convergence. Introduced in :cite:p:`hoffman2014no` with a default
value of 10.
gamma
Controls the speed of convergence of the scheme. The authors of :cite:p:`hoffman2014no` recommend
a value of 0.05.
kappa: float in ]0.5, 1]
Controls the weights of past steps in the current update. The scheme will
quickly forget earlier steps for a small value of `kappa`. Introduced
in :cite:p:`hoffman2014no`, with a recommended value of 0.75.
Returns
-------
init
A function that initializes the state of the dual averaging scheme.
update
A function that updates the state of the dual averaging scheme.
final
A function that returns the state that minimizes the objective function.
"""
def init(x_init: float) -> DualAveragingState:
"""Initialize the state of the dual averaging scheme.
The parameter :math:`\\mu` is set to :math:`\\log(10 x_{\\text{init}})`
where :math:`x_{\\text{init}}` is the initial value of the state.
"""
mu: float = jnp.log(10 * x_init)
step = 1
avg_error: float = 0.0
log_x: float = jnp.log(x_init)
log_x_avg: float = 0.0
return DualAveragingState(log_x, log_x_avg, step, avg_error, mu)
def update(da_state: DualAveragingState, gradient) -> DualAveragingState:
"""Update the state of the Dual Averaging adaptive algorithm.
Parameters
----------
gradient:
The gradient of the function to optimize with respect to the state
`x`, computed at the current value of `x`.
da_state:
The current state of the dual averaging algorithm.
Returns
-------
The updated state of the dual averaging algorithm.
"""
log_step, avg_log_step, step, avg_error, mu = da_state
reg_step = step + t0
eta_t = step ** (-kappa)
avg_error = (1 - (1 / (reg_step))) * avg_error + gradient / reg_step
log_x = mu - (jnp.sqrt(step) / gamma) * avg_error
log_x_avg = eta_t * log_x + (1 - eta_t) * avg_log_step  # average the new iterate
return DualAveragingState(log_x, log_x_avg, step + 1, avg_error, mu)
def final(da_state: DualAveragingState) -> float:
"""Returns the state that minimizes the objective function."""
return jnp.exp(da_state.log_x_avg)
return init, update, final
|
Find the state that minimizes an objective function using a primal-dual
subgradient method.
See :cite:p:`nesterov2009primal` for a detailed explanation of the algorithm and its mathematical
properties.
Parameters
----------
t0: float >= 0
Free parameter that stabilizes the initial iterations of the algorithm.
Large values may slow down convergence. Introduced in :cite:p:`hoffman2014no` with a default
value of 10.
gamma
Controls the speed of convergence of the scheme. The authors of :cite:p:`hoffman2014no` recommend
a value of 0.05.
kappa: float in ]0.5, 1]
Controls the weights of past steps in the current update. The scheme will
quickly forget earlier steps for a small value of `kappa`. Introduced
in :cite:p:`hoffman2014no`, with a recommended value of 0.75.
Returns
-------
init
A function that initializes the state of the dual averaging scheme.
update
A function that updates the state of the dual averaging scheme.
final
A function that returns the state that minimizes the objective function.
|
dual_averaging
|
python
|
blackjax-devs/blackjax
|
blackjax/optimizers/dual_averaging.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/optimizers/dual_averaging.py
|
Apache-2.0
|
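A hedged usage sketch of the scheme above, driving it with a synthetic error signal: feeding x - target as the "gradient" should make the averaged iterate settle near target. The import path follows the record above; the assumption that the state exposes a log_x field matches the constructor shown in the source.

import jax.numpy as jnp
from blackjax.optimizers.dual_averaging import dual_averaging

init, update, final = dual_averaging()
state = init(1.0)
target = 0.5
for _ in range(500):
    x = jnp.exp(state.log_x)            # current iterate
    state = update(state, x - target)   # synthetic "gradient" of the objective
print(final(state))  # close to 0.5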
def init(x_init: float) -> DualAveragingState:
"""Initialize the state of the dual averaging scheme.
The parameter :math:`\\mu` is set to :math:`\\log(10 x_{\\text{init}})`
where :math:`x_{\\text{init}}` is the initial value of the state.
"""
mu: float = jnp.log(10 * x_init)
step = 1
avg_error: float = 0.0
log_x: float = jnp.log(x_init)
log_x_avg: float = 0.0
return DualAveragingState(log_x, log_x_avg, step, avg_error, mu)
|
Initialize the state of the dual averaging scheme.
The parameter :math:`\mu` is set to :math:`\log(10 x_{\text{init}})`
where :math:`x_{\text{init}}` is the initial value of the state.
|
init
|
python
|
blackjax-devs/blackjax
|
blackjax/optimizers/dual_averaging.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/optimizers/dual_averaging.py
|
Apache-2.0
|
def update(da_state: DualAveragingState, gradient) -> DualAveragingState:
"""Update the state of the Dual Averaging adaptive algorithm.
Parameters
----------
gradient:
The gradient of the function to optimize with respect to the state
`x`, computed at the current value of `x`.
da_state:
The current state of the dual averaging algorithm.
Returns
-------
The updated state of the dual averaging algorithm.
"""
log_step, avg_log_step, step, avg_error, mu = da_state
reg_step = step + t0
eta_t = step ** (-kappa)
avg_error = (1 - (1 / (reg_step))) * avg_error + gradient / reg_step
log_x = mu - (jnp.sqrt(step) / gamma) * avg_error
log_x_avg = eta_t * log_x + (1 - eta_t) * avg_log_step  # average the new iterate
return DualAveragingState(log_x, log_x_avg, step + 1, avg_error, mu)
|
Update the state of the Dual Averaging adaptive algorithm.
Parameters
----------
gradient:
The gradient of the function to optimize with respect to the state
`x`, computed at the current value of `x`.
da_state:
The current state of the dual averaging algorithm.
Returns
-------
The updated state of the dual averaging algorithm.
|
update
|
python
|
blackjax-devs/blackjax
|
blackjax/optimizers/dual_averaging.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/optimizers/dual_averaging.py
|
Apache-2.0
|