code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def get_dataset_type_fast(file_path: str, max_chars: int = 100) -> Union[str, None]:
'''Get the type value from the first and last n lines of a large JSON dataset.
'''
file_content_preview = []
dataset_type = None
dataset_type_pattern = re.compile(r'[\"\']type[\"\']:\s*[\'\"]([^"]+)[\'\"]')
file_content_preview.extend(preview_file(file_path, max_chars))
for content in file_content_preview:
try:
dataset_type = dataset_type_pattern.search(content).group(1)
break
except AttributeError:
continue
return dataset_type
|
Get the type value from the first and last n lines of a large JSON dataset.
|
get_dataset_type_fast
|
python
|
OptimalScale/LMFlow
|
src/lmflow/utils/data_utils.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/data_utils.py
|
Apache-2.0
|
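A minimal, self-contained sketch of the same probe-with-regex idea. `preview_file` is not shown above, so a hypothetical stand-in that peeks at the first and last `max_chars` characters of the file is used here:

```python
import re
from typing import Union

def probe_dataset_type(file_path: str, max_chars: int = 100) -> Union[str, None]:
    # Hypothetical stand-in for preview_file: peek at the head and tail of the file.
    with open(file_path, "r", encoding="utf-8") as f:
        text = f.read()
    previews = [text[:max_chars], text[-max_chars:]]
    pattern = re.compile(r'["\']type["\']:\s*["\']([^"\']+)["\']')
    for chunk in previews:
        match = pattern.search(chunk)
        if match:
            return match.group(1)
    return None

# A file starting with {"type": "text_only", "instances": [...]} yields "text_only".
```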
def check_dataset_instances_key_fast(file_path: str, instances_key: str, max_lines: int = 100) -> bool:
'''Check if the dataset's instances key matches the provided instances_key.
'''
file_content_preview = []
instance_key_pattern = re.compile(r'[\"\']' + instances_key + r'[\"\']')
file_content_preview.extend(preview_file(file_path, max_lines))
for content in file_content_preview:
if instance_key_pattern.search(content):
return True
return False
|
Check if the dataset's instances key matches the provided instances_key.
|
check_dataset_instances_key_fast
|
python
|
OptimalScale/LMFlow
|
src/lmflow/utils/data_utils.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/data_utils.py
|
Apache-2.0
|
def answer_extraction(response, answer_type=None): # use this function to extract answers from generated text
"""
Use this function to extract answers from generated text
Parameters
------------
args :
Arguments.
response : str
plain string response.
Returns
------------
answer:
Decoded answer (such as A, B, C, D, E for multiple-choice QA).
"""
# temp = response["generated_text"]
temp = response
if answer_type in ("gsm8k", "svamp", "asdiv", "addsub", "singleeq", "multiarith", "math"):
temp = temp.replace(",", "")
temp = [s for s in re.findall(r'-?\d+\.?\d*', temp)]
elif answer_type in ("aqua", "csqa", "multiple_choice"):
temp = re.findall(r'A|B|C|D|E', temp)
elif answer_type in ("strategyqa", "coin_flip"):
temp = temp.lower()
temp = re.sub(r"\"|\'|\n|\.|\s|\:|\,", " ", temp)
temp = temp.split(" ")
temp = [i for i in temp if i in ("yes", "no")]
elif answer_type in ("last_letters"):
temp = re.sub(r"\"|\'|\n|\.|\s", "", temp)
temp = [temp]
elif answer_type in ("pubmedqa", "binary_choice"):
# pattern = "Output: (yes|no|maybe)"
# sttr = re.search(pattern, temp)
# answer = sttr.group(0)[8:] if sttr is not None else "N/A"
pattern = "(answer|Answer|ANSWER|output|Output|OUTPUT|A): \(*(yes|Yes|YES|no|No|NO|maybe|Maybe|MAYBE)"
sttr = re.search(pattern, temp)
if sttr is not None:
mid_answer = sttr.group(0)
mid_answer = mid_answer.split(":")[-1].strip()
answer = mid_answer.lower()
else:
pattern = "(yes|Yes|YES|no|No|NO|maybe|Maybe|MAYBE)(\.|\s)"
sttr = re.search(pattern, temp)
if sttr is not None:
answer = sttr.group(0)[:-1].lower()
else:
answer = "N/A"
return answer
elif answer_type == "medmcqa":
# pattern = "Output: (A|B|C|D)."
# sttr = re.search(pattern, temp)
# answer = sttr.group(0)[8:-1].lower() if sttr is not None else "N/A"
pattern = "(answer|Answer|ANSWER|output|Output|OUTPUT|A): \(*(A|B|C|D|a|b|c|d)"
sttr = re.search(pattern, temp)
if sttr is not None:
mid_answer = sttr.group(0)
answer = mid_answer[-1].lower()
else:
pattern = "\(*(A|B|C|D|a|b|c|d)\)*(\.|\s)"
sttr = re.search(pattern, temp)
if sttr is not None:
if '(' in sttr.group(0):
answer = sttr.group(0)[1].lower()
else:
answer = sttr.group(0)[0].lower()
else:
answer = "N/A"
return answer
elif answer_type == "usmle":
# pattern = "Output: (A|B|C|D)."
# sttr = re.search(pattern, temp)
# answer = sttr.group(0)[8:-1].lower() if sttr is not None else "N/A"
pattern = "(Answer|Output|A): \(*(A|B|C|D|a|b|c|d)"
sttr = re.search(pattern, temp)
if sttr is not None:
mid_answer = sttr.group(0)
answer = mid_answer[-1].lower()
else:
pattern = "\(*(A|B|C|D|a|b|c|d)\)*(\.|\s)"
sttr = re.search(pattern, temp)
if sttr is not None:
if '(' in sttr.group(0):
answer = sttr.group(0)[1].lower()
else:
answer = sttr.group(0)[0].lower()
else:
answer = "N/A"
return answer
elif answer_type == "text":
return response
else:
raise NotImplementedError(f"Unsupported answer type: {answer_type}")
if len(temp) != 0:
answer = temp[-1]
# if there is . at the end of answer, remove it
# e.g. answer = 64.
if answer != "":
if answer[-1] == ".":
answer = answer[:-1]
# round the answer to nearest integer
if answer_type in ("gsm8k", "svamp"):
try:
answer = str(round(float(answer)))
except:
answer = "" # no sol or sol doesn't have valid format
elif answer_type == "last_letters":
try:
# NOTE: relies on an `args` object with a `concat_length` attribute existing in the enclosing scope
answer = answer[-args.concat_length:]
except:
answer = ""
else:
answer = ""
return answer
|
Use this function to extract answers from generated text
Parameters
------------
args :
Arguments.
response : str
plain string response.
Returns
------------
answer:
Decoded answer (such as A, B, C, D, E for multiple-choice QA).
|
answer_extraction
|
python
|
OptimalScale/LMFlow
|
src/lmflow/utils/data_utils.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/data_utils.py
|
Apache-2.0
|
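A couple of hedged usage sketches for the branches above, assuming `answer_extraction` is importable; the outputs follow the regex logic shown:

```python
# Numeric extraction: the last number wins, a trailing "." is stripped, gsm8k rounds.
print(answer_extraction("The total is 3 + 4 = 7. So the answer is 7.", answer_type="gsm8k"))
# -> "7"

# Multiple choice: the last A-E letter found in the text is returned.
print(answer_extraction("I think the correct option is (B).", answer_type="multiple_choice"))
# -> "B"
```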
def format(self, **kwargs) -> list:
"""Format the string components with the provided keyword arguments.
Mostly used for formatting system prompt, user and assistant messages.
Parameters
----------
**kwargs : dict
Keyword arguments containing values to replace in the template components.
Returns
-------
list
Formatted template.
"""
formatted_template = []
for component in self.template:
if component.type == 'string':
for key, value in kwargs.items():
templated = component.content.replace("{{" + key + "}}", value)
if len(templated) == 0:
logger.warning("Found empty string after formatting, adding a space instead. "
"If this is not intended, please check the dataset.")
templated = " "
formatted_template.append(TemplateComponent(type='string', content=templated))
else:
formatted_template.append(component)
logger.debug(formatted_template)
return formatted_template
|
Format the string components with the provided keyword arguments.
Mostly used for formatting system prompt, user and assistant messages.
Parameters
----------
**kwargs : dict
Keyword arguments containing values to replace in the template components.
Returns
-------
list
Formatted template.
|
format
|
python
|
OptimalScale/LMFlow
|
src/lmflow/utils/conversation_template/base.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/conversation_template/base.py
|
Apache-2.0
|
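A minimal sketch of the `{{key}}` substitution performed above; the `TemplateComponent` here is a hypothetical stand-in for the class LMFlow uses:

```python
from dataclasses import dataclass

@dataclass
class TemplateComponent:
    type: str
    content: str

template = [
    TemplateComponent(type="string", content="User: {{user_message}}\n"),
    TemplateComponent(type="token", content="eos_token"),
]
kwargs = {"user_message": "hi"}
for component in template:
    if component.type == "string":
        for key, value in kwargs.items():
            print(component.content.replace("{{" + key + "}}", value))
# -> "User: hi\n"; non-string components are passed through unchanged.
```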
def encode_conversation(
self,
tokenizer: PreTrainedTokenizer,
messages: List[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[List[str]] = None,
**kwargs
) -> Sequence[Tuple[List[int], List[int]]]:
r'''
Messages here should be guaranteed to be in pairs, with the first message being the user message and the second message being the assistant message.
Data example:
```json
{
"conversation_id": 2,
"system": "sysinfo1",
"tools": ["tool_1_desc"],
"messages": [
{
"role": "user",
"content": "hi"
},
{
"role": "assistant",
"content": "Hello!"
}
]
}
```
'''
assert isinstance(messages, list), "Messages must be a list."
if tools:
logger.warning("Tools are not supported yet. Please include tools in the system message manually.")
if system:
if system.replace(" ",""):
if not self.system_formatter:
raise ValueError("Your dataset contains system message but no system formatter is provided. "
"Consider either providing a system formatter or removing system prompt from your dataset.")
else:
system = self.system_default if self.system_default else None
encoded_pairs = self._encode(tokenizer, messages, system, tools, **kwargs)
encoded_pairs = self.post_process_pairs(encoded_pairs=encoded_pairs, tokenizer=tokenizer)
return encoded_pairs
|
Messages here should be guaranteed to be in pairs, with the first message being the user message and the second message being the assistant message.
Data example:
```json
{
"conversation_id": 2,
"system": "sysinfo1",
"tools": ["tool_1_desc"],
"messages": [
{
"role": "user",
"content": "hi"
},
{
"role": "assistant",
"content": "Hello!"
}
]
}
```
|
encode_conversation
|
python
|
OptimalScale/LMFlow
|
src/lmflow/utils/conversation_template/base.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/conversation_template/base.py
|
Apache-2.0
|
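A sketch of the pairing contract stated in the docstring: turns arrive strictly alternating user/assistant and are consumed two at a time (the grouping below is an illustration, not LMFlow's `_encode`):

```python
messages = [
    {"role": "user", "content": "hi"},
    {"role": "assistant", "content": "Hello!"},
    {"role": "user", "content": "how are you?"},
    {"role": "assistant", "content": "Great!"},
]
pairs = [(messages[i]["content"], messages[i + 1]["content"])
         for i in range(0, len(messages), 2)]
print(pairs)  # -> [('hi', 'Hello!'), ('how are you?', 'Great!')]
```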
def _encode_template(
self,
template: List[TemplateComponent],
tokenizer: PreTrainedTokenizer,
**kwargs
) -> List[int]:
"""Encode template components into token ids.
Parameters
----------
template : List[TemplateComponent]
Formatted template components.
tokenizer : PreTrainedTokenizer
Tokenizer to convert tokens into token ids.
Returns
-------
List[int]
Encoded token ids.
"""
encoded_ids = []
for component in template:
if component.type == 'string':
if len(component.content) == 0:
logger.warning("Empty string component found in the template.")
continue
else:
encoded_ids += tokenizer.encode(component.content, add_special_tokens=False)
elif component.type == 'token':
if component.content == 'bos_token':
encoded_ids += [tokenizer.bos_token_id]
elif component.content == 'eos_token':
encoded_ids += [tokenizer.eos_token_id]
else:
encoded_ids += self._ensure_id_list(tokenizer.convert_tokens_to_ids(component.content))
elif component.type == 'token_id':
encoded_ids += self._ensure_id_list(component.content)
else:
raise NotImplementedError(f"Component type {component.type} is not supported yet.")
return encoded_ids
|
Encode template components into token ids.
Parameters
----------
template : List[TemplateComponent]
Formatted template components.
tokenizer : PreTrainedTokenizer
Tokenizer to convert tokens into token ids.
Returns
-------
List[int]
Encoded token ids.
|
_encode_template
|
python
|
OptimalScale/LMFlow
|
src/lmflow/utils/conversation_template/base.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/conversation_template/base.py
|
Apache-2.0
|
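A hedged walk-through of the three component types using GPT-2's tokenizer (any `PreTrainedTokenizer` behaves the same way here; downloading the model requires network access):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
encoded_ids = []
encoded_ids += tokenizer.encode("Hello", add_special_tokens=False)  # 'string' component
encoded_ids += [tokenizer.eos_token_id]                             # 'token' component ('eos_token')
encoded_ids += [50256]                                              # 'token_id' component (raw ids)
print(encoded_ids)
```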
def _ensure_id_list(self, obj: Union[int, List[int]]) -> List[int]:
'''Make sure the object is a list of integers. Useful for handling token ids.
'''
if isinstance(obj, int):
return [obj]
elif isinstance(obj, list):
return obj
else:
raise ValueError(f"Object type {type(obj)} is not supported yet.")
|
Make sure the object is a list of integers. Useful for handling token ids.
|
_ensure_id_list
|
python
|
OptimalScale/LMFlow
|
src/lmflow/utils/conversation_template/base.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/conversation_template/base.py
|
Apache-2.0
|
def encode_conversation(
self,
tokenizer: PreTrainedTokenizer,
messages: List[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[List[str]] = None,
**kwargs
) -> Sequence[Tuple[List[int], List[int]]]:
r'''
Messages here should be guaranteed to be in pairs, with the first message being the user message and the second message being the assistant message.
Data example:
```json
{
"conversation_id": 2,
"system": "sysinfo1",
"tools": ["tool_1_desc"],
"messages": [
{
"role": "user",
"content": "hi"
},
{
"role": "assistant",
"content": "Hello!"
}
]
}
```
'''
assert isinstance(messages, list), "Messages must be a list."
tools = self._handle_tools(tools)
if system is None:
system = ""
else:
if system.replace(" ",""):
if not self.system_formatter:
raise ValueError("Your dataset contains system message but no system formatter is provided. "
"Consider either providing a system formatter or removing system prompt from your dataset.")
else:
system = self.system_default if self.system_default else ""
encoded_pairs = self._encode(tokenizer, messages, system, tools, **kwargs)
encoded_pairs = self.post_process_pairs(encoded_pairs=encoded_pairs, tokenizer=tokenizer)
return encoded_pairs
|
Messages here should be guaranteed to be in pairs, with the first message being the user message and the second message being the assistant message.
Data example:
```json
{
"conversation_id": 2,
"system": "sysinfo1",
"tools": ["tool_1_desc"],
"messages": [
{
"role": "user",
"content": "hi"
},
{
"role": "assistant",
"content": "Hello!"
}
]
}
```
|
encode_conversation
|
python
|
OptimalScale/LMFlow
|
src/lmflow/utils/conversation_template/base.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/conversation_template/base.py
|
Apache-2.0
|
def _encode_template(
self,
template: List[TemplateComponent],
tokenizer: PreTrainedTokenizer,
**kwargs
) -> List[int]:
"""Encode template components into token ids.
Parameters
----------
template : List[TemplateComponent]
Formatted template components.
tokenizer : PreTrainedTokenizer
Tokenizer to convert tokens into token ids.
Returns
-------
List[int]
Encoded token ids.
"""
encoded_ids = []
for component in template:
if component.type == 'string':
if len(component.content) == 0:
logger.warning("Empty string component found in the template.")
continue
else:
encoded_ids += tokenizer.encode(component.content, add_special_tokens=False)
elif component.type == 'token':
if component.content == 'bos_token':
encoded_ids += [tokenizer.bos_token_id]
elif component.content == 'eos_token':
encoded_ids += [tokenizer.eos_token_id]
else:
encoded_ids += self._ensure_id_list(tokenizer.convert_tokens_to_ids(component.content))
elif component.type == 'token_id':
encoded_ids += self._ensure_id_list(component.content)
else:
raise NotImplementedError(f"Component type {component.type} is not supported yet.")
return encoded_ids
|
Encode template components into token ids.
Parameters
----------
template : List[TemplateComponent]
Formatted template components.
tokenizer : PreTrainedTokenizer
Tokenizer to convert tokens into token ids.
Returns
-------
List[int]
Encoded token ids.
|
_encode_template
|
python
|
OptimalScale/LMFlow
|
src/lmflow/utils/conversation_template/base.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/conversation_template/base.py
|
Apache-2.0
|
def forward(ctx, qkv, bias=None, causal=False, softmax_scale=None):
"""
qkv: (batch, seqlen, 3, nheads, headdim)
bias: optional, shape broadcastable to (batch, nheads, seqlen, seqlen).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen).
ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen)
"""
# Make sure that the last dimension is contiguous
if qkv.stride(-1) != 1:
qkv = qkv.contiguous()
o, lse, ctx.softmax_scale = _flash_attn_forward(
qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], bias=bias, causal=causal,
softmax_scale=softmax_scale
)
ctx.save_for_backward(qkv, o, lse, bias)
ctx.causal = causal
return o
|
qkv: (batch, seqlen, 3, nheads, headdim)
bias: optional, shape broadcastable to (batch, nheads, seqlen, seqlen).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen).
ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen)
|
forward
|
python
|
OptimalScale/LMFlow
|
src/lmflow/utils/flash_attention/triton_flash_attention.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/flash_attention/triton_flash_attention.py
|
Apache-2.0
|
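A shape sketch for the packed-QKV layout documented above (PyTorch assumed): slicing dim 2 recovers the three projections, and `.contiguous()` restores a unit stride on the last dimension:

```python
import torch

batch, seqlen, nheads, headdim = 2, 16, 4, 64
qkv = torch.randn(batch, seqlen, 3, nheads, headdim, dtype=torch.float16)
q, k, v = qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2]
assert q.shape == (batch, seqlen, nheads, headdim)
if qkv.stride(-1) != 1:  # the same contiguity check used in the forward above
    qkv = qkv.contiguous()
```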
def forward(ctx, q, kv, bias=None, causal=False, softmax_scale=None):
"""
q: (batch, seqlen_q, nheads, headdim)
kv: (batch, seqlen_k, 2, nheads, headdim)
bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
"""
# Make sure that the last dimension is contiguous
q, kv = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, kv]]
o, lse, ctx.softmax_scale = _flash_attn_forward(
q, kv[:, :, 0], kv[:, :, 1], bias=bias, causal=causal, softmax_scale=softmax_scale
)
ctx.save_for_backward(q, kv, o, lse, bias)
ctx.causal = causal
return o
|
q: (batch, seqlen_q, nheads, headdim)
kv: (batch, seqlen_k, 2, nheads, headdim)
bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
|
forward
|
python
|
OptimalScale/LMFlow
|
src/lmflow/utils/flash_attention/triton_flash_attention.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/flash_attention/triton_flash_attention.py
|
Apache-2.0
|
def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):
"""
q: (batch_size, seqlen_q, nheads, headdim)
k, v: (batch_size, seqlen_k, nheads, headdim)
bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
"""
# Make sure that the last dimension is contiguous
q, k, v = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, k, v]]
o, lse, ctx.softmax_scale = _flash_attn_forward(
q, k, v, bias=bias, causal=causal, softmax_scale=softmax_scale
)
ctx.save_for_backward(q, k, v, o, lse, bias)
ctx.causal = causal
return o
|
q: (batch_size, seqlen_q, nheads, headdim)
k, v: (batch_size, seqlen_k, nheads, headdim)
bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
|
forward
|
python
|
OptimalScale/LMFlow
|
src/lmflow/utils/flash_attention/triton_flash_attention.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/flash_attention/triton_flash_attention.py
|
Apache-2.0
|
def find_abbreviation(
long_form_candidate: Span, short_form_candidate: Span
) -> Tuple[Span, Optional[Span]]:
"""
Implements the abbreviation detection algorithm in "A simple algorithm
for identifying abbreviation definitions in biomedical text.", (Schwartz & Hearst, 2003).
The algorithm works by enumerating the characters in the short form of the abbreviation,
checking that they can be matched against characters in a candidate text for the long form
in order, as well as requiring that the first letter of the abbreviated form matches the
_beginning_ letter of a word.
Parameters
----------
long_form_candidate: Span, required.
The spaCy span for the long form candidate of the definition.
short_form_candidate: Span, required.
The spaCy span for the abbreviation candidate.
Returns
-------
A Tuple[Span, Optional[Span]], representing the short form abbreviation and the
span corresponding to the long form expansion, or None if a match is not found.
"""
long_form = " ".join([x.text for x in long_form_candidate])
short_form = " ".join([x.text for x in short_form_candidate])
long_index = len(long_form) - 1
short_index = len(short_form) - 1
while short_index >= 0:
current_char = short_form[short_index].lower()
# We don't check non alpha-numeric characters.
if not current_char.isalnum():
short_index -= 1
continue
# Does the character match at this position? ...
while (
(long_index >= 0 and long_form[long_index].lower() != current_char)
or
# .... or if we are checking the first character of the abbreviation, we enforce
# to be the _starting_ character of a span.
(
short_index == 0
and long_index > 0
and long_form[long_index - 1].isalnum()
)
):
long_index -= 1
if long_index < 0:
return short_form_candidate, None
long_index -= 1
short_index -= 1
# The last subtraction will either take us on to a whitespace character, or
# off the front of the string (i.e. long_index == -1). Either way, we want to add
# one to get back to the start character of the long form
long_index += 1
# Now we know the character index of the start of the character span,
# here we just translate that to the first token beginning after that
# value, so we can return a spaCy span instead.
word_lengths = 0
starting_index = None
for i, word in enumerate(long_form_candidate):
# need to add 1 for the space characters
word_lengths += len(word.text_with_ws)
if word_lengths > long_index:
starting_index = i
break
return short_form_candidate, long_form_candidate[starting_index:]
|
Implements the abbreviation detection algorithm in "A simple algorithm
for identifying abbreviation definitions in biomedical text.", (Schwartz & Hearst, 2003).
The algorithm works by enumerating the characters in the short form of the abbreviation,
checking that they can be matched against characters in a candidate text for the long form
in order, as well as requiring that the first letter of the abbreviated form matches the
_beginning_ letter of a word.
Parameters
----------
long_form_candidate: Span, required.
The spaCy span for the long form candidate of the definition.
short_form_candidate: Span, required.
The spaCy span for the abbreviation candidate.
Returns
-------
A Tuple[Span, Optional[Span]], representing the short form abbreviation and the
span corresponding to the long form expansion, or None if a match is not found.
|
find_abbreviation
|
python
|
allenai/scispacy
|
scispacy/abbreviation.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/abbreviation.py
|
Apache-2.0
|
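A plain-string sketch of the Schwartz & Hearst matching loop above, without spaCy spans; it walks the short form right-to-left and forces the first character to land at a word start:

```python
def match_long_form(long_form: str, short_form: str):
    # Mirrors the character-matching loop of find_abbreviation on raw strings.
    long_index = len(long_form) - 1
    short_index = len(short_form) - 1
    while short_index >= 0:
        current_char = short_form[short_index].lower()
        if not current_char.isalnum():  # skip punctuation in the short form
            short_index -= 1
            continue
        while (long_index >= 0 and long_form[long_index].lower() != current_char) or (
            short_index == 0 and long_index > 0 and long_form[long_index - 1].isalnum()
        ):
            long_index -= 1
            if long_index < 0:
                return None  # no match found
        long_index -= 1
        short_index -= 1
    return long_form[long_index + 1:]

print(match_long_form("Heart rate variability", "HRV"))  # -> "Heart rate variability"
```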
def find(self, span: Span, doc: Doc) -> Tuple[Span, Set[Span]]:
"""
Functional version of calling the matcher for a single span.
This method is helpful if you already have an abbreviation which
you want to find a definition for.
"""
dummy_matches = [(-1, int(span.start), int(span.end))]
filtered = filter_matches(dummy_matches, doc)
abbreviations = self.find_matches_for(filtered, doc)
if not abbreviations:
return span, set()
else:
return abbreviations[0]
|
Functional version of calling the matcher for a single span.
This method is helpful if you already have an abbreviation which
you want to find a definition for.
|
find
|
python
|
allenai/scispacy
|
scispacy/abbreviation.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/abbreviation.py
|
Apache-2.0
|
def make_short_form_serializable(self, abbreviation: Span):
"""
Converts the abbreviations into a short form that is serializable to enable multiprocessing
Parameters
----------
abbreviation: Span
The abbreviation span identified by the detector
"""
long_form = abbreviation._.long_form
abbreviation._.long_form = long_form.text
serializable_abbr = {
"short_text": abbreviation.text,
"short_start": abbreviation.start,
"short_end": abbreviation.end,
"long_text": long_form.text,
"long_start": long_form.start,
"long_end": long_form.end,
}
return serializable_abbr
|
Converts the abbreviations into a short form that is serializable to enable multiprocessing
Parameters
----------
abbreviation: Span
The abbreviation span identified by the detector
|
make_short_form_serializable
|
python
|
allenai/scispacy
|
scispacy/abbreviation.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/abbreviation.py
|
Apache-2.0
|
def load_approximate_nearest_neighbours_index(
linker_paths: LinkerPaths,
ef_search: int = 200,
) -> FloatIndex:
"""
Load an approximate nearest neighbours index from disk.
Parameters
----------
linker_paths: LinkerPaths, required.
Contains the paths to the data required for the entity linker.
ef_search: int, optional (default = 200)
Controls speed performance at query time. Max value is 2000,
but reducing to around ~100 will increase query speed by an order
of magnitude for a small performance hit.
"""
concept_alias_tfidfs = scipy.sparse.load_npz(
cached_path(linker_paths.tfidf_vectors)
).astype(numpy.float32)
ann_index = nmslib.init(
method="hnsw",
space="cosinesimil_sparse",
data_type=nmslib.DataType.SPARSE_VECTOR,
)
ann_index.addDataPointBatch(concept_alias_tfidfs)
ann_index.loadIndex(cached_path(linker_paths.ann_index))
query_time_params = {"efSearch": ef_search}
ann_index.setQueryTimeParams(query_time_params)
return ann_index
|
Load an approximate nearest neighbours index from disk.
Parameters
----------
linker_paths: LinkerPaths, required.
Contains the paths to the data required for the entity linker.
ef_search: int, optional (default = 200)
Controls speed performance at query time. Max value is 2000,
but reducing to around ~100 will increase query speed by an order
of magnitude for a small performance hit.
|
load_approximate_nearest_neighbours_index
|
python
|
allenai/scispacy
|
scispacy/candidate_generation.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/candidate_generation.py
|
Apache-2.0
|
def nmslib_knn_with_zero_vectors(
self, vectors: numpy.ndarray, k: int
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
ann_index.knnQueryBatch crashes if any of the vectors is all zeros.
This function is a wrapper around `ann_index.knnQueryBatch` that solves this problem. It works as follows:
- remove empty vectors from `vectors`.
- call `ann_index.knnQueryBatch` with the non-empty vectors only. This returns `neighbors`,
a list of list of neighbors. `len(neighbors)` equals the length of the non-empty vectors.
- extend the list `neighbors` with `None`s in place of empty vectors.
- return the extended list of neighbors and distances.
"""
empty_vectors_boolean_flags = numpy.array(vectors.sum(axis=1) != 0).reshape(-1)
empty_vectors_count = vectors.shape[0] - sum(empty_vectors_boolean_flags)
if self.verbose:
print(f"Number of empty vectors: {empty_vectors_count}")
# init extended_neighbors with a list of Nones
extended_neighbors = numpy.empty(
(len(empty_vectors_boolean_flags),), dtype=object
)
extended_distances = numpy.empty(
(len(empty_vectors_boolean_flags),), dtype=object
)
if vectors.shape[0] - empty_vectors_count == 0:
return extended_neighbors, extended_distances
# remove empty vectors before calling `ann_index.knnQueryBatch`
vectors = vectors[empty_vectors_boolean_flags]
# call `knnQueryBatch` to get neighbors
original_neighbours = self.ann_index.knnQueryBatch(vectors, k=k)
neighbors, distances = zip(
*[(x[0].tolist(), x[1].tolist()) for x in original_neighbours]
)
neighbors = list(neighbors) # type: ignore
distances = list(distances) # type: ignore
# neighbors need to be converted to an np.array of objects instead of ndarray of dimensions len(vectors)xk
# Solution: add a row to `neighbors` with any length other than k. This way, calling np.array(neighbors)
# returns an np.array of objects
neighbors.append([]) # type: ignore
distances.append([]) # type: ignore
# interleave `neighbors` and Nones in `extended_neighbors`
extended_neighbors[empty_vectors_boolean_flags] = numpy.array(
neighbors, dtype=object
)[:-1]
extended_distances[empty_vectors_boolean_flags] = numpy.array(
distances, dtype=object
)[:-1]
return extended_neighbors, extended_distances
|
ann_index.knnQueryBatch crashes if any of the vectors is all zeros.
This function is a wrapper around `ann_index.knnQueryBatch` that solves this problem. It works as follows:
- remove empty vectors from `vectors`.
- call `ann_index.knnQueryBatch` with the non-empty vectors only. This returns `neighbors`,
a list of list of neighbors. `len(neighbors)` equals the length of the non-empty vectors.
- extend the list `neighbors` with `None`s in place of empty vectors.
- return the extended list of neighbors and distances.
|
nmslib_knn_with_zero_vectors
|
python
|
allenai/scispacy
|
scispacy/candidate_generation.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/candidate_generation.py
|
Apache-2.0
|
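The ragged-row trick used above is worth seeing in isolation: appending a list of a different length makes numpy build a 1-D object array rather than a 2-D integer array:

```python
import numpy

neighbors = [[1, 2, 3], [4, 5, 6]]
neighbors.append([])  # ragged row forces dtype=object construction
obj_array = numpy.array(neighbors, dtype=object)[:-1]
print(obj_array.shape)  # -> (2,): one list per row, not a (2, 3) int array
```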
def __call__(
self, mention_texts: List[str], k: int
) -> List[List[MentionCandidate]]:
"""
Given a list of mention texts, returns a list of candidate neighbors.
NOTE: Because we include canonical name aliases in the ann index, the list
of candidates returned will not necessarily be of length k for each candidate,
because we then map these to canonical ids only.
NOTE: For a given mention, the returned candidate list might be empty, which implies that
the tfidf vector for this mention was all zeros (i.e. there were no 3-gram overlaps). This
happens rarely, but does occur occasionally.
Parameters
----------
mention_texts: List[str], required.
The list of mention strings to generate candidates for.
k: int, required.
The number of ann neighbours to look up.
Note that the number returned may differ due to aliases.
Returns
-------
A list of MentionCandidate objects per mention containing KB concept_ids and aliases
and distances which were mapped to. Note that these are lists for each concept id,
because the index contains aliases which are canonicalized, so multiple values may map
to the same canonical id.
"""
if self.verbose:
print(f"Generating candidates for {len(mention_texts)} mentions")
# tfidf vectorizer crashes on an empty array, so we return early here
if mention_texts == []:
return []
tfidfs = self.vectorizer.transform(mention_texts)
start_time = datetime.datetime.now()
# `ann_index.knnQueryBatch` crashes if one of the vectors is all zeros.
# `nmslib_knn_with_zero_vectors` is a wrapper around `ann_index.knnQueryBatch` that addresses this issue.
batch_neighbors, batch_distances = self.nmslib_knn_with_zero_vectors(tfidfs, k)
end_time = datetime.datetime.now()
total_time = end_time - start_time
if self.verbose:
print(f"Finding neighbors took {total_time.total_seconds()} seconds")
batch_mention_candidates = []
for neighbors, distances in zip(batch_neighbors, batch_distances):
if neighbors is None:
neighbors = []
if distances is None:
distances = []
concept_to_mentions: Dict[str, List[str]] = defaultdict(list)
concept_to_similarities: Dict[str, List[float]] = defaultdict(list)
for neighbor_index, distance in zip(neighbors, distances):
mention = self.ann_concept_aliases_list[neighbor_index]
concepts_for_mention = self.kb.alias_to_cuis[mention]
for concept_id in concepts_for_mention:
concept_to_mentions[concept_id].append(mention)
concept_to_similarities[concept_id].append(1.0 - distance)
mention_candidates = [
MentionCandidate(concept, mentions, concept_to_similarities[concept])
for concept, mentions in concept_to_mentions.items()
]
mention_candidates = sorted(mention_candidates, key=lambda c: c.concept_id)
batch_mention_candidates.append(mention_candidates)
return batch_mention_candidates
|
Given a list of mention texts, returns a list of candidate neighbors.
NOTE: Because we include canonical name aliases in the ann index, the list
of candidates returned will not necessarily be of length k for each candidate,
because we then map these to canonical ids only.
NOTE: For a given mention, the returned candidate list might be empty, which implies that
the tfidf vector for this mention was all zeros (i.e. there were no 3-gram overlaps). This
happens rarely, but does occur occasionally.
Parameters
----------
mention_texts: List[str], required.
The list of mention strings to generate candidates for.
k: int, required.
The number of ann neighbours to look up.
Note that the number returned may differ due to aliases.
Returns
-------
A list of MentionCandidate objects per mention containing KB concept_ids and aliases
and distances which were mapped to. Note that these are lists for each concept id,
because the index contains aliases which are canonicalized, so multiple values may map
to the same canonical id.
|
__call__
|
python
|
allenai/scispacy
|
scispacy/candidate_generation.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/candidate_generation.py
|
Apache-2.0
|
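A sketch of the alias-to-concept grouping performed above: multiple retrieved aliases can map to one canonical id, so mentions and similarities are accumulated per id (the aliases and CUI here are illustrative):

```python
from collections import defaultdict

alias_to_cuis = {"heart attack": {"C0027051"}, "myocardial infarction": {"C0027051"}}
concept_to_mentions = defaultdict(list)
for mention, distance in [("heart attack", 0.1), ("myocardial infarction", 0.2)]:
    for concept_id in alias_to_cuis[mention]:
        concept_to_mentions[concept_id].append(mention)
print(dict(concept_to_mentions))
# -> {'C0027051': ['heart attack', 'myocardial infarction']}
```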
def create_tfidf_ann_index(
out_path: str, kb: Optional[KnowledgeBase] = None
) -> Tuple[List[str], TfidfVectorizer, FloatIndex]:
"""
Build tfidf vectorizer and ann index.
Parameters
----------
out_path: str, required.
The path where the various model pieces will be saved.
kb : KnowledgeBase, optional.
The kb items to generate the index and vectors for.
"""
if not scipy_supports_sparse_float16():
raise RuntimeError(
"This function requires scipy<1.11, which only runs on Python<3.11."
)
tfidf_vectorizer_path = f"{out_path}/tfidf_vectorizer.joblib"
ann_index_path = f"{out_path}/nmslib_index.bin"
tfidf_vectors_path = f"{out_path}/tfidf_vectors_sparse.npz"
umls_concept_aliases_path = f"{out_path}/concept_aliases.json"
kb = kb or UmlsKnowledgeBase()
# nmslib hyperparameters (very important)
# guide: https://github.com/nmslib/nmslib/blob/master/manual/methods.md
# Default values resulted in very low recall.
# set to the maximum recommended value. Improves recall at the expense of longer indexing time.
# We use the HNSW (Hierarchical Navigable Small World Graph) representation which is constructed
# by consecutive insertion of elements in a random order by connecting them to M closest neighbours
# from the previously inserted elements. These later become bridges between the network hubs that
# improve overall graph connectivity. (bigger M -> higher recall, slower creation)
# For more details see: https://arxiv.org/pdf/1603.09320.pdf?
m_parameter = 100
# `C` for Construction. Set to the maximum recommended value
# Improves recall at the expense of longer indexing time
construction = 2000
num_threads = 60 # set based on the machine
index_params = {
"M": m_parameter,
"indexThreadQty": num_threads,
"efConstruction": construction,
"post": 0,
}
print(
f"No tfidf vectorizer on {tfidf_vectorizer_path} or ann index on {ann_index_path}"
)
concept_aliases = list(kb.alias_to_cuis.keys())
# NOTE: here we are creating the tf-idf vectorizer with float32 type, but we can serialize the
# resulting vectors using float16, meaning they take up half the memory on disk. Unfortunately
# we can't use the float16 format to actually run the vectorizer, because of this bug in sparse
# matrix representations in scipy: https://github.com/scipy/scipy/issues/7408
print(f"Fitting tfidf vectorizer on {len(concept_aliases)} aliases")
tfidf_vectorizer = TfidfVectorizer(
analyzer="char_wb", ngram_range=(3, 3), min_df=10, dtype=numpy.float32
)
start_time = datetime.datetime.now()
concept_alias_tfidfs = tfidf_vectorizer.fit_transform(concept_aliases)
print(f"Saving tfidf vectorizer to {tfidf_vectorizer_path}")
joblib.dump(tfidf_vectorizer, tfidf_vectorizer_path)
end_time = datetime.datetime.now()
total_time = end_time - start_time
print(f"Fitting and saving vectorizer took {total_time.total_seconds()} seconds")
print("Finding empty (all zeros) tfidf vectors")
empty_tfidfs_boolean_flags = numpy.array(
concept_alias_tfidfs.sum(axis=1) != 0
).reshape(-1)
number_of_empty_tfidfs = sum(empty_tfidfs_boolean_flags == False) # noqa: E712
total_number_of_tfidfs = numpy.size(concept_alias_tfidfs, 0)
print(
f"Deleting {number_of_empty_tfidfs}/{total_number_of_tfidfs} aliases because their tfidf is empty"
)
# remove empty tfidf vectors, otherwise nmslib will crash
concept_aliases = [
alias
for alias, flag in zip(concept_aliases, empty_tfidfs_boolean_flags)
if flag
]
concept_alias_tfidfs = concept_alias_tfidfs[empty_tfidfs_boolean_flags]
assert len(concept_aliases) == numpy.size(concept_alias_tfidfs, 0)
print(
f"Saving list of concept ids and tfidfs vectors to {umls_concept_aliases_path} and {tfidf_vectors_path}"
)
json.dump(concept_aliases, open(umls_concept_aliases_path, "w"))
scipy.sparse.save_npz(
tfidf_vectors_path, concept_alias_tfidfs.astype(numpy.float16)
)
print(f"Fitting ann index on {len(concept_aliases)} aliases (takes 2 hours)")
start_time = datetime.datetime.now()
ann_index = nmslib.init(
method="hnsw",
space="cosinesimil_sparse",
data_type=nmslib.DataType.SPARSE_VECTOR,
)
ann_index.addDataPointBatch(concept_alias_tfidfs)
ann_index.createIndex(index_params, print_progress=True)
ann_index.saveIndex(ann_index_path)
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
print(f"Fitting ann index took {elapsed_time.total_seconds()} seconds")
return concept_aliases, tfidf_vectorizer, ann_index
|
Build tfidf vectorizer and ann index.
Parameters
----------
out_path: str, required.
The path where the various model pieces will be saved.
kb : KnowledgeBase, optional.
The kb items to generate the index and vectors for.
|
create_tfidf_ann_index
|
python
|
allenai/scispacy
|
scispacy/candidate_generation.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/candidate_generation.py
|
Apache-2.0
|
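A small demonstration of the character 3-gram vectorizer configured above (`min_df` is lowered here so a toy corpus still produces features):

```python
import numpy
from sklearn.feature_extraction.text import TfidfVectorizer

vectorizer = TfidfVectorizer(
    analyzer="char_wb", ngram_range=(3, 3), min_df=1, dtype=numpy.float32
)
tfidfs = vectorizer.fit_transform(["heart attack", "myocardial infarction"])
print(tfidfs.shape)  # (2, n_features): one sparse 3-gram vector per alias
```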
def pysbd_sentencizer(doc: Doc) -> Doc:
"""Adds sentence boundaries to a Doc.
Intended to be used as a pipe in a spaCy pipeline.
Uses https://github.com/nipunsadvilkar/pySBD to get proper sentences and
their respective char_spans
Handle special cases:
New lines cannot be end of sentence tokens.
New lines that separate sentences will be added to the
beginning of the next sentence.
@param doc: the spaCy document to be annotated with sentence boundaries
"""
segmenter = pysbd.Segmenter(language="en", clean=False, char_span=True)
sents_char_spans: List[TextSpan] = segmenter.segment(doc.text)
char_spans = [
doc.char_span(
sent_span.start,
# strip off trailing spaces when creating spans to accommodate spacy
sent_span.end - (len(sent_span.sent) - len(sent_span.sent.rstrip(" "))),
)
for sent_span in sents_char_spans
]
start_token_char_offsets = [span[0].idx for span in char_spans if span is not None]
for token in doc:
prev_token = token.nbor(-1) if token.i != 0 else None
if token.idx in start_token_char_offsets:
if prev_token and (
prev_token.text in ABBREVIATIONS
# Glom new lines at the beginning of the text onto the following sentence
or (prev_token.i == 0 and all(c == "\n" for c in prev_token.text))
):
token.is_sent_start = False
else:
token.is_sent_start = True
# check if previous token contains more than 2 newline chars
elif prev_token and prev_token.i != 0 and prev_token.text.count("\n") >= 2:
token.is_sent_start = True
else:
token.is_sent_start = False
return doc
|
Adds sentence boundaries to a Doc.
Intended to be used as a pipe in a spaCy pipeline.
Uses https://github.com/nipunsadvilkar/pySBD to get proper sentences and
their respective char_spans
Handle special cases:
New lines cannot be end of sentence tokens.
New lines that separate sentences will be added to the
beginning of the next sentence.
@param doc: the spaCy document to be annotated with sentence boundaries
|
pysbd_sentencizer
|
python
|
allenai/scispacy
|
scispacy/custom_sentence_segmenter.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/custom_sentence_segmenter.py
|
Apache-2.0
|
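A hypothetical usage sketch, assuming current scispacy registers this function as a spaCy v3 component named "pysbd_sentencizer" on import:

```python
import spacy
from scispacy.custom_sentence_segmenter import pysbd_sentencizer  # noqa: F401, assumed to register the factory

nlp = spacy.blank("en")
nlp.add_pipe("pysbd_sentencizer")
doc = nlp("Dr. Smith arrived at 9 a.m. He sat down.")
print([sent.text for sent in doc.sents])  # "Dr." and "a.m." do not end sentences
```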
def remove_new_lines(text: str) -> str:
"""Used to preprocess away new lines in the middle of words. This function
is intended to be called on a raw string before it is passed through a
spaCy pipeline
@param text: a string of text to be processed
"""
text = text.replace("-\n\n", "")
text = text.replace("- \n\n", "")
text = text.replace("-\n", "")
text = text.replace("- \n", "")
return text
|
Used to preprocess away new lines in the middle of words. This function
is intended to be called on a raw string before it is passed through a
spaCy pipeline
@param text: a string of text to be processed
|
remove_new_lines
|
python
|
allenai/scispacy
|
scispacy/custom_tokenizer.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/custom_tokenizer.py
|
Apache-2.0
|
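A quick worked example of the de-hyphenation above, assuming the function is imported from scispacy.custom_tokenizer:

```python
from scispacy.custom_tokenizer import remove_new_lines

print(remove_new_lines("immuno-\nhistochemistry staining"))
# -> "immunohistochemistry staining"
```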
def process_example(lines: List[str]) -> MedMentionExample:
"""
Processes the text lines of a file corresponding to a single MedMention abstract,
extracts the title, abstract, pubmed id and entities. The lines of the file should
have the following format:
PMID | t | Title text
PMID | a | Abstract text
PMID TAB StartIndex TAB EndIndex TAB MentionTextSegment TAB SemanticTypeID TAB EntityID
...
"""
pubmed_id, _, title = [x.strip() for x in lines[0].split("|", maxsplit=2)]
_, _, abstract = [x.strip() for x in lines[1].split("|", maxsplit=2)]
entities = []
for entity_line in lines[2:]:
_, start, end, mention, mention_type, umls_id = entity_line.split("\t")
mention_type = mention_type.split(",")[0]
entities.append(
MedMentionEntity(int(start), int(end), mention, mention_type, umls_id)
)
return MedMentionExample(
title, abstract, title + " " + abstract, pubmed_id, entities
)
|
Processes the text lines of a file corresponding to a single MedMention abstract,
extracts the title, abstract, pubmed id and entities. The lines of the file should
have the following format:
PMID | t | Title text
PMID | a | Abstract text
PMID TAB StartIndex TAB EndIndex TAB MentionTextSegment TAB SemanticTypeID TAB EntityID
...
|
process_example
|
python
|
allenai/scispacy
|
scispacy/data_util.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/data_util.py
|
Apache-2.0
|
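A toy MedMentions block in the pipe/tab layout documented above, assuming `process_example` is importable from scispacy.data_util; the PMID, offsets, and ids are made up for illustration:

```python
from scispacy.data_util import process_example

lines = [
    "12345|t|A title",
    "12345|a|An abstract about BRCA1.",
    "12345\t26\t31\tBRCA1\tT028,T116\tC0376571",
]
example = process_example(lines)
print(example.pubmed_id)                 # -> "12345"
print(example.entities[0].mention_type)  # -> "T028" (first of the comma-separated types)
```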
def med_mentions_example_iterator(filename: str) -> Iterator[MedMentionExample]:
"""
Iterates over a Med Mentions file, yielding examples.
"""
with open(filename, "r", encoding="utf-8") as med_mentions_file:
lines = []
for line in med_mentions_file:
line = line.strip()
if line:
lines.append(line)
else:
yield process_example(lines)
lines = []
# Pick up stragglers
if lines:
yield process_example(lines)
|
Iterates over a Med Mentions file, yielding examples.
|
med_mentions_example_iterator
|
python
|
allenai/scispacy
|
scispacy/data_util.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/data_util.py
|
Apache-2.0
|
def select_subset_of_overlapping_chain(
chain: List[Tuple[int, int, str]]
) -> List[Tuple[int, int, str]]:
"""
Select the subset of entities in an overlapping chain to return by greedily choosing the
longest entity in the chain until there are no entities remaining
"""
sorted_chain = sorted(chain, key=lambda x: x[1] - x[0], reverse=True)
selections_from_chain: List[Tuple[int, int, str]] = []
chain_index = 0
# dump the current chain by greedily keeping the longest entity that doesn't overlap
while chain_index < len(sorted_chain):
entity = sorted_chain[chain_index]
match_found = False
for already_selected_entity in selections_from_chain:
max_start = max(entity[0], already_selected_entity[0])
min_end = min(entity[1], already_selected_entity[1])
if min_end > max_start:
match_found = True
break
if not match_found:
selections_from_chain.append(entity)
chain_index += 1
return selections_from_chain
|
Select the subset of entities in an overlapping chain to return by greedily choosing the
longest entity in the chain until there are no entities remaining
|
select_subset_of_overlapping_chain
|
python
|
allenai/scispacy
|
scispacy/data_util.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/data_util.py
|
Apache-2.0
|
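A worked example of the greedy selection above, assuming the function is importable from scispacy.data_util: the longest span wins, then any span that does not overlap it is kept:

```python
from scispacy.data_util import select_subset_of_overlapping_chain

chain = [(0, 10, "DISEASE"), (2, 5, "CHEMICAL"), (12, 15, "DISEASE")]
print(select_subset_of_overlapping_chain(chain))
# -> [(0, 10, 'DISEASE'), (12, 15, 'DISEASE')]; (2, 5) overlaps the longer (0, 10) and is dropped
```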
def remove_overlapping_entities(
sorted_spacy_format_entities: List[Tuple[int, int, str]]
) -> List[Tuple[int, int, str]]:
"""
Removes overlapping entities from the entity set, by greedily taking the longest
entity from each overlapping chain. The input list of entities should be sorted
and follow the spacy format.
"""
spacy_format_entities_without_overlap = []
current_overlapping_chain: List[Tuple[int, int, str]] = []
current_overlapping_chain_start = 0
current_overlapping_chain_end = 0
for current_entity in sorted_spacy_format_entities:
current_entity_start = current_entity[0]
current_entity_end = current_entity[1]
if len(current_overlapping_chain) == 0:
current_overlapping_chain.append(current_entity)
current_overlapping_chain_start = current_entity_start
current_overlapping_chain_end = current_entity_end
else:
min_end = min(current_entity_end, current_overlapping_chain_end)
max_start = max(current_entity_start, current_overlapping_chain_start)
if min_end - max_start > 0:
current_overlapping_chain.append(current_entity)
current_overlapping_chain_start = min(
current_entity_start, current_overlapping_chain_start
)
current_overlapping_chain_end = max(
current_entity_end, current_overlapping_chain_end
)
else:
selections_from_chain = select_subset_of_overlapping_chain(
current_overlapping_chain
)
current_overlapping_chain = []
spacy_format_entities_without_overlap.extend(selections_from_chain)
current_overlapping_chain.append(current_entity)
current_overlapping_chain_start = current_entity_start
current_overlapping_chain_end = current_entity_end
spacy_format_entities_without_overlap.extend(
select_subset_of_overlapping_chain(current_overlapping_chain)
)
return sorted(spacy_format_entities_without_overlap, key=lambda x: x[0])
|
Removes overlapping entities from the entity set, by greedily taking the longest
entity from each overlapping chain. The input list of entities should be sorted
and follow the spacy format.
|
remove_overlapping_entities
|
python
|
allenai/scispacy
|
scispacy/data_util.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/data_util.py
|
Apache-2.0
|
def _handle_sentence(examples: List[Tuple[str, str]]) -> SpacyNerExample:
"""
Processes a single sentence by building it up as a space separated string
with its corresponding typed entity spans.
"""
start_index = -1
current_index = 0
in_entity = False
entity_type: str = ""
sent = ""
entities: List[Tuple[int, int, str]] = []
for word, entity in examples:
sent += word
sent += " "
if entity != "O":
if in_entity:
pass
else:
start_index = current_index
in_entity = True
entity_type = entity[2:].upper()
else:
if in_entity:
end_index = current_index - 1
entities.append((start_index, end_index, entity_type.replace("-", "_")))
in_entity = False
entity_type = ""
start_index = -1
current_index += len(word) + 1
if in_entity:
end_index = current_index - 1
entities.append((start_index, end_index, entity_type))
# Remove last space.
sent = sent[:-1]
return (sent, {"entities": entities})
|
Processes a single sentence by building it up as a space separated string
with its corresponding typed entity spans.
|
_handle_sentence
|
python
|
allenai/scispacy
|
scispacy/data_util.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/data_util.py
|
Apache-2.0
|
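A toy BIO-tagged sentence run through the builder above (assuming `_handle_sentence` is importable from scispacy.data_util); the span (0, 15) covers "Gene expression":

```python
from scispacy.data_util import _handle_sentence

rows = [("Gene", "B-Gene"), ("expression", "I-Gene"), ("rises", "O")]
print(_handle_sentence(rows))
# -> ('Gene expression rises', {'entities': [(0, 15, 'GENE')]})
```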
def read_ner_from_tsv(filename: str) -> List[SpacyNerExample]:
"""
Reads BIO formatted NER data from a TSV file, such as the
NER data found here:
https://github.com/cambridgeltl/MTL-Bioinformatics-2016
Data is expected to be two tab-separated tokens per line, with
sentences denoted by empty lines. Sentences read by this
function will be already tokenized, but returned as a string,
as this is the format required by SpaCy. Consider using the
WhitespaceTokenizer(scispacy/util.py) to split this data
with a SpaCy model.
Parameters
----------
filename : str
The path to the tsv data.
Returns
-------
spacy_format_data : List[SpacyNerExample]
The BIO tagged NER examples.
"""
spacy_format_data = []
examples: List[Tuple[str, str]] = []
for line in open(cached_path(filename)):
line = line.strip()
if line.startswith("-DOCSTART-"):
continue
# We have reached the end of a sentence.
if not line:
if not examples:
continue
spacy_format_data.append(_handle_sentence(examples))
examples = []
else:
word, entity = line.split("\t")
examples.append((word, entity))
if examples:
spacy_format_data.append(_handle_sentence(examples))
return spacy_format_data
|
Reads BIO formatted NER data from a TSV file, such as the
NER data found here:
https://github.com/cambridgeltl/MTL-Bioinformatics-2016
Data is expected to be two tab-separated tokens per line, with
sentences denoted by empty lines. Sentences read by this
function will be already tokenized, but returned as a string,
as this is the format required by SpaCy. Consider using the
WhitespaceTokenizer(scispacy/util.py) to split this data
with a SpaCy model.
Parameters
----------
filename : str
The path to the tsv data.
Returns
-------
spacy_format_data : List[SpacyNerExample]
The BIO tagged NER examples.
|
read_ner_from_tsv
|
python
|
allenai/scispacy
|
scispacy/data_util.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/data_util.py
|
Apache-2.0
|
def cached_path(
url_or_filename: Union[str, Path], cache_dir: Optional[str] = None
) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = DATASET_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
parsed = urlparse(url_or_filename)
if parsed.scheme in ("http", "https"):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == "":
# File, but it doesn't exist.
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError(
"unable to parse {} as a URL or as a local path".format(url_or_filename)
)
|
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
|
cached_path
|
python
|
allenai/scispacy
|
scispacy/file_cache.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/file_cache.py
|
Apache-2.0
|
def filename_to_url(filename: str, cache_dir: Optional[str] = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = DATASET_CACHE
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
|
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
|
filename_to_url
|
python
|
allenai/scispacy
|
scispacy/file_cache.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/file_cache.py
|
Apache-2.0
|
def get_from_cache(url: str, cache_dir: Optional[str] = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = DATASET_CACHE
os.makedirs(cache_dir, exist_ok=True)
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError(
"HEAD request failed for url {} with status code {}".format(
url, response.status_code
)
)
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file: # type: IO
print(f"{url} not found in cache, downloading to {temp_file.name}")
# GET file object
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
print(
f"Finished download, copying {temp_file.name} to cache at {cache_path}"
)
with open(cache_path, "wb") as cache_file:
shutil.copyfileobj(temp_file, cache_file)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
json.dump(meta, meta_file)
return cache_path
|
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
|
get_from_cache
|
python
|
allenai/scispacy
|
scispacy/file_cache.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/file_cache.py
|
Apache-2.0
|
def expand_to_noun_compound(self, token: Token, doc: Doc):
"""
Expand a token to its noun phrase based
on a simple POS tag heuristic.
"""
start = token.i
while True:
if start - 1 < 0:
break
previous_token = doc[start - 1]
if previous_token.pos_ in {"PROPN", "NOUN", "PRON"}:
start -= 1
else:
break
end = token.i + 1
while True:
if end >= len(doc):
break
next_token = doc[end]
if next_token.pos_ in {"PROPN", "NOUN", "PRON"}:
end += 1
else:
break
return doc[start:end]
|
Expand a token to its noun phrase based
on a simple POS tag heuristic.
|
expand_to_noun_compound
|
python
|
allenai/scispacy
|
scispacy/hyponym_detector.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/hyponym_detector.py
|
Apache-2.0
|
def __call__(self, doc: Doc):
"""
Runs the matcher on the Doc object and sets token and
doc level attributes for hypernym and hyponym relations.
"""
# Find matches in doc
matches = self.matcher(doc)
# If none are found then return None
if not matches:
return doc
for match_id, start, end in matches:
predicate = self.nlp.vocab.strings[match_id]
# if the predicate is in the list where the hypernym is last, else hypernym is first
if predicate in self.last:
hypernym = doc[end - 1]
hyponym = doc[start]
else:
# An inelegant way to deal with the "such_NOUN_as pattern"
# since the first token is not the hypernym.
if doc[start].lemma_ == "such":
start += 1
hypernym = doc[start]
hyponym = doc[end - 1]
hypernym = self.find_noun_compound_head(hypernym)
hyponym = self.find_noun_compound_head(hyponym)
# For the document level, we expand to contain noun phrases.
hypernym_extended = self.expand_to_noun_compound(hypernym, doc)
hyponym_extended = self.expand_to_noun_compound(hyponym, doc)
doc._.hearst_patterns.append(
(predicate, hypernym_extended, hyponym_extended)
)
for token in hyponym.conjuncts:
token_extended = self.expand_to_noun_compound(token, doc)
if token != hypernym and token is not None:
doc._.hearst_patterns.append(
(predicate, hypernym_extended, token_extended)
)
return doc
|
Runs the matcher on the Doc object and sets token and
doc level attributes for hypernym and hyponym relations.
|
__call__
|
python
|
allenai/scispacy
|
scispacy/hyponym_detector.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/hyponym_detector.py
|
Apache-2.0
|
def get_metric(self, reset: bool = False):
"""
Returns
-------
A Dict per label containing the following span-based metrics:
precision : float
recall : float
f1-measure : float
Additionally, an ``overall`` key is included, which provides the precision,
recall and f1-measure for all spans.
"""
all_tags: Set[str] = set()
all_tags.update(self._true_positives.keys())
all_tags.update(self._false_positives.keys())
all_tags.update(self._false_negatives.keys())
all_metrics = {}
for tag in all_tags:
precision, recall, f1_measure = self._compute_metrics(
self._true_positives[tag],
self._false_positives[tag],
self._false_negatives[tag],
)
precision_key = "precision" + "-" + tag
recall_key = "recall" + "-" + tag
f1_key = "f1-measure" + "-" + tag
all_metrics[precision_key] = precision
all_metrics[recall_key] = recall
all_metrics[f1_key] = f1_measure
# Compute the precision, recall and f1 for all spans jointly.
sum_true_positives = sum(
v for k, v in self._true_positives.items() if k != "untyped"
)
sum_false_positives = sum(
v for k, v in self._false_positives.items() if k != "untyped"
)
sum_false_negatives = sum(
v for k, v in self._false_negatives.items() if k != "untyped"
)
precision, recall, f1_measure = self._compute_metrics(
sum_true_positives, sum_false_positives, sum_false_negatives
)
all_metrics["precision-overall"] = precision
all_metrics["recall-overall"] = recall
all_metrics["f1-measure-overall"] = f1_measure
if reset:
self.reset()
return all_metrics
|
Returns
-------
A Dict per label containing the following span-based metrics:
precision : float
recall : float
f1-measure : float
Additionally, an ``overall`` key is included, which provides the precision,
recall and f1-measure for all spans.
|
get_metric
|
python
|
allenai/scispacy
|
scispacy/per_class_scorer.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/per_class_scorer.py
|
Apache-2.0
|
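For reference, a sketch of the standard span-level computation that `_compute_metrics` presumably performs (the zero-division guards here are assumptions):

```python
def compute_span_metrics(true_positives: int, false_positives: int, false_negatives: int):
    # Standard span-level precision/recall/F1.
    precision = true_positives / (true_positives + false_positives) if true_positives + false_positives else 0.0
    recall = true_positives / (true_positives + false_negatives) if true_positives + false_negatives else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1

print(compute_span_metrics(8, 2, 4))  # -> (0.8, 0.666..., 0.727...)
```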
def get_children(self, node: SemanticTypeNode) -> List[SemanticTypeNode]:
"""
Recursively build up a flat list of all a node's children.
"""
children = []
for child in node.children:
children.append(child)
children.extend(self.get_children(child))
return children
|
Recursively build up a flat list of all a node's children.
|
get_children
|
python
|
allenai/scispacy
|
scispacy/umls_semantic_type_tree.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/umls_semantic_type_tree.py
|
Apache-2.0
|
def get_parent(self, node: SemanticTypeNode) -> Optional[SemanticTypeNode]:
"""
Returns the parent of the input node, returning None if the input node is the root of the tree
"""
current_depth = node.level
possible_parents = self.get_nodes_at_depth(current_depth - 1)
for possible_parent in possible_parents:
for child in possible_parent.children:
if child.type_id == node.type_id:
return possible_parent
# If there are no parents, we are at the root and return None
return None
|
Returns the parent of the input node, returning None if the input node is the root of the tree
|
get_parent
|
python
|
allenai/scispacy
|
scispacy/umls_semantic_type_tree.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/umls_semantic_type_tree.py
|
Apache-2.0
|
def get_collapsed_type_id_map_at_level(self, level: int) -> Dict[str, str]:
"""
Constructs a label mapping from the original tree labels to a tree of a fixed depth,
collapsing labels greater than the depth specified to the closest parent which is
still present in the new fixed depth tree. This is effectively mapping to a _coarser_
label space.
"""
new_type_id_map: Dict[str, str] = {k: k for k in self.type_id_to_node.keys()}
for node in self.get_nodes_at_depth(level):
for child in self.get_children(node):
new_type_id_map[child.type_id] = node.type_id
return new_type_id_map
|
Constructs a label mapping from the original tree labels to a tree of a fixed depth,
collapsing labels greater than the depth specified to the closest parent which is
still present in the new fixed depth tree. This is effectively mapping to a _coarser_
label space.
|
get_collapsed_type_id_map_at_level
|
python
|
allenai/scispacy
|
scispacy/umls_semantic_type_tree.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/umls_semantic_type_tree.py
|
Apache-2.0
|
def construct_umls_tree_from_tsv(filepath: str) -> UmlsSemanticTypeTree:
"""
Reads in a tsv file which is formatted as a depth first traversal of
a hierarchy tree, where nodes are of the format:
Name TAB UMLS Semantic Type TAB Tree Depth
Event T051 1
Activity T052 2
Behavior T053 3
Social Behavior T054 4
Individual Behavior T055 4
Daily or Recreational Activity T056 3
"""
node_stack: Deque[SemanticTypeNode] = deque()
for line in open(cached_path(filepath), "r"):
name, type_id, level = line.split("\t")
name = name.strip()
int_level = int(level.strip())
node = SemanticTypeNode(type_id, name, [], int_level)
node_stack.append(node)
def attach_children(node: SemanticTypeNode, stack: Deque[SemanticTypeNode]):
while stack and stack[0].level > node.level:
popped = stack.popleft()
attach_children(popped, stack)
node.children.append(popped)
first = node_stack.popleft()
attach_children(first, node_stack)
return UmlsSemanticTypeTree(first)
|
Reads in a tsv file which is formatted as a depth first traversal of
a hierarchy tree, where nodes are of the format:
Name TAB UMLS Semantic Type TAB Tree Depth
Event T051 1
Activity T052 2
Behavior T053 3
Social Behavior T054 4
Individual Behavior T055 4
Daily or Recreational Activity T056 3
|
construct_umls_tree_from_tsv
|
python
|
allenai/scispacy
|
scispacy/umls_semantic_type_tree.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/umls_semantic_type_tree.py
|
Apache-2.0
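A runnable sketch of the same stack-based construction on the example rows from the docstring; a hypothetical ``Node`` class stands in for ``SemanticTypeNode`` and no file or ``cached_path`` is involved.

from collections import deque

rows = [  # (name, type_id, level), taken from the docstring example
    ("Event", "T051", 1), ("Activity", "T052", 2), ("Behavior", "T053", 3),
    ("Social Behavior", "T054", 4), ("Individual Behavior", "T055", 4),
    ("Daily or Recreational Activity", "T056", 3),
]

class Node:  # hypothetical stand-in for SemanticTypeNode
    def __init__(self, type_id, name, children, level):
        self.type_id, self.name, self.children, self.level = type_id, name, children, level

stack = deque(Node(t, n, [], lvl) for n, t, lvl in rows)

def attach_children(node, stack):
    # Nodes at the front of the stack that are deeper than `node` belong to its subtree.
    while stack and stack[0].level > node.level:
        popped = stack.popleft()
        attach_children(popped, stack)
        node.children.append(popped)

root = stack.popleft()
attach_children(root, stack)
print([c.type_id for c in root.children])              # ['T052']
print([c.type_id for c in root.children[0].children])  # ['T053', 'T056']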
|
def read_umls_file_headers(meta_path: str, filename: str) -> List[str]:
"""
Read the file descriptor MRFILES.RRF from a UMLS release and get column headers (names)
for the given file
    MRFILES.RRF file format: pipe-separated values
Useful columns:
column 0: name of one of the files in the META directory
column 2: column names of that file
Args:
meta_path: path to the META directory of an UMLS release
filename: name of the file to get its column headers
Returns:
a list of column names
"""
file_descriptors = f"{meta_path}/MRFILES.RRF" # to get column names
with open(file_descriptors, encoding="utf-8") as fin:
for line in fin:
splits = line.split("|")
found_filename = splits[0]
column_names = (splits[2] + ",").split(
","
) # ugly hack because all files end with an empty column
if found_filename in filename:
return column_names
assert False, f"Couldn't find column names for file {filename}"
return None
|
Read the file descriptor MRFILES.RRF from a UMLS release and get column headers (names)
for the given file
MRFILES.RRF file format: pipe-separated values
Useful columns:
column 0: name of one of the files in the META directory
column 2: column names of that file
Args:
meta_path: path to the META directory of an UMLS release
filename: name of the file to get its column headers
Returns:
a list of column names
|
read_umls_file_headers
|
python
|
allenai/scispacy
|
scispacy/umls_utils.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/umls_utils.py
|
Apache-2.0
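To make the parsing concrete, here is a sketch on a single made-up MRFILES.RRF row; only the pipe-separated layout follows the description above, the field values are invented.

line = "MRSTY.RRF|Semantic Types|CUI,TUI,STN,STY,ATUI,CVF|6|98|99|"  # invented row
splits = line.split("|")
filename = splits[0]
column_names = (splits[2] + ",").split(",")
print(filename)      # MRSTY.RRF
print(column_names)  # ['CUI', 'TUI', 'STN', 'STY', 'ATUI', 'CVF', '']
# The trailing '' mirrors the "ugly hack" above: every RRF line ends with an empty column.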
|
def read_umls_concepts(
meta_path: str,
concept_details: Dict,
source: Optional[str] = None,
lang: str = "ENG",
non_suppressed: bool = True,
):
"""
Read the concepts file MRCONSO.RRF from a UMLS release and store it in
concept_details dictionary. Each concept is represented with
- concept_id
- canonical_name
- aliases
- types
- definition
This function fills the first three. If a canonical name is not found, it is left empty.
    MRCONSO.RRF file format: pipe-separated values
Useful columns: CUI, LAT, SUPPRESS, STR, ISPREF, TS, STT
Args:
meta_path: path to the META directory of an UMLS release
        concept_details: a dictionary to be filled with concept information
source: An optional source identifier, used as a filter to extract only a
specific source from UMLS.
lang: An optional language identifier, used to filter terms by language
non_suppressed: flag to indicate whether only non-suppressed concepts should be kept
"""
concepts_filename = "MRCONSO.RRF"
headers = read_umls_file_headers(meta_path, concepts_filename)
with open(f"{meta_path}/{concepts_filename}", encoding="utf-8") as fin:
for line in fin:
splits = line.strip().split("|")
assert len(headers) == len(splits), (headers, splits)
concept = dict(zip(headers, splits))
if (lang is not None and concept["LAT"] != lang) or (
non_suppressed and concept["SUPPRESS"] != "N"
):
continue # Keep non-suppressed concepts in target language only
if source is not None:
if concept["SAB"] != source:
continue
concept_id = concept["CUI"]
if concept_id not in concept_details: # a new concept
# add it to the dictionary with an empty list of aliases and types
concept_details[concept_id] = {
"concept_id": concept_id,
"aliases": [],
"types": [],
}
concept_name = concept["STR"]
# this condition is copied from S2. It checks if the concept name is canonical or not
is_canonical = (
concept["ISPREF"] == "Y"
and concept["TS"] == "P"
and concept["STT"] == "PF"
)
if not is_canonical or "canonical_name" in concept_details[concept_id]:
# not a canonical name or a canonical name already found
concept_details[concept_id]["aliases"].append(
concept_name
) # add it as an alias
else:
concept_details[concept_id][
"canonical_name"
] = concept_name # set as canonical name
|
Read the concepts file MRCONSO.RRF from a UMLS release and store it in
concept_details dictionary. Each concept is represented with
- concept_id
- canonical_name
- aliases
- types
- definition
This function fills the first three. If a canonical name is not found, it is left empty.
MRCONSO.RRF file format: pipe-separated values
Useful columns: CUI, LAT, SUPPRESS, STR, ISPREF, TS, STT
Args:
meta_path: path to the META directory of an UMLS release
concept_details: a dictionary to be filled with concept information
source: An optional source identifier, used as a filter to extract only a
specific source from UMLS.
lang: An optional language identifier, used to filter terms by language
non_suppressed: flag to indicate whether only non-suppressed concepts should be kept
|
read_umls_concepts
|
python
|
allenai/scispacy
|
scispacy/umls_utils.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/umls_utils.py
|
Apache-2.0
|
def read_umls_types(meta_path: str, concept_details: Dict):
"""
Read the types file MRSTY.RRF from a UMLS release and store it in
concept_details dictionary. This function adds the `types` field
to the information of each concept
    MRSTY.RRF file format: pipe-separated values
Useful columns: CUI, TUI
Args:
meta_path: path to the META directory of an UMLS release
        concept_details: a dictionary to be filled with concept information
"""
types_filename = "MRSTY.RRF"
headers = read_umls_file_headers(meta_path, types_filename)
with open(f"{meta_path}/{types_filename}", encoding="utf-8") as fin:
for line in fin:
splits = line.strip().split("|")
assert len(headers) == len(splits)
concept_type = dict(zip(headers, splits))
concept = concept_details.get(concept_type["CUI"])
if (
concept is not None
): # a small number of types are for concepts that don't exist
concept["types"].append(concept_type["TUI"])
|
Read the types file MRSTY.RRF from a UMLS release and store it in
concept_details dictionary. This function adds the `types` field
to the information of each concept
MRSTY.RRF file format: pipe-separated values
Useful columns: CUI, TUI
Args:
meta_path: path to the META directory of an UMLS release
concept_details: a dictionary to be filled with concept information
|
read_umls_types
|
python
|
allenai/scispacy
|
scispacy/umls_utils.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/umls_utils.py
|
Apache-2.0
|
def read_umls_definitions(meta_path: str, concept_details: Dict):
"""
    Read the definitions file MRDEF.RRF from a UMLS release and store it in
concept_details dictionary. This function adds the `definition` field
to the information of each concept
    MRDEF.RRF file format: pipe-separated values
Useful columns: CUI, SAB, SUPPRESS, DEF
Args:
meta_path: path to the META directory of an UMLS release
        concept_details: a dictionary to be filled with concept information
"""
definitions_filename = "MRDEF.RRF"
headers = read_umls_file_headers(meta_path, definitions_filename)
with open(f"{meta_path}/{definitions_filename}", encoding="utf-8") as fin:
for line in fin:
splits = line.strip().split("|")
assert len(headers) == len(splits)
definition = dict(zip(headers, splits))
if definition["SUPPRESS"] != "N":
continue
is_from_preferred_source = definition["SAB"] in DEF_SOURCES_PREFERRED
concept = concept_details.get(definition["CUI"])
if (
concept is None
): # a small number of definitions are for concepts that don't exist
continue
if (
"definition" not in concept
or is_from_preferred_source
and concept["is_from_preferred_source"] == "N"
):
concept["definition"] = definition["DEF"]
concept["is_from_preferred_source"] = (
"Y" if is_from_preferred_source else "N"
)
|
Read the definitions file MRDEF.RRF from a UMLS release and store it in
concept_details dictionary. This function adds the `definition` field
to the information of each concept
MRDEF.RRF file format: pipe-separated values
Useful columns: CUI, SAB, SUPPRESS, DEF
Args:
meta_path: path to the META directory of an UMLS release
concept_details: a dictionary to be filled with concept information
|
read_umls_definitions
|
python
|
allenai/scispacy
|
scispacy/umls_utils.py
|
https://github.com/allenai/scispacy/blob/master/scispacy/umls_utils.py
|
Apache-2.0
|
def count_frequencies(language_class: Language, input_path: Path):
"""
    Given a file containing a single document per line
(for scispacy, these are Pubmed abstracts), split the text
using a science specific tokenizer and compute word and
document frequencies for all words.
"""
print(f"Processing {input_path}.")
tokenizer = combined_rule_tokenizer(language_class())
counts = Counter()
doc_counts = Counter()
for line in open(input_path, "r"):
words = [t.text for t in tokenizer(line)]
counts.update(words)
doc_counts.update(set(words))
return counts, doc_counts
|
Given a file containing a single document per line
(for scispacy, these are Pubmed abstracts), split the text
using a science specific tokenizer and compute word and
document frequencies for all words.
|
count_frequencies
|
python
|
allenai/scispacy
|
scripts/count_word_frequencies.py
|
https://github.com/allenai/scispacy/blob/master/scripts/count_word_frequencies.py
|
Apache-2.0
|
def merge_counts(frequencies: List[Tuple[Counter, Counter]], output_path: str):
"""
Merge a number of frequency counts generated from `count_frequencies`
into a single file, written to `output_path`.
"""
counts = Counter()
doc_counts = Counter()
for word_count, doc_count in frequencies:
counts.update(word_count)
doc_counts.update(doc_count)
with io.open(output_path, 'w+', encoding='utf8') as file_:
for word, count in counts.most_common():
if not word.isspace():
file_.write(f"{count}\t{doc_counts[word]}\t{repr(word)}\n")
|
Merge a number of frequency counts generated from `count_frequencies`
into a single file, written to `output_path`.
|
merge_counts
|
python
|
allenai/scispacy
|
scripts/count_word_frequencies.py
|
https://github.com/allenai/scispacy/blob/master/scripts/count_word_frequencies.py
|
Apache-2.0
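A small self-contained sketch of the merging step, with two invented (word_count, doc_count) pairs in place of real count_frequencies output, printed instead of written to a file:

from collections import Counter

shard_1 = (Counter({"protein": 3, "cell": 1}), Counter({"protein": 2, "cell": 1}))
shard_2 = (Counter({"protein": 5, "assay": 2}), Counter({"protein": 4, "assay": 1}))

counts, doc_counts = Counter(), Counter()
for word_count, doc_count in (shard_1, shard_2):
    counts.update(word_count)     # total occurrences across shards
    doc_counts.update(doc_count)  # number of documents containing the word

for word, count in counts.most_common():
    print(f"{count}\t{doc_counts[word]}\t{repr(word)}")
# 8	6	'protein'
# 2	1	'assay'
# 1	1	'cell'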
|
def get_spacy_model(
spacy_model_name: str,
pos_tags: bool,
parse: bool,
ner: bool,
with_custom_tokenizer: bool = False,
with_sentence_segmenter: bool = False,
with_serializable_abbreviation_detector: Optional[bool] = None,
) -> SpacyModelType:
"""
In order to avoid loading spacy models repeatedly,
we'll save references to them, keyed by the options
we used to create the spacy model, so any particular
configuration only gets loaded once.
"""
options = (spacy_model_name, pos_tags, parse, ner, with_custom_tokenizer, with_sentence_segmenter, with_serializable_abbreviation_detector)
if options not in LOADED_SPACY_MODELS:
disable = ["vectors", "textcat"]
if not pos_tags:
disable.append("tagger")
if not parse:
disable.append("parser")
if not ner:
disable.append("ner")
try:
spacy_model = spacy.load(spacy_model_name, disable=disable)
except OSError:
print(f"Spacy models '{spacy_model_name}' not found. Downloading and installing.")
spacy_download(spacy_model_name)
spacy_model = spacy.load(spacy_model_name, disable=disable)
if with_custom_tokenizer:
spacy_model.tokenizer = combined_rule_tokenizer(spacy_model)
if with_sentence_segmenter:
spacy_model.add_pipe("pysbd_sentencizer", first=True)
if with_serializable_abbreviation_detector is not None:
spacy_model.add_pipe("abbreviation_detector", config={"make_serializable": with_serializable_abbreviation_detector})
LOADED_SPACY_MODELS[options] = spacy_model
return LOADED_SPACY_MODELS[options]
|
In order to avoid loading spacy models repeatedly,
we'll save references to them, keyed by the options
we used to create the spacy model, so any particular
configuration only gets loaded once.
|
get_spacy_model
|
python
|
allenai/scispacy
|
tests/conftest.py
|
https://github.com/allenai/scispacy/blob/master/tests/conftest.py
|
Apache-2.0
|
def __call__(self, position: Position, rng_key: Optional[PRNGKey]) -> State:
"""Initialize the algorithm's state.
Parameters
----------
position
A chain position.
Returns
-------
The kernel state that corresponds to the position.
"""
|
Initialize the algorithm's state.
Parameters
----------
position
A chain position.
Returns
-------
The kernel state that corresponds to the position.
|
__call__
|
python
|
blackjax-devs/blackjax
|
blackjax/base.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/base.py
|
Apache-2.0
|
def __call__(self, rng_key: PRNGKey, state: State) -> tuple[State, Info]:
"""Update the current state using the sampling algorithm.
Parameters
----------
rng_key:
        The random state used by JAX's random number generator.
state:
The current kernel state. The kernel state contains the current
chain position as well as other information the kernel needs to
carry over from the previous step.
Returns
-------
A new state, as well as a NamedTuple that contains extra information
about the transition that does not need to be carried over to the next
step.
"""
|
Update the current state using the sampling algorithm.
Parameters
----------
rng_key:
The random state used by JAX's random numbers generator.
state:
The current kernel state. The kernel state contains the current
chain position as well as other information the kernel needs to
carry over from the previous step.
Returns
-------
A new state, as well as a NamedTuple that contains extra information
about the transition that does not need to be carried over to the next
step.
|
__call__
|
python
|
blackjax-devs/blackjax
|
blackjax/base.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/base.py
|
Apache-2.0
|
def potential_scale_reduction(
input_array: ArrayLike, chain_axis: int = 0, sample_axis: int = 1
) -> Array:
"""Gelman and Rubin (1992)'s potential scale reduction for computing multiple MCMC chain convergence.
Parameters
----------
input_array:
        An array representing multiple chains of MCMC samples. The array must
        contain a chain dimension and a sample dimension.
    chain_axis
        The axis indicating the multiple chains. Defaults to 0.
    sample_axis
        The axis indicating a single chain of MCMC samples. Defaults to 1.
Returns
-------
NDArray of the resulting statistics (r-hat), with the chain and sample dimensions squeezed.
Notes
-----
The diagnostic is computed by:
.. math:: \\hat{R} = \\frac{\\hat{V}}{W}
where :math:`W` is the within-chain variance and :math:`\\hat{V}` is the posterior variance
estimate for the pooled traces. This is the potential scale reduction factor, which
converges to unity when each of the traces is a sample from the target posterior. Values
greater than one indicate that one or more chains have not yet converged :cite:p:`stan_rhat,gelman1992inference`.
"""
assert (
input_array.shape[chain_axis] > 1
), "potential_scale_reduction as implemented only works for two or more chains."
num_samples = input_array.shape[sample_axis]
# Compute stats for each chain
per_chain_mean = input_array.mean(axis=sample_axis, keepdims=True)
per_chain_var = input_array.var(axis=sample_axis, ddof=1, keepdims=True)
# Compute between-chain stats
between_chain_variance = num_samples * per_chain_mean.var(
axis=chain_axis, ddof=1, keepdims=True
)
# Compute within-chain stats
within_chain_variance = per_chain_var.mean(axis=chain_axis, keepdims=True)
# Estimate of marginal posterior variance
rhat_value = jnp.sqrt(
(between_chain_variance / within_chain_variance + num_samples - 1)
/ (num_samples)
)
return rhat_value.squeeze()
|
Gelman and Rubin (1992)'s potential scale reduction for computing multiple MCMC chain convergence.
Parameters
----------
input_array:
An array representing multiple chains of MCMC samples. The array must
contain a chain dimension and a sample dimension.
chain_axis
The axis indicating the multiple chains. Defaults to 0.
sample_axis
The axis indicating a single chain of MCMC samples. Defaults to 1.
Returns
-------
NDArray of the resulting statistics (r-hat), with the chain and sample dimensions squeezed.
Notes
-----
The diagnostic is computed by:
.. math:: \hat{R} = \frac{\hat{V}}{W}
where :math:`W` is the within-chain variance and :math:`\hat{V}` is the posterior variance
estimate for the pooled traces. This is the potential scale reduction factor, which
converges to unity when each of the traces is a sample from the target posterior. Values
greater than one indicate that one or more chains have not yet converged :cite:p:`stan_rhat,gelman1992inference`.
|
potential_scale_reduction
|
python
|
blackjax-devs/blackjax
|
blackjax/diagnostics.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/diagnostics.py
|
Apache-2.0
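A quick usage sketch, assuming blackjax is installed so that the function above is importable from blackjax.diagnostics:

import jax
from blackjax.diagnostics import potential_scale_reduction

key = jax.random.PRNGKey(0)
samples = jax.random.normal(key, (4, 1000))  # 4 chains x 1000 draws from one target
print(potential_scale_reduction(samples))    # close to 1.0 for well-mixed chains

shifted = samples.at[0].add(5.0)             # move one chain away from the others
print(potential_scale_reduction(shifted))    # noticeably greater than 1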
|
def effective_sample_size(
input_array: ArrayLike, chain_axis: int = 0, sample_axis: int = 1
) -> Array:
"""Compute estimate of the effective sample size (ess).
Parameters
----------
input_array:
        An array representing multiple chains of MCMC samples. The array must
        contain a chain dimension and a sample dimension.
    chain_axis
        The axis indicating the multiple chains. Defaults to 0.
    sample_axis
        The axis indicating a single chain of MCMC samples. Defaults to 1.
Returns
-------
NDArray of the resulting statistics (ess), with the chain and sample dimensions squeezed.
Notes
-----
The basic ess (:math:`N_{\\mathit{eff}}`) diagnostic is computed by:
.. math:: \\hat{N}_{\\mathit{eff}} = \\frac{MN}{\\hat{\\tau}}
.. math:: \\hat{\\tau} = -1 + 2 \\sum_{t'=0}^K \\hat{P}_{t'}
where :math:`M` is the number of chains, :math:`N` the number of draws,
    :math:`\\hat{\\rho}_t` is the estimated autocorrelation at lag :math:`t`, and
:math:`K` is the last integer for which :math:`\\hat{P}_{K} = \\hat{\\rho}_{2K} +
\\hat{\\rho}_{2K+1}` is still positive :cite:p:`stan_ess,gelman1995bayesian`.
The current implementation is similar to Stan, which uses Geyer's initial monotone sequence
criterion :cite:p:`geyer1992practical,geyer2011introduction`.
"""
input_shape = input_array.shape
sample_axis = sample_axis if sample_axis >= 0 else len(input_shape) + sample_axis
num_chains = input_shape[chain_axis]
num_samples = input_shape[sample_axis]
assert (
num_samples > 1
), f"The input array must have at least 2 samples, got only {num_samples}."
mean_across_chain = input_array.mean(axis=sample_axis, keepdims=True)
# Compute autocovariance estimates for every lag for the input array using FFT.
centered_array = input_array - mean_across_chain
m = next_fast_len(2 * num_samples)
ifft_ary = jnp.fft.rfft(centered_array, n=m, axis=sample_axis)
ifft_ary *= jnp.conjugate(ifft_ary)
autocov_value = jnp.fft.irfft(ifft_ary, n=m, axis=sample_axis)
autocov_value = (
jnp.take(autocov_value, jnp.arange(num_samples), axis=sample_axis) / num_samples
)
mean_autocov_var = autocov_value.mean(chain_axis, keepdims=True)
mean_var0 = (
jnp.take(mean_autocov_var, jnp.array([0]), axis=sample_axis)
* num_samples
/ (num_samples - 1.0)
)
weighted_var = mean_var0 * (num_samples - 1.0) / num_samples
weighted_var = jax.lax.cond(
num_chains > 1,
lambda mean_across_chain: weighted_var
+ mean_across_chain.var(axis=chain_axis, ddof=1, keepdims=True),
lambda _: weighted_var,
operand=mean_across_chain,
)
# Geyer's initial positive sequence
num_samples_even = num_samples - num_samples % 2
mean_autocov_var_tp1 = jnp.take(
mean_autocov_var, jnp.arange(1, num_samples_even), axis=sample_axis
)
rho_hat = jnp.concatenate(
[
jnp.ones_like(mean_var0),
1.0 - (mean_var0 - mean_autocov_var_tp1) / weighted_var,
],
axis=sample_axis,
)
rho_hat = jnp.moveaxis(rho_hat, sample_axis, 0)
rho_hat_even = rho_hat[0::2]
rho_hat_odd = rho_hat[1::2]
mask0 = (rho_hat_even + rho_hat_odd) > 0.0
carry_cond = jnp.ones_like(mask0[0])
max_t = jnp.zeros_like(mask0[0], dtype=int)
def positive_sequence_body_fn(state, mask_t):
t, carry_cond, max_t = state
next_mask = carry_cond & mask_t
next_max_t = jnp.where(next_mask, jnp.ones_like(max_t) * t, max_t)
return (t + 1, next_mask, next_max_t), next_mask
(*_, max_t_next), mask = jax.lax.scan(
positive_sequence_body_fn, (0, carry_cond, max_t), mask0
)
indices = jnp.indices(max_t_next.shape)
indices = tuple([max_t_next + 1] + [indices[i] for i in range(max_t_next.ndim)])
rho_hat_odd = jnp.where(mask, rho_hat_odd, jnp.zeros_like(rho_hat_odd))
# improve estimation
mask_even = mask.at[indices].set(rho_hat_even[indices] > 0)
rho_hat_even = jnp.where(mask_even, rho_hat_even, jnp.zeros_like(rho_hat_even))
# Geyer's initial monotone sequence
def monotone_sequence_body_fn(rho_hat_sum_tm1, rho_hat_sum_t):
update_mask = rho_hat_sum_t > rho_hat_sum_tm1
next_rho_hat_sum_t = jnp.where(update_mask, rho_hat_sum_tm1, rho_hat_sum_t)
return next_rho_hat_sum_t, (update_mask, next_rho_hat_sum_t)
rho_hat_sum = rho_hat_even + rho_hat_odd
_, (update_mask, update_value) = jax.lax.scan(
monotone_sequence_body_fn, rho_hat_sum[0], rho_hat_sum
)
rho_hat_even_final = jnp.where(update_mask, update_value / 2.0, rho_hat_even)
rho_hat_odd_final = jnp.where(update_mask, update_value / 2.0, rho_hat_odd)
# compute effective sample size
ess_raw = num_chains * num_samples
tau_hat = (
-1.0
+ 2.0 * jnp.sum(rho_hat_even_final + rho_hat_odd_final, axis=0)
- rho_hat_even_final[indices]
)
tau_hat = jnp.maximum(tau_hat, 1 / np.log10(ess_raw))
ess = ess_raw / tau_hat
return ess.squeeze()
|
Compute estimate of the effective sample size (ess).
Parameters
----------
input_array:
An array representing multiple chains of MCMC samples. The array must
contain a chain dimension and a sample dimension.
chain_axis
The axis indicating the multiple chains. Defaults to 0.
sample_axis
The axis indicating a single chain of MCMC samples. Defaults to 1.
Returns
-------
NDArray of the resulting statistics (ess), with the chain and sample dimensions squeezed.
Notes
-----
The basic ess (:math:`N_{\mathit{eff}}`) diagnostic is computed by:
.. math:: \hat{N}_{\mathit{eff}} = \frac{MN}{\hat{\tau}}
.. math:: \hat{\tau} = -1 + 2 \sum_{t'=0}^K \hat{P}_{t'}
where :math:`M` is the number of chains, :math:`N` the number of draws,
:math:`\hat{\rho}_t` is the estimated autocorrelation at lag :math:`t`, and
:math:`K` is the last integer for which :math:`\hat{P}_{K} = \hat{\rho}_{2K} +
\hat{\rho}_{2K+1}` is still positive :cite:p:`stan_ess,gelman1995bayesian`.
The current implementation is similar to Stan, which uses Geyer's initial monotone sequence
criterion :cite:p:`geyer1992practical,geyer2011introduction`.
|
effective_sample_size
|
python
|
blackjax-devs/blackjax
|
blackjax/diagnostics.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/diagnostics.py
|
Apache-2.0
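A usage sketch contrasting independent draws with an autocorrelated AR(1) chain, again assuming blackjax is installed:

import jax
from blackjax.diagnostics import effective_sample_size

key = jax.random.PRNGKey(1)
iid = jax.random.normal(key, (2, 2000))  # (chains, samples), independent draws
print(effective_sample_size(iid))        # roughly the total draw count, ~4000

def ar1_step(prev, eps):
    new = 0.9 * prev + eps  # strong positive autocorrelation
    return new, new

_, chain = jax.lax.scan(ar1_step, 0.0, 0.1 * jax.random.normal(key, (2000,)))
print(effective_sample_size(chain[None, :]))  # far below 2000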
|
def _update_progress_bar(iter_num, chain_id):
"Updates progress bar of a JAX scan or loop"
chain_id = lax.cond(
# update every multiple of `print_rate` except at the end
(iter_num % print_rate == 0) | (iter_num == (num_samples - 1)),
lambda _: io_callback(_update_bar, array(0), iter_num, chain_id),
lambda _: chain_id,
operand=None,
)
_ = lax.cond(
iter_num == num_samples - 1,
lambda _: io_callback(_close_bar, None, iter_num + 1, chain_id),
lambda _: None,
operand=None,
)
return chain_id
|
Updates progress bar of a JAX scan or loop
|
_update_progress_bar
|
python
|
blackjax-devs/blackjax
|
blackjax/progress_bar.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/progress_bar.py
|
Apache-2.0
|
def _progress_bar_scan(func):
"""Decorator that adds a progress bar to `body_fun` used in `lax.scan`.
Note that `body_fun` must either be looping over `np.arange(num_samples)`,
    or be looping over a tuple whose first element is `np.arange(num_samples)`
This means that `iter_num` is the current iteration number
"""
def wrapper_progress_bar(carry, x):
if type(x) is tuple:
iter_num, *_ = x
else:
iter_num = x
subcarry, chain_id = carry
chain_id = _update_progress_bar(iter_num, chain_id)
subcarry, y = func(subcarry, x)
return (subcarry, chain_id), y
return wrapper_progress_bar
|
Decorator that adds a progress bar to `body_fun` used in `lax.scan`.
Note that `body_fun` must either be looping over `np.arange(num_samples)`,
or be looping over a tuple whose first element is `np.arange(num_samples)`
This means that `iter_num` is the current iteration number
|
_progress_bar_scan
|
python
|
blackjax-devs/blackjax
|
blackjax/progress_bar.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/progress_bar.py
|
Apache-2.0
|
def linear_map(diag_or_dense_a, b, *, precision="highest"):
"""Perform a linear map of the form y = Ax.
Dispatch matrix multiplication to either jnp.dot or jnp.multiply.
    Unlike jax.numpy.dot, this function outputs an Array that matches the dtype
    and shape of the 2nd input:
- diag_or_dense_a is a scalar or 1d vector, `diag_or_dense_a * b` is returned
- diag_or_dense_a is a 2d matrix, `diag_or_dense_a @ b` is returned
    Note that unlike jax.numpy.dot, here we default to full (highest)
precision. This is more useful for numerical algorithms and will be the
default for jax.numpy in the future:
https://github.com/google/jax/pull/7859
Parameters
----------
diag_or_dense_a:
A diagonal (1d vector) or dense matrix (2d square matrix).
b:
A vector.
precision:
The precision of the computation. See jax.lax.dot_general for
more details.
Returns
-------
The result vector of the matrix multiplication.
"""
dtype = jnp.result_type(diag_or_dense_a.dtype, b.dtype)
diag_or_dense_a = diag_or_dense_a.astype(dtype)
b = b.astype(dtype)
ndim = jnp.ndim(diag_or_dense_a)
if ndim <= 1:
return lax.mul(diag_or_dense_a, b)
else:
return lax.dot(diag_or_dense_a, b, precision=precision)
|
Perform a linear map of the form y = Ax.
Dispatch matrix multiplication to either jnp.dot or jnp.multiply.
Unlike jax.numpy.dot, this function outputs an Array that matches the dtype
and shape of the 2nd input:
- diag_or_dense_a is a scalar or 1d vector, `diag_or_dense_a * b` is returned
- diag_or_dense_a is a 2d matrix, `diag_or_dense_a @ b` is returned
Note that unlike jax.numpy.dot, here we default to full (highest)
precision. This is more useful for numerical algorithms and will be the
default for jax.numpy in the future:
https://github.com/google/jax/pull/7859
Parameters
----------
diag_or_dense_a:
A diagonal (1d vector) or dense matrix (2d square matrix).
b:
A vector.
precision:
The precision of the computation. See jax.lax.dot_general for
more details.
Returns
-------
The result vector of the matrix multiplication.
|
linear_map
|
python
|
blackjax-devs/blackjax
|
blackjax/util.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/util.py
|
Apache-2.0
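A standalone illustration of the dispatch (plain jax.numpy, without importing blackjax): a 1d array acts as a diagonal, a 2d array as a dense matrix, and both give the same result here.

import jax.numpy as jnp

b = jnp.array([1.0, 2.0, 3.0])
diag = jnp.array([2.0, 2.0, 2.0])  # 1d: treated as a diagonal, elementwise multiply
dense = 2.0 * jnp.eye(3)           # 2d: a full matrix, matrix-vector product

print(jnp.multiply(diag, b))  # [2. 4. 6.]
print(jnp.dot(dense, b))      # [2. 4. 6.]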
|
def generate_gaussian_noise(
rng_key: PRNGKey,
position: ArrayLikeTree,
mu: Union[float, Array] = 0.0,
sigma: Union[float, Array] = 1.0,
) -> ArrayTree:
"""Generate N(mu, sigma) noise with output structure that match a given PyTree.
Parameters
----------
rng_key:
The pseudo-random number generator key used to generate random numbers.
position:
        PyTree whose structure the output should match.
mu:
The mean of the Gaussian distribution.
sigma:
The standard deviation of the Gaussian distribution.
Returns
-------
    Gaussian noise following N(mu, sigma) that matches the structure of position.
"""
p, unravel_fn = ravel_pytree(position)
sample = normal(rng_key, shape=p.shape, dtype=p.dtype)
return unravel_fn(mu + linear_map(sigma, sample))
|
Generate N(mu, sigma) noise with output structure that matches a given PyTree.
Parameters
----------
rng_key:
The pseudo-random number generator key used to generate random numbers.
position:
PyTree whose structure the output should match.
mu:
The mean of the Gaussian distribution.
sigma:
The standard deviation of the Gaussian distribution.
Returns
-------
Gaussian noise following N(mu, sigma) that matches the structure of position.
|
generate_gaussian_noise
|
python
|
blackjax-devs/blackjax
|
blackjax/util.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/util.py
|
Apache-2.0
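A self-contained sketch of the ravel/unravel pattern above on an example PyTree; nothing blackjax-specific is required.

import jax
import jax.numpy as jnp
from jax.flatten_util import ravel_pytree

position = {"b": jnp.zeros(3), "w": jnp.zeros((2, 2))}  # example PyTree

flat, unravel_fn = ravel_pytree(position)
sample = jax.random.normal(jax.random.PRNGKey(0), shape=flat.shape, dtype=flat.dtype)
noise = unravel_fn(0.0 + 1.0 * sample)  # mu=0.0, scalar sigma=1.0

print(jax.tree_util.tree_map(jnp.shape, noise))
# {'b': (3,), 'w': (2, 2)} -- same structure and shapes as `position`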
|
def generate_unit_vector(
rng_key: PRNGKey,
position: ArrayLikeTree,
) -> Array:
"""Generate a random unit vector with output structure that match a given PyTree.
Parameters
----------
rng_key:
The pseudo-random number generator key used to generate random numbers.
position:
        PyTree whose structure the output should match.
Returns
-------
    Random unit vector that matches the structure of position.
"""
p, unravel_fn = ravel_pytree(position)
sample = normal(rng_key, shape=p.shape, dtype=p.dtype)
return unravel_fn(sample / jnp.linalg.norm(sample))
|
Generate a random unit vector with output structure that matches a given PyTree.
Parameters
----------
rng_key:
The pseudo-random number generator key used to generate random numbers.
position:
PyTree whose structure the output should match.
Returns
-------
Random unit vector that matches the structure of position.
|
generate_unit_vector
|
python
|
blackjax-devs/blackjax
|
blackjax/util.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/util.py
|
Apache-2.0
|
def index_pytree(input_pytree: ArrayLikeTree) -> ArrayTree:
"""Builds a PyTree with elements indicating its corresponding index on a flat array.
Various algorithms in BlackJAX take as input a 1 or 2 dimensional array which somehow
affects the sampling or approximation of a PyTree. For instance, in HMC a 1 or 2
    dimensional inverse mass matrix is used when simulating Hamiltonian dynamics on
PyTree position and momentum variables. It is usually unclear how the elements of the
array interact with the PyTree. This function demonstrates how all algorithms map an
array to a PyTree of equivalent dimension.
The function returns the index of a 1 dimensional array corresponding to each element of
the PyTree. This way the user can tell which element in the PyTree corresponds to which
column (and row) of a 1 dimensional (or 2 dimensional) array.
Parameters
----------
input_pytree:
Example PyTree.
Returns
-------
PyTree mapping each individual element of an arange array to elements in the PyTree.
"""
flat_input, unravel_fn = ravel_pytree(input_pytree)
(dim_input,) = flat_input.shape
array = jnp.arange(dim_input, dtype=flat_input.dtype)
return unravel_fn(array)
|
Builds a PyTree with elements indicating its corresponding index on a flat array.
Various algorithms in BlackJAX take as input a 1 or 2 dimensional array which somehow
affects the sampling or approximation of a PyTree. For instance, in HMC a 1 or 2
dimensional inverse mass matrix is used when simulating Hamiltonian dynamics on
PyTree position and momentum variables. It is usually unclear how the elements of the
array interact with the PyTree. This function demonstrates how all algorithms map an
array to a PyTree of equivalent dimension.
The function returns the index of a 1 dimensional array corresponding to each element of
the PyTree. This way the user can tell which element in the PyTree corresponds to which
column (and row) of a 1 dimensional (or 2 dimensional) array.
Parameters
----------
input_pytree:
Example PyTree.
Returns
-------
PyTree mapping each individual element of an arange array to elements in the PyTree.
|
index_pytree
|
python
|
blackjax-devs/blackjax
|
blackjax/util.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/util.py
|
Apache-2.0
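A short sketch of the same indexing trick on an example PyTree, reproduced with ravel_pytree directly:

import jax.numpy as jnp
from jax.flatten_util import ravel_pytree

position = {"b": jnp.zeros(2), "w": jnp.zeros((2, 2))}
flat, unravel_fn = ravel_pytree(position)
print(unravel_fn(jnp.arange(flat.shape[0], dtype=flat.dtype)))
# {'b': [0. 1.], 'w': [[2. 3.] [4. 5.]]} -- each leaf shows its flat-array index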
|
def run_inference_algorithm(
rng_key: PRNGKey,
inference_algorithm: Union[SamplingAlgorithm, VIAlgorithm],
num_steps: int,
initial_state: ArrayLikeTree = None,
initial_position: ArrayLikeTree = None,
progress_bar: bool = False,
transform: Callable = lambda state, info: (state, info),
) -> tuple:
"""Wrapper to run an inference algorithm.
Note that this utility function does not work for Stochastic Gradient MCMC samplers
like sghmc, as SG-MCMC samplers require additional control flow for batches of data
to be passed in during each sample.
Parameters
----------
rng_key
        The random state used by JAX's random number generator.
initial_state
The initial state of the inference algorithm.
initial_position
The initial position of the inference algorithm. This is used when the initial state is not provided.
inference_algorithm
One of blackjax's sampling algorithms or variational inference algorithms.
num_steps
Number of MCMC steps.
progress_bar
Whether to display a progress bar.
transform
A transformation of the trace of states (and info) to be returned. This is useful for
        computing deterministic variables, or returning a subset of the states.
By default, the states are returned as is.
Returns
-------
1. The final state.
2. The history of states.
"""
if initial_state is None and initial_position is None:
raise ValueError(
"Either `initial_state` or `initial_position` must be provided."
)
if initial_state is not None and initial_position is not None:
raise ValueError(
"Only one of `initial_state` or `initial_position` must be provided."
)
if initial_state is None:
rng_key, init_key = split(rng_key, 2)
initial_state = inference_algorithm.init(initial_position, init_key)
keys = split(rng_key, num_steps)
def one_step(state, xs):
_, rng_key = xs
state, info = inference_algorithm.step(rng_key, state)
return state, transform(state, info)
scan_fn = gen_scan_fn(num_steps, progress_bar)
xs = jnp.arange(num_steps), keys
final_state, history = scan_fn(one_step, initial_state, xs)
return final_state, history
|
Wrapper to run an inference algorithm.
Note that this utility function does not work for Stochastic Gradient MCMC samplers
like sghmc, as SG-MCMC samplers require additional control flow for batches of data
to be passed in during each sample.
Parameters
----------
rng_key
The random state used by JAX's random number generator.
initial_state
The initial state of the inference algorithm.
initial_position
The initial position of the inference algorithm. This is used when the initial state is not provided.
inference_algorithm
One of blackjax's sampling algorithms or variational inference algorithms.
num_steps
Number of MCMC steps.
progress_bar
Whether to display a progress bar.
transform
A transformation of the trace of states (and info) to be returned. This is useful for
computing deterministic variables, or returning a subset of the states.
By default, the states are returned as is.
Returns
-------
1. The final state.
2. The history of states.
|
run_inference_algorithm
|
python
|
blackjax-devs/blackjax
|
blackjax/util.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/util.py
|
Apache-2.0
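A hedged end-to-end sketch on a standard-normal target; it assumes a recent blackjax release in which blackjax.nuts accepts step_size and inverse_mass_matrix, and keeps the default transform so the history is a (states, infos) pair.

import jax
import jax.numpy as jnp
import blackjax
from blackjax.util import run_inference_algorithm

logdensity_fn = lambda x: -0.5 * jnp.sum(x**2)  # standard normal in 2d
algorithm = blackjax.nuts(logdensity_fn, step_size=0.5, inverse_mass_matrix=jnp.ones(2))

final_state, (states, infos) = run_inference_algorithm(
    rng_key=jax.random.PRNGKey(0),
    inference_algorithm=algorithm,
    num_steps=500,
    initial_position=jnp.zeros(2),
)
print(states.position.shape)  # (500, 2): one position per step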
|
def store_only_expectation_values(
sampling_algorithm,
state_transform=lambda x: x,
incremental_value_transform=lambda x: x,
burn_in=0,
):
"""Takes a sampling algorithm and constructs from it a new sampling algorithm object. The new sampling algorithm has the same
    kernel but only stores the streaming expectation values of some observables, not the full states, in order to save memory.
    It saves incremental_value_transform(E[state_transform(x)]) at each step i, where the expectation is computed with samples up to the i-th sample.
Example:
.. code::
init_key, state_key, run_key = jax.random.split(jax.random.PRNGKey(0),3)
model = StandardNormal(2)
initial_position = model.sample_init(init_key)
initial_state = blackjax.mcmc.mclmc.init(
position=initial_position, logdensity_fn=model.logdensity_fn, rng_key=state_key
)
integrator_type = "mclachlan"
L = 1.0
step_size = 0.1
num_steps = 4
integrator = map_integrator_type_to_integrator['mclmc'][integrator_type]
state_transform = lambda state: state.position
memory_efficient_sampling_alg, transform = store_only_expectation_values(
sampling_algorithm=sampling_alg,
state_transform=state_transform)
initial_state = memory_efficient_sampling_alg.init(initial_state)
final_state, trace_at_every_step = run_inference_algorithm(
rng_key=run_key,
initial_state=initial_state,
inference_algorithm=memory_efficient_sampling_alg,
num_steps=num_steps,
transform=transform,
progress_bar=True,
)
"""
def init_fn(state):
averaging_state = (0.0, state_transform(state))
return (state, averaging_state)
def update_fn(rng_key, state_and_incremental_val):
state, averaging_state = state_and_incremental_val
state, info = sampling_algorithm.step(
rng_key, state
) # update the state with the sampling algorithm
averaging_state = incremental_value_update(
state_transform(state),
averaging_state,
weight=(
averaging_state[0] >= burn_in
), # If we want to eliminate some number of steps as a burn-in
zero_prevention=1e-10 * (burn_in > 0),
)
# update the expectation value with the running average
return (state, averaging_state), info
def transform(state_and_incremental_val, info):
(state, (_, incremental_value)) = state_and_incremental_val
return incremental_value_transform(incremental_value), info
return SamplingAlgorithm(init_fn, update_fn), transform
|
Takes a sampling algorithm and constructs from it a new sampling algorithm object. The new sampling algorithm has the same
kernel but only stores the streaming expectation values of some observables, not the full states, in order to save memory.
It saves incremental_value_transform(E[state_transform(x)]) at each step i, where the expectation is computed with samples up to the i-th sample.
Example:
.. code::
init_key, state_key, run_key = jax.random.split(jax.random.PRNGKey(0),3)
model = StandardNormal(2)
initial_position = model.sample_init(init_key)
initial_state = blackjax.mcmc.mclmc.init(
position=initial_position, logdensity_fn=model.logdensity_fn, rng_key=state_key
)
integrator_type = "mclachlan"
L = 1.0
step_size = 0.1
num_steps = 4
integrator = map_integrator_type_to_integrator['mclmc'][integrator_type]
state_transform = lambda state: state.position
memory_efficient_sampling_alg, transform = store_only_expectation_values(
sampling_algorithm=sampling_alg,
state_transform=state_transform)
initial_state = memory_efficient_sampling_alg.init(initial_state)
final_state, trace_at_every_step = run_inference_algorithm(
rng_key=run_key,
initial_state=initial_state,
inference_algorithm=memory_efficient_sampling_alg,
num_steps=num_steps,
transform=transform,
progress_bar=True,
)
|
store_only_expectation_values
|
python
|
blackjax-devs/blackjax
|
blackjax/util.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/util.py
|
Apache-2.0
|
def incremental_value_update(
expectation, incremental_val, weight=1.0, zero_prevention=0.0
):
"""Compute the streaming average of a function O(x) using a weight.
Parameters:
----------
expectation
the value of the expectation at the current timestep
incremental_val
tuple of (total, average) where total is the sum of weights and average is the current average
weight
weight of the current state
zero_prevention
small value to prevent division by zero
Returns:
----------
new streaming average
"""
total, average = incremental_val
average = tree_map(
lambda exp, av: safediv(
total * av + weight * exp, (total + weight + zero_prevention)
),
expectation,
average,
)
total += weight
return total, average
|
Compute the streaming average of a function O(x) using a weight.
Parameters:
----------
expectation
the value of the expectation at the current timestep
incremental_val
tuple of (total, average) where total is the sum of weights and average is the current average
weight
weight of the current state
zero_prevention
small value to prevent division by zero
Returns:
----------
new streaming average
|
incremental_value_update
|
python
|
blackjax-devs/blackjax
|
blackjax/util.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/util.py
|
Apache-2.0
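A plain-Python sketch of the same recurrence on scalars (no PyTrees, unit weights, no zero_prevention), ending at the ordinary mean:

values = [2.0, 4.0, 6.0, 8.0]

total, average = 0.0, 0.0
for x in values:
    weight = 1.0
    average = (total * average + weight * x) / (total + weight)
    total += weight

print(total, average)  # 4.0 5.0 -- the running average equals mean(values)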
|
def adjusted_mclmc_find_L_and_step_size(
mclmc_kernel,
num_steps,
state,
rng_key,
target,
frac_tune1=0.1,
frac_tune2=0.1,
frac_tune3=0.0,
diagonal_preconditioning=True,
params=None,
max="avg",
num_windows=1,
tuning_factor=1.3,
):
"""
Finds the optimal value of the parameters for the MH-MCHMC algorithm.
Parameters
----------
mclmc_kernel
The kernel function used for the MCMC algorithm.
num_steps
The number of MCMC steps that will subsequently be run, after tuning.
state
The initial state of the MCMC algorithm.
rng_key
The random number generator key.
target
The target acceptance rate for the step size adaptation.
frac_tune1
The fraction of tuning for the first step of the adaptation.
frac_tune2
The fraction of tuning for the second step of the adaptation.
frac_tune3
The fraction of tuning for the third step of the adaptation.
diagonal_preconditioning
Whether to do diagonal preconditioning (i.e. a mass matrix)
params
Initial params to start tuning from (optional)
max
whether to calculate L from maximum or average eigenvalue. Average is advised.
num_windows
how many iterations of the tuning are carried out
tuning_factor
multiplicative factor for L
Returns
-------
A tuple containing the final state of the MCMC algorithm and the final hyperparameters.
"""
frac_tune1 /= num_windows
frac_tune2 /= num_windows
frac_tune3 /= num_windows
dim = pytree_size(state.position)
if params is None:
params = MCLMCAdaptationState(
jnp.sqrt(dim), jnp.sqrt(dim) * 0.2, inverse_mass_matrix=jnp.ones((dim,))
)
part1_key, part2_key = jax.random.split(rng_key, 2)
total_num_tuning_integrator_steps = 0
for i in range(num_windows):
window_key = jax.random.fold_in(part1_key, i)
(
state,
params,
eigenvector,
num_tuning_integrator_steps,
) = adjusted_mclmc_make_L_step_size_adaptation(
kernel=mclmc_kernel,
dim=dim,
frac_tune1=frac_tune1,
frac_tune2=frac_tune2,
target=target,
diagonal_preconditioning=diagonal_preconditioning,
max=max,
tuning_factor=tuning_factor,
)(
state, params, num_steps, window_key
)
total_num_tuning_integrator_steps += num_tuning_integrator_steps
if frac_tune3 != 0:
for i in range(num_windows):
part2_key = jax.random.fold_in(part2_key, i)
part2_key1, part2_key2 = jax.random.split(part2_key, 2)
(
state,
params,
num_tuning_integrator_steps,
) = adjusted_mclmc_make_adaptation_L(
mclmc_kernel,
frac=frac_tune3,
Lfactor=0.5,
max=max,
eigenvector=eigenvector,
)(
state, params, num_steps, part2_key1
)
total_num_tuning_integrator_steps += num_tuning_integrator_steps
(
state,
params,
_,
num_tuning_integrator_steps,
) = adjusted_mclmc_make_L_step_size_adaptation(
kernel=mclmc_kernel,
dim=dim,
frac_tune1=frac_tune1,
frac_tune2=0,
target=target,
fix_L_first_da=True,
diagonal_preconditioning=diagonal_preconditioning,
max=max,
tuning_factor=tuning_factor,
)(
state, params, num_steps, part2_key2
)
total_num_tuning_integrator_steps += num_tuning_integrator_steps
return state, params, total_num_tuning_integrator_steps
|
Finds the optimal value of the parameters for the MH-MCHMC algorithm.
Parameters
----------
mclmc_kernel
The kernel function used for the MCMC algorithm.
num_steps
The number of MCMC steps that will subsequently be run, after tuning.
state
The initial state of the MCMC algorithm.
rng_key
The random number generator key.
target
The target acceptance rate for the step size adaptation.
frac_tune1
The fraction of tuning for the first step of the adaptation.
frac_tune2
The fraction of tuning for the second step of the adaptation.
frac_tune3
The fraction of tuning for the third step of the adaptation.
diagonal_preconditioning
Whether to do diagonal preconditioning (i.e. a mass matrix)
params
Initial params to start tuning from (optional)
max
whether to calculate L from maximum or average eigenvalue. Average is advised.
num_windows
how many iterations of the tuning are carried out
tuning_factor
multiplicative factor for L
Returns
-------
A tuple containing the final state of the MCMC algorithm and the final hyperparameters.
|
adjusted_mclmc_find_L_and_step_size
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/adjusted_mclmc_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/adjusted_mclmc_adaptation.py
|
Apache-2.0
|
def adjusted_mclmc_make_L_step_size_adaptation(
kernel,
dim,
frac_tune1,
frac_tune2,
target,
diagonal_preconditioning,
fix_L_first_da=False,
max="avg",
tuning_factor=1.0,
):
"""Adapts the stepsize and L of the MCLMC kernel. Designed for adjusted MCLMC"""
def dual_avg_step(fix_L, update_da):
"""does one step of the dynamics and updates the estimate of the posterior size and optimal stepsize"""
def step(iteration_state, weight_and_key):
mask, rng_key = weight_and_key
(
previous_state,
params,
(adaptive_state, step_size_max),
previous_weight_and_average,
) = iteration_state
avg_num_integration_steps = params.L / params.step_size
state, info = kernel(
rng_key=rng_key,
state=previous_state,
avg_num_integration_steps=avg_num_integration_steps,
step_size=params.step_size,
inverse_mass_matrix=params.inverse_mass_matrix,
)
# step updating
success, state, step_size_max, energy_change = handle_nans(
previous_state,
state,
params.step_size,
step_size_max,
info.energy,
)
with_mask = lambda x, y: mask * x + (1 - mask) * y
log_step_size, log_step_size_avg, step, avg_error, mu = update_da(
adaptive_state, info.acceptance_rate
)
adaptive_state = DualAveragingAdaptationState(
with_mask(log_step_size, adaptive_state.log_step_size),
with_mask(log_step_size_avg, adaptive_state.log_step_size_avg),
with_mask(step, adaptive_state.step),
with_mask(avg_error, adaptive_state.avg_error),
with_mask(mu, adaptive_state.mu),
)
step_size = jax.lax.clamp(
1e-5, jnp.exp(adaptive_state.log_step_size), params.L / 1.1
)
adaptive_state = adaptive_state._replace(log_step_size=jnp.log(step_size))
x = ravel_pytree(state.position)[0]
# update the running average of x, x^2
previous_weight_and_average = incremental_value_update(
expectation=jnp.array([x, jnp.square(x)]),
incremental_val=previous_weight_and_average,
weight=(1 - mask) * success * step_size,
zero_prevention=mask,
)
params = params._replace(step_size=with_mask(step_size, params.step_size))
if not fix_L:
params = params._replace(
L=with_mask(params.L * (step_size / params.step_size), params.L),
)
state_position = state.position
return (
state,
params,
(adaptive_state, step_size_max),
previous_weight_and_average,
), (
info,
state_position,
)
return step
def step_size_adaptation(mask, state, params, keys, fix_L, initial_da, update_da):
return jax.lax.scan(
dual_avg_step(fix_L, update_da),
init=(
state,
params,
(initial_da(params.step_size), jnp.inf), # step size max
(0.0, jnp.array([jnp.zeros(dim), jnp.zeros(dim)])),
),
xs=(mask, keys),
)
def L_step_size_adaptation(state, params, num_steps, rng_key):
num_steps1, num_steps2 = int(num_steps * frac_tune1), int(
num_steps * frac_tune2
)
check_key, rng_key = jax.random.split(rng_key, 2)
rng_key_pass1, rng_key_pass2 = jax.random.split(rng_key, 2)
L_step_size_adaptation_keys_pass1 = jax.random.split(
rng_key_pass1, num_steps1 + num_steps2
)
L_step_size_adaptation_keys_pass2 = jax.random.split(rng_key_pass2, num_steps1)
# determine which steps to ignore in the streaming average
mask = 1 - jnp.concatenate((jnp.zeros(num_steps1), jnp.ones(num_steps2)))
initial_da, update_da, final_da = dual_averaging_adaptation(target=target)
(
(state, params, (dual_avg_state, step_size_max), (_, average)),
(info, position_samples),
) = step_size_adaptation(
mask,
state,
params,
L_step_size_adaptation_keys_pass1,
fix_L=fix_L_first_da,
initial_da=initial_da,
update_da=update_da,
)
num_tuning_integrator_steps = info.num_integration_steps.sum()
final_stepsize = final_da(dual_avg_state)
params = params._replace(step_size=final_stepsize)
# determine L
eigenvector = None
if num_steps2 != 0.0:
x_average, x_squared_average = average[0], average[1]
variances = x_squared_average - jnp.square(x_average)
if max == "max":
contract = lambda x: jnp.sqrt(jnp.max(x) * dim) * tuning_factor
elif max == "avg":
contract = lambda x: jnp.sqrt(jnp.sum(x)) * tuning_factor
else:
raise ValueError("max should be either 'max' or 'avg'")
change = jax.lax.clamp(
Lratio_lowerbound,
contract(variances) / params.L,
Lratio_upperbound,
)
params = params._replace(
L=params.L * change, step_size=params.step_size * change
)
if diagonal_preconditioning:
params = params._replace(inverse_mass_matrix=variances, L=jnp.sqrt(dim))
initial_da, update_da, final_da = dual_averaging_adaptation(target=target)
(
(state, params, (dual_avg_state, step_size_max), (_, average)),
(info, params_history),
) = step_size_adaptation(
jnp.ones(num_steps1),
state,
params,
L_step_size_adaptation_keys_pass2,
fix_L=True,
update_da=update_da,
initial_da=initial_da,
)
num_tuning_integrator_steps += info.num_integration_steps.sum()
params = params._replace(step_size=final_da(dual_avg_state))
return state, params, eigenvector, num_tuning_integrator_steps
return L_step_size_adaptation
|
Adapts the stepsize and L of the MCLMC kernel. Designed for adjusted MCLMC
|
adjusted_mclmc_make_L_step_size_adaptation
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/adjusted_mclmc_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/adjusted_mclmc_adaptation.py
|
Apache-2.0
|
def dual_avg_step(fix_L, update_da):
"""does one step of the dynamics and updates the estimate of the posterior size and optimal stepsize"""
def step(iteration_state, weight_and_key):
mask, rng_key = weight_and_key
(
previous_state,
params,
(adaptive_state, step_size_max),
previous_weight_and_average,
) = iteration_state
avg_num_integration_steps = params.L / params.step_size
state, info = kernel(
rng_key=rng_key,
state=previous_state,
avg_num_integration_steps=avg_num_integration_steps,
step_size=params.step_size,
inverse_mass_matrix=params.inverse_mass_matrix,
)
# step updating
success, state, step_size_max, energy_change = handle_nans(
previous_state,
state,
params.step_size,
step_size_max,
info.energy,
)
with_mask = lambda x, y: mask * x + (1 - mask) * y
log_step_size, log_step_size_avg, step, avg_error, mu = update_da(
adaptive_state, info.acceptance_rate
)
adaptive_state = DualAveragingAdaptationState(
with_mask(log_step_size, adaptive_state.log_step_size),
with_mask(log_step_size_avg, adaptive_state.log_step_size_avg),
with_mask(step, adaptive_state.step),
with_mask(avg_error, adaptive_state.avg_error),
with_mask(mu, adaptive_state.mu),
)
step_size = jax.lax.clamp(
1e-5, jnp.exp(adaptive_state.log_step_size), params.L / 1.1
)
adaptive_state = adaptive_state._replace(log_step_size=jnp.log(step_size))
x = ravel_pytree(state.position)[0]
# update the running average of x, x^2
previous_weight_and_average = incremental_value_update(
expectation=jnp.array([x, jnp.square(x)]),
incremental_val=previous_weight_and_average,
weight=(1 - mask) * success * step_size,
zero_prevention=mask,
)
params = params._replace(step_size=with_mask(step_size, params.step_size))
if not fix_L:
params = params._replace(
L=with_mask(params.L * (step_size / params.step_size), params.L),
)
state_position = state.position
return (
state,
params,
(adaptive_state, step_size_max),
previous_weight_and_average,
), (
info,
state_position,
)
return step
|
Does one step of the dynamics and updates the estimate of the posterior size and the optimal step size.
|
dual_avg_step
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/adjusted_mclmc_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/adjusted_mclmc_adaptation.py
|
Apache-2.0
|
def adjusted_mclmc_make_adaptation_L(
kernel, frac, Lfactor, max="avg", eigenvector=None
):
"""determine L by the autocorrelations (around 10 effective samples are needed for this to be accurate)"""
def adaptation_L(state, params, num_steps, key):
num_steps = int(num_steps * frac)
adaptation_L_keys = jax.random.split(key, num_steps)
def step(state, key):
next_state, info = kernel(
rng_key=key,
state=state,
step_size=params.step_size,
avg_num_integration_steps=params.L / params.step_size,
inverse_mass_matrix=params.inverse_mass_matrix,
)
return next_state, (next_state.position, info)
state, (samples, info) = jax.lax.scan(
f=step,
init=state,
xs=adaptation_L_keys,
)
if max == "max":
contract = jnp.min
else:
contract = jnp.mean
flat_samples = jax.vmap(lambda x: ravel_pytree(x)[0])(samples)
if eigenvector is not None:
flat_samples = jnp.expand_dims(
jnp.einsum("ij,j", flat_samples, eigenvector), 1
)
# number of effective samples per 1 actual sample
ess = contract(effective_sample_size(flat_samples[None, ...])) / num_steps
return (
state,
params._replace(
L=jnp.clip(
Lfactor * params.L / jnp.mean(ess), max=params.L * Lratio_upperbound
)
),
info.num_integration_steps.sum(),
)
return adaptation_L
|
Determine L from the autocorrelations (around 10 effective samples are needed for this to be accurate).
|
adjusted_mclmc_make_adaptation_L
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/adjusted_mclmc_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/adjusted_mclmc_adaptation.py
|
Apache-2.0
|
def handle_nans(previous_state, next_state, step_size, step_size_max, kinetic_change):
"""if there are nans, let's reduce the stepsize, and not update the state. The
function returns the old state in this case."""
reduced_step_size = 0.8
p, unravel_fn = ravel_pytree(next_state.position)
nonans = jnp.all(jnp.isfinite(p))
state, step_size, kinetic_change = jax.tree_util.tree_map(
lambda new, old: jax.lax.select(nonans, jnp.nan_to_num(new), old),
(next_state, step_size_max, kinetic_change),
(previous_state, step_size * reduced_step_size, 0.0),
)
return nonans, state, step_size, kinetic_change
|
If there are NaNs, reduce the step size and do not update the state; the
function returns the old state in this case.
|
handle_nans
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/adjusted_mclmc_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/adjusted_mclmc_adaptation.py
|
Apache-2.0
|
def get_filter_adapt_info_fn(
state_keys: Set[str] = set(),
info_keys: Set[str] = set(),
adapt_state_keys: Set[str] = set(),
):
"""Generate a function to filter what is saved in AdaptationInfo. Used
    for the adaptation_info_fn parameter of the adaptation algorithms.
adaptation_info_fn=get_filter_adapt_info_fn() saves no auxiliary information
"""
def filter_tuple(tup, key_set):
mapfn = lambda key, val: None if key not in key_set else val
return jax.tree.map(mapfn, type(tup)(*tup._fields), tup)
def filter_fn(state, info, adaptation_state):
sample_state = filter_tuple(state, state_keys)
new_info = filter_tuple(info, info_keys)
new_adapt_state = filter_tuple(adaptation_state, adapt_state_keys)
return AdaptationInfo(sample_state, new_info, new_adapt_state)
return filter_fn
|
Generate a function to filter what is saved in AdaptationInfo. Used
for the adaptation_info_fn parameter of the adaptation algorithms.
adaptation_info_fn=get_filter_adapt_info_fn() saves no auxiliary information
|
get_filter_adapt_info_fn
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/base.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/base.py
|
Apache-2.0
|
def base(
jitter_generator: Callable,
next_random_arg_fn: Callable,
optim: optax.GradientTransformation,
target_acceptance_rate: float,
decay_rate: float,
) -> Tuple[Callable, Callable]:
"""Maximizing the Change in the Estimator of the Expected Square criterion
(trajectory length) and dual averaging procedure (step size) for the jittered
Hamiltonian Monte Carlo kernel :cite:p:`hoffman2021adaptive`.
    This adaptation algorithm tunes the step size and trajectory length, i.e.
    number of integration steps × step size, of the jittered HMC algorithm based
    on statistics collected from a population of many chains. It maximizes the Change
    in the Estimator of the Expected Square (ChEES) criterion to tune the trajectory
    length and uses dual averaging, targeting an acceptance rate of 0.651 for the
    harmonic mean of the chains' acceptance probabilities, to tune the step size.
Parameters
----------
jitter_generator
Optional function that generates a value in [0, 1] used to jitter the trajectory
lengths given a PRNGKey, used to propose the number of integration steps. If None,
        then a quasi-random Halton sequence is used to jitter the trajectory length.
next_random_arg_fn
Function that generates the next `random_generator_arg` from its previous value.
optim
Optax compatible optimizer, which conforms to the `optax.GradientTransformation` protocol.
target_acceptance_rate
Average acceptance rate to target with dual averaging.
decay_rate
Float representing how much to favor recent iterations over earlier ones in the optimization
of step size and trajectory length.
Returns
-------
init
Function that initializes the warmup.
update
Function that moves the warmup one step.
"""
da_init, da_update, _ = dual_averaging.dual_averaging()
def compute_parameters(
proposed_positions: ArrayLikeTree,
proposed_momentums: ArrayLikeTree,
initial_positions: ArrayLikeTree,
acceptance_probabilities: Array,
is_divergent: Array,
initial_adaptation_state: ChEESAdaptationState,
) -> ChEESAdaptationState:
"""Compute values for the parameters based on statistics collected from
multiple chains.
Parameters
----------
proposed_positions:
A PyTree that contains the position proposed by the HMC algorithm of
every chain (proposal that is accepted or rejected using MH).
proposed_momentums:
A PyTree that contains the momentum variable proposed by the HMC algorithm
of every chain (proposal that is accepted or rejected using MH).
initial_positions:
A PyTree that contains the initial position at the start of the HMC
algorithm of every chain.
acceptance_probabilities:
            Metropolis-Hastings acceptance probability of proposals of every chain.
        initial_adaptation_state:
            ChEES adaptation state used to generate proposals and acceptance probabilities.
Returns
-------
New values of the step size and trajectory length of the jittered HMC algorithm.
"""
(
step_size,
log_step_size_ma,
trajectory_length,
log_trajectory_length_ma,
da_state,
optim_state,
random_generator_arg,
step,
) = initial_adaptation_state
harmonic_mean = 1.0 / jnp.mean(
1.0 / acceptance_probabilities, where=~is_divergent
)
da_state_ = da_update(da_state, target_acceptance_rate - harmonic_mean)
step_size_ = jnp.exp(da_state_.log_x)
new_step_size, new_da_state, new_log_step_size = jax.lax.cond(
jnp.isfinite(step_size_),
lambda _: (step_size_, da_state_, da_state_.log_x),
lambda _: (step_size, da_state, da_state.log_x),
None,
)
update_weight = step ** (-decay_rate)
new_log_step_size_ma = (
1.0 - update_weight
) * log_step_size_ma + update_weight * new_log_step_size
proposals_mean = jax.tree_util.tree_map(
lambda p: jnp.nanmean(p, axis=0), proposed_positions
)
initials_mean = jax.tree_util.tree_map(
lambda p: jnp.nanmean(p, axis=0), initial_positions
)
proposals_centered = jax.tree_util.tree_map(
lambda p, pm: p - pm, proposed_positions, proposals_mean
)
initials_centered = jax.tree_util.tree_map(
lambda p, pm: p - pm, initial_positions, initials_mean
)
vmap_flatten_op = jax.vmap(lambda p: jax.flatten_util.ravel_pytree(p)[0])
proposals_matrix = vmap_flatten_op(proposals_centered)
initials_matrix = vmap_flatten_op(initials_centered)
momentums_matrix = vmap_flatten_op(proposed_momentums)
trajectory_gradients = (
jitter_generator(random_generator_arg)
* trajectory_length
* jax.vmap(
lambda pm, im, mm: (jnp.dot(pm, pm) - jnp.dot(im, im)) * jnp.dot(pm, mm)
)(proposals_matrix, initials_matrix, momentums_matrix)
)
trajectory_gradient = jnp.sum(
acceptance_probabilities * trajectory_gradients, where=~is_divergent
) / jnp.sum(acceptance_probabilities, where=~is_divergent)
log_trajectory_length = jnp.log(trajectory_length)
updates, optim_state_ = optim.update(
trajectory_gradient, optim_state, log_trajectory_length
)
log_trajectory_length_ = optax.apply_updates(log_trajectory_length, updates)
new_log_trajectory_length, new_optim_state = jax.lax.cond(
jnp.isfinite(
jax.flatten_util.ravel_pytree(log_trajectory_length_)[0]
).all(),
lambda _: (log_trajectory_length_, optim_state_),
lambda _: (log_trajectory_length, optim_state),
None,
)
new_log_trajectory_length_ma = (
1.0 - update_weight
) * log_trajectory_length_ma + update_weight * new_log_trajectory_length
new_trajectory_length = jnp.exp(new_log_trajectory_length_ma)
return ChEESAdaptationState(
new_step_size,
new_log_step_size_ma,
new_trajectory_length,
new_log_trajectory_length_ma,
new_da_state,
new_optim_state,
next_random_arg_fn(random_generator_arg),
step + 1,
)
def init(random_generator_arg: Array, step_size: float):
return ChEESAdaptationState(
step_size=step_size,
log_step_size_moving_average=0.0,
trajectory_length=step_size,
log_trajectory_length_moving_average=0.0,
da_state=da_init(step_size),
optim_state=optim.init(step_size),
random_generator_arg=random_generator_arg,
step=1,
)
def update(
adaptation_state: ChEESAdaptationState,
proposed_positions: ArrayLikeTree,
proposed_momentums: ArrayLikeTree,
initial_positions: ArrayLikeTree,
acceptance_probabilities: Array,
is_divergent: Array,
):
"""Update the adaptation state and parameter values.
Parameters
----------
adaptation_state
The current state of the adaptation algorithm
proposed_positions:
The position proposed by the HMC algorithm of every chain.
proposed_momentums:
The momentum variable proposed by the HMC algorithm of every chain.
initial_positions:
The initial position at the start of the HMC algorithm of every chain.
acceptance_probabilities:
            Metropolis-Hastings acceptance probability of proposals of every chain.
Returns
-------
New adaptation state that contains the step size and trajectory length of the
jittered HMC algorithm.
"""
new_state = compute_parameters(
proposed_positions,
proposed_momentums,
initial_positions,
acceptance_probabilities,
is_divergent,
adaptation_state,
)
return new_state
return init, update
|
Maximizing the Change in the Estimator of the Expected Square criterion
(trajectory length) and dual averaging procedure (step size) for the jittered
Hamiltonian Monte Carlo kernel :cite:p:`hoffman2021adaptive`.
This adaptation algorithm tunes the step size and trajectory length, i.e.
number of integration steps × step size, of the jittered HMC algorithm based
on statistics collected from a population of many chains. It maximizes the Change
in the Estimator of the Expected Square (ChEES) criterion to tune the trajectory
length and uses dual averaging, targeting an acceptance rate of 0.651 for the
harmonic mean of the chains' acceptance probabilities, to tune the step size.
Parameters
----------
jitter_generator
Optional function that generates a value in [0, 1] used to jitter the trajectory
lengths given a PRNGKey, used to propose the number of integration steps. If None,
    then a quasi-random Halton sequence is used to jitter the trajectory length.
next_random_arg_fn
Function that generates the next `random_generator_arg` from its previous value.
optim
Optax compatible optimizer, which conforms to the `optax.GradientTransformation` protocol.
target_acceptance_rate
Average acceptance rate to target with dual averaging.
decay_rate
Float representing how much to favor recent iterations over earlier ones in the optimization
of step size and trajectory length.
Returns
-------
init
Function that initializes the warmup.
update
Function that moves the warmup one step.
|
base
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/chees_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/chees_adaptation.py
|
Apache-2.0
|
def compute_parameters(
proposed_positions: ArrayLikeTree,
proposed_momentums: ArrayLikeTree,
initial_positions: ArrayLikeTree,
acceptance_probabilities: Array,
is_divergent: Array,
initial_adaptation_state: ChEESAdaptationState,
) -> ChEESAdaptationState:
"""Compute values for the parameters based on statistics collected from
multiple chains.
Parameters
----------
proposed_positions:
A PyTree that contains the position proposed by the HMC algorithm of
every chain (proposal that is accepted or rejected using MH).
proposed_momentums:
A PyTree that contains the momentum variable proposed by the HMC algorithm
of every chain (proposal that is accepted or rejected using MH).
initial_positions:
A PyTree that contains the initial position at the start of the HMC
algorithm of every chain.
acceptance_probabilities:
        Metropolis-Hastings acceptance probability of proposals of every chain.
    initial_adaptation_state:
        ChEES adaptation state used to generate proposals and acceptance probabilities.
Returns
-------
New values of the step size and trajectory length of the jittered HMC algorithm.
"""
(
step_size,
log_step_size_ma,
trajectory_length,
log_trajectory_length_ma,
da_state,
optim_state,
random_generator_arg,
step,
) = initial_adaptation_state
harmonic_mean = 1.0 / jnp.mean(
1.0 / acceptance_probabilities, where=~is_divergent
)
da_state_ = da_update(da_state, target_acceptance_rate - harmonic_mean)
step_size_ = jnp.exp(da_state_.log_x)
new_step_size, new_da_state, new_log_step_size = jax.lax.cond(
jnp.isfinite(step_size_),
lambda _: (step_size_, da_state_, da_state_.log_x),
lambda _: (step_size, da_state, da_state.log_x),
None,
)
update_weight = step ** (-decay_rate)
new_log_step_size_ma = (
1.0 - update_weight
) * log_step_size_ma + update_weight * new_log_step_size
proposals_mean = jax.tree_util.tree_map(
lambda p: jnp.nanmean(p, axis=0), proposed_positions
)
initials_mean = jax.tree_util.tree_map(
lambda p: jnp.nanmean(p, axis=0), initial_positions
)
proposals_centered = jax.tree_util.tree_map(
lambda p, pm: p - pm, proposed_positions, proposals_mean
)
initials_centered = jax.tree_util.tree_map(
lambda p, pm: p - pm, initial_positions, initials_mean
)
vmap_flatten_op = jax.vmap(lambda p: jax.flatten_util.ravel_pytree(p)[0])
proposals_matrix = vmap_flatten_op(proposals_centered)
initials_matrix = vmap_flatten_op(initials_centered)
momentums_matrix = vmap_flatten_op(proposed_momentums)
trajectory_gradients = (
jitter_generator(random_generator_arg)
* trajectory_length
* jax.vmap(
lambda pm, im, mm: (jnp.dot(pm, pm) - jnp.dot(im, im)) * jnp.dot(pm, mm)
)(proposals_matrix, initials_matrix, momentums_matrix)
)
trajectory_gradient = jnp.sum(
acceptance_probabilities * trajectory_gradients, where=~is_divergent
) / jnp.sum(acceptance_probabilities, where=~is_divergent)
log_trajectory_length = jnp.log(trajectory_length)
updates, optim_state_ = optim.update(
trajectory_gradient, optim_state, log_trajectory_length
)
log_trajectory_length_ = optax.apply_updates(log_trajectory_length, updates)
new_log_trajectory_length, new_optim_state = jax.lax.cond(
jnp.isfinite(
jax.flatten_util.ravel_pytree(log_trajectory_length_)[0]
).all(),
lambda _: (log_trajectory_length_, optim_state_),
lambda _: (log_trajectory_length, optim_state),
None,
)
new_log_trajectory_length_ma = (
1.0 - update_weight
) * log_trajectory_length_ma + update_weight * new_log_trajectory_length
new_trajectory_length = jnp.exp(new_log_trajectory_length_ma)
return ChEESAdaptationState(
new_step_size,
new_log_step_size_ma,
new_trajectory_length,
new_log_trajectory_length_ma,
new_da_state,
new_optim_state,
next_random_arg_fn(random_generator_arg),
step + 1,
)
|
Compute values for the parameters based on statistics collected from
multiple chains.
Parameters
----------
proposed_positions:
A PyTree that contains the position proposed by the HMC algorithm of
every chain (proposal that is accepted or rejected using MH).
proposed_momentums:
A PyTree that contains the momentum variable proposed by the HMC algorithm
of every chain (proposal that is accepted or rejected using MH).
initial_positions:
A PyTree that contains the initial position at the start of the HMC
algorithm of every chain.
acceptance_probabilities:
    Metropolis-Hastings acceptance probability of proposals of every chain.
initial_adaptation_state:
    ChEES adaptation state used to generate proposals and acceptance probabilities.
Returns
-------
New values of the step size and trajectory length of the jittered HMC algorithm.
|
compute_parameters
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/chees_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/chees_adaptation.py
|
Apache-2.0
|
def update(
adaptation_state: ChEESAdaptationState,
proposed_positions: ArrayLikeTree,
proposed_momentums: ArrayLikeTree,
initial_positions: ArrayLikeTree,
acceptance_probabilities: Array,
is_divergent: Array,
):
"""Update the adaptation state and parameter values.
Parameters
----------
adaptation_state
The current state of the adaptation algorithm
proposed_positions:
The position proposed by the HMC algorithm of every chain.
proposed_momentums:
The momentum variable proposed by the HMC algorithm of every chain.
initial_positions:
The initial position at the start of the HMC algorithm of every chain.
acceptance_probabilities:
            Metropolis-Hastings acceptance probability of proposals of every chain.
Returns
-------
New adaptation state that contains the step size and trajectory length of the
jittered HMC algorithm.
"""
new_state = compute_parameters(
proposed_positions,
proposed_momentums,
initial_positions,
acceptance_probabilities,
is_divergent,
adaptation_state,
)
return new_state
|
Update the adaptation state and parameter values.
Parameters
----------
adaptation_state
The current state of the adaptation algorithm
proposed_positions:
The position proposed by the HMC algorithm of every chain.
proposed_momentums:
The momentum variable proposed by the HMC algorithm of every chain.
initial_positions:
The initial position at the start of the HMC algorithm of every chain.
acceptance_probabilities:
    Metropolis-Hastings acceptance probability of proposals of every chain.
Returns
-------
New adaptation state that contains the step size and trajectory length of the
jittered HMC algorithm.
|
update
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/chees_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/chees_adaptation.py
|
Apache-2.0
|
def chees_adaptation(
logdensity_fn: Callable,
num_chains: int,
*,
jitter_generator: Optional[Callable] = None,
jitter_amount: float = 1.0,
target_acceptance_rate: float = OPTIMAL_TARGET_ACCEPTANCE_RATE,
decay_rate: float = 0.5,
adaptation_info_fn: Callable = return_all_adapt_info,
) -> AdaptationAlgorithm:
"""Adapt the step size and trajectory length (number of integration steps / step size)
parameters of the jittered HMC algorthm.
The jittered HMC algorithm depends on the value of a step size, controlling
the discretization step of the integrator, and a trajectory length, given by the
number of integration steps / step size, jittered by using only a random percentage
of this trajectory length.
This adaptation algorithm tunes the trajectory length by heuristically maximizing
the Change in the Estimator of the Expected Square (ChEES) criterion over
    an ensemble of parallel chains. At equilibrium, the algorithm aims at eliminating
correlations between target dimensions, making the HMC algorithm efficient.
Jittering requires generating a random sequence of uniform variables in [0, 1].
However, this adds another source of variance to the sampling procedure,
which may slow adaptation or lead to suboptimal mixing. To alleviate this,
rather than use uniform random noise to jitter the trajectory lengths, we use a
quasi-random Halton sequence, which ensures a more even distribution of trajectory
lengths.
Examples
--------
    An adapted HMC kernel can be learned and used with the following code:
.. code::
warmup = blackjax.chees_adaptation(logdensity_fn, num_chains)
key_warmup, key_sample = jax.random.split(rng_key)
optim = optax.adam(learning_rate)
(last_states, parameters), _ = warmup.run(
key_warmup,
positions, #PyTree where each leaf has shape (num_chains, ...)
initial_step_size,
optim,
num_warmup_steps,
)
kernel = blackjax.dynamic_hmc(logdensity_fn, **parameters).step
new_states, info = jax.vmap(kernel)(key_sample, last_states)
Parameters
----------
logdensity_fn
        The log-density function from which we wish to sample.
num_chains
Number of chains used for cross-chain warm-up training.
jitter_generator
Optional function that generates a value in [0, 1] used to jitter the trajectory
lengths given a PRNGKey, used to propose the number of integration steps. If None,
        then a quasi-random Halton sequence is used to jitter the trajectory length.
    jitter_amount
        A percentage in [0, 1] representing how much of the calculated trajectory should be jittered.
target_acceptance_rate
Average acceptance rate to target with dual averaging. Defaults to optimal tuning for HMC.
decay_rate
Float representing how much to favor recent iterations over earlier ones in the optimization
of step size and trajectory length. A value of 1 gives equal weight to all history. A value
of 0 gives weight only to the most recent iteration.
adaptation_info_fn
Function to select the adaptation info returned. See return_all_adapt_info
and get_filter_adapt_info_fn in blackjax.adaptation.base. By default all
information is saved - this can result in excessive memory usage if the
information is unused.
Returns
-------
A function that returns the last cross-chain state, a sampling kernel with the
tuned parameter values, and all the warm-up states for diagnostics.
"""
def run(
rng_key: PRNGKey,
positions: ArrayLikeTree,
step_size: float,
optim: optax.GradientTransformation,
num_steps: int = 1000,
*,
max_sampling_steps: int = 1000,
):
assert all(
jax.tree_util.tree_flatten(
jax.tree_util.tree_map(lambda p: p.shape[0] == num_chains, positions)
)[0]
), "initial `positions` leading dimension must be equal to the `num_chains`"
num_dim = pytree_size(positions) // num_chains
next_random_arg_fn = lambda i: i + 1
init_random_arg = 0
if jitter_generator is not None:
rng_key, carry_key = jax.random.split(rng_key)
jitter_gn = lambda i: jitter_generator(
jax.random.fold_in(carry_key, i)
) * jitter_amount + (1.0 - jitter_amount)
else:
jitter_gn = lambda i: dynamic_hmc.halton_sequence(
i, np.ceil(np.log2(num_steps + max_sampling_steps))
) * jitter_amount + (1.0 - jitter_amount)
def integration_steps_fn(random_generator_arg, trajectory_length_adjusted):
return jnp.asarray(
jnp.ceil(jitter_gn(random_generator_arg) * trajectory_length_adjusted),
dtype=int,
)
step_fn = dynamic_hmc.build_kernel(
next_random_arg_fn=next_random_arg_fn,
integration_steps_fn=integration_steps_fn,
)
init, update = base(
jitter_gn, next_random_arg_fn, optim, target_acceptance_rate, decay_rate
)
def one_step(carry, rng_key):
states, adaptation_state = carry
keys = jax.random.split(rng_key, num_chains)
_step_fn = partial(
step_fn,
logdensity_fn=logdensity_fn,
step_size=adaptation_state.step_size,
inverse_mass_matrix=jnp.ones(num_dim),
trajectory_length_adjusted=adaptation_state.trajectory_length
/ adaptation_state.step_size,
)
new_states, info = jax.vmap(_step_fn)(keys, states)
new_adaptation_state = update(
adaptation_state,
info.proposal.position,
info.proposal.momentum,
states.position,
info.acceptance_rate,
info.is_divergent,
)
return (new_states, new_adaptation_state), adaptation_info_fn(
new_states, info, new_adaptation_state
)
batch_init = jax.vmap(
lambda p: dynamic_hmc.init(p, logdensity_fn, init_random_arg)
)
init_states = batch_init(positions)
init_adaptation_state = init(init_random_arg, step_size)
keys_step = jax.random.split(rng_key, num_steps)
(last_states, last_adaptation_state), info = jax.lax.scan(
one_step, (init_states, init_adaptation_state), keys_step
)
trajectory_length_adjusted = jnp.exp(
last_adaptation_state.log_trajectory_length_moving_average
- last_adaptation_state.log_step_size_moving_average
)
parameters = {
"step_size": jnp.exp(last_adaptation_state.log_step_size_moving_average),
"inverse_mass_matrix": jnp.ones(num_dim),
"next_random_arg_fn": next_random_arg_fn,
"integration_steps_fn": lambda arg: integration_steps_fn(
arg, trajectory_length_adjusted
),
}
return AdaptationResults(last_states, parameters), info
return AdaptationAlgorithm(run) # type: ignore[arg-type]
|
Adapt the step size and trajectory length (number of integration steps × step size)
parameters of the jittered HMC algorithm.
The jittered HMC algorithm depends on the value of a step size, controlling
the discretization step of the integrator, and a trajectory length, given by the
number of integration steps × step size, jittered by using only a random percentage
of this trajectory length.
This adaptation algorithm tunes the trajectory length by heuristically maximizing
the Change in the Estimator of the Expected Square (ChEES) criterion over
an ensemble of parallel chains. At equilibrium, the algorithm aims at eliminating
correlations between target dimensions, making the HMC algorithm efficient.
Jittering requires generating a random sequence of uniform variables in [0, 1].
However, this adds another source of variance to the sampling procedure,
which may slow adaptation or lead to suboptimal mixing. To alleviate this,
rather than use uniform random noise to jitter the trajectory lengths, we use a
quasi-random Halton sequence, which ensures a more even distribution of trajectory
lengths.
Examples
--------
An adapted HMC kernel can be learned and used with the following code:
.. code::
warmup = blackjax.chees_adaptation(logdensity_fn, num_chains)
key_warmup, key_sample = jax.random.split(rng_key)
optim = optax.adam(learning_rate)
(last_states, parameters), _ = warmup.run(
key_warmup,
positions, #PyTree where each leaf has shape (num_chains, ...)
initial_step_size,
optim,
num_warmup_steps,
)
kernel = blackjax.dynamic_hmc(logdensity_fn, **parameters).step
new_states, info = jax.vmap(kernel)(key_sample, last_states)
Parameters
----------
logdensity_fn
    The log-density function from which we wish to sample.
num_chains
Number of chains used for cross-chain warm-up training.
jitter_generator
Optional function that generates a value in [0, 1] used to jitter the trajectory
lengths given a PRNGKey, used to propose the number of integration steps. If None,
    then a quasi-random Halton sequence is used to jitter the trajectory length.
jitter_amount
    A percentage in [0, 1] representing how much of the calculated trajectory should be jittered.
target_acceptance_rate
Average acceptance rate to target with dual averaging. Defaults to optimal tuning for HMC.
decay_rate
Float representing how much to favor recent iterations over earlier ones in the optimization
of step size and trajectory length. A value of 1 gives equal weight to all history. A value
of 0 gives weight only to the most recent iteration.
adaptation_info_fn
Function to select the adaptation info returned. See return_all_adapt_info
and get_filter_adapt_info_fn in blackjax.adaptation.base. By default all
information is saved - this can result in excessive memory usage if the
information is unused.
Returns
-------
A function that returns the last cross-chain state, a sampling kernel with the
tuned parameter values, and all the warm-up states for diagnostics.
|
chees_adaptation
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/chees_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/chees_adaptation.py
|
Apache-2.0
|
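The default jitter is a base-2 Halton (van der Corput) sequence rather than i.i.d. uniforms. The following is a minimal independent sketch of such a generator, written for illustration only; blackjax ships its own dynamic_hmc.halton_sequence, used in the record above.
import jax.numpy as jnp

def van_der_corput_base2(i, max_bits=10):
    # reverse the base-2 digits of i across the radix point:
    # 1 -> 0.5, 2 -> 0.25, 3 -> 0.75, 4 -> 0.125, ...
    bits = jnp.right_shift(i, jnp.arange(max_bits)) & 1
    return jnp.sum(bits * jnp.power(0.5, jnp.arange(1, max_bits + 1)))

print([float(van_der_corput_base2(i)) for i in range(1, 6)])
# [0.5, 0.25, 0.75, 0.125, 0.625] -- evenly spread over [0, 1]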
def mass_matrix_adaptation(
is_diagonal_matrix: bool = True,
) -> tuple[Callable, Callable, Callable]:
"""Adapts the values in the mass matrix by computing the covariance
between parameters.
Parameters
----------
is_diagonal_matrix
When True the algorithm adapts and returns a diagonal mass matrix
        (default), otherwise adapts and returns a dense mass matrix.
Returns
-------
init
A function that initializes the step of the mass matrix adaptation.
update
A function that updates the state of the mass matrix.
final
A function that computes the inverse mass matrix based on the current
state.
"""
wc_init, wc_update, wc_final = welford_algorithm(is_diagonal_matrix)
def init(n_dims: int) -> MassMatrixAdaptationState:
"""Initialize the matrix adaptation.
Parameters
----------
        n_dims
The number of dimensions of the mass matrix, which corresponds to
the number of dimensions of the chain position.
"""
if is_diagonal_matrix:
inverse_mass_matrix = jnp.ones(n_dims)
else:
inverse_mass_matrix = jnp.identity(n_dims)
wc_state = wc_init(n_dims)
return MassMatrixAdaptationState(inverse_mass_matrix, wc_state)
def update(
mm_state: MassMatrixAdaptationState, position: ArrayLike
) -> MassMatrixAdaptationState:
"""Update the algorithm's state.
Parameters
----------
        mm_state:
            The current state of the mass matrix adaptation.
position:
The current position of the chain.
"""
inverse_mass_matrix, wc_state = mm_state
position, _ = jax.flatten_util.ravel_pytree(position)
wc_state = wc_update(wc_state, position)
return MassMatrixAdaptationState(inverse_mass_matrix, wc_state)
def final(mm_state: MassMatrixAdaptationState) -> MassMatrixAdaptationState:
"""Final iteration of the mass matrix adaptation.
In this step we compute the mass matrix from the covariance matrix computed
        by the Welford algorithm, and re-initialize the latter.
"""
_, wc_state = mm_state
covariance, count, mean = wc_final(wc_state)
# Regularize the covariance matrix, see Stan
scaled_covariance = (count / (count + 5)) * covariance
shrinkage = 1e-3 * (5 / (count + 5))
if is_diagonal_matrix:
inverse_mass_matrix = scaled_covariance + shrinkage
else:
inverse_mass_matrix = scaled_covariance + shrinkage * jnp.identity(
mean.shape[0]
)
ndims = jnp.shape(inverse_mass_matrix)[-1]
new_mm_state = MassMatrixAdaptationState(inverse_mass_matrix, wc_init(ndims))
return new_mm_state
return init, update, final
|
Adapts the values in the mass matrix by computing the covariance
between parameters.
Parameters
----------
is_diagonal_matrix
When True the algorithm adapts and returns a diagonal mass matrix
    (default), otherwise adapts and returns a dense mass matrix.
Returns
-------
init
A function that initializes the step of the mass matrix adaptation.
update
A function that updates the state of the mass matrix.
final
A function that computes the inverse mass matrix based on the current
state.
|
mass_matrix_adaptation
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/mass_matrix.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/mass_matrix.py
|
Apache-2.0
|
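A hedged end-to-end sketch wiring the three returned callables together, with synthetic Gaussian draws standing in for MCMC positions; the import path follows the record's metadata.
import numpy as np
from blackjax.adaptation.mass_matrix import mass_matrix_adaptation

init, update, final = mass_matrix_adaptation(is_diagonal_matrix=True)

rng = np.random.default_rng(0)
positions = rng.normal(scale=[1.0, 3.0], size=(500, 2))  # marginal stds 1 and 3

state = init(n_dims=2)
for position in positions:
    state = update(state, position)
state = final(state)

# the diagonal inverse mass matrix approximates the marginal variances [1, 9]
print(state.inverse_mass_matrix)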
def init(n_dims: int) -> MassMatrixAdaptationState:
"""Initialize the matrix adaptation.
Parameters
----------
    n_dims
The number of dimensions of the mass matrix, which corresponds to
the number of dimensions of the chain position.
"""
if is_diagonal_matrix:
inverse_mass_matrix = jnp.ones(n_dims)
else:
inverse_mass_matrix = jnp.identity(n_dims)
wc_state = wc_init(n_dims)
return MassMatrixAdaptationState(inverse_mass_matrix, wc_state)
|
Initialize the matrix adaptation.
Parameters
----------
n_dims
The number of dimensions of the mass matrix, which corresponds to
the number of dimensions of the chain position.
|
init
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/mass_matrix.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/mass_matrix.py
|
Apache-2.0
|
def update(
mm_state: MassMatrixAdaptationState, position: ArrayLike
) -> MassMatrixAdaptationState:
"""Update the algorithm's state.
Parameters
----------
    mm_state:
        The current state of the mass matrix adaptation.
position:
The current position of the chain.
"""
inverse_mass_matrix, wc_state = mm_state
position, _ = jax.flatten_util.ravel_pytree(position)
wc_state = wc_update(wc_state, position)
return MassMatrixAdaptationState(inverse_mass_matrix, wc_state)
|
Update the algorithm's state.
Parameters
----------
mm_state:
    The current state of the mass matrix adaptation.
position:
The current position of the chain.
|
update
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/mass_matrix.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/mass_matrix.py
|
Apache-2.0
|
def final(mm_state: MassMatrixAdaptationState) -> MassMatrixAdaptationState:
"""Final iteration of the mass matrix adaptation.
In this step we compute the mass matrix from the covariance matrix computed
    by the Welford algorithm, and re-initialize the latter.
"""
_, wc_state = mm_state
covariance, count, mean = wc_final(wc_state)
# Regularize the covariance matrix, see Stan
scaled_covariance = (count / (count + 5)) * covariance
shrinkage = 1e-3 * (5 / (count + 5))
if is_diagonal_matrix:
inverse_mass_matrix = scaled_covariance + shrinkage
else:
inverse_mass_matrix = scaled_covariance + shrinkage * jnp.identity(
mean.shape[0]
)
ndims = jnp.shape(inverse_mass_matrix)[-1]
new_mm_state = MassMatrixAdaptationState(inverse_mass_matrix, wc_init(ndims))
return new_mm_state
|
Final iteration of the mass matrix adaptation.
In this step we compute the mass matrix from the covariance matrix computed
by the Welford algorithm, and re-initialize the latter.
|
final
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/mass_matrix.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/mass_matrix.py
|
Apache-2.0
|
def welford_algorithm(is_diagonal_matrix: bool) -> tuple[Callable, Callable, Callable]:
r"""Welford's online estimator of covariance.
It is possible to compute the variance of a population of values in an
on-line fashion to avoid storing intermediate results. The naive recurrence
relations between the sample mean and variance at a step and the next are
however not numerically stable.
    Welford's algorithm instead uses the sum of squared differences
    :math:`M_{2,n} = \sum_{i=1}^n \left(x_i-\overline{x_n}\right)^2`,
    where :math:`\overline{x_n}` is the current sample mean, and updates it
    with numerically stable recurrence relations.
Parameters
----------
is_diagonal_matrix
When True the algorithm adapts and returns a diagonal mass matrix
        (default), otherwise adapts and returns a dense mass matrix.
Note
----
It might seem pedantic to separate the Welford algorithm from mass adaptation,
but this covariance estimator is used in other parts of the library.
"""
def init(n_dims: int) -> WelfordAlgorithmState:
"""Initialize the covariance estimation.
When the matrix is diagonal it is sufficient to work with an array that contains
the diagonal value. Otherwise we need to work with the matrix in full.
Parameters
----------
n_dims: int
The number of dimensions of the problem, which corresponds to the size
of the corresponding square mass matrix.
"""
sample_size = 0
mean = jnp.zeros((n_dims,))
if is_diagonal_matrix:
m2 = jnp.zeros((n_dims,))
else:
m2 = jnp.zeros((n_dims, n_dims))
return WelfordAlgorithmState(mean, m2, sample_size)
def update(
wa_state: WelfordAlgorithmState, value: ArrayLike
) -> WelfordAlgorithmState:
"""Update the M2 matrix using the new value.
Parameters
----------
wa_state:
The current state of the Welford Algorithm
        value: Array, shape (n_dims,)
The new sample (typically position of the chain) used to update m2
"""
mean, m2, sample_size = wa_state
sample_size = sample_size + 1
delta = value - mean
mean = mean + delta / sample_size
updated_delta = value - mean
if is_diagonal_matrix:
new_m2 = m2 + delta * updated_delta
else:
new_m2 = m2 + jnp.outer(updated_delta, delta)
return WelfordAlgorithmState(mean, new_m2, sample_size)
def final(
wa_state: WelfordAlgorithmState,
) -> tuple[Array, int, Array]:
mean, m2, sample_size = wa_state
covariance = m2 / (sample_size - 1)
return covariance, sample_size, mean
return init, update, final
|
Welford's online estimator of covariance.
It is possible to compute the variance of a population of values in an
on-line fashion to avoid storing intermediate results. The naive recurrence
relations between the sample mean and variance at a step and the next are
however not numerically stable.
Welford's algorithm instead uses the sum of squared differences
:math:`M_{2,n} = \sum_{i=1}^n \left(x_i-\overline{x_n}\right)^2`,
where :math:`\overline{x_n}` is the current sample mean, and updates it
with numerically stable recurrence relations.
Parameters
----------
is_diagonal_matrix
When True the algorithm adapts and returns a diagonal mass matrix
    (default), otherwise adapts and returns a dense mass matrix.
Note
----
It might seem pedantic to separate the Welford algorithm from mass adaptation,
but this covariance estimator is used in other parts of the library.
|
welford_algorithm
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/mass_matrix.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/mass_matrix.py
|
Apache-2.0
|
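A quick numerical check of the estimator against the batch sample covariance; a sketch, with the import path taken from the record's metadata.
import jax
import jax.numpy as jnp
from blackjax.adaptation.mass_matrix import welford_algorithm

init, update, final = welford_algorithm(is_diagonal_matrix=False)

key = jax.random.PRNGKey(0)
samples = jax.random.normal(key, (200, 3)) @ jnp.array(
    [[1.0, 0.0, 0.0], [0.5, 1.0, 0.0], [0.0, 0.2, 1.0]]
)

state = init(3)
for x in samples:
    state = update(state, x)
covariance, count, mean = final(state)

# agrees with the (n - 1)-normalized batch estimate
print(jnp.allclose(covariance, jnp.cov(samples, rowvar=False), atol=1e-3))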
def init(n_dims: int) -> WelfordAlgorithmState:
"""Initialize the covariance estimation.
When the matrix is diagonal it is sufficient to work with an array that contains
the diagonal value. Otherwise we need to work with the matrix in full.
Parameters
----------
n_dims: int
The number of dimensions of the problem, which corresponds to the size
of the corresponding square mass matrix.
"""
sample_size = 0
mean = jnp.zeros((n_dims,))
if is_diagonal_matrix:
m2 = jnp.zeros((n_dims,))
else:
m2 = jnp.zeros((n_dims, n_dims))
return WelfordAlgorithmState(mean, m2, sample_size)
|
Initialize the covariance estimation.
When the matrix is diagonal it is sufficient to work with an array that contains
the diagonal value. Otherwise we need to work with the matrix in full.
Parameters
----------
n_dims: int
The number of dimensions of the problem, which corresponds to the size
of the corresponding square mass matrix.
|
init
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/mass_matrix.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/mass_matrix.py
|
Apache-2.0
|
def update(
wa_state: WelfordAlgorithmState, value: ArrayLike
) -> WelfordAlgorithmState:
"""Update the M2 matrix using the new value.
Parameters
----------
wa_state:
The current state of the Welford Algorithm
    value: Array, shape (n_dims,)
The new sample (typically position of the chain) used to update m2
"""
mean, m2, sample_size = wa_state
sample_size = sample_size + 1
delta = value - mean
mean = mean + delta / sample_size
updated_delta = value - mean
if is_diagonal_matrix:
new_m2 = m2 + delta * updated_delta
else:
new_m2 = m2 + jnp.outer(updated_delta, delta)
return WelfordAlgorithmState(mean, new_m2, sample_size)
|
Update the M2 matrix using the new value.
Parameters
----------
wa_state:
The current state of the Welford Algorithm
value: Array, shape (n_dims,)
The new sample (typically position of the chain) used to update m2
|
update
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/mass_matrix.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/mass_matrix.py
|
Apache-2.0
|
def mclmc_find_L_and_step_size(
mclmc_kernel,
num_steps,
state,
rng_key,
frac_tune1=0.1,
frac_tune2=0.1,
frac_tune3=0.1,
desired_energy_var=5e-4,
trust_in_estimate=1.5,
num_effective_samples=150,
diagonal_preconditioning=True,
params=None,
):
"""
Finds the optimal value of the parameters for the MCLMC algorithm.
Parameters
----------
mclmc_kernel
The kernel function used for the MCMC algorithm.
num_steps
The number of MCMC steps that will subsequently be run, after tuning.
state
The initial state of the MCMC algorithm.
rng_key
The random number generator key.
frac_tune1
The fraction of tuning for the first step of the adaptation.
frac_tune2
The fraction of tuning for the second step of the adaptation.
frac_tune3
The fraction of tuning for the third step of the adaptation.
    desired_energy_var
The desired energy variance for the MCMC algorithm.
trust_in_estimate
The trust in the estimate of optimal stepsize.
num_effective_samples
The number of effective samples for the MCMC algorithm.
diagonal_preconditioning
        Whether to do diagonal preconditioning (i.e. use a diagonal inverse mass matrix)
params
Initial params to start tuning from (optional)
Returns
-------
A tuple containing the final state of the MCMC algorithm and the final hyperparameters.
Example
-------
.. code::
kernel = lambda inverse_mass_matrix : blackjax.mcmc.mclmc.build_kernel(
logdensity_fn=logdensity_fn,
integrator=integrator,
inverse_mass_matrix=inverse_mass_matrix,
)
(
blackjax_state_after_tuning,
blackjax_mclmc_sampler_params,
) = blackjax.mclmc_find_L_and_step_size(
mclmc_kernel=kernel,
num_steps=num_steps,
state=initial_state,
rng_key=tune_key,
diagonal_preconditioning=preconditioning,
)
"""
dim = pytree_size(state.position)
if params is None:
params = MCLMCAdaptationState(
jnp.sqrt(dim), jnp.sqrt(dim) * 0.25, inverse_mass_matrix=jnp.ones((dim,))
)
part1_key, part2_key = jax.random.split(rng_key, 2)
total_num_tuning_integrator_steps = 0
num_steps1, num_steps2 = round(num_steps * frac_tune1), round(
num_steps * frac_tune2
)
num_steps2 += diagonal_preconditioning * (num_steps2 // 3)
num_steps3 = round(num_steps * frac_tune3)
state, params = make_L_step_size_adaptation(
kernel=mclmc_kernel,
dim=dim,
frac_tune1=frac_tune1,
frac_tune2=frac_tune2,
desired_energy_var=desired_energy_var,
trust_in_estimate=trust_in_estimate,
num_effective_samples=num_effective_samples,
diagonal_preconditioning=diagonal_preconditioning,
)(state, params, num_steps, part1_key)
total_num_tuning_integrator_steps += num_steps1 + num_steps2
if num_steps3 >= 2: # at least 2 samples for ESS estimation
state, params = make_adaptation_L(
mclmc_kernel(params.inverse_mass_matrix), frac=frac_tune3, Lfactor=0.4
)(state, params, num_steps, part2_key)
total_num_tuning_integrator_steps += num_steps3
return state, params, total_num_tuning_integrator_steps
|
Finds the optimal value of the parameters for the MCLMC algorithm.
Parameters
----------
mclmc_kernel
The kernel function used for the MCMC algorithm.
num_steps
The number of MCMC steps that will subsequently be run, after tuning.
state
The initial state of the MCMC algorithm.
rng_key
The random number generator key.
frac_tune1
The fraction of tuning for the first step of the adaptation.
frac_tune2
The fraction of tuning for the second step of the adaptation.
frac_tune3
The fraction of tuning for the third step of the adaptation.
desired_energy_var
The desired energy variance for the MCMC algorithm.
trust_in_estimate
The trust in the estimate of optimal stepsize.
num_effective_samples
The number of effective samples for the MCMC algorithm.
diagonal_preconditioning
    Whether to do diagonal preconditioning (i.e. use a diagonal inverse mass matrix)
params
Initial params to start tuning from (optional)
Returns
-------
A tuple containing the final state of the MCMC algorithm and the final hyperparameters.
Example
-------
.. code::
kernel = lambda inverse_mass_matrix : blackjax.mcmc.mclmc.build_kernel(
logdensity_fn=logdensity_fn,
integrator=integrator,
inverse_mass_matrix=inverse_mass_matrix,
)
(
blackjax_state_after_tuning,
blackjax_mclmc_sampler_params,
) = blackjax.mclmc_find_L_and_step_size(
mclmc_kernel=kernel,
num_steps=num_steps,
state=initial_state,
rng_key=tune_key,
diagonal_preconditioning=preconditioning,
)
|
mclmc_find_L_and_step_size
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/mclmc_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/mclmc_adaptation.py
|
Apache-2.0
|
def make_L_step_size_adaptation(
kernel,
dim,
frac_tune1,
frac_tune2,
diagonal_preconditioning,
desired_energy_var=1e-3,
trust_in_estimate=1.5,
num_effective_samples=150,
):
"""Adapts the stepsize and L of the MCLMC kernel. Designed for unadjusted MCLMC"""
decay_rate = (num_effective_samples - 1.0) / (num_effective_samples + 1.0)
def predictor(previous_state, params, adaptive_state, rng_key):
"""does one step with the dynamics and updates the prediction for the optimal stepsize
Designed for the unadjusted MCHMC"""
time, x_average, step_size_max = adaptive_state
rng_key, nan_key = jax.random.split(rng_key)
# dynamics
next_state, info = kernel(params.inverse_mass_matrix)(
rng_key=rng_key,
state=previous_state,
L=params.L,
step_size=params.step_size,
)
# step updating
success, state, step_size_max, energy_change = handle_nans(
previous_state,
next_state,
params.step_size,
step_size_max,
info.energy_change,
nan_key,
)
# Warning: var = 0 if there were nans, but we will give it a very small weight
xi = (
jnp.square(energy_change) / (dim * desired_energy_var)
) + 1e-8 # 1e-8 is added to avoid divergences in log xi
weight = jnp.exp(
-0.5 * jnp.square(jnp.log(xi) / (6.0 * trust_in_estimate))
        ) # the weight reduces the impact of stepsizes which are much larger or much smaller than the desired one.
x_average = decay_rate * x_average + weight * (
xi / jnp.power(params.step_size, 6.0)
)
time = decay_rate * time + weight
step_size = jnp.power(
x_average / time, -1.0 / 6.0
) # We use the Var[E] = O(eps^6) relation here.
step_size = (step_size < step_size_max) * step_size + (
step_size > step_size_max
        ) * step_size_max # cap the proposed stepsize if it is above the stepsize where we have seen divergences
params_new = params._replace(step_size=step_size)
adaptive_state = (time, x_average, step_size_max)
return state, params_new, adaptive_state, success
def step(iteration_state, weight_and_key):
"""does one step of the dynamics and updates the estimate of the posterior size and optimal stepsize"""
mask, rng_key = weight_and_key
state, params, adaptive_state, streaming_avg = iteration_state
state, params, adaptive_state, success = predictor(
state, params, adaptive_state, rng_key
)
x = ravel_pytree(state.position)[0]
# update the running average of x, x^2
streaming_avg = incremental_value_update(
expectation=jnp.array([x, jnp.square(x)]),
incremental_val=streaming_avg,
weight=mask * success * params.step_size,
)
return (state, params, adaptive_state, streaming_avg), None
run_steps = lambda xs, state, params: jax.lax.scan(
step,
init=(
state,
params,
(0.0, 0.0, jnp.inf),
(0.0, jnp.array([jnp.zeros(dim), jnp.zeros(dim)])),
),
xs=xs,
)[0]
def L_step_size_adaptation(state, params, num_steps, rng_key):
num_steps1, num_steps2 = round(num_steps * frac_tune1), round(
num_steps * frac_tune2
)
L_step_size_adaptation_keys = jax.random.split(
rng_key, num_steps1 + num_steps2 + 1
)
L_step_size_adaptation_keys, final_key = (
L_step_size_adaptation_keys[:-1],
L_step_size_adaptation_keys[-1],
)
        # we use the last num_steps2 steps to compute the diagonal preconditioner
mask = jnp.concatenate((jnp.zeros(num_steps1), jnp.ones(num_steps2)))
# run the steps
state, params, _, (_, average) = run_steps(
xs=(mask, L_step_size_adaptation_keys), state=state, params=params
)
L = params.L
# determine L
inverse_mass_matrix = params.inverse_mass_matrix
if num_steps2 > 1:
x_average, x_squared_average = average[0], average[1]
variances = x_squared_average - jnp.square(x_average)
L = jnp.sqrt(jnp.sum(variances))
if diagonal_preconditioning:
inverse_mass_matrix = variances
params = params._replace(inverse_mass_matrix=inverse_mass_matrix)
L = jnp.sqrt(dim)
# readjust the stepsize
steps = round(num_steps2 / 3) # we do some small number of steps
keys = jax.random.split(final_key, steps)
state, params, _, (_, average) = run_steps(
xs=(jnp.ones(steps), keys), state=state, params=params
)
return state, MCLMCAdaptationState(L, params.step_size, inverse_mass_matrix)
return L_step_size_adaptation
|
Adapts the stepsize and L of the MCLMC kernel. Designed for unadjusted MCLMC
|
make_L_step_size_adaptation
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/mclmc_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/mclmc_adaptation.py
|
Apache-2.0
|
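The L heuristic in the record reduces to the root of the summed marginal variances, recovered from the streaming averages of x and x²; a small worked sketch with synthetic averages follows.
import jax.numpy as jnp

# streaming averages accumulated during tuning (synthetic values)
x_average = jnp.array([0.1, -0.2, 0.0])
x_squared_average = jnp.array([1.1, 4.2, 0.9])

# per-coordinate Var[x] = E[x^2] - E[x]^2
variances = x_squared_average - jnp.square(x_average)

# L is set to the overall posterior scale: sqrt of the trace of the covariance
L = jnp.sqrt(jnp.sum(variances))
print(variances, L)  # [1.09 4.16 0.9 ], L ≈ 2.48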
def predictor(previous_state, params, adaptive_state, rng_key):
"""does one step with the dynamics and updates the prediction for the optimal stepsize
Designed for the unadjusted MCHMC"""
time, x_average, step_size_max = adaptive_state
rng_key, nan_key = jax.random.split(rng_key)
# dynamics
next_state, info = kernel(params.inverse_mass_matrix)(
rng_key=rng_key,
state=previous_state,
L=params.L,
step_size=params.step_size,
)
# step updating
success, state, step_size_max, energy_change = handle_nans(
previous_state,
next_state,
params.step_size,
step_size_max,
info.energy_change,
nan_key,
)
# Warning: var = 0 if there were nans, but we will give it a very small weight
xi = (
jnp.square(energy_change) / (dim * desired_energy_var)
) + 1e-8 # 1e-8 is added to avoid divergences in log xi
weight = jnp.exp(
-0.5 * jnp.square(jnp.log(xi) / (6.0 * trust_in_estimate))
    ) # the weight reduces the impact of stepsizes which are much larger or much smaller than the desired one.
x_average = decay_rate * x_average + weight * (
xi / jnp.power(params.step_size, 6.0)
)
time = decay_rate * time + weight
step_size = jnp.power(
x_average / time, -1.0 / 6.0
) # We use the Var[E] = O(eps^6) relation here.
step_size = (step_size < step_size_max) * step_size + (
step_size > step_size_max
    ) * step_size_max # cap the proposed stepsize if it is above the stepsize where we have seen divergences
params_new = params._replace(step_size=step_size)
adaptive_state = (time, x_average, step_size_max)
return state, params_new, adaptive_state, success
|
does one step with the dynamics and updates the prediction for the optimal stepsize
Designed for the unadjusted MCHMC
|
predictor
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/mclmc_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/mclmc_adaptation.py
|
Apache-2.0
|
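The step-size rule in predictor inverts the scaling Var[E] = O(eps^6); below is a worked sketch of a single update, with hypothetical values for the running averages and tuning constants.
import jax.numpy as jnp

dim = 100
desired_energy_var = 5e-4
trust_in_estimate = 1.5
decay_rate = 149.0 / 151.0   # (num_effective_samples - 1) / (num_effective_samples + 1)

step_size = 0.5
energy_change = 0.4          # hypothetical energy error of the last step
time, x_average = 1.0, 20.0  # hypothetical running EMA weight and EMA of xi / eps^6

# xi > 1 means the observed energy variance exceeds the target
xi = jnp.square(energy_change) / (dim * desired_energy_var) + 1e-8
weight = jnp.exp(-0.5 * jnp.square(jnp.log(xi) / (6.0 * trust_in_estimate)))

x_average = decay_rate * x_average + weight * (xi / jnp.power(step_size, 6.0))
time = decay_rate * time + weight

# invert Var[E] = O(eps^6) to aim the step size at the desired energy variance
new_step_size = jnp.power(x_average / time, -1.0 / 6.0)
print(new_step_size)  # ~0.46: slightly reduced, since xi > 1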
def step(iteration_state, weight_and_key):
"""does one step of the dynamics and updates the estimate of the posterior size and optimal stepsize"""
mask, rng_key = weight_and_key
state, params, adaptive_state, streaming_avg = iteration_state
state, params, adaptive_state, success = predictor(
state, params, adaptive_state, rng_key
)
x = ravel_pytree(state.position)[0]
# update the running average of x, x^2
streaming_avg = incremental_value_update(
expectation=jnp.array([x, jnp.square(x)]),
incremental_val=streaming_avg,
weight=mask * success * params.step_size,
)
return (state, params, adaptive_state, streaming_avg), None
|
does one step of the dynamics and updates the estimate of the posterior size and optimal stepsize
|
step
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/mclmc_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/mclmc_adaptation.py
|
Apache-2.0
|
def make_adaptation_L(kernel, frac, Lfactor):
"""determine L by the autocorrelations (around 10 effective samples are needed for this to be accurate)"""
def adaptation_L(state, params, num_steps, key):
num_steps_3 = round(num_steps * frac)
adaptation_L_keys = jax.random.split(key, num_steps_3)
def step(state, key):
next_state, _ = kernel(
rng_key=key,
state=state,
L=params.L,
step_size=params.step_size,
)
return next_state, next_state.position
state, samples = jax.lax.scan(
f=step,
init=state,
xs=adaptation_L_keys,
)
flat_samples = jax.vmap(lambda x: ravel_pytree(x)[0])(samples)
ess = effective_sample_size(flat_samples[None, ...])
return state, params._replace(
L=Lfactor * params.step_size * jnp.mean(num_steps_3 / ess)
)
return adaptation_L
|
determine L by the autocorrelations (around 10 effective samples are needed for this to be accurate)
|
make_adaptation_L
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/mclmc_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/mclmc_adaptation.py
|
Apache-2.0
|
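The rule above converts an ESS estimate into a trajectory length: num_steps / ess is the autocorrelation time in steps, and multiplying by step_size expresses it in trajectory-length units. A small arithmetic sketch:
import jax.numpy as jnp

Lfactor = 0.4
step_size = 0.3
num_steps_3 = 400
# hypothetical per-dimension effective sample sizes from the tuning run
ess = jnp.array([40.0, 80.0])

L = Lfactor * step_size * jnp.mean(num_steps_3 / ess)
print(L)  # 0.4 * 0.3 * mean([10, 5]) = 0.9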
def handle_nans(
previous_state, next_state, step_size, step_size_max, kinetic_change, key
):
"""if there are nans, let's reduce the stepsize, and not update the state. The
function returns the old state in this case."""
reduced_step_size = 0.8
p, unravel_fn = ravel_pytree(next_state.position)
q, unravel_fn = ravel_pytree(next_state.momentum)
nonans = jnp.logical_and(jnp.all(jnp.isfinite(p)), jnp.all(jnp.isfinite(q)))
state, step_size, kinetic_change = jax.tree_util.tree_map(
lambda new, old: jax.lax.select(nonans, jnp.nan_to_num(new), old),
(next_state, step_size_max, kinetic_change),
(previous_state, step_size * reduced_step_size, 0.0),
)
state = jax.lax.cond(
jnp.isnan(next_state.logdensity),
lambda: state._replace(
momentum=generate_unit_vector(key, previous_state.position)
),
lambda: state,
)
return nonans, state, step_size, kinetic_change
|
If there are NaNs, reduce the stepsize and do not update the state. The
function returns the old state in this case.
|
handle_nans
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/mclmc_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/mclmc_adaptation.py
|
Apache-2.0
|
def base():
"""Maximum-Eigenvalue Adaptation of damping and step size for the generalized
Hamiltonian Monte Carlo kernel :cite:p:`hoffman2022tuning`.
This algorithm performs a cross-chain adaptation scheme for the generalized
HMC algorithm that automatically selects values for the generalized HMC's
tunable parameters based on statistics collected from a population of many
chains. It uses heuristics determined by the maximum eigenvalue of the
    covariance and gradient matrices given by the grouped samples of all
    chains.
This is an implementation of Algorithm 3 of :cite:p:`hoffman2022tuning` using cross-chain
adaptation instead of parallel ensemble chain adaptation.
Returns
-------
init
Function that initializes the warmup.
update
Function that moves the warmup one step.
"""
def compute_parameters(
positions: ArrayLikeTree, logdensity_grad: ArrayLikeTree, current_iteration: int
):
"""Compute values for the parameters based on statistics collected from
multiple chains.
Parameters
----------
        positions:
            A PyTree that contains the current position of every chain.
        logdensity_grad:
            A PyTree that contains the gradients of the logdensity
            function evaluated at the current position of every chain.
current_iteration:
The current iteration index in the adaptation process.
Returns
-------
New values of the step size, and the alpha and delta parameters
of the generalized HMC algorithm.
"""
mean_position = jax.tree.map(lambda p: p.mean(axis=0), positions)
sd_position = jax.tree.map(lambda p: p.std(axis=0), positions)
normalized_positions = jax.tree.map(
lambda p, mu, sd: (p - mu) / sd,
positions,
mean_position,
sd_position,
)
batch_grad_scaled = jax.tree.map(
lambda grad, sd: grad * sd, logdensity_grad, sd_position
)
epsilon = jnp.minimum(
0.5 / jnp.sqrt(maximum_eigenvalue(batch_grad_scaled)), 1.0
)
gamma = jnp.maximum(
1.0 / jnp.sqrt(maximum_eigenvalue(normalized_positions)),
1.0 / ((current_iteration + 1) * epsilon),
)
alpha = 1.0 - jnp.exp(-2.0 * epsilon * gamma)
delta = alpha / 2
return epsilon, sd_position, alpha, delta
def init(
positions: ArrayLikeTree, logdensity_grad: ArrayLikeTree
) -> MEADSAdaptationState:
parameters = compute_parameters(positions, logdensity_grad, 0)
return MEADSAdaptationState(0, *parameters)
def update(
adaptation_state: MEADSAdaptationState,
positions: ArrayLikeTree,
logdensity_grad: ArrayLikeTree,
) -> MEADSAdaptationState:
"""Update the adaptation state and parameter values.
We find new optimal values for the parameters of the generalized HMC
kernel using heuristics based on the maximum eigenvalue of the
covariance and gradient matrices given by an ensemble of chains.
Parameters
----------
adaptation_state
The current state of the adaptation algorithm
positions
The current position of every chain.
logdensity_grad
The gradients of the logdensity function evaluated at the
current position of every chain.
Returns
-------
New adaptation state that contains the step size, alpha and delta
parameters of the generalized HMC kernel.
"""
current_iteration = adaptation_state.current_iteration
step_size, position_sigma, alpha, delta = compute_parameters(
positions, logdensity_grad, current_iteration
)
return MEADSAdaptationState(
current_iteration + 1, step_size, position_sigma, alpha, delta
)
return init, update
|
Maximum-Eigenvalue Adaptation of damping and step size for the generalized
Hamiltonian Monte Carlo kernel :cite:p:`hoffman2022tuning`.
This algorithm performs a cross-chain adaptation scheme for the generalized
HMC algorithm that automatically selects values for the generalized HMC's
tunable parameters based on statistics collected from a population of many
chains. It uses heuristics determined by the maximum eigenvalue of the
covariance and gradient matrices given by the grouped samples of all
chains.
This is an implementation of Algorithm 3 of :cite:p:`hoffman2022tuning` using cross-chain
adaptation instead of parallel ensemble chain adaptation.
Returns
-------
init
Function that initializes the warmup.
update
Function that moves the warmup one step.
|
base
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/meads_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/meads_adaptation.py
|
Apache-2.0
|
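A hedged numeric sketch of the MEADS heuristics: max_eigenvalue here is a simple eigvalsh-based stand-in for blackjax's maximum_eigenvalue estimator, and the Gaussian target is synthetic.
import jax
import jax.numpy as jnp

def max_eigenvalue(batch):
    # stand-in: largest eigenvalue of the (dim, dim) second-moment matrix X^T X / n
    n, _ = batch.shape
    return jnp.linalg.eigvalsh(batch.T @ batch / n)[-1]

key = jax.random.PRNGKey(0)
positions = jax.random.normal(key, (128, 4)) * jnp.array([1.0, 2.0, 0.5, 1.0])
grads = -positions / jnp.array([1.0, 4.0, 0.25, 1.0])  # diagonal-Gaussian gradients

sd = positions.std(axis=0)
normalized_positions = (positions - positions.mean(axis=0)) / sd
scaled_grads = grads * sd

current_iteration = 10
epsilon = jnp.minimum(0.5 / jnp.sqrt(max_eigenvalue(scaled_grads)), 1.0)
gamma = jnp.maximum(
    1.0 / jnp.sqrt(max_eigenvalue(normalized_positions)),
    1.0 / ((current_iteration + 1) * epsilon),
)
alpha = 1.0 - jnp.exp(-2.0 * epsilon * gamma)
delta = alpha / 2
print(epsilon, alpha, delta)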
def compute_parameters(
positions: ArrayLikeTree, logdensity_grad: ArrayLikeTree, current_iteration: int
):
"""Compute values for the parameters based on statistics collected from
multiple chains.
Parameters
----------
    positions:
        A PyTree that contains the current position of every chain.
    logdensity_grad:
        A PyTree that contains the gradients of the logdensity
        function evaluated at the current position of every chain.
current_iteration:
The current iteration index in the adaptation process.
Returns
-------
New values of the step size, and the alpha and delta parameters
of the generalized HMC algorithm.
"""
mean_position = jax.tree.map(lambda p: p.mean(axis=0), positions)
sd_position = jax.tree.map(lambda p: p.std(axis=0), positions)
normalized_positions = jax.tree.map(
lambda p, mu, sd: (p - mu) / sd,
positions,
mean_position,
sd_position,
)
batch_grad_scaled = jax.tree.map(
lambda grad, sd: grad * sd, logdensity_grad, sd_position
)
epsilon = jnp.minimum(
0.5 / jnp.sqrt(maximum_eigenvalue(batch_grad_scaled)), 1.0
)
gamma = jnp.maximum(
1.0 / jnp.sqrt(maximum_eigenvalue(normalized_positions)),
1.0 / ((current_iteration + 1) * epsilon),
)
alpha = 1.0 - jnp.exp(-2.0 * epsilon * gamma)
delta = alpha / 2
return epsilon, sd_position, alpha, delta
|
Compute values for the parameters based on statistics collected from
multiple chains.
Parameters
----------
positions:
    A PyTree that contains the current position of every chain.
logdensity_grad:
    A PyTree that contains the gradients of the logdensity
    function evaluated at the current position of every chain.
current_iteration:
The current iteration index in the adaptation process.
Returns
-------
New values of the step size, and the alpha and delta parameters
of the generalized HMC algorithm.
|
compute_parameters
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/meads_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/meads_adaptation.py
|
Apache-2.0
|
def update(
adaptation_state: MEADSAdaptationState,
positions: ArrayLikeTree,
logdensity_grad: ArrayLikeTree,
) -> MEADSAdaptationState:
"""Update the adaptation state and parameter values.
We find new optimal values for the parameters of the generalized HMC
kernel using heuristics based on the maximum eigenvalue of the
covariance and gradient matrices given by an ensemble of chains.
Parameters
----------
adaptation_state
The current state of the adaptation algorithm
positions
The current position of every chain.
logdensity_grad
The gradients of the logdensity function evaluated at the
current position of every chain.
Returns
-------
New adaptation state that contains the step size, alpha and delta
parameters of the generalized HMC kernel.
"""
current_iteration = adaptation_state.current_iteration
step_size, position_sigma, alpha, delta = compute_parameters(
positions, logdensity_grad, current_iteration
)
return MEADSAdaptationState(
current_iteration + 1, step_size, position_sigma, alpha, delta
)
|
Update the adaptation state and parameter values.
We find new optimal values for the parameters of the generalized HMC
kernel using heuristics based on the maximum eigenvalue of the
covariance and gradient matrices given by an ensemble of chains.
Parameters
----------
adaptation_state
The current state of the adaptation algorithm
positions
The current position of every chain.
logdensity_grad
The gradients of the logdensity function evaluated at the
current position of every chain.
Returns
-------
New adaptation state that contains the step size, alpha and delta
parameters of the generalized HMC kernel.
|
update
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/meads_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/meads_adaptation.py
|
Apache-2.0
|
def meads_adaptation(
logdensity_fn: Callable,
num_chains: int,
adaptation_info_fn: Callable = return_all_adapt_info,
) -> AdaptationAlgorithm:
"""Adapt the parameters of the Generalized HMC algorithm.
The Generalized HMC algorithm depends on three parameters, each controlling
one element of its behaviour: step size controls the integrator's dynamics,
alpha controls the persistency of the momentum variable, and delta controls
the deterministic transformation of the slice variable used to perform the
non-reversible Metropolis-Hastings accept/reject step.
The step size parameter is chosen to ensure the stability of the velocity
verlet integrator, the alpha parameter to make the influence of the current
state on future states of the momentum variable decay exponentially, and
the delta parameter to maximize the acceptance of proposals but with good
mixing properties for the slice variable. These characteristics are targeted
by controlling heuristics based on the maximum eigenvalues of the correlation
and gradient matrices of the cross-chain samples, under simplifying assumptions.
Good tuning is fundamental for the non-reversible Generalized HMC sampling
algorithm to explore the target space efficiently and output uncorrelated, or
as uncorrelated as possible, samples from the target space. Furthermore, the
single integrator step of the algorithm lends itself to fast sampling
on parallel computer architectures.
Parameters
----------
logdensity_fn
The log-density function of the distribution from which we wish to sample.
num_chains
Number of chains used for cross-chain warm-up training.
adaptation_info_fn
Function to select the adaptation info returned. See return_all_adapt_info
and get_filter_adapt_info_fn in blackjax.adaptation.base. By default all
information is saved - this can result in excessive memory usage if the
information is unused.
Returns
-------
A function that returns the last cross-chain state, a sampling kernel with the
tuned parameter values, and all the warm-up states for diagnostics.
"""
ghmc_kernel = mcmc.ghmc.build_kernel()
adapt_init, adapt_update = base()
batch_init = jax.vmap(lambda p, r: mcmc.ghmc.init(p, r, logdensity_fn))
def one_step(carry, rng_key):
states, adaptation_state = carry
keys = jax.random.split(rng_key, num_chains)
new_states, info = jax.vmap(
ghmc_kernel, in_axes=(0, 0, None, None, None, None, None)
)(
keys,
states,
logdensity_fn,
adaptation_state.step_size,
adaptation_state.position_sigma,
adaptation_state.alpha,
adaptation_state.delta,
)
new_adaptation_state = adapt_update(
adaptation_state, new_states.position, new_states.logdensity_grad
)
return (new_states, new_adaptation_state), adaptation_info_fn(
new_states, info, new_adaptation_state
)
def run(rng_key: PRNGKey, positions: ArrayLikeTree, num_steps: int = 1000):
key_init, key_adapt = jax.random.split(rng_key)
rng_keys = jax.random.split(key_init, num_chains)
init_states = batch_init(positions, rng_keys)
init_adaptation_state = adapt_init(positions, init_states.logdensity_grad)
keys = jax.random.split(key_adapt, num_steps)
(last_states, last_adaptation_state), info = jax.lax.scan(
one_step, (init_states, init_adaptation_state), keys
)
parameters = {
"step_size": last_adaptation_state.step_size,
"momentum_inverse_scale": last_adaptation_state.position_sigma,
"alpha": last_adaptation_state.alpha,
"delta": last_adaptation_state.delta,
}
return AdaptationResults(last_states, parameters), info
return AdaptationAlgorithm(run) # type: ignore[arg-type]
|
Adapt the parameters of the Generalized HMC algorithm.
The Generalized HMC algorithm depends on three parameters, each controlling
one element of its behaviour: step size controls the integrator's dynamics,
alpha controls the persistency of the momentum variable, and delta controls
the deterministic transformation of the slice variable used to perform the
non-reversible Metropolis-Hastings accept/reject step.
The step size parameter is chosen to ensure the stability of the velocity
verlet integrator, the alpha parameter to make the influence of the current
state on future states of the momentum variable decay exponentially, and
the delta parameter to maximize the acceptance of proposals but with good
mixing properties for the slice variable. These characteristics are targeted
by controlling heuristics based on the maximum eigenvalues of the correlation
and gradient matrices of the cross-chain samples, under simplifying assumptions.
Good tuning is fundamental for the non-reversible Generalized HMC sampling
algorithm to explore the target space efficiently and output uncorrelated, or
as uncorrelated as possible, samples from the target space. Furthermore, the
single integrator step of the algorithm lends itself to fast sampling
on parallel computer architectures.
Parameters
----------
logdensity_fn
The log-density function of the distribution from which we wish to sample.
num_chains
Number of chains used for cross-chain warm-up training.
adaptation_info_fn
Function to select the adaptation info returned. See return_all_adapt_info
and get_filter_adapt_info_fn in blackjax.adaptation.base. By default all
information is saved - this can result in excessive memory usage if the
information is unused.
Returns
-------
A function that returns the last cross-chain state, a sampling kernel with the
tuned parameter values, and all the warm-up states for diagnostics.
|
meads_adaptation
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/meads_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/meads_adaptation.py
|
Apache-2.0
|
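A usage sketch of the warmup above on a toy target, assuming blackjax's top-level meads_adaptation and ghmc wrappers accept these arguments (the parameter names follow the parameters dict built in run):

import jax
import jax.numpy as jnp
import blackjax

def logdensity_fn(x):
    return -0.5 * jnp.sum(x**2)  # standard normal toy target

num_chains = 128
key_init, key_warmup = jax.random.split(jax.random.PRNGKey(0))

# One initial position per chain: the leading axis indexes chains.
initial_positions = jax.random.normal(key_init, (num_chains, 2))

warmup = blackjax.meads_adaptation(logdensity_fn, num_chains)
(last_states, parameters), _ = warmup.run(key_warmup, initial_positions, num_steps=1000)

# The tuned values plug straight into the generalized HMC kernel.
ghmc = blackjax.ghmc(logdensity_fn, **parameters)
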
def maximum_eigenvalue(matrix: ArrayLikeTree) -> Array:
"""Estimate the largest eigenvalues of a matrix.
We calculate an unbiased estimate of the ratio between the sum of the
squared eigenvalues and the sum of the eigenvalues from the input
matrix. This ratio approximates the largest eigenvalue well except in
cases when there are a large number of small eigenvalues significantly
larger than 0 but significantly smaller than the largest eigenvalue.
This unbiased estimate is used instead of directly computing an unbiased
estimate of the largest eigenvalue because of the latter's large
variance.
Parameters
----------
matrix
A PyTree with equal batch shape as the first dimension of every leaf.
The PyTree for each batch is flattened into a one dimensional array and
these arrays are stacked vertically, giving a matrix with one row
for every batch.
"""
X = jax.vmap(lambda m: jax.flatten_util.ravel_pytree(m)[0])(matrix)
n, _ = X.shape
S = X @ X.T
diag_S = jnp.diag(S)
lamda = jnp.sum(diag_S) / n
lamda_sq = (jnp.sum(S**2) - jnp.sum(diag_S**2)) / (n * (n - 1))
return lamda_sq / lamda
|
Estimate the largest eigenvalue of a matrix.
We calculate an unbiased estimate of the ratio between the sum of the
squared eigenvalues and the sum of the eigenvalues from the input
matrix. This ratio approximates the largest eigenvalue well except in
cases when there are a large number of small eigenvalues significantly
larger than 0 but significantly smaller than the largest eigenvalue.
This unbiased estimate is used instead of directly computing an unbiased
estimate of the largest eigenvalue because of the latter's large
variance.
Parameters
----------
matrix
A PyTree with equal batch shape as the first dimension of every leaf.
The PyTree for each batch is flattened into a one dimensional array and
these arrays are stacked vertically, giving a matrix with one row
for every batch.
|
maximum_eigenvalue
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/meads_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/meads_adaptation.py
|
Apache-2.0
|
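A quick numerical sanity check of this estimator, under the assumption that the rows of X are i.i.d. Gaussian draws: tr(S)/n then estimates the sum of the covariance eigenvalues, the off-diagonal second moment estimates the sum of their squares, and the ratio lower-bounds the top eigenvalue, tightly so when one eigenvalue dominates.

import jax
import jax.numpy as jnp

# Diagonal covariance with one dominant eigenvalue: (25, 0.1, ..., 0.1).
eigvals = jnp.full(10, 0.1).at[0].set(25.0)
X = jax.random.normal(jax.random.PRNGKey(1), (2000, 10)) * jnp.sqrt(eigvals)

n = X.shape[0]
S = X @ X.T
diag_S = jnp.diag(S)
lamda = jnp.sum(diag_S) / n                                      # ~ sum(eigvals) = 25.9
lamda_sq = (jnp.sum(S**2) - jnp.sum(diag_S**2)) / (n * (n - 1))  # ~ sum(eigvals**2) = 625.09
print(lamda_sq / lamda)  # ~ 24.1, close to the true top eigenvalue 25
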
def base(
target_acceptance_rate: float = 0.80,
):
"""Warmup scheme for sampling procedures based on euclidean manifold HMC.
This adaptation runs in two steps:
1. The Pathfinder algorithm is ran and we subsequently compute an estimate
for the value of the inverse mass matrix, as well as a new initialization
point for the markov chain that is supposedly closer to the typical set.
2. We then start sampling with the MCMC algorithm and use the samples to
adapt the value of the step size using an optimization algorithm so that
the mcmc algorithm reaches a given target acceptance rate.
Parameters
----------
target_acceptance_rate:
The target acceptance rate for the step size adaptation.
Returns
-------
init
Function that initializes the warmup.
update
Function that moves the warmup one step.
final
Function that returns the step size and mass matrix given a warmup state.
"""
da_init, da_update, da_final = dual_averaging_adaptation(target_acceptance_rate)
def init(
alpha,
beta,
gamma,
initial_step_size: float,
) -> PathfinderAdaptationState:
"""Initialze the adaptation state and parameter values.
We use the Pathfinder algorithm to compute an estimate of the inverse
mass matrix that will stay constant throughout the rest of the
adaptation.
Parameters
----------
alpha, beta, gamma
Factored representation of the inverse Hessian computed by the
Pathfinder algorithm.
initial_step_size
The initial value for the step size.
"""
inverse_mass_matrix = lbfgs_inverse_hessian_formula_1(alpha, beta, gamma)
da_state = da_init(initial_step_size)
warmup_state = PathfinderAdaptationState(
da_state, initial_step_size, inverse_mass_matrix
)
return warmup_state
def update(
adaptation_state: PathfinderAdaptationState,
position: ArrayLikeTree,
acceptance_rate: float,
) -> PathfinderAdaptationState:
"""Update the adaptation state and parameter values.
Since the value of the inverse mass matrix is already known we only
update the state of the step size adaptation algorithm.
Parameters
----------
adaptation_state
Current adaptation state.
position
Current value of the model parameters.
acceptance_rate
Value of the acceptance rate for the last MCMC step.
Returns
-------
The updated states of the chain and the warmup.
"""
new_ss_state = da_update(adaptation_state.ss_state, acceptance_rate)
new_step_size = jnp.exp(new_ss_state.log_step_size)
return PathfinderAdaptationState(
new_ss_state, new_step_size, adaptation_state.inverse_mass_matrix
)
def final(warmup_state: PathfinderAdaptationState) -> tuple[float, Array]:
"""Return the final values for the step size and inverse mass matrix."""
step_size = jnp.exp(warmup_state.ss_state.log_step_size_avg)
inverse_mass_matrix = warmup_state.inverse_mass_matrix
return step_size, inverse_mass_matrix
return init, update, final
|
Warmup scheme for sampling procedures based on Euclidean manifold HMC.
This adaptation runs in two steps:
1. The Pathfinder algorithm is run and we subsequently compute an estimate
for the value of the inverse mass matrix, as well as a new initialization
point for the Markov chain that is supposedly closer to the typical set.
2. We then start sampling with the MCMC algorithm and use the samples to
adapt the value of the step size using an optimization algorithm so that
the MCMC algorithm reaches a given target acceptance rate.
Parameters
----------
target_acceptance_rate:
The target acceptance rate for the step size adaptation.
Returns
-------
init
Function that initializes the warmup.
update
Function that moves the warmup one step.
final
Function that returns the step size and mass matrix given a warmup state.
|
base
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/pathfinder_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/pathfinder_adaptation.py
|
Apache-2.0
|
def init(
alpha,
beta,
gamma,
initial_step_size: float,
) -> PathfinderAdaptationState:
"""Initialze the adaptation state and parameter values.
We use the Pathfinder algorithm to compute an estimate of the inverse
mass matrix that will stay constant throughout the rest of the
adaptation.
Parameters
----------
alpha, beta, gamma
Factored representation of the inverse Hessian computed by the
Pathfinder algorithm.
initial_step_size
The initial value for the step size.
"""
inverse_mass_matrix = lbfgs_inverse_hessian_formula_1(alpha, beta, gamma)
da_state = da_init(initial_step_size)
warmup_state = PathfinderAdaptationState(
da_state, initial_step_size, inverse_mass_matrix
)
return warmup_state
|
Initialize the adaptation state and parameter values.
We use the Pathfinder algorithm to compute an estimate of the inverse
mass matrix that will stay constant throughout the rest of the
adaptation.
Parameters
----------
alpha, beta, gamma
Factored representation of the inverse Hessian computed by the
Pathfinder algorithm.
initial_step_size
The initial value for the step size.
|
init
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/pathfinder_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/pathfinder_adaptation.py
|
Apache-2.0
|
def update(
adaptation_state: PathfinderAdaptationState,
position: ArrayLikeTree,
acceptance_rate: float,
) -> PathfinderAdaptationState:
"""Update the adaptation state and parameter values.
Since the value of the inverse mass matrix is already known we only
update the state of the step size adaptation algorithm.
Parameters
----------
adaptation_state
Current adaptation state.
position
Current value of the model parameters.
acceptance_rate
Value of the acceptance rate for the last MCMC step.
Returns
-------
The updated states of the chain and the warmup.
"""
new_ss_state = da_update(adaptation_state.ss_state, acceptance_rate)
new_step_size = jnp.exp(new_ss_state.log_step_size)
return PathfinderAdaptationState(
new_ss_state, new_step_size, adaptation_state.inverse_mass_matrix
)
|
Update the adaptation state and parameter values.
Since the value of the inverse mass matrix is already known we only
update the state of the step size adaptation algorithm.
Parameters
----------
adaptation_state
Current adaptation state.
position
Current value of the model parameters.
acceptance_rate
Value of the acceptance rate for the last MCMC step.
Returns
-------
The updated states of the chain and the warmup.
|
update
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/pathfinder_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/pathfinder_adaptation.py
|
Apache-2.0
|
def final(warmup_state: PathfinderAdaptationState) -> tuple[float, Array]:
"""Return the final values for the step size and inverse mass matrix."""
step_size = jnp.exp(warmup_state.ss_state.log_step_size_avg)
inverse_mass_matrix = warmup_state.inverse_mass_matrix
return step_size, inverse_mass_matrix
|
Return the final values for the step size and inverse mass matrix.
|
final
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/pathfinder_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/pathfinder_adaptation.py
|
Apache-2.0
|
def pathfinder_adaptation(
algorithm,
logdensity_fn: Callable,
initial_step_size: float = 1.0,
target_acceptance_rate: float = 0.80,
adaptation_info_fn: Callable = return_all_adapt_info,
**extra_parameters,
) -> AdaptationAlgorithm:
"""Adapt the value of the inverse mass matrix and step size parameters of
algorithms in the HMC fmaily.
Parameters
----------
algorithm
The algorithm whose parameters are being tuned.
logdensity_fn
The log-density function of the distribution from which we wish to sample.
initial_step_size
The initial step size used in the algorithm.
target_acceptance_rate
The acceptance rate that we target during step size adaptation.
adaptation_info_fn
Function to select the adaptation info returned. See return_all_adapt_info
and get_filter_adapt_info_fn in blackjax.adaptation.base. By default all
information is saved - this can result in excessive memory usage if the
information is unused.
**extra_parameters
The extra parameters to pass to the algorithm, e.g. the number of
integration steps for HMC.
Returns
-------
A function that returns the last chain state and a sampling kernel with the
tuned parameter values from an initial state.
"""
mcmc_kernel = algorithm.build_kernel()
adapt_init, adapt_update, adapt_final = base(
target_acceptance_rate,
)
def one_step(carry, rng_key):
state, adaptation_state = carry
new_state, info = mcmc_kernel(
rng_key,
state,
logdensity_fn,
adaptation_state.step_size,
adaptation_state.inverse_mass_matrix,
**extra_parameters,
)
new_adaptation_state = adapt_update(
adaptation_state, new_state.position, info.acceptance_rate
)
return (
(new_state, new_adaptation_state),
adaptation_info_fn(new_state, info, new_adaptation_state),
)
def run(rng_key: PRNGKey, position: ArrayLikeTree, num_steps: int = 400):
init_key, sample_key, rng_key = jax.random.split(rng_key, 3)
pathfinder_state, _ = vi.pathfinder.approximate(
init_key, logdensity_fn, position
)
init_warmup_state = adapt_init(
pathfinder_state.alpha,
pathfinder_state.beta,
pathfinder_state.gamma,
initial_step_size,
)
init_position, _ = vi.pathfinder.sample(sample_key, pathfinder_state)
init_state = algorithm.init(init_position, logdensity_fn)
keys = jax.random.split(rng_key, num_steps)
last_state, info = jax.lax.scan(
one_step,
(init_state, init_warmup_state),
keys,
)
last_chain_state, last_warmup_state = last_state
step_size, inverse_mass_matrix = adapt_final(last_warmup_state)
parameters = {
"step_size": step_size,
"inverse_mass_matrix": inverse_mass_matrix,
**extra_parameters,
}
return AdaptationResults(last_chain_state, parameters), info
return AdaptationAlgorithm(run)
|
Adapt the value of the inverse mass matrix and step size parameters of
algorithms in the HMC family.
Parameters
----------
algorithm
The algorithm whose parameters are being tuned.
logdensity_fn
The log-density function of the distribution from which we wish to sample.
initial_step_size
The initial step size used in the algorithm.
target_acceptance_rate
The acceptance rate that we target during step size adaptation.
adaptation_info_fn
Function to select the adaptation info returned. See return_all_adapt_info
and get_filter_adapt_info_fn in blackjax.adaptation.base. By default all
information is saved - this can result in excessive memory usage if the
information is unused.
**extra_parameters
The extra parameters to pass to the algorithm, e.g. the number of
integration steps for HMC.
Returns
-------
A function that returns the last chain state and a sampling kernel with the
tuned parameter values from an initial state.
|
pathfinder_adaptation
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/pathfinder_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/pathfinder_adaptation.py
|
Apache-2.0
|
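A usage sketch, assuming blackjax's top-level pathfinder_adaptation and nuts wrappers; the returned parameters dict carries the tuned step size and inverse mass matrix expected by the kernel:

import jax
import jax.numpy as jnp
import blackjax

def logdensity_fn(x):
    return -0.5 * jnp.sum((x - 1.0) ** 2)  # toy Gaussian target

key_warmup, key_sample = jax.random.split(jax.random.PRNGKey(0))

warmup = blackjax.pathfinder_adaptation(blackjax.nuts, logdensity_fn)
(state, parameters), _ = warmup.run(key_warmup, jnp.zeros(4), num_steps=400)

# Sample with the tuned kernel, starting from the last warmup state.
nuts = blackjax.nuts(logdensity_fn, **parameters)
state, info = nuts.step(key_sample, state)
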
def dual_averaging_adaptation(
target: float, t0: int = 10, gamma: float = 0.05, kappa: float = 0.75
) -> tuple[Callable, Callable, Callable]:
"""Tune the step size in order to achieve a desired target acceptance rate.
Let us denote by :math:`\\epsilon` the current step size, by :math:`\\alpha_t` the
Metropolis acceptance rate at time :math:`t`, and by :math:`\\delta` the desired
acceptance rate. We define:
.. math::
H_t = \\delta - \\alpha_t
the error at time t. We would like to find a procedure that adapts the
value of :math:`\\epsilon` such that :math:`h(x) =\\mathbb{E}\\left[H_t|\\epsilon\\right] = 0`
Following :cite:p:`nesterov2009primal`, the authors of :cite:p:`hoffman2014no` proposed the following update scheme. If
we note :math:`x = \\log \\epsilon` we follow:
.. math::
x_{t+1} \\longleftarrow \\mu - \\frac{\\sqrt{t}}{\\gamma} \\frac{1}{t+t_0} \\sum_{i=1}^t H_i
\\overline{x}_{t+1} \\longleftarrow x_{t+1}\\, t^{-\\kappa} + \\left(1 - t^{-\\kappa}\\right)\\overline{x}_t
:math:`\\overline{x}_{t}` is guaranteed to converge to a value such that
:math:`h(\\overline{x}_t)` converges to 0, i.e. the Metropolis acceptance
rate converges to the desired rate.
See reference :cite:p:`hoffman2014no` (section 3.2.1) for a detailed discussion.
Parameters
----------
t0: float >= 0
Free parameter that stabilizes the initial iterations of the algorithm.
Large values may slow down convergence. Introduced in :cite:p:`hoffman2014no` with a default
value of 10.
gamma:
Controls the speed of convergence of the scheme. The authors of :cite:p:`hoffman2014no` recommend
a value of 0.05.
kappa: float in [0.5, 1]
Controls the weights of past steps in the current update. The scheme will
quickly forget earlier steps for small values of `kappa`. Introduced
in :cite:p:`hoffman2014no`, with a recommended value of 0.75.
target:
Target acceptance rate.
Returns
-------
init
A function that initializes the state of the dual averaging scheme.
update
A function that updates the state of the dual averaging scheme.
final
A function that returns the current step size given the state of the
dual averaging scheme.
"""
da_init, da_update, da_final = dual_averaging(t0, gamma, kappa)
def init(initial_step_size: float) -> DualAveragingAdaptationState:
"""Initialize the state of the dual averaging scheme.
The parameter :math:`\\mu` is set to :math:`\\log(10 \\epsilon_1)`
where :math:`\\epsilon_1` is the initial value of the step size.
"""
return DualAveragingAdaptationState(*da_init(initial_step_size))
def update(
da_state: DualAveragingAdaptationState, acceptance_rate: float
) -> DualAveragingAdaptationState:
"""Update the state of the Dual Averaging adaptive algorithm.
Parameters
----------
da_state:
The current state of the dual averaging algorithm.
acceptance_rate: float in [0, 1]
The current metropolis acceptance rate.
Returns
-------
The updated state of the dual averaging algorithm.
"""
gradient = target - acceptance_rate
return DualAveragingAdaptationState(*da_update(da_state, gradient))
def final(da_state: DualAveragingAdaptationState) -> float:
return jnp.exp(da_state.log_step_size_avg)
return init, update, final
|
Tune the step size in order to achieve a desired target acceptance rate.
Let us denote by :math:`\epsilon` the current step size, by :math:`\alpha_t` the
Metropolis acceptance rate at time :math:`t`, and by :math:`\delta` the desired
acceptance rate. We define:
.. math::
H_t = \delta - \alpha_t
the error at time t. We would like to find a procedure that adapts the
value of :math:`\epsilon` such that :math:`h(x) =\mathbb{E}\left[H_t|\epsilon\right] = 0`
Following :cite:p:`nesterov2009primal`, the authors of :cite:p:`hoffman2014no` proposed the following update scheme. If
we note :math:`x = \log \epsilon` we follow:
.. math::
x_{t+1} \longleftarrow \mu - \frac{\sqrt{t}}{\gamma} \frac{1}{t+t_0} \sum_{i=1}^t H_i
\overline{x}_{t+1} \longleftarrow x_{t+1}\, t^{-\kappa} + \left(1 - t^{-\kappa}\right)\overline{x}_t
:math:`\overline{x}_{t}` is guaranteed to converge to a value such that
:math:`h(\overline{x}_t)` converges to 0, i.e. the Metropolis acceptance
rate converges to the desired rate.
See reference :cite:p:`hoffman2014no` (section 3.2.1) for a detailed discussion.
Parameters
----------
t0: float >= 0
Free parameter that stabilizes the initial iterations of the algorithm.
Large values may slow down convergence. Introduced in :cite:p:`hoffman2014no` with a default
value of 10.
gamma:
Controls the speed of convergence of the scheme. The authors of :cite:p:`hoffman2014no` recommend
a value of 0.05.
kappa: float in [0.5, 1]
Controls the weights of past steps in the current update. The scheme will
quickly forget earlier steps for small values of `kappa`. Introduced
in :cite:p:`hoffman2014no`, with a recommended value of 0.75.
target:
Target acceptance rate.
Returns
-------
init
A function that initializes the state of the dual averaging scheme.
update
A function that updates the state of the dual averaging scheme.
final
A function that returns the current step size given the state of the
dual averaging scheme.
|
dual_averaging_adaptation
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/step_size.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/step_size.py
|
Apache-2.0
|
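Since dual_averaging_adaptation lives at blackjax/adaptation/step_size.py, it can be driven directly; a small sketch feeding it synthetic acceptance rates (in a real warmup these come from the MCMC kernel):

from blackjax.adaptation.step_size import dual_averaging_adaptation

init, update, final = dual_averaging_adaptation(target=0.8)
da_state = init(1.0)  # initial step size

# Acceptance above the target pushes the step size up, below pushes it down.
for acceptance_rate in [0.95, 0.9, 0.85, 0.6, 0.7, 0.8]:
    da_state = update(da_state, acceptance_rate)

step_size = final(da_state)  # exp of the averaged log step size
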
def update(
da_state: DualAveragingAdaptationState, acceptance_rate: float
) -> DualAveragingAdaptationState:
"""Update the state of the Dual Averaging adaptive algorithm.
Parameters
----------
da_state:
The current state of the dual averaging algorithm.
acceptance_rate: float in [0, 1]
The current metropolis acceptance rate.
Returns
-------
The updated state of the dual averaging algorithm.
"""
gradient = target - acceptance_rate
return DualAveragingAdaptationState(*da_update(da_state, gradient))
|
Update the state of the Dual Averaging adaptive algorithm.
Parameters
----------
da_state:
The current state of the dual averaging algorithm.
acceptance_rate: float in [0, 1]
The current metropolis acceptance rate.
Returns
-------
The updated state of the dual averaging algorithm.
|
update
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/step_size.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/step_size.py
|
Apache-2.0
|
def find_reasonable_step_size(
rng_key: PRNGKey,
kernel_generator: Callable[[float], Callable],
reference_state: HMCState,
initial_step_size: float,
target_accept: float = 0.65,
) -> float:
"""Find a reasonable initial step size during warmup.
While the dual averaging scheme is guaranteed to converge to a reasonable
value for the step size starting from any value, choosing a good first
value can speed up the convergence. This heuristic doubles or halves the
step size until the acceptance probability of the HMC proposal crosses the
target value :cite:p:`hoffman2014no`.
Parameters
----------
rng_key
Key used by JAX's random number generator.
kernel_generator
A function that takes a step size as an input and returns the corresponding
sampling kernel.
reference_state
The location (HMC state) where this first step size must be found. This function
never advances the chain.
initial_step_size
The first step size used to start the search.
initial_step_size
The first step size used to start the search.
target_accept
Once the Metropolis acceptance probability crosses this value we
consider that we have found a "reasonable" first step size.
Returns
-------
float
A reasonable first value for the step size.
"""
fp_limit = jnp.finfo(jax.lax.dtype(initial_step_size))
def do_continue(rss_state: ReasonableStepSizeState) -> bool:
"""Decides whether the search should continue.
The search stops when it crosses the `target_accept` threshold, i.e.
when the current direction is opposite to the previous direction.
Note
----
Per JAX's documentation :cite:p:`jax_finfo` the `jnp.finfo` object is cached so we do not
incur any performance penalty when calling it repeatedly inside this
function.
"""
_, direction, previous_direction, step_size = rss_state
not_too_large = (step_size < fp_limit.max) | (direction <= 0)
not_too_small = (step_size > fp_limit.tiny) | (direction >= 0)
is_step_size_not_extreme = not_too_large & not_too_small
has_acceptance_rate_not_crossed_threshold = (previous_direction == 0) | (
direction == previous_direction
)
return is_step_size_not_extreme & has_acceptance_rate_not_crossed_threshold
def update(rss_state: ReasonableStepSizeState) -> ReasonableStepSizeState:
"""Perform one step of the step size search."""
i, direction, _, step_size = rss_state
subkey = jax.random.fold_in(rng_key, i)
step_size = (2.0**direction) * step_size
kernel = kernel_generator(step_size)
_, info = kernel(subkey, reference_state)
new_direction = jnp.where(target_accept < info.acceptance_rate, 1, -1)
return ReasonableStepSizeState(i + 1, new_direction, direction, step_size)
rss_state = ReasonableStepSizeState(0, 0, 0, initial_step_size)
rss_state = jax.lax.while_loop(do_continue, update, rss_state)
return rss_state.step_size
|
Find a reasonable initial step size during warmup.
While the dual averaging scheme is guaranteed to converge to a reasonable
value for the step size starting from any value, choosing a good first
value can speed up the convergence. This heuristic doubles or halves the
step size until the acceptance probability of the HMC proposal crosses the
target value :cite:p:`hoffman2014no`.
Parameters
----------
rng_key
Key used by JAX's random number generator.
kernel_generator
A function that takes a step size as an input and returns the corresponding
sampling kernel.
reference_state
The location (HMC state) where this first step size must be found. This function
never advances the chain.
initial_step_size
The first step size used to start the search.
initial_step_size
The first step size used to start the search.
target_accept
Once the Metropolis acceptance probability crosses this value we
consider that we have found a "reasonable" first step size.
Returns
-------
float
A reasonable first value for the step size.
|
find_reasonable_step_size
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/step_size.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/step_size.py
|
Apache-2.0
|
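The search logic reduces to a short loop; a self-contained sketch against a mock acceptance curve (the floating-point guards and PRNG handling of the real implementation are omitted):

import math

def find_step_size(acceptance_fn, step_size, target=0.65):
    # Double (direction=1) or halve (direction=-1) until the acceptance
    # probability crosses the target, then return the crossing step size.
    direction = 1 if acceptance_fn(step_size) > target else -1
    while True:
        step_size *= 2.0 ** direction
        new_direction = 1 if acceptance_fn(step_size) > target else -1
        if new_direction != direction:
            return step_size

# Acceptance typically decays as the step size grows.
mock_acceptance = lambda eps: math.exp(-eps / 3.0)
print(find_step_size(mock_acceptance, step_size=0.01))  # ~2.56
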
def do_continue(rss_state: ReasonableStepSizeState) -> bool:
"""Decides whether the search should continue.
The search stops when it crosses the `target_accept` threshold, i.e.
when the current direction is opposite to the previous direction.
Note
----
Per JAX's documentation :cite:p:`jax_finfo` the `jnp.finfo` object is cached so we do not
incur any performance penalty when calling it repeatedly inside this
function.
"""
_, direction, previous_direction, step_size = rss_state
not_too_large = (step_size < fp_limit.max) | (direction <= 0)
not_too_small = (step_size > fp_limit.tiny) | (direction >= 0)
is_step_size_not_extreme = not_too_large & not_too_small
has_acceptance_rate_not_crossed_threshold = (previous_direction == 0) | (
direction == previous_direction
)
return is_step_size_not_extreme & has_acceptance_rate_not_crossed_threshold
|
Decides whether the search should continue.
The search stops when it crosses the `target_accept` threshold, i.e.
when the current direction is opposite to the previous direction.
Note
----
Per JAX's documentation :cite:p:`jax_finfo` the `jnp.finfo` object is cached so we do not
incur any performance penalty when calling it repeatedly inside this
function.
|
do_continue
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/step_size.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/step_size.py
|
Apache-2.0
|
def update(rss_state: ReasonableStepSizeState) -> ReasonableStepSizeState:
"""Perform one step of the step size search."""
i, direction, _, step_size = rss_state
subkey = jax.random.fold_in(rng_key, i)
step_size = (2.0**direction) * step_size
kernel = kernel_generator(step_size)
_, info = kernel(subkey, reference_state)
new_direction = jnp.where(target_accept < info.acceptance_rate, 1, -1)
return ReasonableStepSizeState(i + 1, new_direction, direction, step_size)
|
Perform one step of the step size search.
|
update
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/step_size.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/step_size.py
|
Apache-2.0
|
def base(
is_mass_matrix_diagonal: bool,
target_acceptance_rate: float = 0.80,
) -> tuple[Callable, Callable, Callable]:
"""Warmup scheme for sampling procedures based on euclidean manifold HMC.
The schedule and algorithms used match Stan's :cite:p:`stan_hmc_param` as closely as possible.
Unlike several other libraries, we separate the warmup and sampling phases
explicitly. This ensure a better modularity; a change in the warmup does
not affect the sampling. It also allows users to run their own warmup
should they want to.
We also decouple generating a new sample with the mcmc algorithm and
updating the values of the parameters.
Stan's warmup consists in the three following phases:
1. A fast adaptation window where only the step size is adapted using
Nesterov's dual averaging scheme to match a target acceptance rate.
2. A succession of slow adaptation windows (where the size of a window is
double that of the previous window) where both the mass matrix and the step
size are adapted. The mass matrix is recomputed at the end of each window;
the step size is re-initialized to a "reasonable" value.
3. A last fast adaptation window where only the step size is adapted.
Schematically:
+---------+---+------+------------+------------------------+------+
| fast | s | slow | slow | slow | fast |
+---------+---+------+------------+------------------------+------+
|1 |2 |3 |3 |3 |3 |
+---------+---+------+------------+------------------------+------+
Step (1) consists in finding a "reasonable" first step size that is used to
initialize the dual averaging scheme. In (2) we initialize the mass matrix
adaptation. In (3) we compute the mass matrix to use in the kernel and
re-initialize the mass matrix adaptation. The step size is still adapted
in slow adaptation windows, and is not re-initialized between windows.
Parameters
----------
is_mass_matrix_diagonal
Create and adapt a diagonal mass matrix if True, a dense matrix
otherwise.
target_acceptance_rate:
The target acceptance rate for the step size adaptation.
Returns
-------
init
Function that initializes the warmup.
update
Function that moves the warmup one step.
final
Function that returns the step size and mass matrix given a warmup
state.
"""
mm_init, mm_update, mm_final = mass_matrix_adaptation(is_mass_matrix_diagonal)
da_init, da_update, da_final = dual_averaging_adaptation(target_acceptance_rate)
def init(
position: ArrayLikeTree, initial_step_size: float
) -> WindowAdaptationState:
"""Initialze the adaptation state and parameter values.
Unlike the original Stan window adaptation we do not use the
`find_reasonable_step_size` algorithm which we found to be unnecessary.
We may reconsider this choice in the future.
"""
num_dimensions = pytree_size(position)
imm_state = mm_init(num_dimensions)
ss_state = da_init(initial_step_size)
return WindowAdaptationState(
ss_state,
imm_state,
initial_step_size,
imm_state.inverse_mass_matrix,
)
def fast_update(
position: ArrayLikeTree,
acceptance_rate: float,
warmup_state: WindowAdaptationState,
) -> WindowAdaptationState:
"""Update the adaptation state when in a "fast" window.
Only the step size is adapted in fast windows. "Fast" refers to the fact
that the optimization algorithms are relatively fast to converge
compared to the covariance estimation with Welford's algorithm
"""
del position
new_ss_state = da_update(warmup_state.ss_state, acceptance_rate)
new_step_size = jnp.exp(new_ss_state.log_step_size)
return WindowAdaptationState(
new_ss_state,
warmup_state.imm_state,
new_step_size,
warmup_state.inverse_mass_matrix,
)
def slow_update(
position: ArrayLikeTree,
acceptance_rate: float,
warmup_state: WindowAdaptationState,
) -> WindowAdaptationState:
"""Update the adaptation state when in a "slow" window.
Both the mass matrix adaptation *state* and the step size state are
adapted in slow windows. The value of the step size is updated as well,
but the new value of the inverse mass matrix is only computed at the end
of the slow window. "Slow" refers to the fact that we need many samples
to get a reliable estimation of the covariance matrix used to update the
value of the mass matrix.
"""
new_imm_state = mm_update(warmup_state.imm_state, position)
new_ss_state = da_update(warmup_state.ss_state, acceptance_rate)
new_step_size = jnp.exp(new_ss_state.log_step_size)
return WindowAdaptationState(
new_ss_state, new_imm_state, new_step_size, warmup_state.inverse_mass_matrix
)
def slow_final(warmup_state: WindowAdaptationState) -> WindowAdaptationState:
"""Update the parameters at the end of a slow adaptation window.
We compute the value of the mass matrix and reset the mass matrix
adaptation's internal state since middle windows are "memoryless".
"""
new_imm_state = mm_final(warmup_state.imm_state)
new_ss_state = da_init(da_final(warmup_state.ss_state))
new_step_size = jnp.exp(new_ss_state.log_step_size)
return WindowAdaptationState(
new_ss_state,
new_imm_state,
new_step_size,
new_imm_state.inverse_mass_matrix,
)
def update(
adaptation_state: WindowAdaptationState,
adaptation_stage: tuple,
position: ArrayLikeTree,
acceptance_rate: float,
) -> WindowAdaptationState:
"""Update the adaptation state and parameter values.
Parameters
----------
adaptation_state
Current adaptation state.
adaptation_stage
The current stage of the warmup: whether this is a slow window,
a fast window and if we are at the last step of a slow window.
position
Current value of the model parameters.
acceptance_rate
Value of the acceptance rate for the last MCMC step.
Returns
-------
The updated adaptation state.
"""
stage, is_middle_window_end = adaptation_stage
warmup_state = jax.lax.switch(
stage,
(fast_update, slow_update),
position,
acceptance_rate,
adaptation_state,
)
warmup_state = jax.lax.cond(
is_middle_window_end,
slow_final,
lambda x: x,
warmup_state,
)
return warmup_state
def final(warmup_state: WindowAdaptationState) -> tuple[float, Array]:
"""Return the final values for the step size and mass matrix."""
step_size = jnp.exp(warmup_state.ss_state.log_step_size_avg)
inverse_mass_matrix = warmup_state.imm_state.inverse_mass_matrix
return step_size, inverse_mass_matrix
return init, update, final
|
Warmup scheme for sampling procedures based on Euclidean manifold HMC.
The schedule and algorithms used match Stan's :cite:p:`stan_hmc_param` as closely as possible.
Unlike several other libraries, we separate the warmup and sampling phases
explicitly. This ensures better modularity; a change in the warmup does
not affect the sampling. It also allows users to run their own warmup
should they want to.
We also decouple generating a new sample with the MCMC algorithm and
updating the values of the parameters.
Stan's warmup consists of the following three phases:
1. A fast adaptation window where only the step size is adapted using
Nesterov's dual averaging scheme to match a target acceptance rate.
2. A succession of slow adaptation windows (where the size of a window is
double that of the previous window) where both the mass matrix and the step
size are adapted. The mass matrix is recomputed at the end of each window;
the step size is re-initialized to a "reasonable" value.
3. A last fast adaptation window where only the step size is adapted.
Schematically:
+---------+---+------+------------+------------------------+------+
| fast | s | slow | slow | slow | fast |
+---------+---+------+------------+------------------------+------+
|1 |2 |3 |3 |3 |3 |
+---------+---+------+------------+------------------------+------+
Step (1) consists in finding a "reasonable" first step size that is used to
initialize the dual averaging scheme. In (2) we initialize the mass matrix
adaptation. In (3) we compute the mass matrix to use in the kernel and
re-initialize the mass matrix adaptation. The step size is still adapted
in slow adaptation windows, and is not re-initialized between windows.
Parameters
----------
is_mass_matrix_diagonal
Create and adapt a diagonal mass matrix if True, a dense matrix
otherwise.
target_acceptance_rate:
The target acceptance rate for the step size adaptation.
Returns
-------
init
Function that initializes the warmup.
update
Function that moves the warmup one step.
final
Function that returns the step size and mass matrix given a warmup
state.
|
base
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/window_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/window_adaptation.py
|
Apache-2.0
|
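A usage sketch of the Stan-style warmup with blackjax's top-level window_adaptation wrapper, assuming a NUTS kernel and a toy target:

import jax
import jax.numpy as jnp
import blackjax

def logdensity_fn(x):
    return -0.5 * jnp.sum(x**2)  # standard normal toy target

key_warmup, key_sample = jax.random.split(jax.random.PRNGKey(0))

warmup = blackjax.window_adaptation(blackjax.nuts, logdensity_fn, target_acceptance_rate=0.8)
(state, parameters), _ = warmup.run(key_warmup, jnp.zeros(3), num_steps=1000)

# Sampling proceeds with the tuned step size and inverse mass matrix.
nuts = blackjax.nuts(logdensity_fn, **parameters)
state, info = nuts.step(key_sample, state)
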
def init(
position: ArrayLikeTree, initial_step_size: float
) -> WindowAdaptationState:
"""Initialze the adaptation state and parameter values.
Unlike the original Stan window adaptation we do not use the
`find_reasonable_step_size` algorithm which we found to be unnecessary.
We may reconsider this choice in the future.
"""
num_dimensions = pytree_size(position)
imm_state = mm_init(num_dimensions)
ss_state = da_init(initial_step_size)
return WindowAdaptationState(
ss_state,
imm_state,
initial_step_size,
imm_state.inverse_mass_matrix,
)
|
Initialize the adaptation state and parameter values.
Unlike the original Stan window adaptation we do not use the
`find_reasonable_step_size` algorithm which we found to be unnecessary.
We may reconsider this choice in the future.
|
init
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/window_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/window_adaptation.py
|
Apache-2.0
|
def fast_update(
position: ArrayLikeTree,
acceptance_rate: float,
warmup_state: WindowAdaptationState,
) -> WindowAdaptationState:
"""Update the adaptation state when in a "fast" window.
Only the step size is adapted in fast windows. "Fast" refers to the fact
that the optimization algorithms are relatively fast to converge
compared to the covariance estimation with Welford's algorithm.
"""
del position
new_ss_state = da_update(warmup_state.ss_state, acceptance_rate)
new_step_size = jnp.exp(new_ss_state.log_step_size)
return WindowAdaptationState(
new_ss_state,
warmup_state.imm_state,
new_step_size,
warmup_state.inverse_mass_matrix,
)
|
Update the adaptation state when in a "fast" window.
Only the step size is adapted in fast windows. "Fast" refers to the fact
that the optimization algorithms are relatively fast to converge
compared to the covariance estimation with Welford's algorithm.
|
fast_update
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/window_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/window_adaptation.py
|
Apache-2.0
|
def slow_update(
position: ArrayLikeTree,
acceptance_rate: float,
warmup_state: WindowAdaptationState,
) -> WindowAdaptationState:
"""Update the adaptation state when in a "slow" window.
Both the mass matrix adaptation *state* and the step size state are
adapted in slow windows. The value of the step size is updated as well,
but the new value of the inverse mass matrix is only computed at the end
of the slow window. "Slow" refers to the fact that we need many samples
to get a reliable estimation of the covariance matrix used to update the
value of the mass matrix.
"""
new_imm_state = mm_update(warmup_state.imm_state, position)
new_ss_state = da_update(warmup_state.ss_state, acceptance_rate)
new_step_size = jnp.exp(new_ss_state.log_step_size)
return WindowAdaptationState(
new_ss_state, new_imm_state, new_step_size, warmup_state.inverse_mass_matrix
)
|
Update the adaptation state when in a "slow" window.
Both the mass matrix adaptation *state* and the step size state are
adapted in slow windows. The value of the step size is updated as well,
but the new value of the inverse mass matrix is only computed at the end
of the slow window. "Slow" refers to the fact that we need many samples
to get a reliable estimation of the covariance matrix used to update the
value of the mass matrix.
|
slow_update
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/window_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/window_adaptation.py
|
Apache-2.0
|
def slow_final(warmup_state: WindowAdaptationState) -> WindowAdaptationState:
"""Update the parameters at the end of a slow adaptation window.
We compute the value of the mass matrix and reset the mass matrix
adaptation's internal state since middle windows are "memoryless".
"""
new_imm_state = mm_final(warmup_state.imm_state)
new_ss_state = da_init(da_final(warmup_state.ss_state))
new_step_size = jnp.exp(new_ss_state.log_step_size)
return WindowAdaptationState(
new_ss_state,
new_imm_state,
new_step_size,
new_imm_state.inverse_mass_matrix,
)
|
Update the parameters at the end of a slow adaptation window.
We compute the value of the mass matrix and reset the mass matrix
adaptation's internal state since middle windows are "memoryless".
|
slow_final
|
python
|
blackjax-devs/blackjax
|
blackjax/adaptation/window_adaptation.py
|
https://github.com/blackjax-devs/blackjax/blob/master/blackjax/adaptation/window_adaptation.py
|
Apache-2.0
|