sentence1 (stringlengths 52 to 3.87M) | sentence2 (stringlengths 1 to 47.2k) | label (stringclasses: 1 value) |
---|---|---|
def run(self, host="localhost", port=8000, shutdown_timeout=60.0, **kwargs):
"""
This function starts the service's network interfaces.
Args:
port (int): The port for the http server.
"""
print("Running service on http://localhost:%i. " % port + \
"Press Ctrl+C to terminate.")
# apply the configuration to the service config
self.config.port = port
self.config.host = host
self.config.shutdown_timeout = shutdown_timeout
# start the loop
try:
# if an event broker has been created for this service
if self.event_broker:
# start the broker
self.event_broker.start()
# announce the service
self.loop.run_until_complete(self.announce())
# the handler for the http server (kept on the service so cleanup() can finish its connections)
http_handler = self._http_handler = self.app.make_handler()
# create an asyncio server
self._http_server = self.loop.create_server(http_handler, host, port)
# grab the handler for the server callback
self._server_handler = self.loop.run_until_complete(self._http_server)
# start the event loop
self.loop.run_forever()
# if the user interrupted the server
except KeyboardInterrupt:
# keep going
pass
# when we're done
finally:
try:
# clean up the service
self.cleanup()
# if we end up closing before any variables get assigned
except UnboundLocalError:
# just ignore it (there was nothing to close)
pass
# close the event loop
self.loop.close() | This function starts the service's network interfaces.
Args:
port (int): The port for the http server. | entailment |
def cleanup(self):
"""
This function is called when the service has finished running
regardless of whether or not it finished intentionally.
"""
# if an event broker has been created for this service
if self.event_broker:
# stop the event broker
self.event_broker.stop()
# attempt
try:
# close the http server
self._server_handler.close()
self.loop.run_until_complete(self._server_handler.wait_closed())
self.loop.run_until_complete(self._http_handler.finish_connections(self.config.get('shutdown_timeout', 60.0)))
# if there was no handler
except AttributeError:
# keep going
pass
# more cleanup
self.loop.run_until_complete(self.app.shutdown())
self.loop.run_until_complete(self.app.cleanup()) | This function is called when the service has finished running
regardless of whether or not it finished intentionally. | entailment |
def add_http_endpoint(self, url, request_handler):
"""
This method provides a programmatic way of adding individual routes
to the http server.
Args:
url (str): the url to be handled by the request_handler
request_handler (nautilus.network.RequestHandler): The request handler
"""
self.app.router.add_route('*', url, request_handler) | This method provides a programmatic way of adding individual routes
to the http server.
Args:
url (str): the url to be handled by the request_handler
request_handler (nautilus.network.RequestHandler): The request handler | entailment |
def route(cls, route, config=None):
"""
This method provides a decorator for adding endpoints to the
http server.
Args:
route (str): The url to be handled by the RequestHandler
config (dict): Configuration for the request handler
Example:
.. code-block:: python
import nautilus
from nautilus.network.http import RequestHandler
class MyService(nautilus.Service):
# ...
@MyService.route('/')
class HelloWorld(RequestHandler):
def get(self):
return self.finish('hello world')
"""
def decorator(wrapped_class, **kwds):
# add the endpoint at the given route
cls._routes.append(
dict(url=route, request_handler=wrapped_class)
)
# return the class undecorated
return wrapped_class
# return the decorator
return decorator | This method provides a decorator for adding endpoints to the
http server.
Args:
route (str): The url to be handled by the RequestHandler
config (dict): Configuration for the request handler
Example:
.. code-block:: python
import nautilus
from nautilus.network.http import RequestHandler
class MyService(nautilus.Service):
# ...
@MyService.route('/')
class HelloWorld(RequestHandler):
def get(self):
return self.finish('hello world') | entailment |
def generate_session_token(secret_key, **payload):
"""
This function generates a session token signed by the secret key which
can be used to extract the user credentials in a verifiable way.
"""
return jwt.encode(payload, secret_key, algorithm=token_encryption_algorithm()).decode('utf-8') | This function generates a session token signed by the secret key which
can be used to extract the user credentials in a verifiable way. | entailment |
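A minimal usage sketch for generate_session_token (the secret and payload values here are made up, and it assumes token_encryption_algorithm() resolves to an HMAC algorithm such as 'HS256'): the resulting token can be verified and unpacked with the same secret.

import jwt

secret = 'not-a-real-secret'
token = generate_session_token(secret, user_id=123, role='admin')
# verification and decoding use the same secret and algorithm
claims = jwt.decode(token, secret, algorithms=['HS256'])
assert claims['user_id'] == 123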
def summarize_mutation(mutation_name, event, inputs, outputs, isAsync=False):
"""
This function provides a standard representation of mutations to be
used when services announce themselves
"""
return dict(
name=mutation_name,
event=event,
isAsync=isAsync,
inputs=inputs,
outputs=outputs,
) | This function provides a standard representation of mutations to be
used when services announce themselves | entailment |
def new(cls, password, rounds):
"""Creates a PasswordHash from the given password."""
if isinstance(password, str):
password = password.encode('utf-8')
return cls(cls._new(password, rounds)) | Creates a PasswordHash from the given password. | entailment |
def coerce(cls, key, value):
"""Ensure that loaded values are PasswordHashes."""
if isinstance(value, PasswordHash):
return value
return super(PasswordHash, cls).coerce(key, value) | Ensure that loaded values are PasswordHashes. | entailment |
def rehash(self, password):
"""Recreates the internal hash."""
self.hash = self._new(password, self.desired_rounds)
self.rounds = self.desired_rounds | Recreates the internal hash. | entailment |
def init_db(self):
"""
This function configures the database used for models to match
the configuration parameters.
"""
# get the database url from the configuration
db_url = self.config.get('database_url', 'sqlite:///nautilus.db')
# configure the nautilus database to the url
nautilus.database.init_db(db_url) | This function configures the database used for models to match
the configuration parameters. | entailment |
def auth_criteria(self):
"""
This attribute provides the mapping of services to their auth requirement
Returns:
(dict) : the mapping from services to their auth requirements.
"""
# the dictionary we will return
auth = {}
# go over each attribute of the service
for attr in dir(self):
# make sure we don't hit an infinite loop
if attr != 'auth_criteria':
# get the actual attribute
attribute = getattr(self, attr)
# if the service represents an auth criteria
if isinstance(attribute, Callable) and hasattr(attribute, '_service_auth'):
# add the criteria to the final results
auth[getattr(self, attr)._service_auth] = attribute
# return the auth mapping
return auth | This attribute provides the mapping of services to their auth requirement
Returns:
(dict) : the mapping from services to their auth requirements. | entailment |
async def login_user(self, password, **kwds):
"""
This function handles the authentication of the given user credentials against the database
"""
# find the matching user with the given email
user_data = (await self._get_matching_user(fields=list(kwds.keys()), **kwds))['data']
try:
# look for a matching entry in the local database
passwordEntry = self.model.select().where(
self.model.user == user_data[root_query()][0]['pk']
)[0]
# if we couldn't access the id of the result
except (KeyError, IndexError) as e:
# yell loudly
raise RuntimeError('Could not find matching registered user')
# if the given password matches the stored hash
if passwordEntry and passwordEntry.password == password:
# the remote entry for the user
user = user_data[root_query()][0]
# then return a dictionary with the user and sessionToken
return {
'user': user,
'sessionToken': self._user_session_token(user)
}
# otherwise the passwords don't match
raise RuntimeError("Incorrect credentials") | This function handles the registration of the given user credentials in the database | entailment |
async def register_user(self, password, **kwds):
"""
This function is used to provide a sessionToken for later requests.
Args:
uid (str): The
"""
# so make one
user = await self._create_remote_user(password=password, **kwds)
# if there is no pk field
if 'pk' not in user:
# make sure the user has a pk field
user['pk'] = user['id']
# the query to find a matching query
match_query = self.model.user == user['id']
# if the user has already been registered
if self.model.select().where(match_query).count() > 0:
# yell loudly
raise RuntimeError('The user is already registered.')
# create an entry in the user password table
password = self.model(user=user['id'], password=password)
# save it to the database
password.save()
# return a dictionary with the user we created and a session token for later use
return {
'user': user,
'sessionToken': self._user_session_token(user)
} | This function is used to provide a sessionToken for later requests.
Args:
uid (str): The | entailment |
async def object_resolver(self, object_name, fields, obey_auth=False, current_user=None, **filters):
"""
This function resolves a given object in the remote backend services
"""
try:
# check if an object with that name has been registered
registered = [model for model in self._external_service_data['models'] \
if model['name']==object_name][0]
# if there is no connection data yet
except AttributeError:
raise ValueError("No objects are registered with this schema yet.")
# if we don't recognize the model that was requested
except IndexError:
raise ValueError("Cannot query for object {} on this service.".format(object_name))
# the valid fields for this object
valid_fields = [field['name'] for field in registered['fields']]
# figure out if any invalid fields were requested
invalid_fields = [field for field in fields if field not in valid_fields]
try:
# make sure we never treat pk as invalid
invalid_fields.remove('pk')
# if they weren't asking for pk as a field
except ValueError:
pass
# if there were
if invalid_fields:
# yell loudly
raise ValueError("Cannot query for fields {!r} on {}".format(
invalid_fields, registered['name']
))
# make sure we include the id in the request
fields.append('pk')
# the query for model records
query = query_for_model(fields, **filters)
# the action type for the question
action_type = get_crud_action('read', object_name)
# query the appropriate stream for the information
response = await self.event_broker.ask(
action_type=action_type,
payload=query
)
# treat the reply like a json object
response_data = json.loads(response)
# if something went wrong
if 'errors' in response_data and response_data['errors']:
# return an empty response
raise ValueError(','.join(response_data['errors']))
# grab the valid list of matches
result = response_data['data'][root_query()]
# grab the auth handler for the object
auth_criteria = self.auth_criteria.get(object_name)
# if we care about auth requirements and there is one for this object
if obey_auth and auth_criteria:
# build a second list of authorized entries
authorized_results = []
# for each query result
for query_result in result:
# create a graph entity for the model
graph_entity = GraphEntity(self, model_type=object_name, id=query_result['pk'])
# if the auth handler passes
if await auth_criteria(model=graph_entity, user_id=current_user):
# add the result to the final list
authorized_results.append(query_result)
# overwrite the query result
result = authorized_results
# apply the auth handler to the result
return result | This function resolves a given object in the remote backend services | entailment |
async def mutation_resolver(self, mutation_name, args, fields):
"""
the default behavior for mutations is to look up the event,
publish the correct event type with the args as the body,
and return the fields contained in the result
"""
try:
# make sure we can identify the mutation
mutation_summary = [mutation for mutation in \
self._external_service_data['mutations'] \
if mutation['name'] == mutation_name][0]
# if the mutation is unknown or the service data is missing
except (KeyError, IndexError):
# make sure the error is reported
raise ValueError("Could not execute mutation named: " + mutation_name)
# the function to use for running the mutation depends on its synchronicity
# event_function = self.event_broker.ask \
# if mutation_summary['isAsync'] else self.event_broker.send
event_function = self.event_broker.ask
# send the event and wait for a response
value = await event_function(
action_type=mutation_summary['event'],
payload=args
)
try:
# return a dictionary with the values we asked for
return json.loads(value)
# if the result was not valid json
except json.decoder.JSONDecodeError:
# just throw the value
raise RuntimeError(value) | the default behavior for mutations is to look up the event,
publish the correct event type with the args as the body,
and return the fields contained in the result | entailment |
async def _check_for_matching_user(self, **user_filters):
"""
This function checks if there is a user with the same uid in the
remote user service
Args:
**kwds : the filters of the user to check for
Returns:
(bool): whether or not there is a matching user
"""
# look up any users matching the given filters
user_data = await self._get_matching_user(**user_filters)
# return true if there were no errors and at least one result
return not user_data['errors'] and len(user_data['data'][root_query()]) | This function checks if there is a user with the same uid in the
remote user service
Args:
**kwds : the filters of the user to check for
Returns:
(bool): whether or not there is a matching user | entailment |
async def _create_remote_user(self, **payload):
"""
This method creates a service record in the remote user service
with the given email.
Args:
uid (str): the user identifier to create
Returns:
(dict): a summary of the user that was created
"""
# the action for creating user entries
create_action = get_crud_action(method='create', model='user')
# ask the user service to create the user
user_data = await self.event_broker.ask(
action_type=create_action,
payload=payload
)
# treat the reply like a json object
return json.loads(user_data) | This method creates a service record in the remote user service
with the given email.
Args:
uid (str): the user identifier to create
Returns:
(dict): a summary of the user that was created | entailment |
def calculate_wer(reference, hypothesis):
"""
Calculation of WER with Levenshtein distance.
Works only for iterables up to 254 elements (uint8).
O(nm) time and space complexity.
>>> calculate_wer("who is there".split(), "is there".split())
1
>>> calculate_wer("who is there".split(), "".split())
3
>>> calculate_wer("".split(), "who is there".split())
3
"""
# initialisation
import numpy
d = numpy.zeros((len(reference)+1)*(len(hypothesis)+1), dtype=numpy.uint8)
d = d.reshape((len(reference)+1, len(hypothesis)+1))
for i in range(len(reference)+1):
for j in range(len(hypothesis)+1):
if i == 0:
d[0][j] = j
elif j == 0:
d[i][0] = i
# computation
for i in range(1, len(reference)+1):
for j in range(1, len(hypothesis)+1):
if reference[i-1] == hypothesis[j-1]:
d[i][j] = d[i-1][j-1]
else:
substitution = d[i-1][j-1] + 1
insertion = d[i][j-1] + 1
deletion = d[i-1][j] + 1
d[i][j] = min(substitution, insertion, deletion)
# note: this returns the raw word-level edit distance, as the doctests above expect;
# divide by len(reference) to obtain a normalised word error rate
return d[len(reference)][len(hypothesis)] | Calculation of WER with Levenshtein distance.
Works only for iterables up to 254 elements (uint8).
O(nm) time and space complexity.
>>> calculate_wer("who is there".split(), "is there".split())
1
>>> calculate_wer("who is there".split(), "".split())
3
>>> calculate_wer("".split(), "who is there".split())
3 | entailment |
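A small worked example for calculate_wer (the sentences are made up): substituting a single word gives an edit distance of 1, and dividing by the reference length gives the normalised rate mentioned in the comment above.

reference = "the cat sat on the mat".split()
hypothesis = "the cat sat on a mat".split()
distance = calculate_wer(reference, hypothesis)  # one substitution -> 1
normalised = distance / float(len(reference))    # -> 0.1666...
print(distance, normalised)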
def get_parser():
"""Get a parser object"""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-s1", dest="s1", help="sequence 1")
parser.add_argument("-s2", dest="s2", help="sequence 2")
return parser | Get a parser object | entailment |
async def _async_request_soup(url):
'''
Perform a GET web request and return a bs4 parser
'''
from bs4 import BeautifulSoup
import aiohttp
_LOGGER.debug('GET %s', url)
async with aiohttp.ClientSession() as session:
resp = await session.get(url)
text = await resp.text()
return BeautifulSoup(text, 'html.parser') | Perform a GET web request and return a bs4 parser | entailment |
async def async_determine_channel(channel):
'''
Check whether the current channel is correct. If not try to determine it
using fuzzywuzzy
'''
from fuzzywuzzy import process
channel_data = await async_get_channels()
if not channel_data:
_LOGGER.error('No channel data. Cannot determine requested channel.')
return
channels = [c for c in channel_data.get('data', {}).keys()]
if channel in channels:
return channel
else:
res = process.extractOne(channel, channels)[0]
_LOGGER.debug('No direct match found for %s. Resort to guesswork. '
'Guessed %s', channel, res)
return res | Check whether the current channel is correct. If not try to determine it
using fuzzywuzzy | entailment |
async def async_get_channels(no_cache=False, refresh_interval=4):
'''
Get channel list and corresponding urls
'''
# Check cache
now = datetime.datetime.now()
max_cache_age = datetime.timedelta(hours=refresh_interval)
if not no_cache and 'channels' in _CACHE:
cache = _CACHE.get('channels')
cache_age = cache.get('last_updated')
if now - cache_age < max_cache_age:
_LOGGER.debug('Found channel list in cache.')
return cache
else:
_LOGGER.debug('Found outdated channel list in cache. Update it.')
_CACHE.pop('channels')
soup = await _async_request_soup(BASE_URL + '/plan.html')
channels = {}
for li_item in soup.find_all('li'):
try:
child = li_item.findChild()
if not child or child.name != 'a':
continue
href = child.get('href')
if not href or not href.startswith('/programme/chaine'):
continue
channels[child.get('title')] = BASE_URL + href
except Exception as exc:
_LOGGER.error('Exception occurred while fetching the channel '
'list: %s', exc)
if channels:
_CACHE['channels'] = {'last_updated': now, 'data': channels}
return _CACHE['channels'] | Get channel list and corresponding urls | entailment |
def resize_program_image(img_url, img_size=300):
'''
Resize a program's thumbnail to the desired dimension
'''
match = re.match(r'.+/(\d+)x(\d+)/.+', img_url)
if not match:
_LOGGER.warning('Could not compute current image resolution of %s',
img_url)
return img_url
res_x = int(match.group(1))
res_y = int(match.group(2))
# aspect_ratio = res_x / res_y
target_res_y = int(img_size * res_y / res_x)
return re.sub(
r'{}x{}'.format(res_x, res_y),
r'{}x{}'.format(img_size, target_res_y),
img_url) | Resize a program's thumbnail to the desired dimension | entailment |
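A worked example for resize_program_image with a made-up thumbnail URL: the 600x450 segment is rewritten to a 300-pixel-wide equivalent that keeps the aspect ratio.

url = 'https://example.com/media/600x450/poster.jpg'  # hypothetical URL
print(resize_program_image(url, img_size=300))
# -> https://example.com/media/300x225/poster.jpg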
def get_current_program_progress(program):
'''
Get the current progress of the program in %
'''
now = datetime.datetime.now()
program_duration = get_program_duration(program)
if not program_duration:
return
progress = now - program.get('start_time')
return progress.seconds * 100 / program_duration | Get the current progress of the program in % | entailment |
def get_program_duration(program):
'''
Get a program's duration in seconds
'''
program_start = program.get('start_time')
program_end = program.get('end_time')
if not program_start or not program_end:
_LOGGER.error('Could not determine program start and/or end times.')
_LOGGER.debug('Program data: %s', program)
return
program_duration = program_end - program_start
return program_duration.seconds | Get a program's duration in seconds | entailment |
def get_remaining_time(program):
'''
Get the remaining time in seconds of a program that is currently on.
'''
now = datetime.datetime.now()
program_start = program.get('start_time')
program_end = program.get('end_time')
if not program_start or not program_end:
_LOGGER.error('Could not determine program start and/or end times.')
_LOGGER.debug('Program data: %s', program)
return
if now > program_end:
_LOGGER.error('The provided program has already ended.')
_LOGGER.debug('Program data: %s', program)
return 0
progress = now - program_start
return progress.seconds | Get the remaining time in seconds of a program that is currently on. | entailment |
def extract_program_summary(data):
'''
Extract the summary data from a program's detail page
'''
from bs4 import BeautifulSoup
soup = BeautifulSoup(data, 'html.parser')
try:
return soup.find(
'div', {'class': 'episode-synopsis'}
).find_all('div')[-1].text.strip()
except Exception:
_LOGGER.info('No summary found for program: %s',
soup.find('a', {'class': 'prog_name'}))
return "No summary" | Extract the summary data from a program's detail page | entailment |
async def async_set_summary(program):
'''
Set a program's summary
'''
import aiohttp
async with aiohttp.ClientSession() as session:
resp = await session.get(program.get('url'))
text = await resp.text()
summary = extract_program_summary(text)
program['summary'] = summary
return program | Set a program's summary | entailment |
async def async_get_program_guide(channel, no_cache=False, refresh_interval=4):
'''
Get the program data for a channel
'''
chan = await async_determine_channel(channel)
now = datetime.datetime.now()
max_cache_age = datetime.timedelta(hours=refresh_interval)
if not no_cache and 'guide' in _CACHE and _CACHE.get('guide').get(chan):
cache = _CACHE.get('guide').get(chan)
cache_age = cache.get('last_updated')
if now - cache_age < max_cache_age:
_LOGGER.debug('Found program guide in cache.')
return cache.get('data')
else:
_LOGGER.debug('Found outdated program guide in cache. Update it.')
_CACHE['guide'].pop(chan)
chans = await async_get_channels()
url = chans.get('data', {}).get(chan)
if not url:
_LOGGER.error('Could not determine URL for %s', chan)
return
soup = await _async_request_soup(url)
programs = []
for prg_item in soup.find_all('div', {'class': 'program-infos'}):
try:
prog_info = prg_item.find('a', {'class': 'prog_name'})
prog_name = prog_info.text.strip()
prog_url = prog_info.get('href')
if not prog_url:
_LOGGER.warning('Failed to retrieve the detail URL for program %s. '
'The summary will be empty', prog_name)
prog_type = prg_item.find('span', {'class': 'prog_type'}).text.strip()
prog_times = prg_item.find('div', {'class': 'prog_progress'})
prog_start = datetime.datetime.fromtimestamp(
int(prog_times.get('data-start')))
prog_end = datetime.datetime.fromtimestamp(
int(prog_times.get('data-end')))
img = prg_item.find_previous_sibling().find(
'img', {'class': 'prime_broadcast_image'})
prog_img = img.get('data-src') if img else None
programs.append(
{'name': prog_name, 'type': prog_type, 'img': prog_img,
'url': prog_url, 'summary': None, 'start_time': prog_start,
'end_time': prog_end})
except Exception as exc:
_LOGGER.error('Exception occurred while fetching the program '
'guide for channel %s: %s', chan, exc)
import traceback
traceback.print_exc()
# Set the program summaries asynchronously
tasks = [async_set_summary(prog) for prog in programs]
programs = await asyncio.gather(*tasks)
if programs:
if 'guide' not in _CACHE:
_CACHE['guide'] = {}
_CACHE['guide'][chan] = {'last_updated': now, 'data': programs}
return programs | Get the program data for a channel | entailment |
async def async_get_current_program(channel, no_cache=False):
'''
Get the current program info
'''
chan = await async_determine_channel(channel)
guide = await async_get_program_guide(chan, no_cache)
if not guide:
_LOGGER.warning('Could not retrieve TV program for %s', channel)
return
now = datetime.datetime.now()
for prog in guide:
start = prog.get('start_time')
end = prog.get('end_time')
if now > start and now < end:
return prog | Get the current program info | entailment |
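A usage sketch for the coroutines above (the channel name is an arbitrary example, and results depend on the remote site being reachable):

import asyncio

async def show_current_program():
    # 'TF1' is just an example channel name
    program = await async_get_current_program('TF1')
    if program:
        print(program['name'], get_current_program_progress(program), '%')

asyncio.get_event_loop().run_until_complete(show_current_program())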
def publish(self, distribution, storage=""):
"""
Get or create publish
"""
try:
return self._publishes[distribution]
except KeyError:
self._publishes[distribution] = Publish(self.client, distribution, timestamp=self.timestamp, storage=(storage or self.storage))
return self._publishes[distribution] | Get or create publish | entailment |
def add(self, snapshot, distributions, component='main', storage=""):
""" Add mirror or repo to publish """
for dist in distributions:
self.publish(dist, storage=storage).add(snapshot, component) | Add mirror or repo to publish | entailment |
def _publish_match(self, publish, names=False, name_only=False):
"""
Check if publish name matches list of names or regex patterns
"""
if names:
for name in names:
if not name_only and isinstance(name, type(re.compile(''))):  # compiled regex pattern
if re.match(name, publish.name):
return True
else:
operand = name if name_only else [name, './%s' % name]
if publish in operand:
return True
return False
else:
return True | Check if publish name matches list of names or regex patterns | entailment |
def get_repo_information(config, client, fill_repo=False, components=[]):
""" fill two dictionnaries : one containing all the packages for every repository
and the second one associating to every component of every publish its repository"""
repo_dict = {}
publish_dict = {}
for origin in ['repo', 'mirror']:
for name, repo in config.get(origin, {}).items():
if components and repo.get('component') not in components:
continue
if fill_repo and origin == 'repo':
packages = Publish._get_packages("repos", name)
repo_dict[name] = packages
for distribution in repo.get('distributions'):
publish_name = str.join('/', distribution.split('/')[:-1])
publish_dict[(publish_name, repo.get('component'))] = name
return (repo_dict, publish_dict) | fill two dictionaries: one containing all the packages for every repository
and the second one associating each component of every publish with its repository | entailment |
def compare(self, other, components=[]):
"""
Compare two publishes
It expects that other publish is same or older than this one
Return tuple (diff, equal) of dict {'component': ['snapshot']}
"""
lg.debug("Comparing publish %s (%s) and %s (%s)" % (self.name, self.storage or "local", other.name, other.storage or "local"))
diff, equal = ({}, {})
for component, snapshots in self.components.items():
if component not in list(other.components.keys()):
# Component is missing in other
diff[component] = snapshots
continue
equal_snapshots = list(set(snapshots).intersection(other.components[component]))
if equal_snapshots:
lg.debug("Equal snapshots for %s: %s" % (component, equal_snapshots))
equal[component] = equal_snapshots
diff_snapshots = list(set(snapshots).difference(other.components[component]))
if diff_snapshots:
lg.debug("Different snapshots for %s: %s" % (component, diff_snapshots))
diff[component] = diff_snapshots
return (diff, equal) | Compare two publishes
It expects that other publish is same or older than this one
Return tuple (diff, equal) of dict {'component': ['snapshot']} | entailment |
def _get_publish(self):
"""
Find this publish on remote
"""
publishes = self._get_publishes(self.client)
for publish in publishes:
if publish['Distribution'] == self.distribution and \
publish['Prefix'].replace("/", "_") == (self.prefix or '.') and \
publish['Storage'] == self.storage:
return publish
raise NoSuchPublish("Publish %s (%s) does not exist" % (self.name, self.storage or "local")) | Find this publish on remote | entailment |
def save_publish(self, save_path):
"""
Serialize publish in YAML
"""
timestamp = time.strftime("%Y%m%d%H%M%S")
yaml_dict = {}
yaml_dict["publish"] = self.name
yaml_dict["name"] = timestamp
yaml_dict["components"] = []
yaml_dict["storage"] = self.storage
for component, snapshots in self.components.items():
packages = self.get_packages(component)
package_dict = []
for package in packages:
(arch, name, version, ref) = self.parse_package_ref(package)
package_dict.append({'package': name, 'version': version, 'arch': arch, 'ref': ref})
snapshot = self._find_snapshot(snapshots[0])
yaml_dict["components"].append({'component': component, 'snapshot': snapshot['Name'],
'description': snapshot['Description'], 'packages': package_dict})
name = self.name.replace('/', '-')
lg.info("Saving publish %s in %s" % (name, save_path))
with open(save_path, 'w') as save_file:
yaml.dump(yaml_dict, save_file, default_flow_style=False) | Serialize publish in YAML | entailment |
def restore_publish(self, config, components, recreate=False):
"""
Restore publish from config file
"""
if "all" in components:
components = []
try:
self.load()
publish = True
except NoSuchPublish:
publish = False
new_publish_snapshots = []
to_publish = []
created_snapshots = []
for saved_component in config.get('components', []):
component_name = saved_component.get('component')
if not component_name:
raise Exception("Corrupted file")
if components and component_name not in components:
continue
saved_packages = []
if not saved_component.get('packages'):
raise Exception("Component %s is empty" % component_name)
for package in saved_component.get('packages'):
package_ref = '{} {} {} {}'.format(package.get('arch'), package.get('package'), package.get('version'), package.get('ref'))
saved_packages.append(package_ref)
to_publish.append(component_name)
timestamp = time.strftime("%Y%m%d%H%M%S")
snapshot_name = '{}-{}-{}'.format("restored", timestamp, saved_component.get('snapshot'))
lg.debug("Creating snapshot %s for component %s of packages: %s"
% (snapshot_name, component_name, saved_packages))
try:
self.client.do_post(
'/snapshots',
data={
'Name': snapshot_name,
'SourceSnapshots': [],
'Description': saved_component.get('description'),
'PackageRefs': saved_packages,
}
)
created_snapshots.append(snapshot_name)
except AptlyException as e:
if e.res.status_code == 404:
# delete all the previously created
# snapshots because the file is corrupted
self._remove_snapshots(created_snapshots)
raise Exception("Source snapshot or packages don't exist")
else:
raise
new_publish_snapshots.append({
'Component': component_name,
'Name': snapshot_name
})
if components:
self.publish_snapshots = [x for x in self.publish_snapshots if x['Component'] not in components and x['Component'] not in to_publish]
check_components = [x for x in new_publish_snapshots if x['Component'] in components]
if len(check_components) != len(components):
self._remove_snapshots(created_snapshots)
raise Exception("Not possible to find all the components required in the backup file")
self.publish_snapshots += new_publish_snapshots
self.do_publish(recreate=recreate, merge_snapshots=False) | Restore publish from config file | entailment |
def load(self):
"""
Load publish info from remote
"""
publish = self._get_publish()
self.architectures = publish['Architectures']
for source in publish['Sources']:
component = source['Component']
snapshot = source['Name']
self.publish_snapshots.append({
'Component': component,
'Name': snapshot
})
snapshot_remote = self._find_snapshot(snapshot)
for source in self._get_source_snapshots(snapshot_remote, fallback_self=True):
self.add(source, component) | Load publish info from remote | entailment |
def get_packages(self, component=None, components=[], packages=None):
"""
Return package refs for given components
"""
if component:
components = [component]
package_refs = []
for snapshot in self.publish_snapshots:
if component and snapshot['Component'] not in components:
# We don't want packages for this component
continue
component_refs = self._get_packages(self.client, "snapshots", snapshot['Name'])
if packages:
# Filter package names
for ref in component_refs:
if self.parse_package_ref(ref)[1] in packages:
package_refs.append(ref)
else:
package_refs.extend(component_refs)
return package_refs | Return package refs for given components | entailment |
def parse_package_ref(self, ref):
"""
Return tuple of architecture, package_name, version, id
"""
if not ref:
return None
parsed = re.match(r'(.*) (.*) (.*) (.*)', ref)
return parsed.groups() | Return tuple of architecture, package_name, version, id | entailment |
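A small worked example for parse_package_ref (the ref string is made up but follows the 'arch name version key' layout used elsewhere in this module, and `publish` is assumed to be an existing Publish instance):

ref = 'amd64 nginx 1.18.0-6ubuntu1 d4f1a2b3c4d5e6f7'
arch, name, version, key = publish.parse_package_ref(ref)
# -> ('amd64', 'nginx', '1.18.0-6ubuntu1', 'd4f1a2b3c4d5e6f7')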
def add(self, snapshot, component='main'):
"""
Add snapshot of component to publish
"""
try:
self.components[component].append(snapshot)
except KeyError:
self.components[component] = [snapshot] | Add snapshot of component to publish | entailment |
def _find_snapshot(self, name):
"""
Find snapshot on remote by name or regular expression
"""
remote_snapshots = self._get_snapshots(self.client)
for remote in reversed(remote_snapshots):
if remote["Name"] == name or \
re.match(name, remote["Name"]):
return remote
return None | Find snapshot on remote by name or regular expression | entailment |
def _get_source_snapshots(self, snapshot, fallback_self=False):
"""
Get list of source snapshot names of given snapshot
TODO: we have to decide by description at the moment
"""
if not snapshot:
return []
source_snapshots = re.findall(r"'([\w\d\.-]+)'", snapshot['Description'])
if not source_snapshots and fallback_self:
source_snapshots = [snapshot['Name']]
source_snapshots.sort()
return source_snapshots | Get list of source snapshot names of given snapshot
TODO: we have to decide by description at the moment | entailment |
def merge_snapshots(self):
"""
Create component snapshots by merging other snapshots of same component
"""
self.publish_snapshots = []
for component, snapshots in self.components.items():
if len(snapshots) <= 1:
# Only one snapshot, no need to merge
lg.debug("Component %s has only one snapshot %s, not creating merge snapshot" % (component, snapshots))
self.publish_snapshots.append({
'Component': component,
'Name': snapshots[0]
})
continue
# Look if merged snapshot doesn't already exist
remote_snapshot = self._find_snapshot(r'^%s%s-%s-\d+' % (self.merge_prefix, self.name.replace('./', '').replace('/', '-'), component))
if remote_snapshot:
source_snapshots = self._get_source_snapshots(remote_snapshot)
# Check if latest merged snapshot has same source snapshots like us
snapshots_want = list(snapshots)
snapshots_want.sort()
lg.debug("Comparing snapshots: snapshot_name=%s, snapshot_sources=%s, wanted_sources=%s" % (remote_snapshot['Name'], source_snapshots, snapshots_want))
if snapshots_want == source_snapshots:
lg.info("Remote merge snapshot already exists: %s (%s)" % (remote_snapshot['Name'], source_snapshots))
self.publish_snapshots.append({
'Component': component,
'Name': remote_snapshot['Name']
})
continue
snapshot_name = '%s%s-%s-%s' % (self.merge_prefix, self.name.replace('./', '').replace('/', '-'), component, self.timestamp)
lg.info("Creating merge snapshot %s for component %s of snapshots %s" % (snapshot_name, component, snapshots))
package_refs = []
for snapshot in snapshots:
# Get package refs from each snapshot
packages = self._get_packages(self.client, "snapshots", snapshot)
package_refs.extend(packages)
try:
self.client.do_post(
'/snapshots',
data={
'Name': snapshot_name,
'SourceSnapshots': snapshots,
'Description': "Merged from sources: %s" % ', '.join("'%s'" % snap for snap in snapshots),
'PackageRefs': package_refs,
}
)
except AptlyException as e:
if e.res.status_code == 400:
lg.warning("Error creating snapshot %s, assuming it already exists" % snapshot_name)
else:
raise
self.publish_snapshots.append({
'Component': component,
'Name': snapshot_name
}) | Create component snapshots by merging other snapshots of same component | entailment |
def timing_decorator(func):
"""Prints the time func takes to execute."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""
Wrapper for printing execution time.
Parameters
----------
print_time: bool, optional
whether or not to print time function takes.
"""
print_time = kwargs.pop('print_time', False)
if not print_time:
return func(*args, **kwargs)
else:
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
print(func.__name__ + ' took %.3f seconds' %
(end_time - start_time))
return result
return wrapper | Prints the time func takes to execute. | entailment |
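A usage sketch for timing_decorator: the decorated function runs normally by default and only reports its runtime when called with print_time=True.

@timing_decorator
def slow_sum(n):
    # stand-in for a slow computation, purely for illustration
    return sum(range(n))

slow_sum(10 ** 6)                   # runs silently
slow_sum(10 ** 6, print_time=True)  # also prints e.g. "slow_sum took 0.031 seconds"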
def save_load_result(func):
"""Saves and/or loads func output (must be picklable)."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""
Default behavior is no saving and loading. Specify save_name to save
and load.
Parameters
----------
save_name: str, optional
File name including directory and excluding extension.
save: bool, optional
Whether or not to save.
load: bool, optional
Whether or not to load.
overwrite_existing: bool, optional
When the save path already contains file: if True, file will be
overwritten, if False the data will be saved with the system time
appended to the file name.
warn_if_error: bool, optional
Whether or not to issue UserWarning if load=True and save_name
is not None but there is an error loading.
Returns
-------
Result
func output.
"""
save_name = kwargs.pop('save_name', None)
save = kwargs.pop('save', save_name is not None)
load = kwargs.pop('load', save_name is not None)
overwrite_existing = kwargs.pop('overwrite_existing', True)
warn_if_error = kwargs.pop('warn_if_error', False)
if load:
if save_name is None:
warnings.warn(
('{} has load=True but cannot load because '
'save_name=None'.format(func.__name__)),
UserWarning)
else:
try:
return pickle_load(save_name)
except (OSError, IOError) as err:
if warn_if_error:
msg = ('{} had {} loading file {}.'.format(
func.__name__, type(err).__name__, save_name))
msg += ' Continuing without loading.'
warnings.warn(msg, UserWarning)
result = func(*args, **kwargs)
if save:
if save_name is None:
warnings.warn((func.__name__ + ' has save=True but cannot ' +
'save because save_name=None'), UserWarning)
else:
pickle_save(result, save_name,
overwrite_existing=overwrite_existing)
return result
return wrapper | Saves and/or loads func output (must be picklable). | entailment |
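A usage sketch for save_load_result (the cache path is arbitrary): the first call computes the result and pickles it, the second call loads the pickle instead of recomputing.

@save_load_result
def expensive_analysis(x):
    # stand-in for a slow computation, purely for illustration
    return x ** 2

result = expensive_analysis(4, save_name='cache/expensive_analysis')        # computes and saves
result_again = expensive_analysis(4, save_name='cache/expensive_analysis')  # loads from disk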
def pickle_save(data, name, **kwargs):
"""Saves object with pickle.
Parameters
----------
data: anything picklable
Object to save.
name: str
Path to save to (includes dir, excludes extension).
extension: str, optional
File extension.
overwrite_existing: bool, optional
When the save path already contains file: if True, file will be
overwritten, if False the data will be saved with the system time
appended to the file name.
"""
extension = kwargs.pop('extension', '.pkl')
overwrite_existing = kwargs.pop('overwrite_existing', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
filename = name + extension
# Check if the target directory exists and if not make it
dirname = os.path.dirname(filename)
if not os.path.exists(dirname) and dirname != '':
os.makedirs(dirname)
if os.path.isfile(filename) and not overwrite_existing:
print(filename + ' already exists! Saving with time appended')
filename = name + '_' + time.asctime().replace(' ', '_')
filename += extension
# check if permission error is defined (was not before python 3.3)
# and otherwise use IOError
try:
PermissionError
except NameError:
PermissionError = IOError
try:
outfile = open(filename, 'wb')
pickle.dump(data, outfile)
outfile.close()
except (MemoryError, PermissionError) as err:
warnings.warn((type(err).__name__ + ' in pickle_save: continue without'
' saving.'), UserWarning) | Saves object with pickle.
Parameters
----------
data: anything picklable
Object to save.
name: str
Path to save to (includes dir, excludes extension).
extension: str, optional
File extension.
overwrite_existing: bool, optional
When the save path already contains file: if True, file will be
overwritten, if False the data will be saved with the system time
appended to the file name. | entailment |
def pickle_load(name, extension='.pkl'):
"""Load data with pickle.
Parameters
----------
name: str
Path to save to (includes dir, excludes extension).
extension: str, optional
File extension.
Returns
-------
Contents of file path.
"""
filename = name + extension
infile = open(filename, 'rb')
data = pickle.load(infile)
infile.close()
return data | Load data with pickle.
Parameters
----------
name: str
Path to save to (includes dir, excludes extension).
extension: str, optional
File extension.
Returns
-------
Contents of file path. | entailment |
def bootstrap_resample_run(ns_run, threads=None, ninit_sep=False,
random_seed=False):
"""Bootstrap resamples threads of nested sampling run, returning a new
(resampled) nested sampling run.
Get the individual threads for a nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
threads: None or list of numpy arrays, optional
ninit_sep: bool
For dynamic runs: resample initial threads and dynamically added
threads separately. Useful when there are only a few threads which
start by sampling the whole prior, as errors occur if none of these are
included in the bootstrap resample.
random_seed: None, bool or int, optional
Set numpy random seed. Default is to use None (so a random seed is
chosen from the computer's internal state) to ensure reliable results
when multiprocessing. Can set to an integer or to False to not edit the
seed.
Returns
-------
ns_run_temp: dict
Nested sampling run dictionary.
"""
if random_seed is not False:
# save the random state so we don't affect other bits of the code
state = np.random.get_state()
np.random.seed(random_seed)
if threads is None:
threads = nestcheck.ns_run_utils.get_run_threads(ns_run)
n_threads = len(threads)
if ninit_sep:
try:
ninit = ns_run['settings']['ninit']
assert np.all(ns_run['thread_min_max'][:ninit, 0] == -np.inf), (
'ninit_sep assumes the initial threads are labeled '
'(0,...,ninit-1), so these should start by sampling the whole '
'prior.')
inds = np.random.randint(0, ninit, ninit)
inds = np.append(inds, np.random.randint(ninit, n_threads,
n_threads - ninit))
except KeyError:
warnings.warn((
'bootstrap_resample_run has kwarg ninit_sep=True but '
'ns_run["settings"]["ninit"] does not exist. Doing bootstrap '
'with ninit_sep=False'), UserWarning)
ninit_sep = False
if not ninit_sep:
inds = np.random.randint(0, n_threads, n_threads)
threads_temp = [threads[i] for i in inds]
resampled_run = nestcheck.ns_run_utils.combine_threads(threads_temp)
try:
resampled_run['settings'] = ns_run['settings']
except KeyError:
pass
if random_seed is not False:
# if we have used a random seed then return to the original state
np.random.set_state(state)
return resampled_run | Bootstrap resamples threads of nested sampling run, returning a new
(resampled) nested sampling run.
Get the individual threads for a nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
threads: None or list of numpy arrays, optional
ninit_sep: bool
For dynamic runs: resample initial threads and dynamically added
threads separately. Useful when there are only a few threads which
start by sampling the whole prior, as errors occur if none of these are
included in the bootstrap resample.
random_seed: None, bool or int, optional
Set numpy random seed. Default is to use None (so a random seed is
chosen from the computer's internal state) to ensure reliable results
when multiprocessing. Can set to an integer or to False to not edit the
seed.
Returns
-------
ns_run_temp: dict
Nested sampling run dictionary. | entailment |
def run_std_bootstrap(ns_run, estimator_list, **kwargs):
"""
Uses bootstrap resampling to calculate an estimate of the
standard deviation of the distribution of sampling errors (the
uncertainty on the calculation) for a single nested sampling run.
For more details about bootstrap resampling for estimating sampling
errors see 'Sampling errors in nested sampling parameter estimation'
(Higson et al. 2018).
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions for estimating quantities (such as the
Bayesian evidence or mean of parameters) from nested sampling runs.
Example functions can be found in estimators.py. Each should have
arguments: func(ns_run, logw=None)
kwargs: dict
kwargs for run_bootstrap_values
Returns
-------
output: 1d numpy array
Sampling error on calculation result for each estimator in
estimator_list.
"""
bs_values = run_bootstrap_values(ns_run, estimator_list, **kwargs)
stds = np.zeros(bs_values.shape[0])
for j, _ in enumerate(stds):
stds[j] = np.std(bs_values[j, :], ddof=1)
return stds | Uses bootstrap resampling to calculate an estimate of the
standard deviation of the distribution of sampling errors (the
uncertainty on the calculation) for a single nested sampling run.
For more details about bootstrap resampling for estimating sampling
errors see 'Sampling errors in nested sampling parameter estimation'
(Higson et al. 2018).
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions for estimating quantities (such as the
Bayesian evidence or mean of parameters) from nested sampling runs.
Example functions can be found in estimators.py. Each should have
arguments: func(ns_run, logw=None)
kwargs: dict
kwargs for run_bootstrap_values
Returns
-------
output: 1d numpy array
Sampling error on calculation result for each estimator in
estimator_list. | entailment |
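A usage sketch for run_std_bootstrap (it assumes `run` is a nested sampling run dictionary that has already been loaded, e.g. via nestcheck's data processing utilities): bootstrap standard errors for the log-evidence and the mean of the first parameter.

import nestcheck.estimators as e

estimator_list = [e.logz, e.param_mean]
stds = run_std_bootstrap(run, estimator_list, n_simulate=100)
print(dict(zip(['logz', 'param_mean'], stds)))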
def run_bootstrap_values(ns_run, estimator_list, **kwargs):
"""Uses bootstrap resampling to calculate an estimate of the
standard deviation of the distribution of sampling errors (the
uncertainty on the calculation) for a single nested sampling run.
For more details about bootstrap resampling for estimating sampling
errors see 'Sampling errors in nested sampling parameter estimation'
(Higson et al. 2018).
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions for estimating quantities (such as the
Bayesian evidence or mean of parameters) from nested sampling runs.
Example functions can be found in estimators.py. Each should have
arguments: func(ns_run, logw=None)
n_simulate: int
ninit_sep: bool, optional
For dynamic runs: resample initial threads and dynamically added
threads separately. Useful when there are only a few threads which
start by sampling the whole prior, as errors occur if none of these are
included in the bootstrap resample.
flip_skew: bool, optional
Determine if distribution of bootstrap values should be flipped about
its mean to better represent our probability distribution on the true
value - see "Bayesian astrostatistics: a backward look to the future"
(Loredo, 2012 Figure 2) for an explanation.
If true, the samples :math:`X` are mapped to :math:`2 \mu - X`, where
:math:`\mu` is the mean sample value.
This leaves the mean and standard deviation unchanged.
random_seeds: list, optional
list of random_seed arguments for bootstrap_resample_run.
Defaults to range(n_simulate) in order to give reproducible results.
Returns
-------
output: 1d numpy array
Sampling error on calculation result for each estimator in
estimator_list.
"""
ninit_sep = kwargs.pop('ninit_sep', False)
flip_skew = kwargs.pop('flip_skew', True)
n_simulate = kwargs.pop('n_simulate') # No default, must specify
random_seeds = kwargs.pop('random_seeds', range(n_simulate))
assert len(random_seeds) == n_simulate
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
threads = nestcheck.ns_run_utils.get_run_threads(ns_run)
bs_values = np.zeros((len(estimator_list), n_simulate))
for i, random_seed in enumerate(random_seeds):
ns_run_temp = bootstrap_resample_run(
ns_run, threads=threads, ninit_sep=ninit_sep,
random_seed=random_seed)
bs_values[:, i] = nestcheck.ns_run_utils.run_estimators(
ns_run_temp, estimator_list)
del ns_run_temp
if flip_skew:
estimator_means = np.mean(bs_values, axis=1)
for i, mu in enumerate(estimator_means):
bs_values[i, :] = (2 * mu) - bs_values[i, :]
return bs_values | Uses bootstrap resampling to calculate an estimate of the
standard deviation of the distribution of sampling errors (the
uncertainty on the calculation) for a single nested sampling run.
For more details about bootstrap resampling for estimating sampling
errors see 'Sampling errors in nested sampling parameter estimation'
(Higson et al. 2018).
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions for estimating quantities (such as the
Bayesian evidence or mean of parameters) from nested sampling runs.
Example functions can be found in estimators.py. Each should have
arguments: func(ns_run, logw=None)
n_simulate: int
ninit_sep: bool, optional
For dynamic runs: resample initial threads and dynamically added
threads separately. Useful when there are only a few threads which
start by sampling the whole prior, as errors occur if none of these are
included in the bootstrap resample.
flip_skew: bool, optional
Determine if distribution of bootstrap values should be flipped about
its mean to better represent our probability distribution on the true
value - see "Bayesian astrostatistics: a backward look to the future"
(Loredo, 2012 Figure 2) for an explanation.
If true, the samples :math:`X` are mapped to :math:`2 \mu - X`, where
:math:`\mu` is the mean sample value.
This leaves the mean and standard deviation unchanged.
random_seeds: list, optional
list of random_seed arguments for bootstrap_resample_run.
Defaults to range(n_simulate) in order to give reproducible results.
Returns
-------
output: 1d numpy array
Sampling error on calculation result for each estimator in
estimator_list. | entailment |
def run_ci_bootstrap(ns_run, estimator_list, **kwargs):
"""Uses bootstrap resampling to calculate credible intervals on the
distribution of sampling errors (the uncertainty on the calculation)
for a single nested sampling run.
For more details about bootstrap resampling for estimating sampling
errors see 'Sampling errors in nested sampling parameter estimation'
(Higson et al. 2018).
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions for estimating quantities (such as the
Bayesian evidence or mean of parameters) from nested sampling runs.
Example functions can be found in estimators.py. Each should have
arguments: func(ns_run, logw=None)
cred_int: float
n_simulate: int
ninit_sep: bool, optional
Returns
-------
output: 1d numpy array
Credible interval on sampling error on calculation result for each
estimator in estimator_list.
"""
cred_int = kwargs.pop('cred_int') # No default, must specify
bs_values = run_bootstrap_values(ns_run, estimator_list, **kwargs)
# estimate specific confidence intervals
# formulae for alpha CI on estimator T = 2 T(x) - G^{-1}(T(x*))
# where G is the CDF of the bootstrap resamples
expected_estimators = nestcheck.ns_run_utils.run_estimators(
ns_run, estimator_list)
cdf = ((np.asarray(range(bs_values.shape[1])) + 0.5) /
bs_values.shape[1])
ci_output = expected_estimators * 2
for i, _ in enumerate(ci_output):
ci_output[i] -= np.interp(
1. - cred_int, cdf, np.sort(bs_values[i, :]))
return ci_output | Uses bootstrap resampling to calculate credible intervals on the
distribution of sampling errors (the uncertainty on the calculation)
for a single nested sampling run.
For more details about bootstrap resampling for estimating sampling
errors see 'Sampling errors in nested sampling parameter estimation'
(Higson et al. 2018).
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions for estimating quantities (such as the
Bayesian evidence or mean of parameters) from nested sampling runs.
Example functions can be found in estimators.py. Each should have
arguments: func(ns_run, logw=None)
cred_int: float
n_simulate: int
ninit_sep: bool, optional
Returns
-------
output: 1d numpy array
Credible interval on sampling error on calculation result for each
estimator in estimator_list. | entailment |
def run_std_simulate(ns_run, estimator_list, n_simulate=None):
"""Uses the 'simulated weights' method to calculate an estimate of the
standard deviation of the distribution of sampling errors (the
uncertainty on the calculation) for a single nested sampling run.
Note that the simulated weights method is not accurate for parameter
estimation calculations.
For more details about the simulated weights method for estimating sampling
errors see 'Sampling errors in nested sampling parameter estimation'
(Higson et al. 2018).
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions for estimating quantities (such as the
bayesian evidence or mean of parameters) from nested sampling runs.
Example functions can be found in estimators.py. Each should have
arguments: func(ns_run, logw=None)
n_simulate: int
Returns
-------
output: 1d numpy array
Sampling error on calculation result for each estimator in
estimator_list.
"""
assert n_simulate is not None, 'run_std_simulate: must specify n_simulate'
all_values = np.zeros((len(estimator_list), n_simulate))
for i in range(n_simulate):
all_values[:, i] = nestcheck.ns_run_utils.run_estimators(
ns_run, estimator_list, simulate=True)
stds = np.zeros(all_values.shape[0])
for i, _ in enumerate(stds):
stds[i] = np.std(all_values[i, :], ddof=1)
return stds | Uses the 'simulated weights' method to calculate an estimate of the
standard deviation of the distribution of sampling errors (the
uncertainty on the calculation) for a single nested sampling run.
Note that the simulated weights method is not accurate for parameter
estimation calculations.
For more details about the simulated weights method for estimating sampling
errors see 'Sampling errors in nested sampling parameter estimation'
(Higson et al. 2018).
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions for estimating quantities (such as the
bayesian evidence or mean of parameters) from nested sampling runs.
Example functions can be found in estimators.py. Each should have
arguments: func(ns_run, logw=None)
n_simulate: int
Returns
-------
output: 1d numpy array
Sampling error on calculation result for each estimator in
estimator_list. | entailment |
def implementation_std(vals_std, vals_std_u, bs_std, bs_std_u, **kwargs):
r"""Estimates varaition of results due to implementation-specific
effects. See 'nestcheck: diagnostic tests for nested sampling calculations'
(Higson et al. 2019) for more details.
Uncertainties on the output are calculated numerically using the fact
that (from central limit theorem) our uncertainties on vals_std and
bs_std are (approximately) normally distributed. This is needed as
results from standard error propagation techniques are not valid when
the uncertainties are not small compared to the result.
Parameters
----------
vals_std: numpy array
Standard deviations of results from repeated calculations.
vals_std_u: numpy array
:math:`1\sigma` uncertainties on vals_std.
bs_std: numpy array
Bootstrap error estimates. Each element should correspond to the same
element in vals_std.
bs_std_u: numpy array
:math:`1\sigma` uncertainties on bs_std.
nsim: int, optional
Number of simulations to use to numerically calculate the uncertainties
on the estimated implementation-specific effects.
random_seed: int or None, optional
Numpy random seed. Use to get reproducible uncertainties on the output.
Returns
-------
imp_std: numpy array
Estimated standard deviation of results due to implementation-specific
effects.
imp_std_u: numpy array
:math:`1\sigma` uncertainties on imp_std.
imp_frac: numpy array
imp_std as a fraction of vals_std.
imp_frac_u:
:math:`1\sigma` uncertainties on imp_frac.
"""
nsim = kwargs.pop('nsim', 1000000)
random_seed = kwargs.pop('random_seed', 0)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
# if the implementation errors are uncorrelated with the
# sampling errors: var results = var imp + var sampling
# so std imp = sqrt(var results - var sampling)
imp_var = (vals_std ** 2) - (bs_std ** 2)
imp_std = np.sqrt(np.abs(imp_var)) * np.sign(imp_var)
ind = np.where(imp_std <= 0)[0]
imp_std[ind] = 0
imp_std_u = np.zeros(imp_std.shape)
imp_frac = imp_std / vals_std
imp_frac_u = np.zeros(imp_frac.shape)
# Simulate errors distributions
for i, _ in enumerate(imp_std_u):
state = np.random.get_state()
np.random.seed(random_seed)
sim_vals_std = np.random.normal(vals_std[i], vals_std_u[i], size=nsim)
sim_bs_std = np.random.normal(bs_std[i], bs_std_u[i], size=nsim)
sim_imp_var = (sim_vals_std ** 2) - (sim_bs_std ** 2)
sim_imp_std = np.sqrt(np.abs(sim_imp_var)) * np.sign(sim_imp_var)
imp_std_u[i] = np.std(sim_imp_std, ddof=1)
imp_frac_u[i] = np.std((sim_imp_std / sim_vals_std), ddof=1)
np.random.set_state(state)
return imp_std, imp_std_u, imp_frac, imp_frac_u | r"""Estimates variation of results due to implementation-specific
effects. See 'nestcheck: diagnostic tests for nested sampling calculations'
(Higson et al. 2019) for more details.
Uncertainties on the output are calculated numerically using the fact
that (from central limit theorem) our uncertainties on vals_std and
bs_std are (approximately) normally distributed. This is needed as
results from standard error propagation techniques are not valid when
the uncertainties are not small compared to the result.
Parameters
----------
vals_std: numpy array
Standard deviations of results from repeated calculations.
vals_std_u: numpy array
:math:`1\sigma` uncertainties on vals_std.
bs_std: numpy array
Bootstrap error estimates. Each element should correspond to the same
element in vals_std.
bs_std_u: numpy array
:math:`1\sigma` uncertainties on bs_std.
nsim: int, optional
Number of simulations to use to numerically calculate the uncertainties
on the estimated implementation-specific effects.
random_seed: int or None, optional
Numpy random seed. Use to get reproducible uncertainties on the output.
Returns
-------
imp_std: numpy array
Estimated standard deviation of results due to implementation-specific
effects.
imp_std_u: numpy array
:math:`1\sigma` uncertainties on imp_std.
imp_frac: numpy array
imp_std as a fraction of vals_std.
imp_frac_u: numpy array
:math:`1\sigma` uncertainties on imp_frac. | entailment |
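A minimal usage sketch of the variance decomposition above, with purely illustrative numbers (the array values are made up, not from a real analysis):

    import numpy as np

    # Standard deviations of repeated calculation results and mean bootstrap
    # error estimates, each with an (illustrative) 1-sigma uncertainty.
    vals_std = np.array([0.05, 0.10])
    vals_std_u = np.array([0.005, 0.010])
    bs_std = np.array([0.04, 0.07])
    bs_std_u = np.array([0.004, 0.007])

    # If implementation effects are uncorrelated with sampling errors then
    # var(results) = var(implementation) + var(sampling), so implementation_std
    # estimates sqrt(vals_std ** 2 - bs_std ** 2) plus its numerical uncertainty.
    imp_std, imp_std_u, imp_frac, imp_frac_u = implementation_std(
        vals_std, vals_std_u, bs_std, bs_std_u, nsim=10000)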
def run_thread_values(run, estimator_list):
"""Helper function for parallelising thread_values_df.
Parameters
----------
run: dict
Nested sampling run dictionary.
estimator_list: list of functions
Returns
-------
vals_array: numpy array
Array of estimator values for each thread.
Has shape (len(estimator_list), len(threads)).
"""
threads = nestcheck.ns_run_utils.get_run_threads(run)
vals_list = [nestcheck.ns_run_utils.run_estimators(th, estimator_list)
for th in threads]
vals_array = np.stack(vals_list, axis=1)
assert vals_array.shape == (len(estimator_list), len(threads))
return vals_array | Helper function for parallelising thread_values_df.
Parameters
----------
run: dict
Nested sampling run dictionary.
estimator_list: list of functions
Returns
-------
vals_array: numpy array
Array of estimator values for each thread.
Has shape (len(estimator_list), len(threads)).
def pairwise_distances(dist_list, earth_mover_dist=True, energy_dist=True):
"""Applies statistical_distances to each unique pair of distribution
samples in dist_list.
Parameters
----------
dist_list: list of 1d arrays
earth_mover_dist: bool, optional
Passed to statistical_distances.
energy_dist: bool, optional
Passed to statistical_distances.
Returns
-------
ser: pandas Series object
Values are statistical distances. Index levels are:
calculation type: name of statistical distance.
run: tuple containing the index in dist_list of the pair of samples
arrays from which the statistical distance was computed.
"""
out = []
index = []
for i, samp_i in enumerate(dist_list):
for j, samp_j in enumerate(dist_list):
if j < i:
index.append(str((i, j)))
out.append(statistical_distances(
samp_i, samp_j, earth_mover_dist=earth_mover_dist,
energy_dist=energy_dist))
columns = ['ks pvalue', 'ks distance']
if earth_mover_dist:
columns.append('earth mover distance')
if energy_dist:
columns.append('energy distance')
ser = pd.DataFrame(out, index=index, columns=columns).unstack()
ser.index.names = ['calculation type', 'run']
return ser | Applies statistical_distances to each unique pair of distribution
samples in dist_list.
Parameters
----------
dist_list: list of 1d arrays
earth_mover_dist: bool, optional
Passed to statistical_distances.
energy_dist: bool, optional
Passed to statistical_distances.
Returns
-------
ser: pandas Series object
Values are statistical distances. Index levels are:
calculation type: name of statistical distance.
run: tuple containing the index in dist_list of the pair of samples
arrays from which the statistical distance was computed. | entailment |
def statistical_distances(samples1, samples2, earth_mover_dist=True,
energy_dist=True):
"""Compute measures of the statistical distance between samples.
Parameters
----------
samples1: 1d array
samples2: 1d array
earth_mover_dist: bool, optional
Whether or not to compute the Earth mover's distance between the
samples.
energy_dist: bool, optional
Whether or not to compute the energy distance between the samples.
Returns
-------
1d array
"""
out = []
temp = scipy.stats.ks_2samp(samples1, samples2)
out.append(temp.pvalue)
out.append(temp.statistic)
if earth_mover_dist:
out.append(scipy.stats.wasserstein_distance(samples1, samples2))
if energy_dist:
out.append(scipy.stats.energy_distance(samples1, samples2))
return np.asarray(out) | Compute measures of the statistical distance between samples.
Parameters
----------
samples1: 1d array
samples2: 1d array
earth_mover_dist: bool, optional
Whether or not to compute the Earth mover's distance between the
samples.
energy_dist: bool, optional
Whether or not to compute the energy distance between the samples.
Returns
-------
1d array | entailment |
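A short usage sketch for the two distance helpers above; the sample arrays are generated here purely for illustration and assume numpy and pandas are available:

    import numpy as np

    samples_a = np.random.normal(0.0, 1.0, size=500)
    samples_b = np.random.normal(0.1, 1.0, size=500)
    # Ordering of the returned array:
    # [ks pvalue, ks distance, earth mover distance, energy distance]
    dists = statistical_distances(samples_a, samples_b)
    # Distances between every unique pair in a list of sample arrays, returned
    # as a pandas Series indexed by calculation type and pair of run indexes.
    ser = pairwise_distances([samples_a, samples_b, samples_a + 0.2])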
def get_dummy_thread(nsamples, **kwargs):
"""Generate dummy data for a single nested sampling thread.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
logl_start = kwargs.pop('logl_start', -np.inf)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if seed is not False:
np.random.seed(seed)
thread = {'logl': np.sort(np.random.random(nsamples)) * logl_range,
'nlive_array': np.full(nsamples, 1.),
'theta': np.random.random((nsamples, ndim)),
'thread_labels': np.zeros(nsamples).astype(int)}
if logl_start != -np.inf:
thread['logl'] += logl_start
thread['thread_min_max'] = np.asarray([[logl_start, thread['logl'][-1]]])
return thread | Generate dummy data for a single nested sampling thread.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values. | entailment |
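A quick illustration of the dummy thread's output format (the keys and shapes follow directly from the code above):

    thread = get_dummy_thread(10, ndim=3, seed=0)
    thread['logl'].shape        # (10,): sorted logl values
    thread['theta'].shape       # (10, 3): uniform samples in (0, 1)
    thread['nlive_array']       # array of ones (a single live point)
    thread['thread_min_max']    # [[-inf, final logl]]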
def get_dummy_run(nthread, nsamples, **kwargs):
"""Generate dummy data for a nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nthread: int
Number of threads in the run.
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
logl_start = kwargs.pop('logl_start', -np.inf)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
threads = []
# set seed before generating any threads and do not reset for each thread
if seed is not False:
np.random.seed(seed)
for _ in range(nthread):
threads.append(get_dummy_thread(
nsamples, ndim=ndim, seed=False, logl_start=logl_start,
logl_range=logl_range))
# Sort threads in order of starting logl so the labels match those that would
# have been assigned when processing a dead points array. N.B. this only
# works when all threads have the same logl_start
threads = sorted(threads, key=lambda th: th['logl'][0])
for i, _ in enumerate(threads):
threads[i]['thread_labels'] = np.full(nsamples, i)
# Use combine_ns_runs rather than combine threads as this relabels the
# threads according to their order
return nestcheck.ns_run_utils.combine_threads(threads) | Generate dummy data for a nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nthread: int
Number of threads in the run.
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values. | entailment |
def get_dummy_dynamic_run(nsamples, **kwargs):
"""Generate dummy data for a dynamic nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
nthread_init: int
Number of threads in the initial run (starting at logl=-np.inf).
nthread_dyn: int
Number of threads in the dynamic run (each starting at a randomly chosen
point in the initial run).
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
nthread_init = kwargs.pop('nthread_init', 2)
nthread_dyn = kwargs.pop('nthread_dyn', 3)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
init = get_dummy_run(nthread_init, nsamples, ndim=ndim, seed=seed,
logl_start=-np.inf, logl_range=logl_range)
dyn_starts = list(np.random.choice(
init['logl'], nthread_dyn, replace=True))
threads = nestcheck.ns_run_utils.get_run_threads(init)
# Seed must be False here so it is not set again for each thread
threads += [get_dummy_thread(
nsamples, ndim=ndim, seed=False, logl_start=start,
logl_range=logl_range) for start in dyn_starts]
# make sure the threads have unique labels and combine them
for i, _ in enumerate(threads):
threads[i]['thread_labels'] = np.full(nsamples, i)
run = nestcheck.ns_run_utils.combine_threads(threads)
# To make sure the thread labelling is done the same way as it would be when
# processing a dead points file, transform into a dead points array
samples = nestcheck.write_polychord_output.run_dead_birth_array(run)
return nestcheck.data_processing.process_samples_array(samples) | Generate dummy data for a dynamic nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
nthread_init: int
Number of threads in the initial run (starting at logl=-np.inf).
nthread_dyn: int
Number of threads in the dynamic run (each starting at a randomly chosen
point in the initial run).
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values. | entailment |
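Hedged usage sketch: these dummy runs are intended as lightweight test inputs for the analysis and plotting functions elsewhere in this document (the keyword values below are arbitrary):

    run = get_dummy_run(2, 10, ndim=2, seed=0)  # 2 threads of 10 samples each
    dyn_run = get_dummy_dynamic_run(10, nthread_init=2, nthread_dyn=3, seed=0)
    # Both are nested sampling run dicts with keys including 'logl',
    # 'nlive_array', 'theta', 'thread_labels' and 'thread_min_max'.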
def get_long_description():
"""Get PyPI long description from the .rst file."""
pkg_dir = get_package_dir()
with open(os.path.join(pkg_dir, '.pypi_long_desc.rst')) as readme_file:
long_description = readme_file.read()
return long_description | Get PyPI long description from the .rst file. | entailment |
def get_version():
"""Get single-source __version__."""
pkg_dir = get_package_dir()
with open(os.path.join(pkg_dir, 'nestcheck/_version.py')) as ver_file:
string = ver_file.read()
return string.strip().replace('__version__ = ', '').replace('\'', '') | Get single-source __version__. | entailment |
def plot_run_nlive(method_names, run_dict, **kwargs):
"""Plot the allocations of live points as a function of logX for the input
sets of nested sampling runs of the type used in the dynamic nested
sampling paper (Higson et al. 2019).
Plots also include analytically calculated distributions of relative
posterior mass and relative posterior mass remaining.
Parameters
----------
method_names: list of strs
run_dict: dict of lists of nested sampling runs.
Keys of run_dict must be method_names.
logx_given_logl: function, optional
For mapping points' logl values to logx values.
If not specified the logx coordinates for each run are estimated using
its numbers of live points.
logl_given_logx: function, optional
For calculating the relative posterior mass and posterior mass
remaining at each logx coordinate.
logx_min: float, optional
Lower limit of logx axis. If not specified this is set to the lowest
logx reached by any of the runs.
ymax: float or None, optional
Maximum value for plot's nlive axis (yaxis).
npoints: int, optional
Number of points to have in the fgivenx plot grids.
figsize: tuple, optional
Size of figure in inches.
post_mass_norm: str or None, optional
Specify the method_name whose runs are used for normalising the analytic
posterior mass curve. If None, all runs are used.
cum_post_mass_norm: str or None, optional
Specify the method_name whose runs are used for normalising the analytic
cumulative posterior mass remaining curve. If None, all runs are used.
Returns
-------
fig: matplotlib figure
"""
logx_given_logl = kwargs.pop('logx_given_logl', None)
logl_given_logx = kwargs.pop('logl_given_logx', None)
logx_min = kwargs.pop('logx_min', None)
ymax = kwargs.pop('ymax', None)
npoints = kwargs.pop('npoints', 100)
figsize = kwargs.pop('figsize', (6.4, 2))
post_mass_norm = kwargs.pop('post_mass_norm', None)
cum_post_mass_norm = kwargs.pop('cum_post_mass_norm', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert set(method_names) == set(run_dict.keys()), (
'input method names=' + str(method_names) + ' do not match run_dict '
'keys=' + str(run_dict.keys()))
# Plotting
# --------
fig = plt.figure(figsize=figsize)
ax = plt.gca()
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# Reserve colors for certain common method_names so they are always the
# same regardless of method_name order for consistency in the paper.
linecolor_dict = {'standard': colors[2],
'dynamic $G=0$': colors[8],
'dynamic $G=1$': colors[9]}
ax.set_prop_cycle('color', [colors[i] for i in [4, 1, 6, 0, 3, 5, 7]])
integrals_dict = {}
logx_min_list = []
for method_name in method_names:
integrals = np.zeros(len(run_dict[method_name]))
for nr, run in enumerate(run_dict[method_name]):
if 'logx' in run:
logx = run['logx']
elif logx_given_logl is not None:
logx = logx_given_logl(run['logl'])
else:
logx = nestcheck.ns_run_utils.get_logx(
run['nlive_array'], simulate=False)
logx_min_list.append(logx[-1])
logx[0] = 0 # to make lines extend all the way to the end
if nr == 0:
# Label the first line and store it so we can access its color
try:
line, = ax.plot(logx, run['nlive_array'], linewidth=1,
label=method_name,
color=linecolor_dict[method_name])
except KeyError:
line, = ax.plot(logx, run['nlive_array'], linewidth=1,
label=method_name)
else:
# Set other lines to same color and don't add labels
ax.plot(logx, run['nlive_array'], linewidth=1,
color=line.get_color())
# for normalising analytic weight lines
integrals[nr] = -np.trapz(run['nlive_array'], x=logx)
integrals_dict[method_name] = integrals[np.isfinite(integrals)]
# if not specified, set logx min to the lowest logx reached by a run
if logx_min is None:
logx_min = np.asarray(logx_min_list).min()
if logl_given_logx is not None:
# Plot analytic posterior mass and cumulative posterior mass
logx_plot = np.linspace(logx_min, 0, npoints)
logl = logl_given_logx(logx_plot)
# Remove any NaNs
logx_plot = logx_plot[np.where(~np.isnan(logl))[0]]
logl = logl[np.where(~np.isnan(logl))[0]]
w_an = rel_posterior_mass(logx_plot, logl)
# Try normalising the analytic distribution of posterior mass to have
# the same area under the curve as the runs with dynamic_goal=1 (the
# ones which we want to compare to it). If they are not available just
# normalise it to the average area under all the runs (which should be
# about the same if they have the same number of samples).
w_an *= average_by_key(integrals_dict, post_mass_norm)
ax.plot(logx_plot, w_an,
linewidth=2, label='relative posterior mass',
linestyle=':', color='k')
# plot cumulative posterior mass
w_an_c = np.cumsum(w_an)
w_an_c /= np.trapz(w_an_c, x=logx_plot)
# Try normalising the cumulative distribution of posterior mass to have
# the same area under the curve as the runs with dynamic_goal=0 (the
# ones which we want to compare to it). If they are not available just
# normalise it to the average area under all the runs (which should be
# about the same if they have the same number of samples).
w_an_c *= average_by_key(integrals_dict, cum_post_mass_norm)
ax.plot(logx_plot, w_an_c, linewidth=2, linestyle='--', dashes=(2, 3),
label='posterior mass remaining', color='darkblue')
ax.set_ylabel('number of live points')
ax.set_xlabel(r'$\log X $')
# set limits
if ymax is not None:
ax.set_ylim([0, ymax])
else:
ax.set_ylim(bottom=0)
ax.set_xlim([logx_min, 0])
ax.legend()
return fig | Plot the allocations of live points as a function of logX for the input
sets of nested sampling runs of the type used in the dynamic nested
sampling paper (Higson et al. 2019).
Plots also include analytically calculated distributions of relative
posterior mass and relative posterior mass remaining.
Parameters
----------
method_names: list of strs
run_dict: dict of lists of nested sampling runs.
Keys of run_dict must be method_names.
logx_given_logl: function, optional
For mapping points' logl values to logx values.
If not specified the logx coordinates for each run are estimated using
its numbers of live points.
logl_given_logx: function, optional
For calculating the relative posterior mass and posterior mass
remaining at each logx coordinate.
logx_min: float, optional
Lower limit of logx axis. If not specified this is set to the lowest
logx reached by any of the runs.
ymax: float or None, optional
Maximum value for plot's nlive axis (yaxis).
npoints: int, optional
Number of points to have in the fgivenx plot grids.
figsize: tuple, optional
Size of figure in inches.
post_mass_norm: str or None, optional
Specify the method_name whose runs are used for normalising the analytic
posterior mass curve. If None, all runs are used.
cum_post_mass_norm: str or None, optional
Specify the method_name whose runs are used for normalising the analytic
cumulative posterior mass remaining curve. If None, all runs are used.
Returns
-------
fig: matplotlib figure | entailment |
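A hedged usage sketch, assuming matplotlib is available and get_dummy_run from earlier in this document is importable; the method name and output file name are arbitrary:

    run_a = get_dummy_run(5, 100, seed=0)
    run_b = get_dummy_run(5, 100, seed=1)
    fig = plot_run_nlive(['standard'], {'standard': [run_a, run_b]})
    fig.savefig('nlive_allocation.pdf')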
def kde_plot_df(df, xlims=None, **kwargs):
"""Plots kde estimates of distributions of samples in each cell of the
input pandas DataFrame.
There is one subplot for each dataframe column, and on each subplot there
is one kde line.
Parameters
----------
df: pandas data frame
Each cell must contain a 1d numpy array of samples.
xlims: dict, optional
Dictionary of xlimits - keys are column names and values are lists of
length 2.
num_xticks: int, optional
Number of xticks on each subplot.
figsize: tuple, optional
Size of figure in inches.
nrows: int, optional
Number of rows of subplots.
ncols: int, optional
Number of columns of subplots.
normalize: bool, optional
If true, kde plots are normalized to have the same area under their
curves. If False, their max value is set to 1.
legend: bool, optional
Should a legend be added?
legend_kwargs: dict, optional
Additional kwargs for legend.
Returns
-------
fig: matplotlib figure
"""
assert xlims is None or isinstance(xlims, dict)
figsize = kwargs.pop('figsize', (6.4, 1.5))
num_xticks = kwargs.pop('num_xticks', None)
nrows = kwargs.pop('nrows', 1)
ncols = kwargs.pop('ncols', int(np.ceil(len(df.columns) / nrows)))
normalize = kwargs.pop('normalize', True)
legend = kwargs.pop('legend', False)
legend_kwargs = kwargs.pop('legend_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
for nax, col in enumerate(df):
if nrows == 1:
ax = axes[nax]
else:
ax = axes[nax // ncols, nax % ncols]
supmin = df[col].apply(np.min).min()
supmax = df[col].apply(np.max).max()
support = np.linspace(supmin - 0.1 * (supmax - supmin),
supmax + 0.1 * (supmax - supmin), 200)
handles = []
labels = []
for name, samps in df[col].iteritems():
pdf = scipy.stats.gaussian_kde(samps)(support)
if not normalize:
pdf /= pdf.max()
handles.append(ax.plot(support, pdf, label=name)[0])
labels.append(name)
ax.set_ylim(bottom=0)
ax.set_yticks([])
if xlims is not None:
try:
ax.set_xlim(xlims[col])
except KeyError:
pass
ax.set_xlabel(col)
if num_xticks is not None:
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(
nbins=num_xticks))
if legend:
fig.legend(handles, labels, **legend_kwargs)
return fig | Plots kde estimates of distributions of samples in each cell of the
input pandas DataFrame.
There is one subplot for each dataframe column, and on each subplot there
is one kde line.
Parameters
----------
df: pandas data frame
Each cell must contain a 1d numpy array of samples.
xlims: dict, optional
Dictionary of xlimits - keys are column names and values are lists of
length 2.
num_xticks: int, optional
Number of xticks on each subplot.
figsize: tuple, optional
Size of figure in inches.
nrows: int, optional
Number of rows of subplots.
ncols: int, optional
Number of columns of subplots.
normalize: bool, optional
If true, kde plots are normalized to have the same area under their
curves. If False, their max value is set to 1.
legend: bool, optional
Should a legend be added?
legend_kwargs: dict, optional
Additional kwargs for legend.
Returns
-------
fig: matplotlib figure | entailment |
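A minimal, self-contained sketch of the expected DataFrame format (the column and index names are arbitrary):

    import numpy as np
    import pandas as pd

    # Each cell holds a 1d array of samples; rows are runs, columns quantities.
    df = pd.DataFrame({'logz': [np.random.normal(size=100),
                                np.random.normal(size=100)],
                       'theta1': [np.random.normal(size=100),
                                  np.random.normal(size=100)]},
                      index=['run 1', 'run 2'])
    fig = kde_plot_df(df, xlims={'logz': [-4, 4]}, legend=True)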
def bs_param_dists(run_list, **kwargs):
"""Creates posterior distributions and their bootstrap error functions for
input runs and estimators.
For a more detailed description and some example use cases, see 'nestcheck:
diagnostic tests for nested sampling calculations' (Higson et al. 2019).
Parameters
----------
run_list: dict or list of dicts
Nested sampling run(s) to plot.
fthetas: list of functions, optional
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
labels: list of strs, optional
Labels for each ftheta.
ftheta_lims: list, optional
Plot limits for each ftheta.
n_simulate: int, optional
Number of bootstrap replications to be used for the fgivenx
distributions.
random_seed: int, optional
Seed to make sure results are consistent and fgivenx caching can be
used.
figsize: tuple, optional
Matplotlib figsize (in inches).
nx: int, optional
Size of x-axis grid for fgivenx plots.
ny: int, optional
Size of y-axis grid for fgivenx plots.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
fig: matplotlib figure
"""
fthetas = kwargs.pop('fthetas', [lambda theta: theta[:, 0],
lambda theta: theta[:, 1]])
labels = kwargs.pop('labels', [r'$\theta_' + str(i + 1) + '$' for i in
range(len(fthetas))])
ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))
n_simulate = kwargs.pop('n_simulate', 100)
random_seed = kwargs.pop('random_seed', 0)
figsize = kwargs.pop('figsize', (6.4, 2))
nx = kwargs.pop('nx', 100)
ny = kwargs.pop('ny', nx)
cache_in = kwargs.pop('cache', None)
parallel = kwargs.pop('parallel', True)
rasterize_contours = kwargs.pop('rasterize_contours', True)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'disable': True})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
# Use random seed to make samples consistent and allow caching.
# To avoid fixing seed use random_seed=None
state = np.random.get_state() # save initial random state
np.random.seed(random_seed)
if not isinstance(run_list, list):
run_list = [run_list]
assert len(labels) == len(fthetas), (
'There should be the same number of axes and labels')
width_ratios = [40] * len(fthetas) + [1] * len(run_list)
fig, axes = plt.subplots(nrows=1, ncols=len(run_list) + len(fthetas),
gridspec_kw={'wspace': 0.1,
'width_ratios': width_ratios},
figsize=figsize)
colormaps = ['Reds_r', 'Blues_r', 'Greys_r', 'Greens_r', 'Oranges_r']
mean_colors = ['darkred', 'darkblue', 'darkgrey', 'darkgreen',
'darkorange']
# plot in reverse order so reds are final plot and always on top
for nrun, run in reversed(list(enumerate(run_list))):
try:
cache = cache_in + '_' + str(nrun)
except TypeError:
cache = None
# add bs distribution plots
cbar = plot_bs_dists(run, fthetas, axes[:len(fthetas)],
parallel=parallel,
ftheta_lims=ftheta_lims, cache=cache,
n_simulate=n_simulate, nx=nx, ny=ny,
rasterize_contours=rasterize_contours,
mean_color=mean_colors[nrun],
colormap=colormaps[nrun],
tqdm_kwargs=tqdm_kwargs)
# add colorbar
colorbar_plot = plt.colorbar(cbar, cax=axes[len(fthetas) + nrun],
ticks=[1, 2, 3])
colorbar_plot.solids.set_edgecolor('face')
colorbar_plot.ax.set_yticklabels([])
if nrun == len(run_list) - 1:
colorbar_plot.ax.set_yticklabels(
[r'$1\sigma$', r'$2\sigma$', r'$3\sigma$'])
# Format axis ticks and labels
for nax, ax in enumerate(axes[:len(fthetas)]):
ax.set_yticks([])
ax.set_xlabel(labels[nax])
if ax.is_first_col():
ax.set_ylabel('probability')
# Prune final xtick label so it doesn't overlap with next plot
prune = 'upper' if nax != len(fthetas) - 1 else None
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(
nbins=5, prune=prune))
np.random.set_state(state) # return to original random state
return fig | Creates posterior distributions and their bootstrap error functions for
input runs and estimators.
For a more detailed description and some example use cases, see 'nestcheck:
diagnostic tests for nested sampling calculations' (Higson et al. 2019).
Parameters
----------
run_list: dict or list of dicts
Nested sampling run(s) to plot.
fthetas: list of functions, optional
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
labels: list of strs, optional
Labels for each ftheta.
ftheta_lims: list, optional
Plot limits for each ftheta.
n_simulate: int, optional
Number of bootstrap replications to be used for the fgivenx
distributions.
random_seed: int, optional
Seed to make sure results are consistent and fgivenx caching can be
used.
figsize: tuple, optional
Matplotlib figsize (in inches).
nx: int, optional
Size of x-axis grid for fgivenx plots.
ny: int, optional
Size of y-axis grid for fgivenx plots.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
fig: matplotlib figure | entailment |
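A hedged usage sketch, assuming fgivenx and matplotlib are installed and using the dummy run generator from earlier in this document (its theta values lie in the unit cube, hence the limits):

    run = get_dummy_run(10, 100, ndim=2, seed=0)
    fig = bs_param_dists(run, n_simulate=20, ftheta_lims=[[0, 1], [0, 1]])
    fig.savefig('bs_param_dists.pdf')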
def param_logx_diagram(run_list, **kwargs):
"""Creates diagrams of a nested sampling run's evolution as it iterates
towards higher likelihoods, expressed as a function of log X, where X(L) is
the fraction of the prior volume with likelihood greater than some value L.
For a more detailed description and some example use cases, see 'nestcheck:
diagnostic tests for nested sampling calculations' (Higson et al. 2019).
Parameters
----------
run_list: dict or list of dicts
Nested sampling run(s) to plot.
fthetas: list of functions, optional
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
labels: list of strs, optional
Labels for each ftheta.
ftheta_lims: list, optional
Plot limits for each ftheta.
plot_means: bool, optional
Should the mean value of each ftheta be plotted?
n_simulate: int, optional
Number of bootstrap replications to use for the fgivenx distributions.
random_seed: int, optional
Seed to make sure results are consistent and fgivenx caching can be
used.
logx_min: float, optional
Lower limit of logx axis.
figsize: tuple, optional
Matplotlib figure size (in inches).
colors: list of strs, optional
Colors to plot run scatter plots with.
colormaps: list of strs, optional
Colormaps to plot run fgivenx plots with.
npoints: int, optional
How many points to have in the logx array used to calculate and plot
analytical weights.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
point_size: float, optional
Size of markers on the scatter plot (in pts).
thin: float, optional
Factor by which to reduce the number of samples before plotting the
scatter plot. Must be in the half-closed interval (0, 1].
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
fig: matplotlib figure
"""
fthetas = kwargs.pop('fthetas', [lambda theta: theta[:, 0],
lambda theta: theta[:, 1]])
labels = kwargs.pop('labels', [r'$\theta_' + str(i + 1) + '$' for i in
range(len(fthetas))])
ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))
threads_to_plot = kwargs.pop('threads_to_plot', [0])
plot_means = kwargs.pop('plot_means', True)
n_simulate = kwargs.pop('n_simulate', 100)
random_seed = kwargs.pop('random_seed', 0)
logx_min = kwargs.pop('logx_min', None)
figsize = kwargs.pop('figsize', (6.4, 2 * (1 + len(fthetas))))
colors = kwargs.pop('colors', ['red', 'blue', 'grey', 'green', 'orange'])
colormaps = kwargs.pop('colormaps', ['Reds_r', 'Blues_r', 'Greys_r',
'Greens_r', 'Oranges_r'])
# Options for fgivenx
cache_in = kwargs.pop('cache', None)
parallel = kwargs.pop('parallel', True)
rasterize_contours = kwargs.pop('rasterize_contours', True)
point_size = kwargs.pop('point_size', 0.2)
thin = kwargs.pop('thin', 1)
npoints = kwargs.pop('npoints', 100)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'disable': True})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if not isinstance(run_list, list):
run_list = [run_list]
# Use random seed to make samples consistent and allow caching.
# To avoid fixing seed use random_seed=None
state = np.random.get_state() # save initial random state
np.random.seed(random_seed)
if not plot_means:
mean_colors = [None] * len(colors)
else:
mean_colors = ['dark' + col for col in colors]
nlogx = npoints
ny_posterior = npoints
assert len(fthetas) == len(labels)
assert len(fthetas) == len(ftheta_lims)
thread_linestyles = ['-', '-.', ':']
# make figure
# -----------
fig, axes = plt.subplots(nrows=1 + len(fthetas), ncols=2, figsize=figsize,
gridspec_kw={'wspace': 0,
'hspace': 0,
'width_ratios': [15, 40]})
# make colorbar axes in top left corner
axes[0, 0].set_visible(False)
divider = mpl_toolkits.axes_grid1.make_axes_locatable(axes[0, 0])
colorbar_ax_list = []
for i in range(len(run_list)):
colorbar_ax_list.append(divider.append_axes("left", size=0.05,
pad=0.05))
# Reverse color bar axis order so when an extra run is added the other
# colorbars stay in the same place
colorbar_ax_list = list(reversed(colorbar_ax_list))
# plot runs in reverse order to put the first run on top
for nrun, run in reversed(list(enumerate(run_list))):
# Weight Plot
# -----------
ax_weight = axes[0, 1]
ax_weight.set_ylabel('posterior\nmass')
samples = np.zeros((n_simulate, run['nlive_array'].shape[0] * 2))
for i in range(n_simulate):
logx_temp = nestcheck.ns_run_utils.get_logx(
run['nlive_array'], simulate=True)[::-1]
logw_rel = logx_temp + run['logl'][::-1]
w_rel = np.exp(logw_rel - logw_rel.max())
w_rel /= np.trapz(w_rel, x=logx_temp)
samples[i, ::2] = logx_temp
samples[i, 1::2] = w_rel
if logx_min is None:
logx_min = samples[:, 0].min()
logx_sup = np.linspace(logx_min, 0, nlogx)
try:
cache = cache_in + '_' + str(nrun) + '_weights'
except TypeError:
cache = None
interp_alt = functools.partial(alternate_helper, func=np.interp)
y, pmf = fgivenx.drivers.compute_pmf(
interp_alt, logx_sup, samples, cache=cache, ny=npoints,
parallel=parallel, tqdm_kwargs=tqdm_kwargs)
cbar = fgivenx.plot.plot(
logx_sup, y, pmf, ax_weight, rasterize_contours=rasterize_contours,
colors=plt.get_cmap(colormaps[nrun]))
ax_weight.set_xlim([logx_min, 0])
ax_weight.set_ylim(bottom=0)
ax_weight.set_yticks([])
ax_weight.set_xticklabels([])
# color bar plot
# --------------
colorbar_plot = plt.colorbar(cbar, cax=colorbar_ax_list[nrun],
ticks=[1, 2, 3])
colorbar_ax_list[nrun].yaxis.set_ticks_position('left')
colorbar_plot.solids.set_edgecolor('face')
colorbar_plot.ax.set_yticklabels([])
if nrun == 0:
colorbar_plot.ax.set_yticklabels(
[r'$1\sigma$', r'$2\sigma$', r'$3\sigma$'])
# samples plot
# ------------
logx = nestcheck.ns_run_utils.get_logx(run['nlive_array'],
simulate=False)
scatter_x = logx
scatter_theta = run['theta']
if thin != 1:
assert 0 < thin <= 1, (
'thin={} should be in the half-closed interval(0, 1]'
.format(thin))
thin_state = np.random.get_state()  # save random state before thinning
np.random.seed(random_seed)
inds = np.where(np.random.random(logx.shape) <= thin)[0]
np.random.set_state(thin_state)  # restore random state after thinning
scatter_x = logx[inds]
scatter_theta = run['theta'][inds, :]
for nf, ftheta in enumerate(fthetas):
ax_samples = axes[1 + nf, 1]
ax_samples.scatter(scatter_x, ftheta(scatter_theta),
s=point_size, color=colors[nrun])
if threads_to_plot is not None:
for i in threads_to_plot:
thread_inds = np.where(run['thread_labels'] == i)[0]
ax_samples.plot(logx[thread_inds],
ftheta(run['theta'][thread_inds]),
linestyle=thread_linestyles[nrun],
color='black', lw=1)
ax_samples.set_xlim([logx_min, 0])
ax_samples.set_ylim(ftheta_lims[nf])
# Plot posteriors
# ---------------
posterior_axes = [axes[i + 1, 0] for i in range(len(fthetas))]
_ = plot_bs_dists(run, fthetas, posterior_axes,
ftheta_lims=ftheta_lims,
flip_axes=True, n_simulate=n_simulate,
rasterize_contours=rasterize_contours,
cache=cache_in, nx=npoints, ny=ny_posterior,
colormap=colormaps[nrun],
mean_color=mean_colors[nrun],
parallel=parallel, tqdm_kwargs=tqdm_kwargs)
# Plot means onto scatter plot
# ----------------------------
if plot_means:
w_rel = nestcheck.ns_run_utils.get_w_rel(run, simulate=False)
w_rel /= np.sum(w_rel)
means = [np.sum(w_rel * f(run['theta'])) for f in fthetas]
for nf, mean in enumerate(means):
axes[nf + 1, 1].axhline(y=mean, lw=1, linestyle='--',
color=mean_colors[nrun])
# Format axes
for nf, ax in enumerate(posterior_axes):
ax.set_ylim(ftheta_lims[nf])
ax.invert_xaxis() # only invert each axis once, not for every run!
axes[-1, 1].set_xlabel(r'$\log X$')
# Add labels
for i, label in enumerate(labels):
axes[i + 1, 0].set_ylabel(label)
# Prune final ytick label so it doesn't overlap with next plot
prune = 'upper' if i != 0 else None
axes[i + 1, 0].yaxis.set_major_locator(
matplotlib.ticker.MaxNLocator(nbins=3, prune=prune))
for _, ax in np.ndenumerate(axes):
if not ax.is_first_col():
ax.set_yticklabels([])
if not (ax.is_last_row() and ax.is_last_col()):
ax.set_xticks([])
np.random.set_state(state) # return to original random state
return fig | Creates diagrams of a nested sampling run's evolution as it iterates
towards higher likelihoods, expressed as a function of log X, where X(L) is
the fraction of the prior volume with likelihood greater than some value L.
For a more detailed description and some example use cases, see 'nestcheck:
diagnostic tests for nested sampling calculations' (Higson et al. 2019).
Parameters
----------
run_list: dict or list of dicts
Nested sampling run(s) to plot.
fthetas: list of functions, optional
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
labels: list of strs, optional
Labels for each ftheta.
ftheta_lims: list, optional
Plot limits for each ftheta.
plot_means: bool, optional
Should the mean value of each ftheta be plotted?
n_simulate: int, optional
Number of bootstrap replications to use for the fgivenx distributions.
random_seed: int, optional
Seed to make sure results are consistent and fgivenx caching can be
used.
logx_min: float, optional
Lower limit of logx axis.
figsize: tuple, optional
Matplotlib figure size (in inches).
colors: list of strs, optional
Colors to plot run scatter plots with.
colormaps: list of strs, optional
Colormaps to plot run fgivenx plots with.
npoints: int, optional
How many points to have in the logx array used to calculate and plot
analytical weights.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
point_size: float, optional
Size of markers on the scatter plot (in pts).
thin: float, optional
Factor by which to reduce the number of samples before plotting the
scatter plot. Must be in the half-closed interval (0, 1].
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
fig: matplotlib figure | entailment |
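A hedged usage sketch along the same lines as the one for bs_param_dists above (again using a dummy run with unit-cube theta values):

    run = get_dummy_run(5, 100, ndim=2, seed=0)
    fig = param_logx_diagram(run, n_simulate=20, ftheta_lims=[[0, 1], [0, 1]],
                             logx_min=-25)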
def plot_bs_dists(run, fthetas, axes, **kwargs):
"""Helper function for plotting uncertainties on posterior distributions
using bootstrap resamples and the fgivenx module. Used by bs_param_dists
and param_logx_diagram.
Parameters
----------
run: dict
Nested sampling run to plot.
fthetas: list of functions
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
axes: list of matplotlib axis objects
ftheta_lims: list, optional
Plot limits for each ftheta.
n_simulate: int, optional
Number of bootstrap replications to use for the fgivenx
distributions.
colormap: matplotlib colormap
Colors to plot fgivenx distribution.
mean_color: matplotlib color as str
Color to plot mean of each parameter. If None (default) means are not
plotted.
nx: int, optional
Size of x-axis grid for fgivenx plots.
ny: int, optional
Size of y-axis grid for fgivenx plots.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
smooth: bool, optional
fgivenx smooth option.
flip_axes: bool, optional
Whether or not plot should be rotated 90 degrees anticlockwise onto its
side.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
cbar: matplotlib colorbar
For use in higher order functions.
"""
ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))
n_simulate = kwargs.pop('n_simulate', 100)
colormap = kwargs.pop('colormap', plt.get_cmap('Reds_r'))
mean_color = kwargs.pop('mean_color', None)
nx = kwargs.pop('nx', 100)
ny = kwargs.pop('ny', nx)
cache_in = kwargs.pop('cache', None)
parallel = kwargs.pop('parallel', True)
rasterize_contours = kwargs.pop('rasterize_contours', True)
smooth = kwargs.pop('smooth', False)
flip_axes = kwargs.pop('flip_axes', False)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'leave': False})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert len(fthetas) == len(axes), \
'There should be the same number of axes and functions to plot'
assert len(fthetas) == len(ftheta_lims), \
'There should be the same number of functions and ftheta_lims to plot'
threads = nestcheck.ns_run_utils.get_run_threads(run)
# get a list of evenly weighted theta samples from bootstrap resampling
bs_samps = []
for i in range(n_simulate):
run_temp = nestcheck.error_analysis.bootstrap_resample_run(
run, threads=threads)
w_temp = nestcheck.ns_run_utils.get_w_rel(run_temp, simulate=False)
bs_samps.append((run_temp['theta'], w_temp))
for nf, ftheta in enumerate(fthetas):
# Make an array where each row contains one bootstrap replication's
# samples
max_samps = 2 * max([bs_samp[0].shape[0] for bs_samp in bs_samps])
samples_array = np.full((n_simulate, max_samps), np.nan)
for i, (theta, weights) in enumerate(bs_samps):
nsamp = 2 * theta.shape[0]
samples_array[i, :nsamp:2] = ftheta(theta)
samples_array[i, 1:nsamp:2] = weights
ftheta_vals = np.linspace(ftheta_lims[nf][0], ftheta_lims[nf][1], nx)
try:
cache = cache_in + '_' + str(nf)
except TypeError:
cache = None
samp_kde = functools.partial(alternate_helper,
func=weighted_1d_gaussian_kde)
y, pmf = fgivenx.drivers.compute_pmf(
samp_kde, ftheta_vals, samples_array, ny=ny, cache=cache,
parallel=parallel, tqdm_kwargs=tqdm_kwargs)
if flip_axes:
cbar = fgivenx.plot.plot(
y, ftheta_vals, np.swapaxes(pmf, 0, 1), axes[nf],
colors=colormap, rasterize_contours=rasterize_contours,
smooth=smooth)
else:
cbar = fgivenx.plot.plot(
ftheta_vals, y, pmf, axes[nf], colors=colormap,
rasterize_contours=rasterize_contours, smooth=smooth)
# Plot means
# ----------
if mean_color is not None:
w_rel = nestcheck.ns_run_utils.get_w_rel(run, simulate=False)
w_rel /= np.sum(w_rel)
means = [np.sum(w_rel * f(run['theta'])) for f in fthetas]
for nf, mean in enumerate(means):
if flip_axes:
axes[nf].axhline(y=mean, lw=1, linestyle='--',
color=mean_color)
else:
axes[nf].axvline(x=mean, lw=1, linestyle='--',
color=mean_color)
return cbar | Helper function for plotting uncertainties on posterior distributions
using bootstrap resamples and the fgivenx module. Used by bs_param_dists
and param_logx_diagram.
Parameters
----------
run: dict
Nested sampling run to plot.
fthetas: list of functions
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
axes: list of matplotlib axis objects
ftheta_lims: list, optional
Plot limits for each ftheta.
n_simulate: int, optional
Number of bootstrap replications to use for the fgivenx
distributions.
colormap: matplotlib colormap
Colors to plot fgivenx distribution.
mean_color: matplotlib color as str
Color to plot mean of each parameter. If None (default) means are not
plotted.
nx: int, optional
Size of x-axis grid for fgivenx plots.
ny: int, optional
Size of y-axis grid for fgivenx plots.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
smooth: bool, optional
fgivenx smooth option.
flip_axes: bool, optional
Whether or not plot should be rotated 90 degrees anticlockwise onto its
side.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
cbar: matplotlib colorbar
For use in higher order functions. | entailment |
def alternate_helper(x, alt_samps, func=None):
"""Helper function for making fgivenx plots of functions with 2 array
arguments of variable lengths."""
alt_samps = alt_samps[~np.isnan(alt_samps)]
arg1 = alt_samps[::2]
arg2 = alt_samps[1::2]
return func(x, arg1, arg2) | Helper function for making fgivenx plots of functions with 2 array
arguments of variable lengths. | entailment |
def weighted_1d_gaussian_kde(x, samples, weights):
"""Gaussian kde with weighted samples (1d only). Uses Scott bandwidth
factor.
When all the sample weights are equal, this is equivalent to
kde = scipy.stats.gaussian_kde(theta)
return kde(x)
When the weights are not all equal, we compute the effective number
of samples as the information content (Shannon entropy)
nsamp_eff = exp(- sum_i (w_i log(w_i)))
Alternative ways to estimate nsamp_eff include Kish's formula
nsamp_eff = (sum_i w_i) ** 2 / (sum_i w_i ** 2)
See https://en.wikipedia.org/wiki/Effective_sample_size and "Effective
sample size for importance sampling based on discrepancy measures"
(Martino et al. 2017) for more information.
Parameters
----------
x: 1d numpy array
Coordinates at which to evaluate the kde.
samples: 1d numpy array
Samples from which to calculate kde.
weights: 1d numpy array of same shape as samples
Weights of each point. Need not be normalised as this is done inside
the function.
Returns
-------
result: 1d numpy array of same shape as x
Kde evaluated at x values.
"""
assert x.ndim == 1
assert samples.ndim == 1
assert samples.shape == weights.shape
# normalise weights and find effective number of samples
weights /= np.sum(weights)
nz_weights = weights[np.nonzero(weights)]
nsamp_eff = np.exp(-1. * np.sum(nz_weights * np.log(nz_weights)))
# Calculate the weighted sample variance
mu = np.sum(weights * samples)
var = np.sum(weights * ((samples - mu) ** 2))
var *= nsamp_eff / (nsamp_eff - 1) # correct for bias using nsamp_eff
# Calculate bandwidth
scott_factor = np.power(nsamp_eff, -1. / (5)) # 1d Scott factor
sig = np.sqrt(var) * scott_factor
# Calculate and weight residuals
xx, ss = np.meshgrid(x, samples)
chisquared = ((xx - ss) / sig) ** 2
energy = np.exp(-0.5 * chisquared) / np.sqrt(2 * np.pi * (sig ** 2))
result = np.sum(energy * weights[:, np.newaxis], axis=0)
return result | Gaussian kde with weighted samples (1d only). Uses Scott bandwidth
factor.
When all the sample weights are equal, this is equivalent to
kde = scipy.stats.gaussian_kde(theta)
return kde(x)
When the weights are not all equal, we compute the effective number
of samples as the information content (Shannon entropy)
nsamp_eff = exp(- sum_i (w_i log(w_i)))
Alternative ways to estimate nsamp_eff include Kish's formula
nsamp_eff = (sum_i w_i) ** 2 / (sum_i w_i ** 2)
See https://en.wikipedia.org/wiki/Effective_sample_size and "Effective
sample size for importance sampling based on discrepancy measures"
(Martino et al. 2017) for more information.
Parameters
----------
x: 1d numpy array
Coordinates at which to evaluate the kde.
samples: 1d numpy array
Samples from which to calculate kde.
weights: 1d numpy array of same shape as samples
Weights of each point. Need not be normalised as this is done inside
the function.
Returns
-------
result: 1d numpy array of same shape as x
Kde evaluated at x values. | entailment |
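A small check of the equivalence claimed in the docstring: with equal weights the result should match scipy's unweighted gaussian_kde up to floating point error:

    import numpy as np
    import scipy.stats

    samples = np.random.normal(size=200)
    x = np.linspace(-3, 3, 50)
    weights = np.full(samples.shape, 1.0)
    kde_weighted = weighted_1d_gaussian_kde(x, samples, weights)
    kde_scipy = scipy.stats.gaussian_kde(samples)(x)
    assert np.allclose(kde_weighted, kde_scipy)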
def rel_posterior_mass(logx, logl):
"""Calculate the relative posterior mass for some array of logx values
given the corresponding log-likelihood values.
The posterior mass at each logX value is proportional to L(X)X, where L(X)
is the likelihood.
The weight is returned normalized so that the integral of the weight with
respect to logX is 1.
Parameters
----------
logx: 1d numpy array
Logx values at which to calculate posterior mass.
logl: 1d numpy array
Logl values corresponding to each logx (same shape as logx).
Returns
-------
w_rel: 1d numpy array
Relative posterior mass at each input logx value.
"""
logw = logx + logl
w_rel = np.exp(logw - logw.max())
w_rel /= np.abs(np.trapz(w_rel, x=logx))
return w_rel | Calculate the relative posterior mass for some array of logx values
given the corresponding log-likelihood values.
The posterior mass at each logX value is proportional to L(X)X, where L(X)
is the likelihood.
The weight is returned normalized so that the integral of the weight with
respect to logX is 1.
Parameters
----------
logx: 1d numpy array
Logx values at which to calculate posterior mass.
logl: 1d numpy array
Logl values corresponding to each logx (same shape as logx).
Returns
-------
w_rel: 1d numpy array
Relative posterior mass at each input logx value. | entailment |
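An illustrative example with a toy log-likelihood, chosen only so that the posterior mass peaks inside the plotted logX range:

    import numpy as np

    logx = np.linspace(-20, 0, 500)
    logl = -50 * np.exp(logx)    # toy logl, increasing as logx decreases
    w_rel = rel_posterior_mass(logx, logl)
    np.trapz(w_rel, x=logx)      # approximately 1: normalised w.r.t. logx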
def average_by_key(dict_in, key):
"""Helper function for plot_run_nlive.
Try returning the average of dict_in[key] and, if this does not work or if
key is None, return average of whole dict.
Parameters
----------
dict_in: dict
Values should be arrays.
key: str
Returns
-------
average: float
"""
if key is None:
return np.mean(np.concatenate(list(dict_in.values())))
else:
try:
return np.mean(dict_in[key])
except KeyError:
print('method name "' + key + '" not found, so ' +
'normalise area under the analytic relative posterior ' +
'mass curve using the mean of all methods.')
return np.mean(np.concatenate(list(dict_in.values()))) | Helper function for plot_run_nlive.
Try returning the average of dict_in[key] and, if this does not work or if
key is None, return average of whole dict.
Parameters
----------
dict_in: dict
Values should be arrays.
key: str
Returns
-------
average: float | entailment |
def batch_process_data(file_roots, **kwargs):
"""Process output from many nested sampling runs in parallel with optional
error handling and caching.
The result can be cached using the 'save_name', 'save' and 'load' kwargs
(by default this is not done). See save_load_result docstring for more
details.
Remaining kwargs passed to parallel_utils.parallel_apply (see its
docstring for more details).
Parameters
----------
file_roots: list of strs
file_roots for the runs to load.
base_dir: str, optional
path to directory containing files.
process_func: function, optional
function to use to process the data.
func_kwargs: dict, optional
additional keyword arguments for process_func.
errors_to_handle: error or tuple of errors, optional
which errors to catch when they occur in processing rather than
raising.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
list of ns_run dicts
List of nested sampling runs in dict format (see the module
docstring for more details).
"""
base_dir = kwargs.pop('base_dir', 'chains')
process_func = kwargs.pop('process_func', process_polychord_run)
func_kwargs = kwargs.pop('func_kwargs', {})
func_kwargs['errors_to_handle'] = kwargs.pop('errors_to_handle', ())
data = nestcheck.parallel_utils.parallel_apply(
process_error_helper, file_roots, func_args=(base_dir, process_func),
func_kwargs=func_kwargs, **kwargs)
# Sort processed runs into the same order as file_roots (as parallel_apply
# does not preserve order)
data = sorted(data,
key=lambda x: file_roots.index(x['output']['file_root']))
# Extract error information and print
errors = {}
for i, run in enumerate(data):
if 'error' in run:
try:
errors[run['error']].append(i)
except KeyError:
errors[run['error']] = [i]
for error_name, index_list in errors.items():
message = (error_name + ' processing ' + str(len(index_list)) + ' / '
+ str(len(file_roots)) + ' files')
if len(index_list) != len(file_roots):
message += ('. Roots with errors have (zero based) indexes: '
+ str(index_list))
print(message)
# Return runs which did not have errors
return [run for run in data if 'error' not in run] | Process output from many nested sampling runs in parallel with optional
error handling and caching.
The result can be cached using the 'save_name', 'save' and 'load' kwargs
(by default this is not done). See save_load_result docstring for more
details.
Remaining kwargs passed to parallel_utils.parallel_apply (see its
docstring for more details).
Parameters
----------
file_roots: list of strs
file_roots for the runs to load.
base_dir: str, optional
path to directory containing files.
process_func: function, optional
function to use to process the data.
func_kwargs: dict, optional
additional keyword arguments for process_func.
errors_to_handle: error or tuple of errors, optional
which errors to catch when they occur in processing rather than
raising.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
list of ns_run dicts
List of nested sampling runs in dict format (see the module
docstring for more details). | entailment |
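A hedged usage sketch (the file roots and directory are hypothetical; errors_to_handle=OSError skips roots whose files are missing rather than raising):

    run_list = batch_process_data(
        ['gaussian_1', 'gaussian_2'], base_dir='chains',
        process_func=process_polychord_run, errors_to_handle=OSError)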
def process_error_helper(root, base_dir, process_func, errors_to_handle=(),
**func_kwargs):
"""Wrapper which applies process_func and handles some common errors so one
bad run does not spoil the whole batch.
Useful errors to handle include:
OSError: if you are not sure if all the files exist
AssertionError: if some of the many assertions fail for known reasons;
for example is there are occasional problems decomposing runs into threads
due to limited numerical precision in logls.
Parameters
----------
root: str
File root.
base_dir: str
Directory containing file.
process_func: func
Function for processing file.
errors_to_handle: error type or tuple of error types
Errors to catch without throwing an exception.
func_kwargs: dict
Kwargs to pass to process_func.
Returns
-------
run: dict
Nested sampling run dict (see the module docstring for more
details) or, if an error occurred, a dict containing its type
and the file root.
"""
try:
return process_func(root, base_dir, **func_kwargs)
except errors_to_handle as err:
run = {'error': type(err).__name__,
'output': {'file_root': root}}
return run | Wrapper which applies process_func and handles some common errors so one
bad run does not spoil the whole batch.
Useful errors to handle include:
OSError: if you are not sure if all the files exist
AssertionError: if some of the many assertions fail for known reasons;
for example if there are occasional problems decomposing runs into threads
due to limited numerical precision in logls.
Parameters
----------
root: str
File root.
base_dir: str
Directory containing file.
process_func: func
Function for processing file.
errors_to_handle: error type or tuple of error types
Errors to catch without throwing an exception.
func_kwargs: dict
Kwargs to pass to process_func.
Returns
-------
run: dict
Nested sampling run dict (see the module docstring for more
details) or, if an error occurred, a dict containing its type
and the file root. | entailment |
def process_polychord_run(file_root, base_dir, process_stats_file=True,
**kwargs):
"""Loads data from a PolyChord run into the nestcheck dictionary format for
analysis.
N.B. producing the required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requires PolyChord version v1.13 or later and the setting
write_dead=True.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
process_stats_file: bool, optional
Should PolyChord's <root>.stats file be processed? Set to False if you
don't have the <root>.stats file (such as if PolyChord was run with
write_stats=False).
kwargs: dict, optional
Options passed to ns_run_utils.check_ns_run.
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details).
"""
# N.B. PolyChord dead points files also contains remaining live points at
# termination
samples = np.loadtxt(os.path.join(base_dir, file_root) + '_dead-birth.txt')
ns_run = process_samples_array(samples, **kwargs)
ns_run['output'] = {'base_dir': base_dir, 'file_root': file_root}
if process_stats_file:
try:
ns_run['output'] = process_polychord_stats(file_root, base_dir)
except (OSError, IOError, ValueError) as err:
warnings.warn(
('process_polychord_stats raised {} processing {}.stats file. '
' Proceeding without stats.').format(
type(err).__name__, os.path.join(base_dir, file_root)),
UserWarning)
return ns_run | Loads data from a PolyChord run into the nestcheck dictionary format for
analysis.
N.B. producing the required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requires PolyChord version v1.13 or later and the setting
write_dead=True.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
process_stats_file: bool, optional
Should PolyChord's <root>.stats file be processed? Set to False if you
don't have the <root>.stats file (such as if PolyChord was run with
write_stats=False).
kwargs: dict, optional
Options passed to ns_run_utils.check_ns_run.
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details). | entailment |
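A hedged usage sketch (the file root and directory are hypothetical; the run must have been generated with write_dead=True so that <base_dir>/<file_root>_dead-birth.txt exists):

    run = process_polychord_run('gaussian_1', 'chains')
    # run is a nested sampling run dict with keys including 'logl',
    # 'nlive_array', 'theta', 'thread_labels', 'thread_min_max' and 'output'.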
def process_multinest_run(file_root, base_dir, **kwargs):
"""Loads data from a MultiNest run into the nestcheck dictionary format for
analysis.
N.B. producing the required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requires MultiNest version 3.11 or later.
Parameters
----------
file_root: str
Root name for output files. When running MultiNest, this is determined
by the nest_root parameter.
base_dir: str
Directory containing output files. When running MultiNest, this is
determined by the nest_root parameter.
kwargs: dict, optional
Passed to ns_run_utils.check_ns_run (via process_samples_array)
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details).
"""
# Load dead and live points
dead = np.loadtxt(os.path.join(base_dir, file_root) + '-dead-birth.txt')
live = np.loadtxt(os.path.join(base_dir, file_root)
+ '-phys_live-birth.txt')
# Remove unnecessary final columns
dead = dead[:, :-2]
live = live[:, :-1]
assert dead[:, -2].max() < live[:, -2].min(), (
'final live points should have greater logls than any dead point!',
dead, live)
ns_run = process_samples_array(np.vstack((dead, live)), **kwargs)
assert np.all(ns_run['thread_min_max'][:, 0] == -np.inf), (
'As MultiNest does not currently perform dynamic nested sampling, all '
'threads should start by sampling the whole prior.')
ns_run['output'] = {}
ns_run['output']['file_root'] = file_root
ns_run['output']['base_dir'] = base_dir
return ns_run | Loads data from a MultiNest run into the nestcheck dictionary format for
analysis.
N.B. producing the required output file, containing information about the
iso-likelihood contours within which points were sampled (where they were
"born"), requires MultiNest version 3.11 or later.
Parameters
----------
file_root: str
Root name for output files. When running MultiNest, this is determined
by the nest_root parameter.
base_dir: str
Directory containing output files. When running MultiNest, this is
determined by the nest_root parameter.
kwargs: dict, optional
Passed to ns_run_utils.check_ns_run (via process_samples_array)
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details). | entailment |
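The MultiNest loader works the same way; a short sketch with a hypothetical root and directory (MultiNest >= 3.11 is needed so that the -dead-birth.txt and -phys_live-birth.txt files are written):

import nestcheck.data_processing

# Hypothetical MultiNest settings: nest_root='chains/gauss_30d'
mn_run = nestcheck.data_processing.process_multinest_run('gauss_30d', 'chains')
print(mn_run['theta'].shape)  # (number of samples, number of parameters)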
def process_dynesty_run(results):
"""Transforms results from a dynesty run into the nestcheck dictionary
format for analysis. This function has been tested with dynesty v9.2.0.
Note that the nestcheck point weights and evidence will not be exactly
the same as the dynesty ones as nestcheck calculates logX volumes more
precisely (using the trapezium rule).
This function does not require the birth_inds_given_contours and
threads_given_birth_inds functions as dynesty results objects
already include thread labels via their samples_id property. If the
dynesty run is dynamic, the batch_bounds property is needed to determine
the threads' starting birth contours.
Parameters
----------
results: dynesty results object
N.B. the remaining live points at termination must be included in the
results (dynesty samplers' run_nested method does this if
add_live_points=True - its default value).
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details).
"""
samples = np.zeros((results.samples.shape[0],
results.samples.shape[1] + 3))
samples[:, 0] = results.logl
samples[:, 1] = results.samples_id
samples[:, 3:] = results.samples
unique_th, first_inds = np.unique(results.samples_id, return_index=True)
assert np.array_equal(unique_th, np.asarray(range(unique_th.shape[0])))
thread_min_max = np.full((unique_th.shape[0], 2), np.nan)
try:
# Try processing standard nested sampling results
assert unique_th.shape[0] == results.nlive
assert np.array_equal(
np.unique(results.samples_id[-results.nlive:]),
np.asarray(range(results.nlive))), (
'perhaps the final live points are not included?')
thread_min_max[:, 0] = -np.inf
except AttributeError:
# If results has no nlive attribute, it must be dynamic nested sampling
assert unique_th.shape[0] == sum(results.batch_nlive)
for th_lab, ind in zip(unique_th, first_inds):
thread_min_max[th_lab, 0] = (
results.batch_bounds[results.samples_batch[ind], 0])
for th_lab in unique_th:
final_ind = np.where(results.samples_id == th_lab)[0][-1]
thread_min_max[th_lab, 1] = results.logl[final_ind]
samples[final_ind, 2] = -1
assert np.all(~np.isnan(thread_min_max))
run = nestcheck.ns_run_utils.dict_given_run_array(samples, thread_min_max)
nestcheck.ns_run_utils.check_ns_run(run)
return run | Transforms results from a dynesty run into the nestcheck dictionary
format for analysis. This function has been tested with dynesty v9.2.0.
Note that the nestcheck point weights and evidence will not be exactly
the same as the dynesty ones as nestcheck calculates logX volumes more
precisely (using the trapezium rule).
This function does not require the birth_inds_given_contours and
threads_given_birth_inds functions as dynesty results objects
already include thread labels via their samples_id property. If the
dynesty run is dynamic, the batch_bounds property is needed to determine
the threads' starting birth contours.
Parameters
----------
results: dynesty results object
N.B. the remaining live points at termination must be included in the
results (dynesty samplers' run_nested method does this if
add_live_points=True - its default value).
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details). | entailment |
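A sketch of converting a dynesty run; the toy two-dimensional likelihood and prior transform are illustrative assumptions, not part of nestcheck, and the dynesty calls follow its standard API:

import numpy as np
import dynesty
from nestcheck.data_processing import process_dynesty_run

def loglike(theta):
    # Toy Gaussian log-likelihood (assumed for illustration)
    return -0.5 * np.sum(theta ** 2)

def prior_transform(cube):
    # Map the unit cube to a uniform prior on [-5, 5]^2
    return 10.0 * cube - 5.0

sampler = dynesty.NestedSampler(loglike, prior_transform, 2, nlive=100)
sampler.run_nested()  # default settings include the final live points in results
ns_run = process_dynesty_run(sampler.results)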
def process_polychord_stats(file_root, base_dir):
"""Reads a PolyChord <root>.stats output file and returns the information
contained in a dictionary.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
Returns
-------
output: dict
See PolyChord documentation for more details.
"""
filename = os.path.join(base_dir, file_root) + '.stats'
output = {'base_dir': base_dir,
'file_root': file_root}
with open(filename, 'r') as stats_file:
lines = stats_file.readlines()
output['logZ'] = float(lines[8].split()[2])
output['logZerr'] = float(lines[8].split()[4])
# Cluster logZs and errors
output['logZs'] = []
output['logZerrs'] = []
for line in lines[14:]:
if line[:5] != 'log(Z':
break
output['logZs'].append(float(
re.findall(r'=(.*)', line)[0].split()[0]))
output['logZerrs'].append(float(
re.findall(r'=(.*)', line)[0].split()[2]))
# Other output info
nclust = len(output['logZs'])
output['ncluster'] = nclust
output['nposterior'] = int(lines[20 + nclust].split()[1])
output['nequals'] = int(lines[21 + nclust].split()[1])
output['ndead'] = int(lines[22 + nclust].split()[1])
output['nlive'] = int(lines[23 + nclust].split()[1])
try:
output['nlike'] = int(lines[24 + nclust].split()[1])
except ValueError:
# if nlike has too many digits, PolyChord just writes ***** to .stats
# file. This causes a ValueError
output['nlike'] = np.nan
output['avnlike'] = float(lines[25 + nclust].split()[1])
output['avnlikeslice'] = float(lines[25 + nclust].split()[3])
# Means and stds of dimensions (not produced by PolyChord<=1.13)
if len(lines) > 29 + nclust:
output['param_means'] = []
output['param_mean_errs'] = []
for line in lines[29 + nclust:]:
output['param_means'].append(float(line.split()[1]))
output['param_mean_errs'].append(float(line.split()[3]))
return output | Reads a PolyChord <root>.stats output file and returns the information
contained in a dictionary.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
Returns
-------
output: dict
See PolyChord documentation for more details. | entailment |
def process_samples_array(samples, **kwargs):
"""Convert an array of nested sampling dead and live points of the type
produced by PolyChord and MultiNest into a nestcheck nested sampling run
dictionary.
Parameters
----------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl
kwargs: dict, optional
Options passed to birth_inds_given_contours
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more
details). Only contains information in samples (not additional
optional output key).
"""
samples = samples[np.argsort(samples[:, -2])]
ns_run = {}
ns_run['logl'] = samples[:, -2]
ns_run['theta'] = samples[:, :-2]
birth_contours = samples[:, -1]
# birth_contours, ns_run['theta'] = check_logls_unique(
# samples[:, -2], samples[:, -1], samples[:, :-2])
birth_inds = birth_inds_given_contours(
birth_contours, ns_run['logl'], **kwargs)
ns_run['thread_labels'] = threads_given_birth_inds(birth_inds)
unique_threads = np.unique(ns_run['thread_labels'])
assert np.array_equal(unique_threads,
np.asarray(range(unique_threads.shape[0])))
# Work out nlive_array and thread_min_max logls from thread labels and
# birth contours
thread_min_max = np.zeros((unique_threads.shape[0], 2))
# NB delta_nlive indexes are offset from points' indexes by 1 as we need an
# element to represent the initial sampling of live points before any dead
# points are created.
# I.E. birth on step 1 corresponds to replacing dead point zero
delta_nlive = np.zeros(samples.shape[0] + 1)
for label in unique_threads:
thread_inds = np.where(ns_run['thread_labels'] == label)[0]
# Max is final logl in thread
thread_min_max[label, 1] = ns_run['logl'][thread_inds[-1]]
thread_start_birth_ind = birth_inds[thread_inds[0]]
# delta nlive indexes are +1 from logl indexes to allow for initial
# nlive (before first dead point)
delta_nlive[thread_inds[-1] + 1] -= 1
if thread_start_birth_ind == birth_inds[0]:
# thread minimum is -inf as it starts by sampling from whole prior
thread_min_max[label, 0] = -np.inf
delta_nlive[0] += 1
else:
assert thread_start_birth_ind >= 0
thread_min_max[label, 0] = ns_run['logl'][thread_start_birth_ind]
delta_nlive[thread_start_birth_ind + 1] += 1
ns_run['thread_min_max'] = thread_min_max
ns_run['nlive_array'] = np.cumsum(delta_nlive)[:-1]
return ns_run | Convert an array of nested sampling dead and live points of the type
produced by PolyChord and MultiNest into a nestcheck nested sampling run
dictionary.
Parameters
----------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl
kwargs: dict, optional
Options passed to birth_inds_given_contours
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more
details). Only contains information in samples (not additional
optional output key). | entailment |
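To make the expected array layout concrete, here is a tiny hand-built run (toy values, not real sampler output) with one parameter, two live points and four samples; the shared birth value -1e30 marks points sampled from the whole prior:

import numpy as np
from nestcheck.data_processing import process_samples_array

# Columns: param_1, logl, birth_logl
samples = np.array([[0.1, 1.0, -1e30],
                    [0.2, 2.0, -1e30],
                    [0.3, 3.0, 1.0],
                    [0.4, 4.0, 2.0]])
run = process_samples_array(samples)
print(run['nlive_array'])    # [2. 2. 2. 1.]
print(run['thread_labels'])  # [0 1 0 1]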
def birth_inds_given_contours(birth_logl_arr, logl_arr, **kwargs):
"""Maps the iso-likelihood contours on which points were born to the
index of the dead point on this contour.
MultiNest and PolyChord use different values to identify the initial live
points which were sampled from the whole prior (PolyChord uses -1e+30
and MultiNest -0.179769313486231571E+309). However in each case the first
dead point must have been sampled from the whole prior, so for either
package we can use
init_birth = birth_logl_arr[0]
If there are many points with the same logl value and dup_assert is False,
these points are randomly assigned an order (to ensure results are
consistent, random seeding is used).
Parameters
----------
logl_arr: 1d numpy array
logl values of each point.
birth_logl_arr: 1d numpy array
Birth contours - i.e. logl values of the iso-likelihood contour from
within which each point was sampled (on which it was born).
dup_assert: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
dup_warn: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
Returns
-------
birth_inds: 1d numpy array of ints
Step at which each element of logl_arr was sampled. Points sampled from
the whole prior are assigned value -1.
"""
dup_assert = kwargs.pop('dup_assert', False)
dup_warn = kwargs.pop('dup_warn', False)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert logl_arr.ndim == 1, logl_arr.ndim
assert birth_logl_arr.ndim == 1, birth_logl_arr.ndim
# Check for duplicate logl values (if specified by dup_assert or dup_warn)
nestcheck.ns_run_utils.check_ns_run_logls(
{'logl': logl_arr}, dup_assert=dup_assert, dup_warn=dup_warn)
# Random seed so results are consistent if there are duplicate logls
state = np.random.get_state() # Save random state before seeding
np.random.seed(0)
# Calculate birth inds
init_birth = birth_logl_arr[0]
assert np.all(birth_logl_arr <= logl_arr), (
logl_arr[birth_logl_arr > logl_arr])
birth_inds = np.full(birth_logl_arr.shape, np.nan)
birth_inds[birth_logl_arr == init_birth] = -1
for i, birth_logl in enumerate(birth_logl_arr):
if not np.isnan(birth_inds[i]):
# birth ind has already been assigned
continue
dup_deaths = np.where(logl_arr == birth_logl)[0]
if dup_deaths.shape == (1,):
# death index is unique
birth_inds[i] = dup_deaths[0]
continue
# The remainder of this loop deals with the case that multiple points
# have the same logl value (=birth_logl). This can occur due to limited
# precision, or for likelihoods with constant regions. In this case we
# randomly assign the duplicates birth steps in a manner
# that provides a valid division into nested sampling runs
dup_births = np.where(birth_logl_arr == birth_logl)[0]
assert dup_deaths.shape[0] > 1, dup_deaths
if np.all(birth_logl_arr[dup_deaths] != birth_logl):
# If no points both are born and die on this contour, we can just
# randomly assign an order
np.random.shuffle(dup_deaths)
inds_to_use = dup_deaths
else:
# If some points are both born and die on the contour, we need to
# take care that the assigned birth inds do not result in some
# points dying before they are born
try:
inds_to_use = sample_less_than_condition(
dup_deaths, dup_births)
except ValueError:
raise ValueError((
'There is no way to allocate indexes dup_deaths={} such '
'that each is less than dup_births={}.').format(
dup_deaths, dup_births))
try:
# Add our selected inds_to_use values to the birth_inds array
# Note that dup_deaths (and hence inds to use) may have more
# members than dup_births, because one of the duplicates may be
# the final point in a thread. We therefore include only the first
# dup_births.shape[0] elements
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
except ValueError:
warnings.warn((
'for logl={}, the number of points born (indexes='
'{}) is bigger than the number of points dying '
'(indexes={}). This indicates a problem with your '
'nested sampling software - it may be caused by '
'a bug in PolyChord which was fixed in PolyChord '
'v1.14, so try upgrading. I will try to give an '
'approximate allocation of threads but this may '
'fail.').format(
birth_logl, dup_births, inds_to_use), UserWarning)
extra_inds = np.random.choice(
inds_to_use, size=dup_births.shape[0] - inds_to_use.shape[0])
inds_to_use = np.concatenate((inds_to_use, extra_inds))
np.random.shuffle(inds_to_use)
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
assert np.all(~np.isnan(birth_inds)), np.isnan(birth_inds).sum()
np.random.set_state(state) # Reset random state
return birth_inds.astype(int) | Maps the iso-likelihood contours on which points were born to the
index of the dead point on this contour.
MultiNest and PolyChord use different values to identify the initial live
points which were sampled from the whole prior (PolyChord uses -1e+30
and MultiNest -0.179769313486231571E+309). However in each case the first
dead point must have been sampled from the whole prior, so for either
package we can use
init_birth = birth_logl_arr[0]
If there are many points with the same logl value and dup_assert is False,
these points are randomly assigned an order (to ensure results are
consistent, random seeding is used).
Parameters
----------
logl_arr: 1d numpy array
logl values of each point.
birth_logl_arr: 1d numpy array
Birth contours - i.e. logl values of the iso-likelihood contour from
within which each point was sampled (on which it was born).
dup_assert: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
dup_warn: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
Returns
-------
birth_inds: 1d numpy array of ints
Step at which each element of logl_arr was sampled. Points sampled from
the whole prior are assigned value -1. | entailment |
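On the same toy data, each birth contour maps to the index of the dead point defining that contour, with -1 for points sampled from the whole prior:

import numpy as np
from nestcheck.data_processing import birth_inds_given_contours

logl = np.array([1.0, 2.0, 3.0, 4.0])
births = np.array([-1e30, -1e30, 1.0, 2.0])
print(birth_inds_given_contours(births, logl))  # [-1 -1  0  1]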
def sample_less_than_condition(choices_in, condition):
"""Creates a random sample from choices without replacement, subject to the
condition that each element of the output is greater than the corresponding
element of the condition array.
condition should be in ascending order.
"""
output = np.zeros(min(condition.shape[0], choices_in.shape[0]))
choices = copy.deepcopy(choices_in)
for i, _ in enumerate(output):
# randomly select one of the choices which meets condition
avail_inds = np.where(choices < condition[i])[0]
selected_ind = np.random.choice(avail_inds)
output[i] = choices[selected_ind]
# remove the chosen value
choices = np.delete(choices, selected_ind)
return output | Creates a random sample from choices without replacement, subject to the
condition that each element of the output is greater than the corresponding
element of the condition array.
condition should be in ascending order. | entailment |
def threads_given_birth_inds(birth_inds):
"""Divides a nested sampling run into threads, using info on the indexes
at which points were sampled. See "Sampling errors in nested sampling
parameter estimation" (Higson et al. 2018) for more information.
Parameters
----------
birth_inds: 1d numpy array
Indexes of the iso-likelihood contours from within which each point was
sampled ("born").
Returns
-------
thread_labels: 1d numpy array of ints
labels of the thread each point belongs to.
"""
unique, counts = np.unique(birth_inds, return_counts=True)
# First get a list of all the indexes on which threads start and their
# counts. This is every point initially sampled from the prior, plus any
# indexes where more than one point is sampled.
thread_start_inds = np.concatenate((
unique[:1], unique[1:][counts[1:] > 1]))
thread_start_counts = np.concatenate((
counts[:1], counts[1:][counts[1:] > 1] - 1))
thread_labels = np.full(birth_inds.shape, np.nan)
thread_num = 0
for nmulti, multi in enumerate(thread_start_inds):
for i, start_ind in enumerate(np.where(birth_inds == multi)[0]):
# unless nmulti=0 the first point born on the contour (i=0) is
# already assigned to a thread
if i != 0 or nmulti == 0:
# check point has not already been assigned
assert np.isnan(thread_labels[start_ind])
thread_labels[start_ind] = thread_num
# find the point which replaced it
next_ind = np.where(birth_inds == start_ind)[0]
while next_ind.shape != (0,):
# check point has not already been assigned
assert np.isnan(thread_labels[next_ind[0]])
thread_labels[next_ind[0]] = thread_num
# find the point which replaced it
next_ind = np.where(birth_inds == next_ind[0])[0]
thread_num += 1
if not np.all(~np.isnan(thread_labels)):
warnings.warn((
'{} points (out of a total of {}) were not given a thread label! '
'This is likely due to small numerical errors in your nested '
'sampling software while running the calculation or writing the '
'input files. '
'I will try to give an approximate answer by randomly assigning '
'these points to threads.'
'\nIndexes without labels are {}'
'\nIndexes on which threads start are {} with {} threads '
'starting on each.').format(
(np.isnan(thread_labels)).sum(), birth_inds.shape[0],
np.where(np.isnan(thread_labels))[0],
thread_start_inds, thread_start_counts))
inds = np.where(np.isnan(thread_labels))[0]
state = np.random.get_state() # Save random state before seeding
np.random.seed(0) # make thread decomposition reproducible
for ind in inds:
# Get the set of threads with members both before and after ind to
# ensure we don't change nlive_array by extending a thread
labels_to_choose = np.intersect1d( # N.B. this removes nans too
thread_labels[:ind], thread_labels[ind + 1:])
if labels_to_choose.shape[0] == 0:
# In edge case that there is no intersection, just randomly
# select from non-nan thread labels
labels_to_choose = np.unique(
thread_labels[~np.isnan(thread_labels)])
thread_labels[ind] = np.random.choice(labels_to_choose)
np.random.set_state(state) # Reset random state
assert np.all(~np.isnan(thread_labels)), (
'{} points still do not have thread labels'.format(
(np.isnan(thread_labels)).sum()))
assert np.array_equal(thread_labels, thread_labels.astype(int)), (
'Thread labels should all be ints!')
thread_labels = thread_labels.astype(int)
# Check unique thread labels are a sequence from 0 to nthreads-1
assert np.array_equal(
np.unique(thread_labels),
np.asarray(range(sum(thread_start_counts)))), (
str(np.unique(thread_labels)) + ' is not equal to range('
+ str(sum(thread_start_counts)) + ')')
return thread_labels | Divides a nested sampling run into threads, using info on the indexes
at which points were sampled. See "Sampling errors in nested sampling
parameter estimation" (Higson et al. 2018) for more information.
Parameters
----------
birth_inds: 1d numpy array
Indexes of the iso-likelihood contours from within which each point was
sampled ("born").
Returns
-------
thread_labels: 1d numpy array of ints
labels of the thread each point belongs to. | entailment |
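Continuing the toy example, the birth indexes decompose into two threads, one per initial live point:

import numpy as np
from nestcheck.data_processing import threads_given_birth_inds

print(threads_given_birth_inds(np.array([-1, -1, 0, 1])))  # [0 1 0 1]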
def parallel_map(func, *arg_iterable, **kwargs):
"""Apply function to iterable with parallel map, and hence returns
results in order. functools.partial is used to freeze func_pre_args and
func_kwargs, meaning that the iterable argument must be the last positional
argument.
Roughly equivalent to
>>> [func(*func_pre_args, x, **func_kwargs) for x in arg_iterable]
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
chunksize: int, optional
Perform function in batches
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs
"""
chunksize = kwargs.pop('chunksize', 1)
func_pre_args = kwargs.pop('func_pre_args', ())
func_kwargs = kwargs.pop('func_kwargs', {})
max_workers = kwargs.pop('max_workers', None)
parallel = kwargs.pop('parallel', True)
parallel_warning = kwargs.pop('parallel_warning', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
func_to_map = functools.partial(func, *func_pre_args, **func_kwargs)
if parallel:
pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
return list(pool.map(func_to_map, *arg_iterable, chunksize=chunksize))
else:
if parallel_warning:
warnings.warn(('parallel_map has parallel=False - turn on '
'parallelisation for faster processing'),
UserWarning)
return list(map(func_to_map, *arg_iterable)) | Apply function to iterable with parallel map, and hence returns
results in order. functools.partial is used to freeze func_pre_args and
func_kwargs, meaning that the iterable argument must be the last positional
argument.
Roughly equivalent to
>>> [func(*func_pre_args, x, **func_kwargs) for x in arg_iterable]
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
chunksize: int, optional
Perform function in batches
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs | entailment |
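A minimal sketch of parallel_map using a picklable standard-library function; results come back in the same order as the input:

import math
from nestcheck.parallel_utils import parallel_map

if __name__ == '__main__':  # guard needed on platforms that spawn processes
    results = parallel_map(math.sqrt, range(9), max_workers=2)
    print(results)  # [0.0, 1.0, 1.41..., ...], preserving input order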
def parallel_apply(func, arg_iterable, **kwargs):
"""Apply function to iterable with parallelisation and a tqdm progress bar.
Roughly equivalent to
>>> [func(*func_pre_args, x, *func_args, **func_kwargs) for x in
arg_iterable]
but will **not** necessarily return results in input order.
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
func_args: tuple, optional
Additional positional arguments for func.
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs
"""
max_workers = kwargs.pop('max_workers', None)
parallel = kwargs.pop('parallel', True)
parallel_warning = kwargs.pop('parallel_warning', True)
func_args = kwargs.pop('func_args', ())
func_pre_args = kwargs.pop('func_pre_args', ())
func_kwargs = kwargs.pop('func_kwargs', {})
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if 'leave' not in tqdm_kwargs: # default to leave=False
tqdm_kwargs['leave'] = False
assert isinstance(func_args, tuple), (
str(func_args) + ' is type ' + str(type(func_args)))
assert isinstance(func_pre_args, tuple), (
str(func_pre_args) + ' is type ' + str(type(func_pre_args)))
progress = select_tqdm()
if not parallel:
if parallel_warning:
warnings.warn(('parallel_map has parallel=False - turn on '
'parallelisation for faster processing'),
UserWarning)
return [func(*(func_pre_args + (x,) + func_args), **func_kwargs) for
x in progress(arg_iterable, **tqdm_kwargs)]
else:
pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
futures = []
for element in arg_iterable:
futures.append(pool.submit(
func, *(func_pre_args + (element,) + func_args),
**func_kwargs))
results = []
for fut in progress(concurrent.futures.as_completed(futures),
total=len(arg_iterable), **tqdm_kwargs):
results.append(fut.result())
return results | Apply function to iterable with parallelisation and a tqdm progress bar.
Roughly equivalent to
>>> [func(*func_pre_args, x, *func_args, **func_kwargs) for x in
arg_iterable]
but will **not** necessarily return results in input order.
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
func_args: tuple, optional
Additional positional arguments for func.
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs | entailment |
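parallel_apply is similar but shows a progress bar and does not preserve order; a sketch with a toy module-level function (an assumption for illustration):

import numpy as np
from nestcheck.parallel_utils import parallel_apply

def scale(x, factor=1.0):
    # Toy function; must be defined at module level so it can be pickled
    return x * factor

if __name__ == '__main__':
    out = parallel_apply(scale, np.arange(10), func_kwargs={'factor': 2.0},
                         max_workers=2)
    print(sorted(out))  # sorted because the output order is not guaranteed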
def select_tqdm():
"""If running in a jupyter notebook, then returns tqdm_notebook.
Otherwise returns a regular tqdm progress bar.
Returns
-------
progress: function
"""
try:
progress = tqdm.tqdm_notebook
assert get_ipython().has_trait('kernel')
except (NameError, AssertionError):
progress = tqdm.tqdm
return progress | If running in a jupyter notebook, then returns tqdm_notebook.
Otherwise returns a regular tqdm progress bar.
Returns
-------
progress: function | entailment |
def summary_df_from_array(results_array, names, axis=0, **kwargs):
Make a pandas DataFrame of the mean and std devs of an array of results,
including the uncertainties on the values.
This function converts the array to a DataFrame and calls summary_df on it.
Parameters
----------
results_array: 2d numpy array
names: list of str
Names for the output df's columns.
axis: int, optional
Axis on which to calculate summary statistics.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details.
"""
assert axis == 0 or axis == 1
df = pd.DataFrame(results_array)
if axis == 1:
df = df.T
df.columns = names
return summary_df(df, **kwargs) | Make a pandas DataFrame of the mean and std devs of an array of results,
including the uncertainties on the values.
This function converts the array to a DataFrame and calls summary_df on it.
Parameters
----------
results_array: 2d numpy array
names: list of str
Names for the output df's columns.
axis: int, optional
Axis on which to calculate summary statistics.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details. | entailment |
def summary_df_from_list(results_list, names, **kwargs):
Make a pandas DataFrame of the mean and std devs of each element of a
list of 1d arrays, including the uncertainties on the values.
This just converts the array to a DataFrame and calls summary_df on it.
Parameters
----------
results_list: list of 1d numpy arrays
Must have same length as names.
names: list of strs
Names for the output df's columns.
kwargs: dict, optional
Keyword arguments to pass to summary_df.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details.
"""
for arr in results_list:
assert arr.shape == (len(names),)
df = pd.DataFrame(np.stack(results_list, axis=0))
df.columns = names
return summary_df(df, **kwargs) | Make a pandas DataFrame of the mean and std devs of each element of a
list of 1d arrays, including the uncertainties on the values.
This just converts the array to a DataFrame and calls summary_df on it.
Parameters
----------
results_list: list of 1d numpy arrays
Must have same length as names.
names: list of strs
Names for the output df's columns.
kwargs: dict, optional
Keyword arguments to pass to summary_df.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details. | entailment |
def summary_df_from_multi(multi_in, inds_to_keep=None, **kwargs):
"""Apply summary_df to a multiindex while preserving some levels.
Parameters
----------
multi_in: multiindex pandas DataFrame
inds_to_keep: None or list of strs, optional
Index levels to preserve.
kwargs: dict, optional
Keyword arguments to pass to summary_df.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details.
"""
# Need to pop include true values and add separately at the end as
# otherwise we get multiple true values added
include_true_values = kwargs.pop('include_true_values', False)
true_values = kwargs.get('true_values', None)
if inds_to_keep is None:
inds_to_keep = list(multi_in.index.names)[:-1]
if 'calculation type' not in inds_to_keep:
df = multi_in.groupby(inds_to_keep).apply(
summary_df, include_true_values=False, **kwargs)
else:
# If there is already a level called 'calculation type' in multi,
# summary_df will try making a second 'calculation type' index and (as
# of pandas v0.23.0) throw an error. Avoid this by renaming.
inds_to_keep = [lev if lev != 'calculation type' else
'calculation type temp' for lev in inds_to_keep]
multi_temp = copy.deepcopy(multi_in)
multi_temp.index.set_names(
[lev if lev != 'calculation type' else 'calculation type temp' for
lev in list(multi_temp.index.names)], inplace=True)
df = multi_temp.groupby(inds_to_keep).apply(
summary_df, include_true_values=False, **kwargs)
# add the 'calculation type' values ('mean' and 'std') produced by
# summary_df to the input calculation type names (now in level
# 'calculation type temp')
ind = (df.index.get_level_values('calculation type temp') + ' ' +
df.index.get_level_values('calculation type'))
order = list(df.index.names)
order.remove('calculation type temp')
df.index = df.index.droplevel(
['calculation type', 'calculation type temp'])
df['calculation type'] = list(ind)
df.set_index('calculation type', append=True, inplace=True)
df = df.reorder_levels(order)
if include_true_values:
assert true_values is not None
tv_ind = ['true values' if name == 'calculation type' else '' for
name in df.index.names[:-1]] + ['value']
df.loc[tuple(tv_ind), :] = true_values
return df | Apply summary_df to a multiindex while preserving some levels.
Parameters
----------
multi_in: multiindex pandas DataFrame
inds_to_keep: None or list of strs, optional
Index levels to preserve.
kwargs: dict, optional
Keyword arguments to pass to summary_df.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details. | entailment |
def summary_df(df_in, **kwargs):
Make a pandas DataFrame of the mean and std devs of an array of results,
including the uncertainties on the values.
This is similar to pandas.DataFrame.describe but also includes estimates of
the numerical uncertainties.
The output DataFrame has multiindex levels:
'calculation type': mean and standard deviations of the data.
'result type': value and uncertainty for each quantity.
calculation type result type column_1 column_2 ...
mean value
mean uncertainty
std value
std uncertainty
Parameters
----------
df_in: pandas DataFrame
true_values: array
Analytical values if known for comparison with mean. Used to
calculate root mean squared errors (RMSE).
include_true_values: bool, optional
Whether or not to include true values in the output DataFrame.
include_rmse: bool, optional
Whether or not to include root-mean-squared-errors in the output
DataFrame.
Returns
-------
df: MultiIndex DataFrame
"""
true_values = kwargs.pop('true_values', None)
include_true_values = kwargs.pop('include_true_values', False)
include_rmse = kwargs.pop('include_rmse', False)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if true_values is not None:
assert true_values.shape[0] == df_in.shape[1], (
'There should be one true value for every column! '
'true_values.shape=' + str(true_values.shape) + ', '
'df_in.shape=' + str(df_in.shape))
# make the data frame
df = pd.DataFrame([df_in.mean(axis=0), df_in.std(axis=0, ddof=1)],
index=['mean', 'std'])
if include_true_values:
assert true_values is not None
df.loc['true values'] = true_values
# Make index categorical to allow sorting
df.index = pd.CategoricalIndex(df.index.values, ordered=True,
categories=['true values', 'mean', 'std',
'rmse'],
name='calculation type')
# add uncertainties
num_cals = df_in.shape[0]
mean_unc = df.loc['std'] / np.sqrt(num_cals)
std_unc = df.loc['std'] * np.sqrt(1 / (2 * (num_cals - 1)))
df['result type'] = pd.Categorical(['value'] * df.shape[0], ordered=True,
categories=['value', 'uncertainty'])
df.set_index(['result type'], drop=True, append=True, inplace=True)
df.loc[('mean', 'uncertainty'), :] = mean_unc.values
df.loc[('std', 'uncertainty'), :] = std_unc.values
if include_rmse:
assert true_values is not None, \
'Need to input true values for RMSE!'
rmse, rmse_unc = rmse_and_unc(df_in.values, true_values)
df.loc[('rmse', 'value'), :] = rmse
df.loc[('rmse', 'uncertainty'), :] = rmse_unc
# Ensure correct row order by sorting
df.sort_index(inplace=True)
# Cast calculation type index back from categorical to string to allow
# adding new calculation types
df.set_index(
[df.index.get_level_values('calculation type').astype(str),
df.index.get_level_values('result type')],
inplace=True)
return df | Make a pandas DataFrame of the mean and std devs of an array of results,
including the uncertainties on the values.
This is similar to pandas.DataFrame.describe but also includes estimates of
the numerical uncertainties.
The output DataFrame has multiindex levels:
'calculation type': mean and standard deviations of the data.
'result type': value and uncertainty for each quantity.
calculation type result type column_1 column_2 ...
mean value
mean uncertainty
std value
std uncertainty
Parameters
----------
df_in: pandas DataFrame
true_values: array
Analytical values if known for comparison with mean. Used to
calculate root mean squared errors (RMSE).
include_true_values: bool, optional
Whether or not to include true values in the output DataFrame.
include_rmse: bool, optional
Whether or not to include root-mean-squared-errors in the output
DataFrame.
Returns
-------
df: MultiIndex DataFrame | entailment |
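A sketch of summary_df on 50 noisy repeats of two quantities with known true values (toy data):

import numpy as np
import pandas as pd
from nestcheck.pandas_functions import summary_df

np.random.seed(0)
df_in = pd.DataFrame(np.random.normal(loc=1.0, scale=0.1, size=(50, 2)),
                     columns=['est_a', 'est_b'])
print(summary_df(df_in, true_values=np.array([1.0, 1.0]),
                 include_true_values=True, include_rmse=True))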
def efficiency_gain_df(method_names, method_values, est_names, **kwargs):
r"""Calculated data frame showing
.. math::
\mathrm{efficiency\,gain}
=
\frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}
See the dynamic nested sampling paper (Higson et al. 2019) for more
details.
The standard method on which to base the gain is assumed to be the first
method input.
The output DataFrame will contain rows:
mean [dynamic goal]: mean calculation result for standard nested
sampling and dynamic nested sampling with each input dynamic
goal.
std [dynamic goal]: standard deviation of results for standard
nested sampling and dynamic nested sampling with each input
dynamic goal.
gain [dynamic goal]: the efficiency gain (computational speedup)
from dynamic nested sampling compared to standard nested
sampling. This equals (variance of standard results) /
(variance of dynamic results); see the dynamic nested
sampling paper for more details.
Parameters
----------
method_names: list of strs
method_values: list
Each element is a list of 1d arrays of results for the method. Each
array must have shape (len(est_names),).
est_names: list of strs
Provide column titles for output df.
true_values: iterable of same length as estimators list
True values of the estimators for the given likelihood and prior.
Returns
-------
results: pandas data frame
Results data frame.
"""
true_values = kwargs.pop('true_values', None)
include_true_values = kwargs.pop('include_true_values', False)
include_rmse = kwargs.pop('include_rmse', False)
adjust_nsamp = kwargs.pop('adjust_nsamp', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if adjust_nsamp is not None:
assert adjust_nsamp.shape == (len(method_names),)
assert len(method_names) == len(method_values)
df_dict = {}
for i, method_name in enumerate(method_names):
# Set include_true_values=False as we don't want them repeated for
# every method
df = summary_df_from_list(
method_values[i], est_names, true_values=true_values,
include_true_values=False, include_rmse=include_rmse)
if i != 0:
stats = ['std']
if include_rmse:
stats.append('rmse')
if adjust_nsamp is not None:
# Efficiency gain measures performance per number of
# samples (proportional to computational work). If the
# number of samples is not the same we can adjust this.
adjust = (adjust_nsamp[0] / adjust_nsamp[i])
else:
adjust = 1
for stat in stats:
# Calculate efficiency gain vs standard nested sampling
gain, gain_unc = get_eff_gain(
df_dict[method_names[0]].loc[(stat, 'value')],
df_dict[method_names[0]].loc[(stat, 'uncertainty')],
df.loc[(stat, 'value')],
df.loc[(stat, 'uncertainty')], adjust=adjust)
key = stat + ' efficiency gain'
df.loc[(key, 'value'), :] = gain
df.loc[(key, 'uncertainty'), :] = gain_unc
df_dict[method_name] = df
results = pd.concat(df_dict)
results.index.rename('dynamic settings', level=0, inplace=True)
new_ind = []
new_ind.append(pd.CategoricalIndex(
results.index.get_level_values('calculation type'), ordered=True,
categories=['true values', 'mean', 'std', 'rmse',
'std efficiency gain', 'rmse efficiency gain']))
new_ind.append(pd.CategoricalIndex(
results.index.get_level_values('dynamic settings'),
ordered=True, categories=[''] + method_names))
new_ind.append(results.index.get_level_values('result type'))
results.set_index(new_ind, inplace=True)
if include_true_values:
with warnings.catch_warnings():
# Performance not an issue here so suppress annoying warning
warnings.filterwarnings('ignore', message=(
'indexing past lexsort depth may impact performance.'))
results.loc[('true values', '', 'value'), :] = true_values
results.sort_index(inplace=True)
return results | r"""Calculate a data frame showing
.. math::
\mathrm{efficiency\,gain}
=
\frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}
See the dynamic nested sampling paper (Higson et al. 2019) for more
details.
The standard method on which to base the gain is assumed to be the first
method input.
The output DataFrame will contain rows:
mean [dynamic goal]: mean calculation result for standard nested
sampling and dynamic nested sampling with each input dynamic
goal.
std [dynamic goal]: standard deviation of results for standard
nested sampling and dynamic nested sampling with each input
dynamic goal.
gain [dynamic goal]: the efficiency gain (computational speedup)
from dynamic nested sampling compared to standard nested
sampling. This equals (variance of standard results) /
(variance of dynamic results); see the dynamic nested
sampling paper for more details.
Parameters
----------
method_names: list of strs
method_values: list
Each element is a list of 1d arrays of results for the method. Each
array must have shape (len(est_names),).
est_names: list of strs
Provide column titles for output df.
true_values: iterable of same length as estimators list
True values of the estimators for the given likelihood and prior.
Returns
-------
results: pandas data frame
Results data frame. | entailment |
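A sketch comparing a 'standard' and a 'dynamic' method on a single estimator with toy scatters; the 'std efficiency gain' row for the second method should come out near (1.0 / 0.5) ** 2 = 4:

import numpy as np
from nestcheck.pandas_functions import efficiency_gain_df

np.random.seed(0)
standard = [np.random.normal(0.0, 1.0, size=1) for _ in range(200)]
dynamic = [np.random.normal(0.0, 0.5, size=1) for _ in range(200)]
print(efficiency_gain_df(['standard', 'dynamic'], [standard, dynamic], ['logz']))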
def paper_format_efficiency_gain_df(eff_gain_df):
"""Transform efficiency gain data frames output by nestcheck into the
format shown in the dynamic nested sampling paper (Higson et al. 2019).
Parameters
----------
eff_gain_df: pandas DataFrame
DataFrame of the form produced by efficiency_gain_df.
Returns
-------
paper_df: pandas DataFrame
"""
idxs = pd.IndexSlice[['std', 'std efficiency gain'], :, :]
paper_df = copy.deepcopy(eff_gain_df.loc[idxs, :])
# Show mean number of samples and likelihood calls instead of st dev
means = (eff_gain_df.xs('mean', level='calculation type')
.xs('value', level='result type'))
for col in ['samples', 'likelihood calls']:
try:
col_vals = []
for val in means[col].values:
col_vals += [int(np.rint(val)), np.nan]
col_vals += [np.nan] * (paper_df.shape[0] - len(col_vals))
paper_df[col] = col_vals
except KeyError:
pass
row_name_map = {'std efficiency gain': 'Efficiency gain',
'St.Dev. efficiency gain': 'Efficiency gain',
'dynamic ': '',
'std': 'St.Dev.'}
row_names = (paper_df.index.get_level_values(0).astype(str) + ' ' +
paper_df.index.get_level_values(1).astype(str))
for key, value in row_name_map.items():
row_names = row_names.str.replace(key, value)
paper_df.index = [row_names, paper_df.index.get_level_values(2)]
return paper_df | Transform efficiency gain data frames output by nestcheck into the
format shown in the dynamic nested sampling paper (Higson et al. 2019).
Parameters
----------
eff_gain_df: pandas DataFrame
DataFrame of the form produced by efficiency_gain_df.
Returns
-------
paper_df: pandas DataFrame | entailment |
def get_eff_gain(base_std, base_std_unc, meth_std, meth_std_unc, adjust=1):
r"""Calculates efficiency gain for a new method compared to a base method.
Given the variation in repeated calculations' results using the two
methods, the efficiency gain is:
.. math::
\mathrm{efficiency\,gain}
=
\frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}
The uncertainty on the efficiency gain is also calculated.
See the dynamic nested sampling paper (Higson et al. 2019) for more
details.
Parameters
----------
base_std: 1d numpy array
base_std_unc: 1d numpy array
Uncertainties on base_std.
meth_std: 1d numpy array
meth_std_unc: 1d numpy array
Uncertainties on meth_std.
Returns
-------
gain: 1d numpy array
gain_unc: 1d numpy array
Uncertainties on gain.
"""
ratio = base_std / meth_std
ratio_unc = array_ratio_std(
base_std, base_std_unc, meth_std, meth_std_unc)
gain = ratio ** 2
gain_unc = 2 * ratio * ratio_unc
gain *= adjust
gain_unc *= adjust
return gain, gain_unc | r"""Calculates efficiency gain for a new method compared to a base method.
Given the variation in repeated calculations' results using the two
methods, the efficiency gain is:
.. math::
\mathrm{efficiency\,gain}
=
\frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}
The uncertainty on the efficiency gain is also calculated.
See the dynamic nested sampling paper (Higson et al. 2019) for more
details.
Parameters
----------
base_std: 1d numpy array
base_std_unc: 1d numpy array
Uncertainties on base_std.
meth_std: 1d numpy array
meth_std_unc: 1d numpy array
Uncertainties on meth_std.
Returns
-------
gain: 1d numpy array
gain_unc: 1d numpy array
Uncertainties on gain. | entailment |
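A quick numerical check: if the new method's standard deviation is half the base method's, the gain is about 4, i.e. roughly four times fewer samples are needed for the same precision:

import numpy as np
from nestcheck.pandas_functions import get_eff_gain

gain, gain_unc = get_eff_gain(np.array([1.0]), np.array([0.05]),
                              np.array([0.5]), np.array([0.05]))
print(gain, gain_unc)  # gain is approximately [4.], plus its propagated uncertainty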
def rmse_and_unc(values_array, true_values):
r"""Calculate the root meet squared error and its numerical uncertainty.
With a reasonably large number of values in values_list the uncertainty
on sq_errors should be approximately normal (from the central limit
theorem).
Uncertainties are calculated via error propagation: if :math:`\sigma`
is the error on :math:`X` then the error on :math:`\sqrt{X}`
is :math:`\frac{\sigma}{2 \sqrt{X}}`.
Parameters
----------
values_array: 2d numpy array
Array of results: each row corresponds to a different estimate of the
quantities considered.
true_values: 1d numpy array
Correct values for the quantities considered.
Returns
-------
rmse: 1d numpy array
Root-mean-squared-error for each quantity.
rmse_unc: 1d numpy array
Numerical uncertainties on each element of rmse.
"""
assert true_values.shape == (values_array.shape[1],)
errors = values_array - true_values[np.newaxis, :]
sq_errors = errors ** 2
sq_errors_mean = np.mean(sq_errors, axis=0)
sq_errors_mean_unc = (np.std(sq_errors, axis=0, ddof=1) /
np.sqrt(sq_errors.shape[0]))
rmse = np.sqrt(sq_errors_mean)
rmse_unc = 0.5 * (1 / rmse) * sq_errors_mean_unc
return rmse, rmse_unc | r"""Calculate the root meet squared error and its numerical uncertainty.
With a reasonably large number of values in values_list the uncertainty
on sq_errors should be approximately normal (from the central limit
theorem).
Uncertainties are calculated via error propagation: if :math:`\sigma`
is the error on :math:`X` then the error on :math:`\sqrt{X}`
is :math:`\frac{\sigma}{2 \sqrt{X}}`.
Parameters
----------
values_array: 2d numpy array
Array of results: each row corresponds to a different estimate of the
quantities considered.
true_values: 1d numpy array
Correct values for the quantities considered.
Returns
-------
rmse: 1d numpy array
Root-mean-squared-error for each quantity.
rmse_unc: 1d numpy array
Numerical uncertainties on each element of rmse. | entailment |
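A quick sketch: for estimates scattered about the true value with standard deviation 0.1, the RMSE comes out close to 0.1:

import numpy as np
from nestcheck.pandas_functions import rmse_and_unc

np.random.seed(0)
vals = np.random.normal(loc=1.0, scale=0.1, size=(1000, 1))
rmse, rmse_unc = rmse_and_unc(vals, np.array([1.0]))
print(rmse, rmse_unc)  # rmse is approximately [0.1], with a small uncertainty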
def array_ratio_std(values_n, sigmas_n, values_d, sigmas_d):
r"""Gives error on the ratio of 2 floats or 2 1-dimensional arrays given
their values and uncertainties. This assumes the covariance = 0, and that
the input uncertainties are small compared to the corresponding input
values. _n and _d denote the numerator and denominator respectively.
Parameters
----------
values_n: float or numpy array
Numerator values.
sigmas_n: float or numpy array
:math:`1\sigma` uncertainties on values_n.
values_d: float or numpy array
Denominator values.
sigmas_d: float or numpy array
:math:`1\sigma` uncertainties on values_d.
Returns
-------
std: float or numpy array
:math:`1\sigma` uncertainty on values_n / values_d.
"""
std = np.sqrt((sigmas_n / values_n) ** 2 + (sigmas_d / values_d) ** 2)
std *= (values_n / values_d)
return std | r"""Gives error on the ratio of 2 floats or 2 1-dimensional arrays given
their values and uncertainties. This assumes the covariance = 0, and that
the input uncertainties are small compared to the corresponding input
values. _n and _d denote the numerator and denominator respectively.
Parameters
----------
values_n: float or numpy array
Numerator values.
sigmas_n: float or numpy array
:math:`1\sigma` uncertainties on values_n.
values_d: float or numpy array
Denominator values.
sigmas_d: float or numpy array
:math:`1\sigma` uncertainties on values_d.
Returns
-------
std: float or numpy array
:math:`1\sigma` uncertainty on values_n / values_d. | entailment |
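A one-line numerical check of the propagation formula:

from nestcheck.pandas_functions import array_ratio_std

# (10 +/- 0.1) / (5 +/- 0.1): relative errors of 1% and 2%, so the ratio 2 has
# uncertainty 2 * sqrt(0.01 ** 2 + 0.02 ** 2), approximately 0.045
print(array_ratio_std(10.0, 0.1, 5.0, 0.1))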
def run_estimators(ns_run, estimator_list, simulate=False):
"""Calculates values of list of quantities (such as the Bayesian evidence
or mean of parameters) for a single nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
estimator_list: list of functions for estimating quantities from nested
sampling runs. Example functions can be found in estimators.py. Each
should have arguments: func(ns_run, logw=None).
simulate: bool, optional
See get_logw docstring.
Returns
-------
output: 1d numpy array
Calculation result for each estimator in estimator_list.
"""
logw = get_logw(ns_run, simulate=simulate)
output = np.zeros(len(estimator_list))
for i, est in enumerate(estimator_list):
output[i] = est(ns_run, logw=logw)
return output | Calculates values of list of quantities (such as the Bayesian evidence
or mean of parameters) for a single nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
estimator_list: list of functions for estimating quantities from nested
sampling runs. Example functions can be found in estimators.py. Each
should have arguments: func(ns_run, logw=None).
simulate: bool, optional
See get_logw docstring.
Returns
-------
output: 1d numpy array
Calculation result for each estimator in estimator_list. | entailment |
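A sketch using the toy run built earlier with process_samples_array; the estimator names (logz, param_mean) are taken from nestcheck's estimators module as an illustrative assumption:

import numpy as np
import nestcheck.estimators as e
from nestcheck.data_processing import process_samples_array
from nestcheck.ns_run_utils import run_estimators

samples = np.array([[0.1, 1.0, -1e30],
                    [0.2, 2.0, -1e30],
                    [0.3, 3.0, 1.0],
                    [0.4, 4.0, 2.0]])
run = process_samples_array(samples)
print(run_estimators(run, [e.logz, e.param_mean]))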
def array_given_run(ns_run):
"""Converts information on samples in a nested sampling run dictionary into
a numpy array representation. This allows fast addition of more samples and
recalculation of nlive.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
Returns
-------
samples: 2d numpy array
Array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample.
"""
samples = np.zeros((ns_run['logl'].shape[0], 3 + ns_run['theta'].shape[1]))
samples[:, 0] = ns_run['logl']
samples[:, 1] = ns_run['thread_labels']
# Calculate 'change in nlive' after each step
samples[:-1, 2] = np.diff(ns_run['nlive_array'])
samples[-1, 2] = -1 # nlive drops to zero after final point
samples[:, 3:] = ns_run['theta']
return samples | Converts information on samples in a nested sampling run dictionary into
a numpy array representation. This allows fast addition of more samples and
recalculation of nlive.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
Returns
-------
samples: 2d numpy array
Array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample. | entailment |
def dict_given_run_array(samples, thread_min_max):
"""
Converts an array of information about samples back into a nested sampling
run dictionary (see data_processing module docstring for more details).
N.B. the output dict only contains the following keys: 'logl',
'thread_labels', 'thread_min_max', 'nlive_array', 'theta'. Any other keys
giving additional information about the run output cannot be reproduced
from the function arguments, and are therefore omitted.
Parameters
----------
samples: numpy array
Numpy array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample.
thread_min_max: numpy array, optional
2d array with a row for each thread containing the likelihoods at which
it begins and ends.
Needed to calculate nlive_array (otherwise this is set to None).
Returns
-------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
"""
ns_run = {'logl': samples[:, 0],
'thread_labels': samples[:, 1],
'thread_min_max': thread_min_max,
'theta': samples[:, 3:]}
if np.all(~np.isnan(ns_run['thread_labels'])):
ns_run['thread_labels'] = ns_run['thread_labels'].astype(int)
assert np.array_equal(samples[:, 1], ns_run['thread_labels']), ((
'Casting thread labels from samples array to int has changed '
'their values!\nsamples[:, 1]={}\nthread_labels={}').format(
samples[:, 1], ns_run['thread_labels']))
nlive_0 = (thread_min_max[:, 0] <= ns_run['logl'].min()).sum()
assert nlive_0 > 0, 'nlive_0={}'.format(nlive_0)
nlive_array = np.zeros(samples.shape[0]) + nlive_0
nlive_array[1:] += np.cumsum(samples[:-1, 2])
# Check if there are multiple threads starting on the first logl point
dup_th_starts = (thread_min_max[:, 0] == ns_run['logl'].min()).sum()
if dup_th_starts > 1:
# In this case we approximate the true nlive (which we don't really
# know) by making sure the array's final point is 1 and setting all
# points with logl = logl.min() to have the same nlive
nlive_array += (1 - nlive_array[-1])
n_logl_min = (ns_run['logl'] == ns_run['logl'].min()).sum()
nlive_array[:n_logl_min] = nlive_0
warnings.warn((
'duplicate starting logls: {} threads start at logl.min()={}, '
'and {} points have logl=logl.min(). nlive_array may only be '
'approximately correct.').format(
dup_th_starts, ns_run['logl'].min(), n_logl_min), UserWarning)
assert nlive_array.min() > 0, ((
'nlive contains 0s or negative values. nlive_0={}'
'\nnlive_array = {}\nthread_min_max={}').format(
nlive_0, nlive_array, thread_min_max))
assert nlive_array[-1] == 1, (
'final point in nlive_array != 1.\nnlive_array = ' + str(nlive_array))
ns_run['nlive_array'] = nlive_array
return ns_run | Converts an array of information about samples back into a nested sampling
run dictionary (see data_processing module docstring for more details).
N.B. the output dict only contains the following keys: 'logl',
'thread_labels', 'thread_min_max', 'nlive_array', 'theta'. Any other keys
giving additional information about the run output cannot be reproduced
from the function arguments, and are therefore omitted.
Parameters
----------
samples: numpy array
Numpy array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample.
thread_min_max: numpy array, optional
2d array with a row for each thread containing the likelihoods at which
it begins and ends.
Needed to calculate nlive_array (otherwise this is set to None).
Returns
-------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details). | entailment |
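array_given_run and dict_given_run_array are inverses of each other (up to the optional 'output' key); a round-trip sketch on the same toy run:

import numpy as np
from nestcheck.data_processing import process_samples_array
from nestcheck.ns_run_utils import array_given_run, dict_given_run_array

run = process_samples_array(np.array([[0.1, 1.0, -1e30],
                                       [0.2, 2.0, -1e30],
                                       [0.3, 3.0, 1.0],
                                       [0.4, 4.0, 2.0]]))
arr = array_given_run(run)
run2 = dict_given_run_array(arr, run['thread_min_max'])
assert np.array_equal(run['nlive_array'], run2['nlive_array'])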
def get_run_threads(ns_run):
"""
Get the individual threads from a nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
Returns
-------
threads: list of numpy array
Each thread (list element) is a samples array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample.
"""
samples = array_given_run(ns_run)
unique_threads = np.unique(ns_run['thread_labels'])
assert ns_run['thread_min_max'].shape[0] == unique_threads.shape[0], (
'some threads have no points! {0} != {1}'.format(
unique_threads.shape[0], ns_run['thread_min_max'].shape[0]))
threads = []
for i, th_lab in enumerate(unique_threads):
thread_array = samples[np.where(samples[:, 1] == th_lab)]
# delete changes in nlive due to other threads in the run
thread_array[:, 2] = 0
thread_array[-1, 2] = -1
min_max = np.reshape(ns_run['thread_min_max'][i, :], (1, 2))
assert min_max[0, 1] == thread_array[-1, 0], (
'thread max logl should equal logl of its final point!')
threads.append(dict_given_run_array(thread_array, min_max))
return threads | Get the individual threads from a nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
Returns
-------
threads: list of numpy array
Each thread (list element) is a samples array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample. | entailment |
def combine_ns_runs(run_list_in, **kwargs):
"""
Combine a list of complete nested sampling run dictionaries into a single
ns run.
Input runs must contain any repeated threads.
Parameters
----------
run_list_in: list of dicts
List of nested sampling runs in dict format (see data_processing module
docstring for more details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
"""
run_list = copy.deepcopy(run_list_in)
if len(run_list) == 1:
run = run_list[0]
else:
nthread_tot = 0
for i, _ in enumerate(run_list):
check_ns_run(run_list[i], **kwargs)
run_list[i]['thread_labels'] += nthread_tot
nthread_tot += run_list[i]['thread_min_max'].shape[0]
thread_min_max = np.vstack([run['thread_min_max'] for run in run_list])
# construct samples array from the threads, including an updated nlive
samples_temp = np.vstack([array_given_run(run) for run in run_list])
samples_temp = samples_temp[np.argsort(samples_temp[:, 0])]
# Make combined run
run = dict_given_run_array(samples_temp, thread_min_max)
# Combine only the additive properties stored in run['output']
run['output'] = {}
for key in ['nlike', 'ndead']:
try:
run['output'][key] = sum([temp['output'][key] for temp in
run_list_in])
except KeyError:
pass
check_ns_run(run, **kwargs)
return run | Combine a list of complete nested sampling run dictionaries into a single
ns run.
Input runs must not contain any repeated threads.
Parameters
----------
run_list_in: list of dicts
List of nested sampling runs in dict format (see data_processing module
docstring for more details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details). | entailment |
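A minimal usage sketch, assuming run_1 and run_2 are placeholders for two independent, complete runs of the same problem:
combined = combine_ns_runs([run_1, run_2])
print(combined['thread_min_max'].shape[0])  # total number of threads after merging
print(combined['output'].get('nlike'))      # summed over the inputs when present in both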
def combine_threads(threads, assert_birth_point=False):
"""
Combine list of threads into a single ns run.
This is different to combining runs as repeated threads are allowed, and as
some threads can start from log-likelihood contours on which no dead
point in the run is present.
Note that unless the thread labels are all unique and in ascending order, the
output will fail check_ns_run. However, provided the thread labels are not
used, it will work correctly for calculations based on nlive, logl and theta.
Parameters
----------
threads: list of dicts
List of nested sampling run dicts, each representing a single thread.
assert_birth_point: bool, optional
Whether or not to assert there is exactly one point present in the run
with the log-likelihood at which each point was born. This is not true
for bootstrap resamples of runs, where birth points may be repeated or
not present at all.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
"""
thread_min_max = np.vstack([td['thread_min_max'] for td in threads])
assert len(threads) == thread_min_max.shape[0]
# construct samples array from the threads, including an updated nlive
samples_temp = np.vstack([array_given_run(thread) for thread in threads])
samples_temp = samples_temp[np.argsort(samples_temp[:, 0])]
# update the changes in live points column for threads which start part way
# through the run. These are only present in dynamic nested sampling.
logl_starts = thread_min_max[:, 0]
state = np.random.get_state() # save random state
np.random.seed(0)  # seed to make sure any random assignment is reproducible
for logl_start in logl_starts[logl_starts != -np.inf]:
ind = np.where(samples_temp[:, 0] == logl_start)[0]
if assert_birth_point:
assert ind.shape == (1,), \
'No unique birth point! ' + str(ind.shape)
if ind.shape == (1,):
# If the point at which this thread started is present exactly
# once in this bootstrap replication:
samples_temp[ind[0], 2] += 1
elif ind.shape == (0,):
# If the point with the likelihood at which the thread started
# is not present in this particular bootstrap replication,
# approximate it with the point with the nearest likelihood.
ind_closest = np.argmin(np.abs(samples_temp[:, 0] - logl_start))
samples_temp[ind_closest, 2] += 1
else:
# If the point at which this thread started is present multiple
# times in this bootstrap replication, select one at random to
# increment nlive on. This avoids any systematic bias from e.g.
# always choosing the first point.
samples_temp[np.random.choice(ind), 2] += 1
np.random.set_state(state)
# make run
ns_run = dict_given_run_array(samples_temp, thread_min_max)
try:
check_ns_run_threads(ns_run)
except AssertionError:
# If the threads are not valid (e.g. for bootstrap resamples) then
# set them to None so they can't be accidentally used
ns_run['thread_labels'] = None
ns_run['thread_min_max'] = None
return ns_run | Combine list of threads into a single ns run.
This is different to combining runs as repeated threads are allowed, and as
some threads can start from log-likelihood contours on which no dead
point in the run is present.
Note that unless the thread labels are all unique and in ascending order, the
output will fail check_ns_run. However, provided the thread labels are not
used, it will work correctly for calculations based on nlive, logl and theta.
Parameters
----------
threads: list of dicts
List of nested sampling run dicts, each representing a single thread.
assert_birth_point: bool, optional
Whether or not to assert there is exactly one point present in the run
with the log-likelihood at which each point was born. This is not true
for bootstrap resamples of runs, where birth points may be repeated or
not present at all.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details). | entailment |
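combine_threads is the natural building block for bootstrap resamples of a run; a minimal sketch, assuming run is a run dict produced elsewhere:
import numpy as np

threads = get_run_threads(run)
rng = np.random.default_rng(0)
# Draw threads with replacement to form a bootstrap replication of the run.
inds = rng.integers(len(threads), size=len(threads))
boot_run = combine_threads([threads[i] for i in inds])
# If the resulting thread labels are no longer valid they are set to None,
# but logl, theta and nlive_array remain usable for downstream estimates.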