sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (1 class: entailment) |
---|---|---|
def get_logw(ns_run, simulate=False):
r"""Calculates the log posterior weights of the samples (using logarithms
to avoid overflow errors with very large or small values).
Uses the trapezium rule such that the weight of point i is
.. math:: w_i = \mathcal{L}_i (X_{i-1} - X_{i+1}) / 2
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
simulate: bool, optional
Should log prior volumes logx be simulated from their distribution (if
false their expected values are used).
Returns
-------
logw: 1d numpy array
Log posterior masses of points.
"""
try:
# find logX value for each point
logx = get_logx(ns_run['nlive_array'], simulate=simulate)
logw = np.zeros(ns_run['logl'].shape[0])
# Vectorized trapezium rule: w_i prop to (X_{i-1} - X_{i+1}) / 2
logw[1:-1] = log_subtract(logx[:-2], logx[2:]) - np.log(2)
# Assign all prior volume closest to first point X_first to that point:
# that is from logx=0 to logx=log((X_first + X_second) / 2)
logw[0] = log_subtract(0, scipy.special.logsumexp([logx[0], logx[1]]) -
np.log(2))
# Assign all prior volume closest to final point X_last to that point:
# that is from logx=log((X_penultimate + X_last) / 2) to logx=-inf
logw[-1] = scipy.special.logsumexp([logx[-2], logx[-1]]) - np.log(2)
# multiply by likelihood (add in log space)
logw += ns_run['logl']
return logw
except IndexError:
if ns_run['logl'].shape[0] == 1:
# If there is only one point in the run then assign all prior
# volume X \in (0, 1) to that point, so the weight is just
# 1 * logl_0 = logl_0
return copy.deepcopy(ns_run['logl'])
else:
raise | r"""Calculates the log posterior weights of the samples (using logarithms
to avoid overflow errors with very large or small values).
Uses the trapezium rule such that the weight of point i is
.. math:: w_i = \mathcal{L}_i (X_{i-1} - X_{i+1}) / 2
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
simulate: bool, optional
Should log prior volumes logx be simulated from their distribution (if
false their expected values are used).
Returns
-------
logw: 1d numpy array
Log posterior masses of points. | entailment |
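A minimal usage sketch (assuming numpy is imported as np and `run` is a hypothetical nested sampling run dict of the kind checked by check_ns_run below), showing how the log weights are typically turned into normalised posterior weights:

import numpy as np

logw = get_logw(run, simulate=False)  # log posterior mass of each sample
w = np.exp(logw - logw.max())         # subtract the max to avoid overflow
w /= w.sum()                          # normalised posterior weights, sum to 1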
def get_w_rel(ns_run, simulate=False):
"""Get the relative posterior weights of the samples, normalised so
the maximum sample weight is 1. This is calculated from get_logw with
protection against numerical overflows.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
simulate: bool, optional
See the get_logw docstring for more details.
Returns
-------
w_rel: 1d numpy array
Relative posterior masses of points.
"""
logw = get_logw(ns_run, simulate=simulate)
return np.exp(logw - logw.max()) | Get the relative posterior weights of the samples, normalised so
the maximum sample weight is 1. This is calculated from get_logw with
protection against numerical overflows.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
simulate: bool, optional
See the get_logw docstring for more details.
Returns
-------
w_rel: 1d numpy array
Relative posterior masses of points. | entailment |
def get_logx(nlive, simulate=False):
r"""Returns a logx vector showing the expected or simulated logx positions
of points.
The shrinkage factor between two points
.. math:: t_i = X_{i} / X_{i-1}
is distributed as the largest of :math:`n_i` uniform random variables
between 0 and 1, where :math:`n_i` is the local number of live points.
We are interested in
.. math:: \log(t_i) = \log X_{i} - \log X_{i-1}
which has expected value :math:`-1/n_i`.
Parameters
----------
nlive: 1d numpy array
Ordered local number of live points present at each point's
iso-likelihood contour.
simulate: bool, optional
Should log prior volumes logx be simulated from their distribution (if
False their expected values are used).
Returns
-------
logx: 1d numpy array
log X values for points.
"""
assert nlive.min() > 0, (
'nlive contains zeros or negative values! nlive = ' + str(nlive))
if simulate:
logx_steps = np.log(np.random.random(nlive.shape)) / nlive
else:
logx_steps = -1 * (nlive.astype(float) ** -1)
return np.cumsum(logx_steps) | r"""Returns a logx vector showing the expected or simulated logx positions
of points.
The shrinkage factor between two points
.. math:: t_i = X_{i} / X_{i-1}
is distributed as the largest of :math:`n_i` uniform random variables
between 0 and 1, where :math:`n_i` is the local number of live points.
We are interested in
.. math:: \log(t_i) = \log X_{i} - \log X_{i-1}
which has expected value :math:`-1/n_i`.
Parameters
----------
nlive: 1d numpy array
Ordered local number of live points present at each point's
iso-likelihood contour.
simulate: bool, optional
Should log prior volumes logx be simulated from their distribution (if
False their expected values are used).
Returns
-------
logx: 1d numpy array
log X values for points. | entailment |
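As a quick check of the expected (non-simulated) behaviour, with a constant 100 live points each step shrinks log X by 1/100 (a sketch assuming numpy is imported as np):

import numpy as np

nlive = np.full(5, 100)
logx = get_logx(nlive, simulate=False)
assert np.allclose(logx, -np.arange(1, 6) / 100.0)  # cumulative sum of -1/100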
def log_subtract(loga, logb):
r"""Numerically stable method for avoiding overflow errors when calculating
:math:`\log (a-b)`, given :math:`\log (a)` and :math:`\log (b)`, and that
:math:`a > b`.
See https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
for more details.
Parameters
----------
loga: float
logb: float
Must be less than loga.
Returns
-------
log(a - b): float
"""
return loga + np.log(1 - np.exp(logb - loga)) | r"""Numerically stable method for avoiding overflow errors when calculating
:math:`\log (a-b)`, given :math:`\log (a)` and :math:`\log (b)`, and that
:math:`a > b`.
See https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
for more details.
Parameters
----------
loga: float
logb: float
Must be less than loga.
Returns
-------
log(a - b): float | entailment |
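A quick numerical sanity check of log_subtract (assuming numpy is imported as np): log(5 - 3) is recovered from log(5) and log(3):

import numpy as np

assert np.isclose(log_subtract(np.log(5.0), np.log(3.0)), np.log(2.0))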
def check_ns_run(run, dup_assert=False, dup_warn=False):
"""Checks a nestcheck format nested sampling run dictionary has the
expected properties (see the data_processing module docstring for more
details).
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
See check_ns_run_logls docstring.
dup_warn: bool, optional
See check_ns_run_logls docstring.
Raises
------
AssertionError
if run does not have expected properties.
"""
assert isinstance(run, dict)
check_ns_run_members(run)
check_ns_run_logls(run, dup_assert=dup_assert, dup_warn=dup_warn)
check_ns_run_threads(run) | Checks a nestcheck format nested sampling run dictionary has the
expected properties (see the data_processing module docstring for more
details).
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
See check_ns_run_logls docstring.
dup_warn: bool, optional
See check_ns_run_logls docstring.
Raises
------
AssertionError
if run does not have expected properties. | entailment |
def check_ns_run_members(run):
"""Check nested sampling run member keys and values.
Parameters
----------
run: dict
nested sampling run to check.
Raises
------
AssertionError
if run does not have expected properties.
"""
run_keys = list(run.keys())
# Mandatory keys
for key in ['logl', 'nlive_array', 'theta', 'thread_labels',
'thread_min_max']:
assert key in run_keys
run_keys.remove(key)
# Optional keys
for key in ['output']:
try:
run_keys.remove(key)
except ValueError:
pass
# Check for unexpected keys
assert not run_keys, 'Unexpected keys in ns_run: ' + str(run_keys)
# Check type of mandatory members
for key in ['logl', 'nlive_array', 'theta', 'thread_labels',
'thread_min_max']:
assert isinstance(run[key], np.ndarray), (
key + ' is type ' + type(run[key]).__name__)
# check shapes of keys
assert run['logl'].ndim == 1
assert run['logl'].shape == run['nlive_array'].shape
assert run['logl'].shape == run['thread_labels'].shape
assert run['theta'].ndim == 2
assert run['logl'].shape[0] == run['theta'].shape[0] | Check nested sampling run member keys and values.
Parameters
----------
run: dict
nested sampling run to check.
Raises
------
AssertionError
if run does not have expected properties. | entailment |
def check_ns_run_logls(run, dup_assert=False, dup_warn=False):
"""Check run logls are unique and in the correct order.
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
Whether to raise an AssertionError if there are duplicate logl values.
dup_warn: bool, optional
Whether to give a UserWarning if there are duplicate logl values (only
used if dup_assert is False).
Raises
------
AssertionError
if run does not have expected properties.
"""
assert np.array_equal(run['logl'], run['logl'][np.argsort(run['logl'])])
if dup_assert or dup_warn:
unique_logls, counts = np.unique(run['logl'], return_counts=True)
repeat_logls = run['logl'].shape[0] - unique_logls.shape[0]
msg = ('{} duplicate logl values (out of a total of {}). This may be '
'caused by limited numerical precision in the output files.'
'\nrepeated logls = {}\ncounts = {}\npositions in list of {}'
' unique logls = {}').format(
repeat_logls, run['logl'].shape[0],
unique_logls[counts != 1], counts[counts != 1],
unique_logls.shape[0], np.where(counts != 1)[0])
if dup_assert:
assert repeat_logls == 0, msg
elif dup_warn:
if repeat_logls != 0:
warnings.warn(msg, UserWarning) | Check run logls are unique and in the correct order.
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
Whether to raise an AssertionError if there are duplicate logl values.
dup_warn: bool, optional
Whether to give a UserWarning if there are duplicate logl values (only
used if dup_assert is False).
Raises
------
AssertionError
if run does not have expected properties. | entailment |
def check_ns_run_threads(run):
"""Check thread labels and thread_min_max have expected properties.
Parameters
----------
run: dict
Nested sampling run to check.
Raises
------
AssertionError
If run does not have expected properties.
"""
assert run['thread_labels'].dtype == int
uniq_th = np.unique(run['thread_labels'])
assert np.array_equal(
np.asarray(range(run['thread_min_max'].shape[0])), uniq_th), \
str(uniq_th)
# Check thread_min_max
assert np.any(run['thread_min_max'][:, 0] == -np.inf), (
'Run should have at least one thread which starts by sampling the ' +
'whole prior')
for th_lab in uniq_th:
inds = np.where(run['thread_labels'] == th_lab)[0]
th_info = 'thread label={}, first_logl={}, thread_min_max={}'.format(
th_lab, run['logl'][inds[0]], run['thread_min_max'][th_lab, :])
assert run['thread_min_max'][th_lab, 0] <= run['logl'][inds[0]], (
'First point in thread has logl less than thread min logl! ' +
th_info + ', difference={}'.format(
run['logl'][inds[0]] - run['thread_min_max'][th_lab, 0]))
assert run['thread_min_max'][th_lab, 1] == run['logl'][inds[-1]], (
'Last point in thread logl != thread end logl! ' + th_info) | Check thread labels and thread_min_max have expected properties.
Parameters
----------
run: dict
Nested sampling run to check.
Raises
------
AssertionError
If run does not have expected properties. | entailment |
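To illustrate the run format these checks expect, here is a toy single-thread run dict that passes check_ns_run (a sketch only; real run dicts are produced by the data_processing module):

import numpy as np

run = {
    'logl': np.array([0.0, 1.0, 2.0]),             # strictly increasing logl
    'nlive_array': np.full(3, 1.0),                # one live point throughout
    'theta': np.random.random((3, 2)),             # 3 samples, 2 parameters
    'thread_labels': np.zeros(3, dtype=int),       # a single thread labelled 0
    'thread_min_max': np.array([[-np.inf, 2.0]]),  # thread starts from the whole prior
}
check_ns_run(run, dup_assert=True)                 # raises nothing for this dict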
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0] | r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int | entailment |
def logz(ns_run, logw=None, simulate=False):
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw) | r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float | entailment |
def evidence(ns_run, logw=None, simulate=False):
r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw)) | r"""Bayesian evidence :math:`\mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float | entailment |
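Since evidence simply exponentiates the log-sum-exp that logz returns, the two estimators are related as below (a sketch assuming numpy is imported as np and `run` is a hypothetical valid run dict); logz is usually preferred numerically:

import numpy as np

assert np.isclose(evidence(run), np.exp(logz(run)))  # Z = exp(log Z)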
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
handle_indexerror=False):
"""Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
try:
return (np.sum(w_relative * ns_run['theta'][:, param_ind])
/ np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise | Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float | entailment |
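The weighting pattern used here (and by the other estimators below) can be reproduced by hand; a sketch assuming numpy is imported as np, nestcheck is importable, and `run` is a hypothetical valid run dict:

import numpy as np
import nestcheck.ns_run_utils

logw = nestcheck.ns_run_utils.get_logw(run)
w = np.exp(logw - logw.max())                   # relative weights, overflow-safe
mean_0 = np.sum(w * run['theta'][:, 0]) / np.sum(w)
assert np.isclose(mean_0, param_mean(run, param_ind=0))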
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative) | One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float | entailment |
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2)) | Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float | entailment |
def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative) | Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float | entailment |
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative) | One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float | entailment |
def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise | Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function. | entailment |
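For example, following the logic above, a partial function wrapping the param_cred estimator maps to an upper credible interval label (a quick illustration; functools is assumed imported):

import functools

name = get_latex_name(functools.partial(param_cred, probability=0.84, param_ind=1))
# name == r'$\mathrm{C.I.}_{84\%}(\theta_{\hat{2}})$'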
def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds]) | Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float | entailment |
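A small worked example of the interpolation (assuming numpy is imported as np): with four equally weighted values the normalised cumulative weights are [0.125, 0.375, 0.625, 0.875], so the median interpolates halfway between 2 and 3:

import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0])
weights = np.ones(4)
assert np.isclose(weighted_quantile(0.5, values, weights), 2.5)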
def write_run_output(run, **kwargs):
"""Writes PolyChord output files corresponding to the input nested sampling
run. The file root is
.. code-block:: python
root = os.path.join(run['output']['base_dir'],
run['output']['file_root'])
Output files which can be made with this function (see the PolyChord
documentation for more information about what each contains):
* [root].stats
* [root].txt
* [root]_equal_weights.txt
* [root]_dead-birth.txt
* [root]_dead.txt
Files produced by PolyChord which are not made by this function:
* [root].resume: for resuming runs part way through (not relevant for a
completed run).
* [root]_phys_live.txt and [root]_phys_live-birth.txt: for checking runtime
progress (not relevant for a completed run).
* [root].paramnames: for use with getdist (not needed when calling getdist
from within python).
Parameters
----------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
write_dead: bool, optional
Whether or not to write [root]_dead.txt and [root]_dead-birth.txt.
write_stats: bool, optional
Whether or not to write [root].stats.
posteriors: bool, optional
Whether or not to write [root].txt.
equals: bool, optional
Whether or not to write [root]_equal_weights.txt.
stats_means_errs: bool, optional
Whether or not to calculate mean values of :math:`\log \mathcal{Z}` and
each parameter, and their uncertainties.
fmt: str, optional
Formatting for numbers written by np.savetxt. Default value is set to
make output files look like the ones produced by PolyChord.
n_simulate: int, optional
Number of bootstrap replications to use when estimating uncertainty on
evidence and parameter means.
"""
write_dead = kwargs.pop('write_dead', True)
write_stats = kwargs.pop('write_stats', True)
posteriors = kwargs.pop('posteriors', False)
equals = kwargs.pop('equals', False)
stats_means_errs = kwargs.pop('stats_means_errs', True)
fmt = kwargs.pop('fmt', '% .14E')
n_simulate = kwargs.pop('n_simulate', 100)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
mandatory_keys = ['file_root', 'base_dir']
for key in mandatory_keys:
assert key in run['output'], key + ' not in run["output"]'
root = os.path.join(run['output']['base_dir'], run['output']['file_root'])
if write_dead:
samples = run_dead_birth_array(run)
np.savetxt(root + '_dead-birth.txt', samples, fmt=fmt)
np.savetxt(root + '_dead.txt', samples[:, :-1], fmt=fmt)
if equals or posteriors:
w_rel = nestcheck.ns_run_utils.get_w_rel(run)
post_arr = np.zeros((run['theta'].shape[0], run['theta'].shape[1] + 2))
post_arr[:, 0] = w_rel
post_arr[:, 1] = -2 * run['logl']
post_arr[:, 2:] = run['theta']
if posteriors:
np.savetxt(root + '.txt', post_arr, fmt=fmt)
run['output']['nposterior'] = post_arr.shape[0]
else:
run['output']['nposterior'] = 0
if equals:
inds = np.where(w_rel > np.random.random(w_rel.shape[0]))[0]
np.savetxt(root + '_equal_weights.txt', post_arr[inds, 1:],
fmt=fmt)
run['output']['nequals'] = inds.shape[0]
else:
run['output']['nequals'] = 0
if write_stats:
run['output']['ndead'] = run['logl'].shape[0]
if stats_means_errs:
# Get logZ and param estimates and errors
estimators = [e.logz]
for i in range(run['theta'].shape[1]):
estimators.append(functools.partial(e.param_mean, param_ind=i))
values = nestcheck.ns_run_utils.run_estimators(run, estimators)
stds = nestcheck.error_analysis.run_std_bootstrap(
run, estimators, n_simulate=n_simulate)
run['output']['logZ'] = values[0]
run['output']['logZerr'] = stds[0]
run['output']['param_means'] = list(values[1:])
run['output']['param_mean_errs'] = list(stds[1:])
write_stats_file(run['output']) | Writes PolyChord output files corresponding to the input nested sampling
run. The file root is
.. code-block:: python
root = os.path.join(run['output']['base_dir'],
run['output']['file_root'])
Output files which can be made with this function (see the PolyChord
documentation for more information about what each contains):
* [root].stats
* [root].txt
* [root]_equal_weights.txt
* [root]_dead-birth.txt
* [root]_dead.txt
Files produced by PolyChord which are not made by this function:
* [root].resume: for resuming runs part way through (not relevant for a
completed run).
* [root]_phys_live.txt and [root]_phys_live-birth.txt: for checking runtime
progress (not relevant for a completed run).
* [root].paramnames: for use with getdist (not needed when calling getdist
from within python).
Parameters
----------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
write_dead: bool, optional
Whether or not to write [root]_dead.txt and [root]_dead-birth.txt.
write_stats: bool, optional
Whether or not to write [root].stats.
posteriors: bool, optional
Whether or not to write [root].txt.
equals: bool, optional
Whether or not to write [root]_equal_weights.txt.
stats_means_errs: bool, optional
Whether or not to calculate mean values of :math:`\log \mathcal{Z}` and
each parameter, and their uncertainties.
fmt: str, optional
Formatting for numbers written by np.savetxt. Default value is set to
make output files look like the ones produced by PolyChord.
n_simulate: int, optional
Number of bootstrap replications to use when estimating uncertainty on
evidence and parameter means. | entailment |
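A hedged usage sketch (the output directory and file root are hypothetical, and `run` is a valid run dict): add the output settings to the run, then write just the dead points files:

import os

run['output'] = {'base_dir': 'chains', 'file_root': 'demo_run'}
os.makedirs(run['output']['base_dir'], exist_ok=True)
write_run_output(run, write_stats=False)  # writes chains/demo_run_dead-birth.txt and _dead.txt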
def run_dead_birth_array(run, **kwargs):
"""Converts input run into an array of the format of a PolyChord
<root>_dead-birth.txt file. Note that this in fact includes live points
remaining at termination as well as dead points.
Parameters
----------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl
"""
nestcheck.ns_run_utils.check_ns_run(run, **kwargs)
threads = nestcheck.ns_run_utils.get_run_threads(run)
samp_arrays = []
ndim = run['theta'].shape[1]
for th in threads:
samp_arr = np.zeros((th['theta'].shape[0], ndim + 2))
samp_arr[:, :ndim] = th['theta']
samp_arr[:, ndim] = th['logl']
samp_arr[1:, ndim + 1] = th['logl'][:-1]
if th['thread_min_max'][0, 0] == -np.inf:
samp_arr[0, ndim + 1] = -1e30
else:
samp_arr[0, ndim + 1] = th['thread_min_max'][0, 0]
samp_arrays.append(samp_arr)
samples = np.vstack(samp_arrays)
samples = samples[np.argsort(samples[:, ndim]), :]
return samples | Converts input run into an array of the format of a PolyChord
<root>_dead-birth.txt file. Note that this in fact includes live points
remaining at termination as well as dead points.
Parameters
----------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl | entailment |
def write_stats_file(run_output_dict):
"""Writes a dummy PolyChord format .stats file for tests functions for
processing stats files. This is written to:
base_dir/file_root.stats
Also returns the data in the file as a dict for comparison.
Parameters
----------
run_output_dict: dict
Output information to write to .stats file. Must contain file_root and
base_dir. If other settings are not specified, default values are used.
Returns
-------
output: dict
The expected output of
nestcheck.process_polychord_stats(file_root, base_dir)
"""
mandatory_keys = ['file_root', 'base_dir']
for key in mandatory_keys:
assert key in run_output_dict, key + ' not in run_output_dict'
default_output = {'logZ': 0.0,
'logZerr': 0.0,
'logZs': [0.0],
'logZerrs': [0.0],
'ncluster': 1,
'nposterior': 0,
'nequals': 0,
'ndead': 0,
'nlike': 0,
'nlive': 0,
'avnlike': 0.0,
'avnlikeslice': 0.0,
'param_means': [0.0, 0.0, 0.0],
'param_mean_errs': [0.0, 0.0, 0.0]}
allowed_keys = set(mandatory_keys) | set(default_output.keys())
assert set(run_output_dict.keys()).issubset(allowed_keys), (
'Input dict contains unexpected keys: {}'.format(
set(run_output_dict.keys()) - allowed_keys))
output = copy.deepcopy(run_output_dict)
for key, value in default_output.items():
if key not in output:
output[key] = value
# Make a PolyChord format .stats file corresponding to output
file_lines = [
'Evidence estimates:',
'===================',
(' - The evidence Z is a log-normally distributed, with location and '
'scale parameters mu and sigma.'),
' - We denote this as log(Z) = mu +/- sigma.',
'',
'Global evidence:',
'----------------',
'',
'log(Z) = {0} +/- {1}'.format(
output['logZ'], output['logZerr']),
'',
'',
'Local evidences:',
'----------------',
'']
for i, (lz, lzerr) in enumerate(zip(output['logZs'], output['logZerrs'])):
file_lines.append('log(Z_ {0}) = {1} +/- {2}'.format(
str(i + 1).rjust(2), lz, lzerr))
file_lines += [
'',
'',
'Run-time information:',
'---------------------',
'',
' ncluster: 0 / 1',
' nposterior: {0}'.format(output['nposterior']),
' nequals: {0}'.format(output['nequals']),
' ndead: {0}'.format(output['ndead']),
' nlive: {0}'.format(output['nlive']),
' nlike: {0}'.format(output['nlike']),
' <nlike>: {0} ( {1} per slice )'.format(
output['avnlike'], output['avnlikeslice']),
'',
'',
'Dim No. Mean Sigma']
for i, (mean, meanerr) in enumerate(zip(output['param_means'],
output['param_mean_errs'])):
file_lines.append('{0} {1} +/- {2}'.format(
str(i + 1).ljust(3), mean, meanerr))
file_path = os.path.join(output['base_dir'],
output['file_root'] + '.stats')
with open(file_path, 'w') as stats_file:
stats_file.writelines('{}\n'.format(line) for line in file_lines)
return output | Writes a dummy PolyChord format .stats file for testing functions which
process stats files. This is written to:
base_dir/file_root.stats
Also returns the data in the file as a dict for comparison.
Parameters
----------
run_output_dict: dict
Output information to write to .stats file. Must contain file_root and
base_dir. If other settings are not specified, default values are used.
Returns
-------
output: dict
The expected output of
nestcheck.process_polychord_stats(file_root, base_dir) | entailment |
def run_list_error_values(run_list, estimator_list, estimator_names,
n_simulate=100, **kwargs):
"""Gets a data frame with calculation values and error diagnostics for each
run in the input run list.
NB when parallelised the results will not be produced in order (so results
from some run number will not necessarily correspond to that number run in
run_list).
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int, optional
Number of bootstrap replications to use on each run.
thread_pvalue: bool, optional
Whether or not to compute KS test diagnostic for correlations between
threads within a run.
bs_stat_dist: bool, optional
Whether or not to compute statistical distance between bootstrap error
distributions diagnostic.
parallel: bool, optional
Whether or not to parallelise - see parallel_utils.parallel_apply.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
df: pandas DataFrame
Results table showing calculation values and diagnostics. Rows
show different runs (or pairs of runs for pairwise comparisons).
Columns have titles given by estimator_names and show results for the
different functions in estimator_list.
"""
thread_pvalue = kwargs.pop('thread_pvalue', False)
bs_stat_dist = kwargs.pop('bs_stat_dist', False)
parallel = kwargs.pop('parallel', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert len(estimator_list) == len(estimator_names), (
'len(estimator_list) = {0} != len(estimator_names) = {1}'
.format(len(estimator_list), len(estimator_names)))
# Calculation results
# -------------------
df = estimator_values_df(run_list, estimator_list, parallel=parallel,
estimator_names=estimator_names)
df.index = df.index.map(str)
df['calculation type'] = 'values'
df.set_index('calculation type', drop=True, append=True, inplace=True)
df = df.reorder_levels(['calculation type', 'run'])
# Bootstrap stds
# --------------
# Create bs_vals_df then convert to stds so bs_vals_df does not need to be
# recomputed if bs_stat_dist is True
bs_vals_df = bs_values_df(run_list, estimator_list, estimator_names,
n_simulate, parallel=parallel)
bs_std_df = bs_vals_df.applymap(lambda x: np.std(x, ddof=1))
bs_std_df.index.name = 'run'
bs_std_df['calculation type'] = 'bootstrap std'
bs_std_df.set_index('calculation type', drop=True, append=True,
inplace=True)
bs_std_df = bs_std_df.reorder_levels(['calculation type', 'run'])
df = pd.concat([df, bs_std_df])
# Pairwise KS p-values on threads
# -------------------------------
if thread_pvalue:
t_vals_df = thread_values_df(
run_list, estimator_list, estimator_names, parallel=parallel)
t_d_df = pairwise_dists_on_cols(t_vals_df, earth_mover_dist=False,
energy_dist=False)
# Keep only the p value not the distance measures
t_d_df = t_d_df.xs('ks pvalue', level='calculation type',
drop_level=False)
# Append 'thread ' to calculation type
t_d_df.index.set_levels(['thread ks pvalue'], level='calculation type',
inplace=True)
df = pd.concat([df, t_d_df])
# Pairwise distances on BS distributions
# --------------------------------------
if bs_stat_dist:
b_d_df = pairwise_dists_on_cols(bs_vals_df)
# Select only statistical distances - not KS pvalue as this is not
# useful for the bootstrap resample distributions (see Higson et al.
# 2019 for more details).
dists = ['ks distance', 'earth mover distance', 'energy distance']
b_d_df = b_d_df.loc[pd.IndexSlice[dists, :], :]
# Append 'bootstrap ' to calculation type
new_ind = ['bootstrap ' +
b_d_df.index.get_level_values('calculation type'),
b_d_df.index.get_level_values('run')]
b_d_df.set_index(new_ind, inplace=True)
df = pd.concat([df, b_d_df])
return df | Gets a data frame with calculation values and error diagnostics for each
run in the input run list.
NB when parallelised the results will not be produced in order (so results
from some run number will not necessarily correspond to that number run in
run_list).
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int, optional
Number of bootstrap replications to use on each run.
thread_pvalue: bool, optional
Whether or not to compute KS test diagnostic for correlations between
threads within a run.
bs_stat_dist: bool, optional
Whether or not to compute statistical distance between bootstrap error
distributions diagnostic.
parallel: bool, optional
Whether or not to parallelise - see parallel_utils.parallel_apply.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
df: pandas DataFrame
Results table showing calculation values and diagnostics. Rows
show different runs (or pairs of runs for pairwise comparisons).
Columns have titles given by estimator_names and show results for the
different functions in estimator_list. | entailment |
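A usage sketch (run_list is a hypothetical list of valid run dicts; the estimator functions are assumed to come from nestcheck.estimators, aliased as e):

import nestcheck.estimators as e

estimator_list = [e.logz, e.param_mean]
estimator_names = ['logz', 'param_mean']
df = run_list_error_values(run_list, estimator_list, estimator_names,
                           n_simulate=100, thread_pvalue=True, bs_stat_dist=True)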
def estimator_values_df(run_list, estimator_list, **kwargs):
"""Get a dataframe of estimator values.
NB when parallelised the results will not be produced in order (so results
from some run number will not necessarily correspond to that number run in
run_list).
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs, optional
Name of each func in estimator_list.
parallel: bool, optional
Whether or not to parallelise - see parallel_utils.parallel_apply.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
df: pandas DataFrame
Results table showing calculation values and diagnostics. Rows
show different runs.
Columns have titles given by estimator_names and show results for the
different functions in estimator_list.
"""
estimator_names = kwargs.pop(
'estimator_names',
['est_' + str(i) for i in range(len(estimator_list))])
parallel = kwargs.pop('parallel', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
values_list = pu.parallel_apply(
nestcheck.ns_run_utils.run_estimators, run_list,
func_args=(estimator_list,), parallel=parallel)
df = pd.DataFrame(np.stack(values_list, axis=0))
df.columns = estimator_names
df.index.name = 'run'
return df | Get a dataframe of estimator values.
NB when parallelised the results will not be produced in order (so results
from some run number will not necessarily correspond to that number run in
run_list).
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs, optional
Name of each func in estimator_list.
parallel: bool, optional
Whether or not to parallelise - see parallel_utils.parallel_apply.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
df: pandas DataFrame
Results table showing calculation values and diagnostics. Rows
show different runs.
Columns have titles given by estimator_names and show results for the
different functions in estimator_list. | entailment |
def error_values_summary(error_values, **summary_df_kwargs):
"""Get summary statistics about calculation errors, including estimated
implementation errors.
Parameters
----------
error_values: pandas DataFrame
Of format output by run_list_error_values (look at it for more
details).
summary_df_kwargs: dict, optional
See pandas_functions.summary_df docstring for more details.
Returns
-------
df: pandas DataFrame
Table showing means and standard deviations of results and diagnostics
for the different runs. Also contains estimated numerical uncertainties
on results.
"""
df = pf.summary_df_from_multi(error_values, **summary_df_kwargs)
# get implementation stds
imp_std, imp_std_unc, imp_frac, imp_frac_unc = \
nestcheck.error_analysis.implementation_std(
df.loc[('values std', 'value')],
df.loc[('values std', 'uncertainty')],
df.loc[('bootstrap std mean', 'value')],
df.loc[('bootstrap std mean', 'uncertainty')])
df.loc[('implementation std', 'value'), df.columns] = imp_std
df.loc[('implementation std', 'uncertainty'), df.columns] = imp_std_unc
df.loc[('implementation std frac', 'value'), :] = imp_frac
df.loc[('implementation std frac', 'uncertainty'), :] = imp_frac_unc
# Get implementation RMSEs (calculated using the values RMSE instead of
# values std)
if 'values rmse' in set(df.index.get_level_values('calculation type')):
imp_rmse, imp_rmse_unc, imp_frac, imp_frac_unc = \
nestcheck.error_analysis.implementation_std(
df.loc[('values rmse', 'value')],
df.loc[('values rmse', 'uncertainty')],
df.loc[('bootstrap std mean', 'value')],
df.loc[('bootstrap std mean', 'uncertainty')])
df.loc[('implementation rmse', 'value'), df.columns] = imp_rmse
df.loc[('implementation rmse', 'uncertainty'), df.columns] = \
imp_rmse_unc
df.loc[('implementation rmse frac', 'value'), :] = imp_frac
df.loc[('implementation rmse frac', 'uncertainty'), :] = imp_frac_unc
# Return only the calculation types we are interested in, in order
calcs_to_keep = ['true values', 'values mean', 'values std',
'values rmse', 'bootstrap std mean',
'implementation std', 'implementation std frac',
'implementation rmse', 'implementation rmse frac',
'thread ks pvalue mean', 'bootstrap ks distance mean',
'bootstrap energy distance mean',
'bootstrap earth mover distance mean']
df = pd.concat([df.xs(calc, level='calculation type', drop_level=False) for
calc in calcs_to_keep if calc in
df.index.get_level_values('calculation type')])
return df | Get summary statistics about calculation errors, including estimated
implementation errors.
Parameters
----------
error_values: pandas DataFrame
Of format output by run_list_error_values (look at it for more
details).
summary_df_kwargs: dict, optional
See pandas_functions.summary_df docstring for more details.
Returns
-------
df: pandas DataFrame
Table showing means and standard deviations of results and diagnostics
for the different runs. Also contains estimated numerical uncertainties
on results. | entailment |
def run_list_error_summary(run_list, estimator_list, estimator_names,
n_simulate, **kwargs):
"""Wrapper which runs run_list_error_values then applies error_values
summary to the resulting dataframe. See the docstrings for those two
funcions for more details and for descriptions of parameters and output.
"""
true_values = kwargs.pop('true_values', None)
include_true_values = kwargs.pop('include_true_values', False)
include_rmse = kwargs.pop('include_rmse', False)
error_values = run_list_error_values(run_list, estimator_list,
estimator_names, n_simulate, **kwargs)
return error_values_summary(error_values, true_values=true_values,
include_true_values=include_true_values,
include_rmse=include_rmse) | Wrapper which runs run_list_error_values then applies error_values_summary
to the resulting dataframe. See the docstrings for those two
functions for more details and for descriptions of parameters and output. | entailment |
def bs_values_df(run_list, estimator_list, estimator_names, n_simulate,
**kwargs):
"""Computes a data frame of bootstrap resampled values.
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int
Number of bootstrap replications to use on each run.
kwargs:
Kwargs to pass to parallel_apply.
Returns
-------
bs_values_df: pandas data frame
Columns represent estimators and rows represent runs.
Each cell contains a 1d array of bootstrap resampled values for the run
and estimator.
"""
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'bs values'})
assert len(estimator_list) == len(estimator_names), (
'len(estimator_list) = {0} != len(estimator_names) = {1}'
.format(len(estimator_list), len(estimator_names)))
bs_values_list = pu.parallel_apply(
nestcheck.error_analysis.run_bootstrap_values, run_list,
func_args=(estimator_list,), func_kwargs={'n_simulate': n_simulate},
tqdm_kwargs=tqdm_kwargs, **kwargs)
df = pd.DataFrame()
for i, name in enumerate(estimator_names):
df[name] = [arr[i, :] for arr in bs_values_list]
# Check there are the correct number of bootstrap replications in each cell
for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
assert vals_shape == (n_simulate,), (
'Should be n_simulate=' + str(n_simulate) + ' values in ' +
'each cell. The cell contains array with shape ' +
str(vals_shape))
return df | Computes a data frame of bootstrap resampled values.
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int
Number of bootstrap replications to use on each run.
kwargs:
Kwargs to pass to parallel_apply.
Returns
-------
bs_values_df: pandas data frame
Columns represent estimators and rows represent runs.
Each cell contains a 1d array of bootstrap resampled values for the run
and estimator. | entailment |
def thread_values_df(run_list, estimator_list, estimator_names, **kwargs):
"""Calculates estimator values for the constituent threads of the input
runs.
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
kwargs:
Kwargs to pass to parallel_apply.
Returns
-------
df: pandas data frame
Columns represent estimators and rows represent runs.
Each cell contains a 1d numpy array with length equal to the number
of threads in the run, containing the results from evaluating the
estimator on each thread.
"""
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'thread values'})
assert len(estimator_list) == len(estimator_names), (
'len(estimator_list) = {0} != len(estimator_names) = {1}'
.format(len(estimator_list), len(estimator_names)))
# get thread results
thread_vals_arrays = pu.parallel_apply(
nestcheck.error_analysis.run_thread_values, run_list,
func_args=(estimator_list,), tqdm_kwargs=tqdm_kwargs, **kwargs)
df = pd.DataFrame()
for i, name in enumerate(estimator_names):
df[name] = [arr[i, :] for arr in thread_vals_arrays]
# Check there are the correct number of thread values in each cell
for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
assert vals_shape == (run_list[0]['thread_min_max'].shape[0],), \
('Should be nlive=' + str(run_list[0]['thread_min_max'].shape[0]) +
' values in each cell. The cell contains array with shape ' +
str(vals_shape))
return df | Calculates estimator values for the constituent threads of the input
runs.
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
kwargs:
Kwargs to pass to parallel_apply.
Returns
-------
df: pandas data frame
Columns represent estimators and rows represent runs.
Each cell contains a 1d numpy array with length equal to the number
of threads in the run, containing the results from evaluating the
estimator on each thread. | entailment |
def pairwise_dists_on_cols(df_in, earth_mover_dist=True, energy_dist=True):
"""Computes pairwise statistical distance measures.
Parameters
----------
df_in: pandas data frame
Columns represent estimators and rows represent runs.
Each data frame element is an array of values which are used as samples
in the distance measures.
earth_mover_dist: bool, optional
Passed to error_analysis.pairwise_distances.
energy_dist: bool, optional
Passed to error_analysis.pairwise_distances.
Returns
-------
df: pandas data frame with distance values for each pair.
"""
df = pd.DataFrame()
for col in df_in.columns:
df[col] = nestcheck.error_analysis.pairwise_distances(
df_in[col].values, earth_mover_dist=earth_mover_dist,
energy_dist=energy_dist)
return df | Computes pairwise statistical distance measures.
Parameters
----------
df_in: pandas data frame
Columns represent estimators and rows represent runs.
Each data frame element is an array of values which are used as samples
in the distance measures.
earth_mover_dist: bool, optional
Passed to error_analysis.pairwise_distances.
energy_dist: bool, optional
Passed to error_analysis.pairwise_distances.
Returns
-------
df: pandas data frame with distance values for each pair. | entailment |
def _backtick_columns(cols):
"""
Quote the column names
"""
def bt(s):
b = '' if s == '*' or not s else '`'
return [_ for _ in [b + (s or '') + b] if _]
formatted = []
for c in cols:
if c[0] == '#':
formatted.append(c[1:])
elif c.startswith('(') and c.endswith(')'):
# WHERE (column_a, column_b) IN ((1,10), (1,20))
formatted.append(c)
else:
# backtick the former part when it meets the first dot, and then all the rest
formatted.append('.'.join(bt(c.split('.')[0]) + bt('.'.join(c.split('.')[1:]))))
return ', '.join(formatted) | Quote the column names | entailment |
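The branches above give, for example, the following quoting behaviour (a quick illustration):

cols = ['id', 'user.name', '#count(*)', '(a, b)']
print(_backtick_columns(cols))
# `id`, `user`.`name`, count(*), (a, b)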
def _value_parser(self, value, columnname=False, placeholder='%s'):
"""
Input: {'c1': 'v', 'c2': None, '#c3': 'uuid()'}
Output:
('%s, %s, uuid()', [None, 'v']) # insert; columnname=False
('`c2` = %s, `c1` = %s, `c3` = uuid()', [None, 'v']) # update; columnname=True
No need to transform NULL value since it's supported in execute()
"""
if not isinstance(value, dict):
raise TypeError('Input value should be a dictionary')
q = []
a = []
for k, v in value.items():
if k[0] == '#': # if is sql function
q.append(' = '.join([self._backtick(k[1:]), str(v)]) if columnname else v)
else:
q.append(' = '.join([self._backtick(k), placeholder]) if columnname else placeholder)
a.append(v)
return ', '.join(q), tuple(a) | Input: {'c1': 'v', 'c2': None, '#c3': 'uuid()'}
Output:
('%s, %s, uuid()', [None, 'v']) # insert; columnname=False
('`c2` = %s, `c1` = %s, `c3` = uuid()', [None, 'v']) # update; columnname=True
No need to transform NULL value since it's supported in execute() | entailment |
def _by_columns(self, columns):
"""
Allow select.group and select.order accepting string and list
"""
return columns if self.isstr(columns) else self._backtick_columns(columns) | Allow select.group and select.order accepting string and list | entailment |
def select(self, table, columns=None, join=None, where=None, group=None, having=None, order=None, limit=None,
iterator=False, fetch=True):
"""
:type table: string
:type columns: list
:type join: dict
:param join: {'[>]table1(t1)': {'user.id': 't1.user_id'}} -> "LEFT JOIN table AS t1 ON user.id = t1.user_id"
:type where: dict
:type group: string|list
:type having: string
:type order: string|list
:type limit: int|list
# TODO: change to offset
:param limit: The max row number for this query.
If it contains offset, limit must be a list like [offset, limit]
:param iterator: Whether to output the result in a generator. It always returns a generator if the cursor is
SSCursor or SSDictCursor, regardless of whether iterator is True or False.
:type fetch: bool
"""
if not columns:
columns = ['*']
where_q, _args = self._where_parser(where)
# TODO: support multiple table
_sql = ''.join(['SELECT ', self._backtick_columns(columns),
' FROM ', self._tablename_parser(table)['formatted_tablename'],
self._join_parser(join),
where_q,
(' GROUP BY ' + self._by_columns(group)) if group else '',
(' HAVING ' + having) if having else '',
(' ORDER BY ' + self._by_columns(order)) if order else '',
self._limit_parser(limit), ';'])
if self.debug:
return self.cur.mogrify(_sql, _args)
execute_result = self.cur.execute(_sql, _args)
if not fetch:
return execute_result
if self.cursorclass in (pymysql.cursors.SSCursor, pymysql.cursors.SSDictCursor):
return self.cur
if iterator:
return self._yield_result()
return self.cur.fetchall() | :type table: string
:type columns: list
:type join: dict
:param join: {'[>]table1(t1)': {'user.id': 't1.user_id'}} -> "LEFT JOIN table AS t1 ON user.id = t1.user_id"
:type where: dict
:type group: string|list
:type having: string
:type order: string|list
:type limit: int|list
# TODO: change to offset
:param limit: The max row number for this query.
If it contains offset, limit must be a list like [offset, limit]
:param iterator: Whether to output the result in a generator. It always returns a generator if the cursor is
SSCursor or SSDictCursor, regardless of whether iterator is True or False.
:type fetch: bool | entailment |
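A hedged usage sketch (assuming `db` is a connected instance of this helper class and that the `user` and `profile` tables exist; the column names and simple equality where-condition are hypothetical):

rows = db.select(
    table='user',
    columns=['user.id', 'user.name', 'p.email'],
    join={'[>]profile(p)': {'user.id': 'p.user_id'}},  # LEFT JOIN profile AS p
    where={'user.active': 1},
    order='user.id DESC',
    limit=[0, 10],                                     # offset 0, at most 10 rows
)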
def select_page(self, limit, offset=0, **kwargs):
"""
:type limit: int
:param limit: The max row number for each page
:type offset: int
:param offset: The starting position of the page
:return:
"""
start = offset
while True:
result = self.select(limit=[start, limit], **kwargs)
start += limit
if result:
yield result
else:
break
if self.debug:
break | :type limit: int
:param limit: The max row number for each page
:type offset: int
:param offset: The starting position of the page
:return: | entailment |
def get(self, table, column, join=None, where=None, insert=False, ifnone=None):
"""
A simplified method of select, for getting the first result in one column only. A common case of using this
method is getting id.
:type table: string
:type column: str
:type join: dict
:type where: dict
:type insert: bool
:param insert: If insert==True, insert the input condition if there's no result and return the id of new row.
:type ifnone: string
:param ifnone: When ifnone is a non-empty string, raise an error if the query returns an empty result. The insert
parameter would not work in this mode.
"""
select_result = self.select(table=table, columns=[column], join=join, where=where, limit=1)
if self.debug:
return select_result
result = select_result[0] if select_result else None
if result:
return result[0 if self.cursorclass is pymysql.cursors.Cursor else column]
if ifnone:
raise ValueError(ifnone)
if insert:
if any([isinstance(d, dict) for d in where.values()]):
raise ValueError("The where parameter in get() doesn't support nested condition with insert==True.")
return self.insert(table=table, value=where)
return None | A simplified method of select, for getting the first result in one column only. A common case of using this
method is getting id.
:type table: string
:type column: str
:type join: dict
:type where: dict
:type insert: bool
:param insert: If insert==True, insert the input condition if there's no result and return the id of new row.
:type ifnone: string
:param ifnone: When ifnone is a non-empty string, raise an error if the query returns an empty result. The insert
parameter would not work in this mode.
def insert(self, table, value, ignore=False, commit=True):
"""
Insert a dict into db.
:type table: string
:type value: dict
:type ignore: bool
:type commit: bool
:return: int. The row id of the insert.
"""
value_q, _args = self._value_parser(value, columnname=False)
_sql = ''.join(['INSERT', ' IGNORE' if ignore else '', ' INTO ', self._backtick(table),
' (', self._backtick_columns(value), ') VALUES (', value_q, ');'])
if self.debug:
return self.cur.mogrify(_sql, _args)
self.cur.execute(_sql, _args)
if commit:
self.conn.commit()
return self.cur.lastrowid | Insert a dict into db.
:type table: string
:type value: dict
:type ignore: bool
:type commit: bool
:return: int. The row id of the insert. | entailment |
def upsert(self, table, value, update_columns=None, commit=True):
"""
:type table: string
:type value: dict
:type update_columns: list
:param update_columns: specify the columns which will be updated if record exists
:type commit: bool
"""
if not isinstance(value, dict):
raise TypeError('Input value should be a dictionary')
if not update_columns:
update_columns = value.keys()
value_q, _args = self._value_parser(value, columnname=False)
_sql = ''.join(['INSERT INTO ', self._backtick(table), ' (', self._backtick_columns(value), ') VALUES ',
'(', value_q, ') ',
'ON DUPLICATE KEY UPDATE ',
', '.join(['='.join([k, 'VALUES('+k+')']) for k in update_columns]), ';'])
if self.debug:
return self.cur.mogrify(_sql, _args)
self.cur.execute(_sql, _args)
if commit:
self.conn.commit()
return self.cur.lastrowid | :type table: string
:type value: dict
:type update_columns: list
:param update_columns: specify the columns which will be updated if record exists
:type commit: bool | entailment |
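A standalone sketch of how the ON DUPLICATE KEY UPDATE clause above is assembled from the update columns (the column names are hypothetical):

value = {'id': 1, 'name': 'alice'}          # hypothetical row
update_columns = list(value.keys())
clause = ', '.join(['='.join([k, 'VALUES(' + k + ')']) for k in update_columns])
print(clause)   # id=VALUES(id), name=VALUES(name)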
def insertmany(self, table, columns, value, ignore=False, commit=True):
"""
Insert multiple records within one query.
:type table: string
:type columns: list
:type value: list|tuple
:param value: Doesn't support MySQL functions
:param value: Example: [(value1_column1, value1_column2,), ]
:type ignore: bool
:type commit: bool
:return: int. The row id of the LAST insert only.
"""
if not isinstance(value, (list, tuple)):
raise TypeError('Input value should be a list or tuple')
# Cannot add semicolon here, otherwise it will not pass the Cursor.executemany validation
_sql = ''.join(['INSERT', ' IGNORE' if ignore else '', ' INTO ', self._backtick(table),
' (', self._backtick_columns(columns), ') VALUES (', ', '.join(['%s'] * len(columns)), ')'])
_args = tuple(value)
# For insertmany, the base queries for executemany and printing are different
_sql_full = ''.join(['INSERT', ' IGNORE' if ignore else '', ' INTO ', self._backtick(table),
' (', self._backtick_columns(columns), ') VALUES ',
', '.join([''.join(['(', ', '.join(['%s'] * len(columns)), ')'])] * len(_args)),
';'])
_args_flattened = [item for sublist in _args for item in sublist]
if self.debug:
return self.cur.mogrify(_sql_full, _args_flattened)
self.cur.executemany(_sql, _args)
if commit:
self.conn.commit()
return self.cur.lastrowid | Insert multiple records within one query.
:type table: string
:type columns: list
:type value: list|tuple
:param value: Doesn't support MySQL functions
:param value: Example: [(value1_column1, value1_column2,), ]
:type ignore: bool
:type commit: bool
:return: int. The row id of the LAST insert only. | entailment |
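A standalone sketch of the two query shapes insertmany() prepares: the per-row statement passed to executemany, and the fully expanded statement with flattened arguments used only for debug printing. Table and column names are hypothetical:

columns = ['name', 'age']
value = [('alice', 30), ('bob', 25)]
row_placeholders = '(' + ', '.join(['%s'] * len(columns)) + ')'
sql_many = 'INSERT INTO `person` (`name`, `age`) VALUES ' + row_placeholders
sql_full = ('INSERT INTO `person` (`name`, `age`) VALUES '
            + ', '.join([row_placeholders] * len(value)) + ';')
args_flattened = [item for row in value for item in row]
print(sql_many)        # INSERT INTO `person` (`name`, `age`) VALUES (%s, %s)
print(sql_full)        # INSERT INTO `person` (`name`, `age`) VALUES (%s, %s), (%s, %s);
print(args_flattened)  # ['alice', 30, 'bob', 25]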
def update(self, table, value, where, join=None, commit=True):
"""
:type table: string
:type value: dict
:type where: dict
:type join: dict
:type commit: bool
"""
value_q, _value_args = self._value_parser(value, columnname=True)
where_q, _where_args = self._where_parser(where)
_sql = ''.join(['UPDATE ', self._tablename_parser(table)['formatted_tablename'],
self._join_parser(join),
' SET ', value_q, where_q, ';'])
_args = _value_args + _where_args
if self.debug:
return self.cur.mogrify(_sql, _args)
result = self.cur.execute(_sql, _args)
if commit:
self.commit()
return result | :type table: string
:type value: dict
:type where: dict
:type join: dict
:type commit: bool | entailment |
def delete(self, table, where=None, commit=True):
"""
:type table: string
:type where: dict
:type commit: bool
"""
where_q, _args = self._where_parser(where)
alias = self._tablename_parser(table)['alias']
_sql = ''.join(['DELETE ',
alias + ' ' if alias else '',
'FROM ', self._tablename_parser(table)['formatted_tablename'], where_q, ';'])
if self.debug:
return self.cur.mogrify(_sql, _args)
result = self.cur.execute(_sql, _args)
if commit:
self.commit()
return result | :type table: string
:type where: dict
:type commit: bool | entailment |
def get_whitespace(txt):
"""
Returns a tuple containing the whitespace to the left and
right of a string as its two elements
"""
# if the entire parameter is whitespace
rall = re.search(r'^([\s])+$', txt)
if rall:
tmp = txt.split('\n', 1)
if len(tmp) == 2:
return (tmp[0], '\n' + tmp[1]) # left, right
else:
return ('', tmp[0]) # left, right
left = ''
# find whitespace to the left of the parameter
rlm = re.search(r'^([\s])+', txt)
if rlm:
left = rlm.group(0)
right = ''
# find whitespace to the right of the parameter
rrm = re.search(r'([\s])+$', txt)
if rrm:
right = rrm.group(0)
return (left, right) | Returns a tuple containing the whitespace to the left and
right of a string as its two elements | entailment |
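A quick standalone check of the two edge regexes get_whitespace() relies on, run against a sample value with surrounding whitespace:

import re

txt = '  some value \n'
left = re.search(r'^([\s])+', txt)
right = re.search(r'([\s])+$', txt)
print(repr(left.group(0) if left else ''))    # '  '
print(repr(right.group(0) if right else ''))  # ' \n'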
def find_whitespace_pattern(self):
"""
Try to find a whitespace pattern in the existing parameters
to be applied to a newly added parameter
"""
name_ws = []
value_ws = []
for entry in self._entries:
name_ws.append(get_whitespace(entry.name))
if entry.value != '':
value_ws.append(get_whitespace(entry._value)) # _value is unstripped
if len(value_ws) >= 1:
value_ws = most_common(value_ws)
else:
value_ws = ('', ' ')
if len(name_ws) >= 1:
name_ws = most_common(name_ws)
else:
name_ws = (' ', '')
return name_ws, value_ws | Try to find a whitespace pattern in the existing parameters
to be applied to a newly added parameter | entailment |
def _path_for_file(self, project_name, date):
"""
Generate the path on disk for a specified project and date.
:param project_name: the PyPI project name for the data
:type project: str
:param date: the date for the data
:type date: datetime.datetime
:return: path for where to store this data on disk
:rtype: str
"""
return os.path.join(
self.cache_path,
'%s_%s.json' % (project_name, date.strftime('%Y%m%d'))
) | Generate the path on disk for a specified project and date.
:param project_name: the PyPI project name for the data
:type project: str
:param date: the date for the data
:type date: datetime.datetime
:return: path for where to store this data on disk
:rtype: str | entailment |
def get(self, project, date):
"""
Get the cache data for a specified project for the specified date.
Returns None if the data cannot be found in the cache.
:param project: PyPi project name to get data for
:type project: str
:param date: date to get data for
:type date: datetime.datetime
:return: dict of per-date data for project
:rtype: :py:obj:`dict` or ``None``
"""
fpath = self._path_for_file(project, date)
logger.debug('Cache GET project=%s date=%s - path=%s',
project, date.strftime('%Y-%m-%d'), fpath)
try:
with open(fpath, 'r') as fh:
data = json.loads(fh.read())
except:
logger.debug('Error getting from cache for project=%s date=%s',
project, date.strftime('%Y-%m-%d'))
return None
data['cache_metadata']['date'] = datetime.strptime(
data['cache_metadata']['date'],
'%Y%m%d'
)
data['cache_metadata']['updated'] = datetime.fromtimestamp(
data['cache_metadata']['updated']
)
return data | Get the cache data for a specified project for the specified date.
Returns None if the data cannot be found in the cache.
:param project: PyPi project name to get data for
:type project: str
:param date: date to get data for
:type date: datetime.datetime
:return: dict of per-date data for project
:rtype: :py:obj:`dict` or ``None`` | entailment |
def set(self, project, date, data, data_ts):
"""
Set the cache data for a specified project for the specified date.
:param project: project name to set data for
:type project: str
:param date: date to set data for
:type date: datetime.datetime
:param data: data to cache
:type data: dict
:param data_ts: maximum timestamp in the BigQuery data table
:type data_ts: int
"""
data['cache_metadata'] = {
'project': project,
'date': date.strftime('%Y%m%d'),
'updated': time.time(),
'version': VERSION,
'data_ts': data_ts
}
fpath = self._path_for_file(project, date)
logger.debug('Cache SET project=%s date=%s - path=%s',
project, date.strftime('%Y-%m-%d'), fpath)
with open(fpath, 'w') as fh:
fh.write(json.dumps(data)) | Set the cache data for a specified project for the specified date.
:param project: project name to set data for
:type project: str
:param date: date to set data for
:type date: datetime.datetime
:param data: data to cache
:type data: dict
:param data_ts: maximum timestamp in the BigQuery data table
:type data_ts: int | entailment |
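A standalone sketch of the on-disk format the set()/get() pair above writes and reads: one JSON file per project per day, named <project>_<YYYYMMDD>.json, with a cache_metadata block added on write and the date parsed back on read. The project name, counts and timestamp are hypothetical:

import json
import os
import tempfile
import time
from datetime import datetime

cache_path = tempfile.mkdtemp()
project, date = 'mypkg', datetime(2016, 8, 1)
fpath = os.path.join(cache_path, '%s_%s.json' % (project, date.strftime('%Y%m%d')))

data = {'by_version': {'1.0.0': 42}}
data['cache_metadata'] = {'project': project, 'date': date.strftime('%Y%m%d'),
                          'updated': time.time(), 'data_ts': 1470009600}
with open(fpath, 'w') as fh:
    fh.write(json.dumps(data))

with open(fpath, 'r') as fh:
    loaded = json.loads(fh.read())
loaded['cache_metadata']['date'] = datetime.strptime(
    loaded['cache_metadata']['date'], '%Y%m%d')
loaded['cache_metadata']['updated'] = datetime.fromtimestamp(
    loaded['cache_metadata']['updated'])
print(fpath)
print(loaded['by_version'], loaded['cache_metadata']['date'])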
def get_dates_for_project(self, project):
"""
Return a list of the dates we have in cache for the specified project,
sorted in ascending date order.
:param project: project name
:type project: str
:return: list of datetime.datetime objects
:rtype: datetime.datetime
"""
file_re = re.compile(r'^%s_([0-9]{8})\.json$' % project)
all_dates = []
for f in os.listdir(self.cache_path):
if not os.path.isfile(os.path.join(self.cache_path, f)):
continue
m = file_re.match(f)
if m is None:
continue
all_dates.append(datetime.strptime(m.group(1), '%Y%m%d'))
return sorted(all_dates) | Return a list of the dates we have in cache for the specified project,
sorted in ascending date order.
:param project: project name
:type project: str
:return: list of datetime.datetime objects
:rtype: datetime.datetime | entailment |
def parse_args(argv):
"""
Use Argparse to parse command-line arguments.
:param argv: list of arguments to parse (``sys.argv[1:]``)
:type argv: ``list``
:return: parsed arguments
:rtype: :py:class:`argparse.Namespace`
"""
p = argparse.ArgumentParser(
description='pypi-download-stats - Calculate detailed download stats '
'and generate HTML and badges for PyPI packages - '
'<%s>' % PROJECT_URL,
prog='pypi-download-stats'
)
p.add_argument('-V', '--version', action='version',
version='%(prog)s ' + VERSION)
p.add_argument('-v', '--verbose', dest='verbose', action='count',
default=0,
help='verbose output. specify twice for debug-level output.')
m = p.add_mutually_exclusive_group()
m.add_argument('-Q', '--no-query', dest='query', action='store_false',
default=True, help='do not query; just generate output '
'from cached data')
m.add_argument('-G', '--no-generate', dest='generate', action='store_false',
default=True, help='do not generate output; just query '
'data and cache results')
p.add_argument('-o', '--out-dir', dest='out_dir', action='store', type=str,
default='./pypi-stats', help='output directory (default: '
'./pypi-stats)')
p.add_argument('-p', '--project-id', dest='project_id', action='store',
type=str, default=None,
help='ProjectID for your Google Cloud user, if not using '
'service account credentials JSON file')
# @TODO this is tied to the DiskDataCache class
p.add_argument('-c', '--cache-dir', dest='cache_dir', action='store',
type=str, default='./pypi-stats-cache',
help='stats cache directory (default: ./pypi-stats-cache)')
p.add_argument('-B', '--backfill-num-days', dest='backfill_days', type=int,
action='store', default=7,
help='number of days of historical data to backfill, if '
'missing (default: 7). Note this may incur BigQuery '
'charges. Set to -1 to backfill all available history.')
g = p.add_mutually_exclusive_group()
g.add_argument('-P', '--project', dest='PROJECT', action='append', type=str,
help='project name to query/generate stats for (can be '
'specified more than once; '
'this will reduce query cost for multiple projects)')
g.add_argument('-U', '--user', dest='user', action='store', type=str,
help='Run for all PyPI projects owned by the specified '
'user.')
args = p.parse_args(argv)
return args | Use Argparse to parse command-line arguments.
:param argv: list of arguments to parse (``sys.argv[1:]``)
:type argv: ``list``
:return: parsed arguments
:rtype: :py:class:`argparse.Namespace` | entailment |
def set_log_level_format(level, format):
"""
Set logger level and format.
:param level: logging level; see the :py:mod:`logging` constants.
:type level: int
:param format: logging formatter format string
:type format: str
"""
formatter = logging.Formatter(fmt=format)
logger.handlers[0].setFormatter(formatter)
logger.setLevel(level) | Set logger level and format.
:param level: logging level; see the :py:mod:`logging` constants.
:type level: int
:param format: logging formatter format string
:type format: str | entailment |
def _pypi_get_projects_for_user(username):
"""
Given the username of a PyPI user, return a list of all of the user's
projects from the XMLRPC interface.
See: https://wiki.python.org/moin/PyPIXmlRpc
:param username: PyPI username
:type username: str
:return: list of string project names
:rtype: ``list``
"""
client = xmlrpclib.ServerProxy('https://pypi.python.org/pypi')
pkgs = client.user_packages(username) # returns [role, package]
return [x[1] for x in pkgs] | Given the username of a PyPI user, return a list of all of the user's
projects from the XMLRPC interface.
See: https://wiki.python.org/moin/PyPIXmlRpc
:param username: PyPI username
:type username: str
:return: list of string project names
:rtype: ``list`` | entailment |
def main(args=None):
"""
Main entry point
"""
# parse args
if args is None:
args = parse_args(sys.argv[1:])
# set logging level
if args.verbose > 1:
set_log_debug()
elif args.verbose == 1:
set_log_info()
outpath = os.path.abspath(os.path.expanduser(args.out_dir))
cachepath = os.path.abspath(os.path.expanduser(args.cache_dir))
cache = DiskDataCache(cache_path=cachepath)
if args.user:
args.PROJECT = _pypi_get_projects_for_user(args.user)
if args.query:
DataQuery(args.project_id, args.PROJECT, cache).run_queries(
backfill_num_days=args.backfill_days)
else:
logger.warning('Query disabled by command-line flag; operating on '
'cached data only.')
if not args.generate:
logger.warning('Output generation disabled by command-line flag; '
'exiting now.')
raise SystemExit(0)
for proj in args.PROJECT:
logger.info('Generating output for: %s', proj)
stats = ProjectStats(proj, cache)
outdir = os.path.join(outpath, proj)
OutputGenerator(proj, stats, outdir).generate() | Main entry point | entailment |
def generate_graph(self):
"""
Generate the graph; return a 2-tuple of strings, script to place in the
head of the HTML document and div content for the graph itself.
:return: 2-tuple (script, div)
:rtype: tuple
"""
logger.debug('Generating graph for %s', self._graph_id)
# tools to use
tools = [
PanTool(),
BoxZoomTool(),
WheelZoomTool(),
SaveTool(),
ResetTool(),
ResizeTool()
]
# generate the stacked area graph
try:
g = Area(
self._data, x='Date', y=self._y_series_names,
title=self._title, stack=True, xlabel='Date',
ylabel='Downloads', tools=tools,
# note the width and height will be set by JavaScript
plot_height=400, plot_width=800,
toolbar_location='above', legend=False
)
except Exception as ex:
logger.error("Error generating %s graph", self._graph_id)
logger.error("Data: %s", self._data)
logger.error("y=%s", self._y_series_names)
raise ex
lines = []
legend_parts = []
# add a line at the top of each Patch (stacked area) for hovertool
for renderer in g.select(GlyphRenderer):
if not isinstance(renderer.glyph, Patches):
continue
series_name = renderer.data_source.data['series'][0]
logger.debug('Adding line for Patches %s (series: %s)', renderer,
series_name)
line = self._line_for_patches(self._data, g, renderer, series_name)
if line is not None:
lines.append(line)
legend_parts.append((series_name, [line]))
# add the Hovertool, specifying only our line glyphs
g.add_tools(
HoverTool(
tooltips=[
(self._y_name, '@SeriesName'),
('Date', '@FmtDate'),
('Downloads', '@Downloads'),
],
renderers=lines,
line_policy='nearest'
)
)
# legend outside chart area
legend = Legend(legends=legend_parts, location=(0, 0))
g.add_layout(legend, 'right')
return components(g) | Generate the graph; return a 2-tuple of strings, script to place in the
head of the HTML document and div content for the graph itself.
:return: 2-tuple (script, div)
:rtype: tuple | entailment |
def _line_for_patches(self, data, chart, renderer, series_name):
"""
Add a line along the top edge of a Patch in a stacked Area Chart; return
the new Glyph for addition to HoverTool.
:param data: original data for the graph
:type data: dict
:param chart: Chart to add the line to
:type chart: bokeh.charts.Chart
:param renderer: GlyphRenderer containing one Patches glyph, to draw
the line for
:type renderer: bokeh.models.renderers.GlyphRenderer
:param series_name: the data series name this Patches represents
:type series_name: str
:return: GlyphRenderer for a Line at the top edge of this Patch
:rtype: bokeh.models.renderers.GlyphRenderer
"""
# @TODO this method needs a major refactor
# get the original x and y values, and color
xvals = deepcopy(renderer.data_source.data['x_values'][0])
yvals = deepcopy(renderer.data_source.data['y_values'][0])
line_color = renderer.glyph.fill_color
# save original values for logging if needed
orig_xvals = [x for x in xvals]
orig_yvals = [y for y in yvals]
# get a list of the values
new_xvals = [x for x in xvals]
new_yvals = [y for y in yvals]
# so when a Patch is made, the first point is (0,0); trash it
xvals = new_xvals[1:]
yvals = new_yvals[1:]
# then, we can tell the last point in the "top" line because it will be
# followed by a point with the same x value and a y value of 0.
last_idx = None
for idx, val in enumerate(xvals):
if yvals[idx+1] == 0 and xvals[idx+1] == xvals[idx]:
last_idx = idx
break
if last_idx is None:
logger.error('Unable to find top line of patch (x_values=%s '
'y_values=%s', orig_xvals, orig_yvals)
return None
# truncate our values to just what makes up the top line
xvals = xvals[:last_idx+1]
yvals = yvals[:last_idx+1]
# Currently (bokeh 0.12.1) HoverTool won't show the tooltip for the last
# point in our line. As a hack for this, add a point with the same Y
# value and an X slightly before it.
lastx = xvals[-1]
xvals[-1] = lastx - 1000 # 1000 nanoseconds
xvals.append(lastx)
yvals.append(yvals[-1])
# get the actual download counts from the original data
download_counts = [
data[series_name][y] for y in range(0, len(yvals) - 1)
]
download_counts.append(download_counts[-1])
# create a ColumnDataSource for the new overlay line
data2 = {
'x': xvals, # Date/x values are numpy.datetime64
'y': yvals,
# the following are hacks for data that we want in the HoverTool
# tooltip
'SeriesName': [series_name for _ in yvals],
# formatted date
'FmtDate': [self.datetime64_to_formatted_date(x) for x in xvals],
# to show the exact value, not where the pointer is
'Downloads': download_counts
}
# set the formatted date for our hacked second-to-last point to the
# same value as the last point
data2['FmtDate'][-2] = data2['FmtDate'][-1]
# create the ColumnDataSource, then the line for it, then the Glyph
line_ds = ColumnDataSource(data2)
line = Line(x='x', y='y', line_color=line_color)
lineglyph = chart.add_glyph(line_ds, line)
return lineglyph | Add a line along the top edge of a Patch in a stacked Area Chart; return
the new Glyph for addition to HoverTool.
:param data: original data for the graph
:type data: dict
:param chart: Chart to add the line to
:type chart: bokeh.charts.Chart
:param renderer: GlyphRenderer containing one Patches glyph, to draw
the line for
:type renderer: bokeh.models.renderers.GlyphRenderer
:param series_name: the data series name this Patches represents
:type series_name: str
:return: GlyphRenderer for a Line at the top edge of this Patch
:rtype: bokeh.models.renderers.GlyphRenderer | entailment |
def _get_cache_dates(self):
"""
Get a list of dates (:py:class:`datetime.datetime`) present in cache,
beginning with the longest contiguous set of dates that isn't missing
more than one date in series.
:return: list of datetime objects for contiguous dates in cache
:rtype: ``list``
"""
all_dates = self.cache.get_dates_for_project(self.project_name)
dates = []
last_date = None
for val in sorted(all_dates):
if last_date is None:
last_date = val
continue
if val - last_date > timedelta(hours=48):
# reset dates to start from here
logger.warning("Last cache date was %s, current date is %s; "
"delta is too large. Starting cache date series "
"at current date.", last_date, val)
dates = []
last_date = val
dates.append(val)
# find the first download record, and only look at dates after that
for idx, cache_date in enumerate(dates):
data = self._cache_get(cache_date)
if not self._is_empty_cache_record(data):
logger.debug("First cache date with data: %s", cache_date)
return dates[idx:]
return dates | Get a list of dates (:py:class:`datetime.datetime`) present in cache,
beginning with the longest contiguous set of dates that isn't missing
more than one date in series.
:return: list of datetime objects for contiguous dates in cache
:rtype: ``list`` | entailment |
def _is_empty_cache_record(self, rec):
"""
Return True if the specified cache record has no data, False otherwise.
:param rec: cache record returned by :py:meth:`~._cache_get`
:type rec: dict
:return: True if record is empty, False otherwise
:rtype: bool
"""
# these are taken from DataQuery.query_one_table()
for k in [
'by_version',
'by_file_type',
'by_installer',
'by_implementation',
'by_system',
'by_distro',
'by_country'
]:
if k in rec and len(rec[k]) > 0:
return False
return True | Return True if the specified cache record has no data, False otherwise.
:param rec: cache record returned by :py:meth:`~._cache_get`
:type rec: dict
:return: True if record is empty, False otherwise
:rtype: bool | entailment |
def _cache_get(self, date):
"""
Return cache data for the specified day; cache locally in this class.
:param date: date to get data for
:type date: datetime.datetime
:return: cache data for date
:rtype: dict
"""
if date in self.cache_data:
logger.debug('Using class-cached data for date %s',
date.strftime('%Y-%m-%d'))
return self.cache_data[date]
logger.debug('Getting data from cache for date %s',
date.strftime('%Y-%m-%d'))
data = self.cache.get(self.project_name, date)
self.cache_data[date] = data
return data | Return cache data for the specified day; cache locally in this class.
:param date: date to get data for
:type date: datetime.datetime
:return: cache data for date
:rtype: dict | entailment |
def _compound_column_value(k1, k2):
"""
Like :py:meth:`~._column_value` but collapses two unknowns into one.
:param k1: first (top-level) value
:param k2: second (bottom-level) value
:return: display key
:rtype: str
"""
k1 = ProjectStats._column_value(k1)
k2 = ProjectStats._column_value(k2)
if k1 == 'unknown' and k2 == 'unknown':
return 'unknown'
return '%s %s' % (k1, k2) | Like :py:meth:`~._column_value` but collapses two unknowns into one.
:param k1: first (top-level) value
:param k2: second (bottom-level) value
:return: display key
:rtype: str | entailment |
def _shorten_version(ver, num_components=2):
"""
If ``ver`` is a dot-separated string with at least (num_components +1)
components, return only the first ``num_components`` components. Else return the original string.
:param ver: version string
:type ver: str
:return: shortened (major, minor) version
:rtype: str
"""
parts = ver.split('.')
if len(parts) <= num_components:
return ver
return '.'.join(parts[:num_components]) | If ``ver`` is a dot-separated string with at least (num_components +1)
components, return only the first ``num_components`` components. Else return the original string.
:param ver: version string
:type ver: str
:return: shortened (major, minor) version
:rtype: str | entailment |
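A standalone restatement of the version-shortening helper above (renamed to avoid implying it is importable as-is), with a few sample inputs:

def shorten_version(ver, num_components=2):
    # keep only the first num_components dot-separated parts
    parts = ver.split('.')
    if len(parts) <= num_components:
        return ver
    return '.'.join(parts[:num_components])

print(shorten_version('2.7.12'))       # 2.7
print(shorten_version('16.04.1', 1))   # 16
print(shorten_version('unknown'))      # unknown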
def per_version_data(self):
"""
Return download data by version.
:return: dict of cache data; keys are datetime objects, values are
dict of version (str) to count (int)
:rtype: dict
"""
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
if len(data['by_version']) == 0:
data['by_version'] = {'other': 0}
ret[cache_date] = data['by_version']
return ret | Return download data by version.
:return: dict of cache data; keys are datetime objects, values are
dict of version (str) to count (int)
:rtype: dict | entailment |
def per_file_type_data(self):
"""
Return download data by file type.
:return: dict of cache data; keys are datetime objects, values are
dict of file type (str) to count (int)
:rtype: dict
"""
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
if len(data['by_file_type']) == 0:
data['by_file_type'] = {'other': 0}
ret[cache_date] = data['by_file_type']
return ret | Return download data by file type.
:return: dict of cache data; keys are datetime objects, values are
dict of file type (str) to count (int)
:rtype: dict | entailment |
def per_installer_data(self):
"""
Return download data by installer name and version.
:return: dict of cache data; keys are datetime objects, values are
dict of installer name/version (str) to count (int).
:rtype: dict
"""
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
ret[cache_date] = {}
for inst_name, inst_data in data['by_installer'].items():
for inst_ver, count in inst_data.items():
k = self._compound_column_value(
inst_name,
self._shorten_version(inst_ver)
)
ret[cache_date][k] = count
if len(ret[cache_date]) == 0:
ret[cache_date]['unknown'] = 0
return ret | Return download data by installer name and version.
:return: dict of cache data; keys are datetime objects, values are
dict of installer name/version (str) to count (int).
:rtype: dict | entailment |
def per_implementation_data(self):
"""
Return download data by Python implementation name and version.
:return: dict of cache data; keys are datetime objects, values are
dict of implementation name/version (str) to count (int).
:rtype: dict
"""
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
ret[cache_date] = {}
for impl_name, impl_data in data['by_implementation'].items():
for impl_ver, count in impl_data.items():
k = self._compound_column_value(
impl_name,
self._shorten_version(impl_ver)
)
ret[cache_date][k] = count
if len(ret[cache_date]) == 0:
ret[cache_date]['unknown'] = 0
return ret | Return download data by Python implementation name and version.
:return: dict of cache data; keys are datetime objects, values are
dict of implementation name/version (str) to count (int).
:rtype: dict | entailment |
def per_system_data(self):
"""
Return download data by system.
:return: dict of cache data; keys are datetime objects, values are
dict of system (str) to count (int)
:rtype: dict
"""
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
ret[cache_date] = {
self._column_value(x): data['by_system'][x]
for x in data['by_system']
}
if len(ret[cache_date]) == 0:
ret[cache_date]['unknown'] = 0
return ret | Return download data by system.
:return: dict of cache data; keys are datetime objects, values are
dict of system (str) to count (int)
:rtype: dict | entailment |
def per_country_data(self):
"""
Return download data by country.
:return: dict of cache data; keys are datetime objects, values are
dict of country (str) to count (int)
:rtype: dict
"""
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
ret[cache_date] = {}
for cc, count in data['by_country'].items():
k = '%s (%s)' % (self._alpha2_to_country(cc), cc)
ret[cache_date][k] = count
if len(ret[cache_date]) == 0:
ret[cache_date]['unknown'] = 0
return ret | Return download data by country.
:return: dict of cache data; keys are datetime objects, values are
dict of country (str) to count (int)
:rtype: dict | entailment |
def per_distro_data(self):
"""
Return download data by distro name and version.
:return: dict of cache data; keys are datetime objects, values are
dict of distro name/version (str) to count (int).
:rtype: dict
"""
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
ret[cache_date] = {}
for distro_name, distro_data in data['by_distro'].items():
if distro_name.lower() == 'red hat enterprise linux server':
distro_name = 'RHEL'
for distro_ver, count in distro_data.items():
ver = self._shorten_version(distro_ver, num_components=1)
if distro_name.lower() == 'os x':
ver = self._shorten_version(distro_ver,
num_components=2)
k = self._compound_column_value(distro_name, ver)
ret[cache_date][k] = count
if len(ret[cache_date]) == 0:
ret[cache_date]['unknown'] = 0
return ret | Return download data by distro name and version.
:return: dict of cache data; keys are datetime objects, values are
dict of distro name/version (str) to count (int).
:rtype: dict | entailment |
def downloads_per_day(self):
"""
Return the number of downloads per day, averaged over the past 7 days
of data.
:return: average number of downloads per day
:rtype: int
"""
count, num_days = self._downloads_for_num_days(7)
res = ceil(count / num_days)
logger.debug("Downloads per day = (%d / %d) = %d", count, num_days, res)
return res | Return the number of downloads per day, averaged over the past 7 days
of data.
:return: average number of downloads per day
:rtype: int | entailment |
def downloads_per_week(self):
"""
Return the number of downloads in the last 7 days.
:return: number of downloads in the last 7 days; if we have less than
7 days of data, returns None.
:rtype: int
"""
if len(self.cache_dates) < 7:
logger.error("Only have %d days of data; cannot calculate "
"downloads per week", len(self.cache_dates))
return None
count, _ = self._downloads_for_num_days(7)
logger.debug("Downloads per week = %d", count)
return count | Return the number of downloads in the last 7 days.
:return: number of downloads in the last 7 days; if we have less than
7 days of data, returns None.
:rtype: int | entailment |
def _downloads_for_num_days(self, num_days):
"""
Given a number of days of historical data to look at (starting with
today and working backwards), return the total number of downloads
for that time range, and the number of days of data we had (in cases
where we had less data than requested).
:param num_days: number of days of data to look at
:type num_days: int
:return: 2-tuple of (download total, number of days of data)
:rtype: tuple
"""
logger.debug("Getting download total for last %d days", num_days)
dates = self.cache_dates
logger.debug("Cache has %d days of data", len(dates))
if len(dates) > num_days:
dates = dates[(-1 * num_days):]
logger.debug("Looking at last %d days of data", len(dates))
dl_sum = 0
for cache_date in dates:
data = self._cache_get(cache_date)
dl_sum += sum(data['by_version'].values())
logger.debug("Sum of download counts: %d", dl_sum)
return dl_sum, len(dates) | Given a number of days of historical data to look at (starting with
today and working backwards), return the total number of downloads
for that time range, and the number of days of data we had (in cases
where we had less data than requested).
:param num_days: number of days of data to look at
:type num_days: int
:return: 2-tuple of (download total, number of days of data)
:rtype: tuple | entailment |
def _get_project_id(self):
"""
Get our projectId from the ``GOOGLE_APPLICATION_CREDENTIALS`` creds
JSON file.
:return: project ID
:rtype: str
"""
fpath = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', None)
if fpath is None:
raise Exception('ERROR: No project ID specified, and '
'GOOGLE_APPLICATION_CREDENTIALS env var is not set')
fpath = os.path.abspath(os.path.expanduser(fpath))
logger.debug('Reading credentials file at %s to get project_id', fpath)
with open(fpath, 'r') as fh:
cred_data = json.loads(fh.read())
return cred_data['project_id'] | Get our projectId from the ``GOOGLE_APPLICATION_CREDENTIALS`` creds
JSON file.
:return: project ID
:rtype: str | entailment |
def _get_bigquery_service(self):
"""
Connect to the BigQuery service.
Calling ``GoogleCredentials.get_application_default`` requires that
you either be running in the Google Cloud, or have the
``GOOGLE_APPLICATION_CREDENTIALS`` environment variable set to the path
to a credentials JSON file.
:return: authenticated BigQuery service connection object
:rtype: `googleapiclient.discovery.Resource <http://google.github.io/\
google-api-python-client/docs/epy/googleapiclient.discovery.\
Resource-class.html>`_
"""
logger.debug('Getting Google Credentials')
credentials = GoogleCredentials.get_application_default()
logger.debug('Building BigQuery service instance')
bigquery_service = build('bigquery', 'v2', credentials=credentials)
return bigquery_service | Connect to the BigQuery service.
Calling ``GoogleCredentials.get_application_default`` requires that
you either be running in the Google Cloud, or have the
``GOOGLE_APPLICATION_CREDENTIALS`` environment variable set to the path
to a credentials JSON file.
:return: authenticated BigQuery service connection object
:rtype: `googleapiclient.discovery.Resource <http://google.github.io/\
google-api-python-client/docs/epy/googleapiclient.discovery.\
Resource-class.html>`_ | entailment |
def _get_download_table_ids(self):
"""
Get a list of PyPI downloads table (sharded per day) IDs.
:return: list of table names (strings)
:rtype: ``list``
"""
all_table_names = [] # matching per-date table names
logger.info('Querying for all tables in dataset')
tables = self.service.tables()
request = tables.list(projectId=self._PROJECT_ID,
datasetId=self._DATASET_ID)
while request is not None:
response = request.execute()
# if the number of results is evenly divisible by the page size,
# we may end up with a last response that has no 'tables' key,
# and is empty.
if 'tables' not in response:
response['tables'] = []
for table in response['tables']:
if table['type'] != 'TABLE':
logger.debug('Skipping %s (type=%s)',
table['tableReference']['tableId'],
table['type'])
continue
if not self._table_re.match(table['tableReference']['tableId']):
logger.debug('Skipping table with non-matching name: %s',
table['tableReference']['tableId'])
continue
all_table_names.append(table['tableReference']['tableId'])
request = tables.list_next(previous_request=request,
previous_response=response)
return sorted(all_table_names) | Get a list of PyPI downloads table (sharded per day) IDs.
:return: list of table names (strings)
:rtype: ``list`` | entailment |
def _datetime_for_table_name(self, table_name):
"""
Return a :py:class:`datetime.datetime` object for the date of the
data in the specified table name.
:param table_name: name of the table
:type table_name: str
:return: datetime that the table holds data for
:rtype: datetime.datetime
"""
m = self._table_re.match(table_name)
dt = datetime.strptime(m.group(1), '%Y%m%d')
return dt | Return a :py:class:`datetime.datetime` object for the date of the
data in the specified table name.
:param table_name: name of the table
:type table_name: str
:return: datetime that the table holds data for
:rtype: datetime.datetime | entailment |
def _run_query(self, query):
"""
Run one query against BigQuery and return the result.
:param query: the query to run
:type query: str
:return: list of per-row response dicts (key => value)
:rtype: ``list``
"""
query_request = self.service.jobs()
logger.debug('Running query: %s', query)
start = datetime.now()
resp = query_request.query(
projectId=self.project_id, body={'query': query}
).execute()
duration = datetime.now() - start
logger.debug('Query response (in %s): %s', duration, resp)
if not resp['jobComplete']:
logger.error('Error: query reported job not complete!')
if int(resp['totalRows']) == 0:
return []
if int(resp['totalRows']) != len(resp['rows']):
logger.error('Error: query reported %s total rows, but only '
'returned %d', resp['totalRows'], len(resp['rows']))
data = []
fields = [f['name'] for f in resp['schema']['fields']]
for row in resp['rows']:
d = {}
for idx, val in enumerate(row['f']):
d[fields[idx]] = val['v']
data.append(d)
return data | Run one query against BigQuery and return the result.
:param query: the query to run
:type query: str
:return: list of per-row response dicts (key => value)
:rtype: ``list`` | entailment |
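A standalone sketch of the row-flattening step at the end of _run_query(), using a hand-built dict shaped like a BigQuery v2 query response instead of a live API call:

resp = {
    'schema': {'fields': [{'name': 'file_project'}, {'name': 'dl_count'}]},
    'rows': [
        {'f': [{'v': 'mypkg'}, {'v': '42'}]},
        {'f': [{'v': 'otherpkg'}, {'v': '7'}]},
    ],
}
fields = [f['name'] for f in resp['schema']['fields']]
data = []
for row in resp['rows']:
    d = {}
    for idx, val in enumerate(row['f']):
        d[fields[idx]] = val['v']
    data.append(d)
print(data)  # [{'file_project': 'mypkg', 'dl_count': '42'}, {'file_project': 'otherpkg', 'dl_count': '7'}]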
def _get_newest_ts_in_table(self, table_name):
"""
Return the timestamp for the newest record in the given table.
:param table_name: name of the table to query
:type table_name: str
:return: timestamp of newest row in table
:rtype: int
"""
logger.debug(
'Querying for newest timestamp in table %s', table_name
)
q = "SELECT TIMESTAMP_TO_SEC(MAX(timestamp)) AS max_ts %s;" % (
self._from_for_table(table_name)
)
res = self._run_query(q)
ts = int(res[0]['max_ts'])
logger.debug('Newest timestamp in table %s: %s', table_name, ts)
return ts | Return the timestamp for the newest record in the given table.
:param table_name: name of the table to query
:type table_name: str
:return: timestamp of newest row in table
:rtype: int | entailment |
def _query_by_installer(self, table_name):
"""
Query for download data broken down by installer, for one day.
:param table_name: table name to query against
:type table_name: str
:return: dict of download information by installer; keys are project
name, values are a dict of installer names to dicts of installer
version to download count.
:rtype: dict
"""
logger.info('Querying for downloads by installer in table %s',
table_name)
q = "SELECT file.project, details.installer.name, " \
"details.installer.version, COUNT(*) as dl_count " \
"%s " \
"%s " \
"GROUP BY file.project, details.installer.name, " \
"details.installer.version;" % (
self._from_for_table(table_name),
self._where_for_projects
)
res = self._run_query(q)
result = self._dict_for_projects()
# iterate through results
for row in res:
# pointer to the per-project result dict
proj = result[row['file_project']]
# grab the name and version; change None to 'unknown'
iname = row['details_installer_name']
iver = row['details_installer_version']
if iname not in proj:
proj[iname] = {}
if iver not in proj[iname]:
proj[iname][iver] = 0
proj[iname][iver] += int(row['dl_count'])
return result | Query for download data broken down by installer, for one day.
:param table_name: table name to query against
:type table_name: str
:return: dict of download information by installer; keys are project
name, values are a dict of installer names to dicts of installer
version to download count.
:rtype: dict | entailment |
def _query_by_system(self, table_name):
"""
Query for download data broken down by system, for one day.
:param table_name: table name to query against
:type table_name: str
:return: dict of download information by system; keys are project name,
values are a dict of system names to download count.
:rtype: dict
"""
logger.info('Querying for downloads by system in table %s',
table_name)
q = "SELECT file.project, details.system.name, COUNT(*) as dl_count " \
"%s " \
"%s " \
"GROUP BY file.project, details.system.name;" % (
self._from_for_table(table_name),
self._where_for_projects
)
res = self._run_query(q)
result = self._dict_for_projects()
for row in res:
system = row['details_system_name']
result[row['file_project']][system] = int(
row['dl_count'])
return result | Query for download data broken down by system, for one day.
:param table_name: table name to query against
:type table_name: str
:return: dict of download information by system; keys are project name,
values are a dict of system names to download count.
:rtype: dict | entailment |
def _query_by_distro(self, table_name):
"""
Query for download data broken down by OS distribution, for one day.
:param table_name: table name to query against
:type table_name: str
:return: dict of download information by distro; keys are project name,
values are a dict of distro names to dicts of distro version to
download count.
:rtype: dict
"""
logger.info('Querying for downloads by distro in table %s', table_name)
q = "SELECT file.project, details.distro.name, " \
"details.distro.version, COUNT(*) as dl_count " \
"%s " \
"%s " \
"GROUP BY file.project, details.distro.name, " \
"details.distro.version;" % (
self._from_for_table(table_name),
self._where_for_projects
)
res = self._run_query(q)
result = self._dict_for_projects()
# iterate through results
for row in res:
# pointer to the per-project result dict
proj = result[row['file_project']]
# grab the name and version; change None to 'unknown'
dname = row['details_distro_name']
dver = row['details_distro_version']
if dname not in proj:
proj[dname] = {}
if dver not in proj[dname]:
proj[dname][dver] = 0
proj[dname][dver] += int(row['dl_count'])
return result | Query for download data broken down by OS distribution, for one day.
:param table_name: table name to query against
:type table_name: str
:return: dict of download information by distro; keys are project name,
values are a dict of distro names to dicts of distro version to
download count.
:rtype: dict | entailment |
def query_one_table(self, table_name):
"""
Run all queries for the given table name (date) and update the cache.
:param table_name: table name to query against
:type table_name: str
"""
table_date = self._datetime_for_table_name(table_name)
logger.info('Running all queries for date table: %s (%s)', table_name,
table_date.strftime('%Y-%m-%d'))
final = self._dict_for_projects()
try:
data_timestamp = self._get_newest_ts_in_table(table_name)
except HttpError as exc:
try:
content = json.loads(exc.content.decode('utf-8'))
if content['error']['message'].startswith('Not found: Table'):
logger.error("Table %s not found; no data for that day",
table_name)
return
except:
pass
raise exc
# data queries
# note - ProjectStats._is_empty_cache_record() needs to know keys
for name, func in {
'by_version': self._query_by_version,
'by_file_type': self._query_by_file_type,
'by_installer': self._query_by_installer,
'by_implementation': self._query_by_implementation,
'by_system': self._query_by_system,
'by_distro': self._query_by_distro,
'by_country': self._query_by_country_code
}.items():
tmp = func(table_name)
for proj_name in tmp:
final[proj_name][name] = tmp[proj_name]
# add to cache
for proj_name in final:
self.cache.set(proj_name, table_date, final[proj_name],
data_timestamp) | Run all queries for the given table name (date) and update the cache.
:param table_name: table name to query against
:type table_name: str | entailment |
def _have_cache_for_date(self, dt):
"""
Return True if we have cached data for all projects for the specified
datetime. Return False otherwise.
:param dt: datetime to find cache for
:type dt: datetime.datetime
:return: True if we have cache for all projects for this date, False
otherwise
:rtype: bool
"""
for p in self.projects:
if self.cache.get(p, dt) is None:
return False
return True | Return True if we have cached data for all projects for the specified
datetime. Return False otherwise.
:param dt: datetime to find cache for
:type dt: datetime.datetime
:return: True if we have cache for all projects for this date, False
otherwise
:rtype: bool | entailment |
def backfill_history(self, num_days, available_table_names):
"""
Backfill historical data for days that are missing.
:param num_days: number of days of historical data to backfill,
if missing
:type num_days: int
:param available_table_names: names of available per-date tables
:type available_table_names: ``list``
"""
if num_days == -1:
# skip the first date, under the assumption that data may be
# incomplete
logger.info('Backfilling all available history')
start_table = available_table_names[1]
else:
logger.info('Backfilling %d days of history', num_days)
start_table = available_table_names[-1 * num_days]
start_date = self._datetime_for_table_name(start_table)
end_table = available_table_names[-3]
end_date = self._datetime_for_table_name(end_table)
logger.debug(
'Backfilling history from %s (%s) to %s (%s)', start_table,
start_date.strftime('%Y-%m-%d'), end_table,
end_date.strftime('%Y-%m-%d')
)
for days in range((end_date - start_date).days + 1):
backfill_dt = start_date + timedelta(days=days)
if self._have_cache_for_date(backfill_dt):
logger.info('Cache present for all projects for %s; skipping',
backfill_dt.strftime('%Y-%m-%d'))
continue
backfill_table = self._table_name_for_datetime(backfill_dt)
logger.info('Backfilling %s (%s)', backfill_table,
backfill_dt.strftime('%Y-%m-%d'))
self.query_one_table(backfill_table) | Backfill historical data for days that are missing.
:param num_days: number of days of historical data to backfill,
if missing
:type num_days: int
:param available_table_names: names of available per-date tables
:type available_table_names: ``list`` | entailment |
def run_queries(self, backfill_num_days=7):
"""
Run the data queries for the specified projects.
:param backfill_num_days: number of days of historical data to backfill,
if missing
:type backfill_num_days: int
"""
available_tables = self._get_download_table_ids()
logger.debug('Found %d available download tables: %s',
len(available_tables), available_tables)
today_table = available_tables[-1]
yesterday_table = available_tables[-2]
self.query_one_table(today_table)
self.query_one_table(yesterday_table)
self.backfill_history(backfill_num_days, available_tables) | Run the data queries for the specified projects.
:param backfill_num_days: number of days of historical data to backfill,
if missing
:type backfill_num_days: int | entailment |
def filter_data_columns(data):
"""
Given a dict of data such as those in :py:class:`~.ProjectStats` attributes,
made up of :py:class:`datetime.datetime` keys and values of dicts of column
keys to counts, return a list of the distinct column keys in sorted order.
:param data: data dict as returned by ProjectStats attributes
:type data: dict
:return: sorted list of distinct keys
:rtype: ``list``
"""
keys = set()
for dt, d in data.items():
for k in d:
keys.add(k)
return sorted([x for x in keys]) | Given a dict of data such as those in :py:class:`~.ProjectStats` attributes,
made up of :py:class:`datetime.datetime` keys and values of dicts of column
keys to counts, return a list of the distinct column keys in sorted order.
:param data: data dict as returned by ProjectStats attributes
:type data: dict
:return: sorted list of distinct keys
:rtype: ``list`` | entailment |
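A small standalone example of the distinct-key collection filter_data_columns() performs, with the date keys replaced by plain strings for brevity:

data = {
    'day1': {'1.0.0': 5, '1.1.0': 2},
    'day2': {'1.1.0': 3, '2.0.0': 1},
}
keys = set()
for d in data.values():
    keys.update(d)
print(sorted(keys))   # ['1.0.0', '1.1.0', '2.0.0']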
def _generate_html(self):
"""
Generate the HTML for the specified graphs.
:return: rendered HTML document
:rtype: str
"""
logger.debug('Generating templated HTML')
env = Environment(
loader=PackageLoader('pypi_download_stats', 'templates'),
extensions=['jinja2.ext.loopcontrols'])
env.filters['format_date_long'] = filter_format_date_long
env.filters['format_date_ymd'] = filter_format_date_ymd
env.filters['data_columns'] = filter_data_columns
template = env.get_template('base.html')
logger.debug('Rendering template')
html = template.render(
project=self.project_name,
cache_date=self._stats.as_of_datetime,
user=getuser(),
host=platform_node(),
version=VERSION,
proj_url=PROJECT_URL,
graphs=self._graphs,
graph_keys=self.GRAPH_KEYS,
resources=Resources(mode='inline').render(),
badges=self._badges
)
logger.debug('Template rendered')
return html | Generate the HTML for the specified graphs.
:return: rendered HTML document
:rtype: str
def _data_dict_to_bokeh_chart_data(self, data):
"""
Take a dictionary of data, as returned by the :py:class:`~.ProjectStats`
per_*_data properties, return a 2-tuple of data dict and x labels list
usable by bokeh.charts.
:param data: data dict from :py:class:`~.ProjectStats` property
:type data: dict
:return: 2-tuple of data dict, x labels list
:rtype: tuple
"""
labels = []
# find all the data keys
keys = set()
for date in data:
for k in data[date]:
keys.add(k)
# final output dict
out_data = {}
for k in keys:
out_data[k] = []
# transform the data; deal with sparse data
for data_date, data_dict in sorted(data.items()):
labels.append(data_date)
for k in out_data:
if k in data_dict:
out_data[k].append(data_dict[k])
else:
out_data[k].append(0)
return out_data, labels | Take a dictionary of data, as returned by the :py:class:`~.ProjectStats`
per_*_data properties, return a 2-tuple of data dict and x labels list
usable by bokeh.charts.
:param data: data dict from :py:class:`~.ProjectStats` property
:type data: dict
:return: 2-tuple of data dict, x labels list
:rtype: tuple | entailment |
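A standalone sketch of the sparse-data handling in _data_dict_to_bokeh_chart_data(): every series gets one value per date, with 0 filled in for dates where the series is missing. Dates and counts are hypothetical:

from datetime import datetime

data = {
    datetime(2016, 8, 1): {'1.0.0': 10},
    datetime(2016, 8, 2): {'1.0.0': 12, '1.1.0': 3},
}
keys = {k for per_day in data.values() for k in per_day}
out_data = {k: [] for k in keys}
labels = []
for data_date, data_dict in sorted(data.items()):
    labels.append(data_date)
    for k in out_data:
        out_data[k].append(data_dict.get(k, 0))
print(out_data)  # {'1.0.0': [10, 12], '1.1.0': [0, 3]} (key order may vary)
print(labels)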
def _limit_data(self, data):
"""
Find the per-day average of each series in the data over the last 7
days; drop all but the top 10.
:param data: original graph data
:type data: dict
:return: dict containing only the top 10 series, based on average over
the last 7 days.
:rtype: dict
"""
if len(data.keys()) <= 10:
logger.debug("Data has less than 10 keys; not limiting")
return data
# average last 7 days of each series
avgs = {}
for k in data:
if len(data[k]) <= 7:
vals = data[k]
else:
vals = data[k][-7:]
avgs[k] = sum(vals) / len(vals)
# hold state
final_data = {} # final data dict
other = [] # values for dropped/'other' series
count = 0 # iteration counter
# iterate the sorted averages; either drop or keep
for k in sorted(avgs, key=avgs.get, reverse=True):
if count < 10:
final_data[k] = data[k]
logger.debug("Keeping data series %s (average over last 7 "
"days of data: %d", k, avgs[k])
else:
logger.debug("Adding data series %s to 'other' (average over "
"last 7 days of data: %d", k, avgs[k])
other.append(data[k])
count += 1
# sum up the other data and add to final
final_data['other'] = [sum(series) for series in zip(*other)]
return final_data | Find the per-day average of each series in the data over the last 7
days; drop all but the top 10.
:param data: original graph data
:type data: dict
:return: dict containing only the top 10 series, based on average over
the last 7 days.
:rtype: dict | entailment |
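A standalone sketch of the series-limiting logic above: rank series by their average over the last seven values, keep the top ten, and sum the rest into an 'other' series. The thirteen input series are synthetic:

series = {name: [i + rank for i in range(14)]
          for rank, name in enumerate('abcdefghijklm')}   # 13 synthetic series
avgs = {k: sum(v[-7:]) / len(v[-7:]) for k, v in series.items()}
top = sorted(avgs, key=avgs.get, reverse=True)[:10]
final = {k: series[k] for k in top}
dropped = [series[k] for k in series if k not in top]
final['other'] = [sum(vals) for vals in zip(*dropped)]
print(sorted(final))        # ten kept series plus 'other'
print(len(final['other']))  # 14, one summed value per data point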
def _generate_graph(self, name, title, stats_data, y_name):
"""
Generate a downloads graph; append it to ``self._graphs``.
:param name: HTML name of the graph, also used in ``self.GRAPH_KEYS``
:type name: str
:param title: human-readable title for the graph
:type title: str
:param stats_data: data dict from ``self._stats``
:type stats_data: dict
:param y_name: Y axis metric name
:type y_name: str
"""
logger.debug('Generating chart data for %s graph', name)
orig_data, labels = self._data_dict_to_bokeh_chart_data(stats_data)
data = self._limit_data(orig_data)
logger.debug('Generating %s graph', name)
script, div = FancyAreaGraph(
name, '%s %s' % (self.project_name, title), data, labels,
y_name).generate_graph()
logger.debug('%s graph generated', name)
self._graphs[name] = {
'title': title,
'script': script,
'div': div,
'raw_data': stats_data
} | Generate a downloads graph; append it to ``self._graphs``.
:param name: HTML name of the graph, also used in ``self.GRAPH_KEYS``
:type name: str
:param title: human-readable title for the graph
:type title: str
:param stats_data: data dict from ``self._stats``
:type stats_data: dict
:param y_name: Y axis metric name
:type y_name: str | entailment |
def _generate_badges(self):
"""
Generate download badges. Append them to ``self._badges``.
"""
daycount = self._stats.downloads_per_day
day = self._generate_badge('Downloads', '%d/day' % daycount)
self._badges['per-day'] = day
weekcount = self._stats.downloads_per_week
if weekcount is None:
# we don't have enough data for week (or month)
return
week = self._generate_badge('Downloads', '%d/week' % weekcount)
self._badges['per-week'] = week
monthcount = self._stats.downloads_per_month
if monthcount is None:
# we don't have enough data for month
return
month = self._generate_badge('Downloads', '%d/month' % monthcount)
self._badges['per-month'] = month | Generate download badges. Append them to ``self._badges``. | entailment |
def _generate_badge(self, subject, status):
"""
Generate SVG for one badge via shields.io.
:param subject: subject; left-hand side of badge
:type subject: str
:param status: status; right-hand side of badge
:type status: str
:return: badge SVG
:rtype: str
"""
url = 'https://img.shields.io/badge/%s-%s-brightgreen.svg' \
'?style=flat&maxAge=3600' % (subject, status)
logger.debug("Getting badge for %s => %s (%s)", subject, status, url)
res = requests.get(url)
if res.status_code != 200:
raise Exception("Error: got status %s for shields.io badge: %s",
res.status_code, res.text)
logger.debug('Got %d character response from shields.io', len(res.text))
return res.text | Generate SVG for one badge via shields.io.
:param subject: subject; left-hand side of badge
:type subject: str
:param status: status; right-hand side of badge
:type status: str
:return: badge SVG
:rtype: str | entailment |
def generate(self):
"""
Generate all output types and write to disk.
"""
logger.info('Generating graphs')
self._generate_graph(
'by-version',
'Downloads by Version',
self._stats.per_version_data,
'Version'
)
self._generate_graph(
'by-file-type',
'Downloads by File Type',
self._stats.per_file_type_data,
'File Type'
)
self._generate_graph(
'by-installer',
'Downloads by Installer',
self._stats.per_installer_data,
'Installer'
)
self._generate_graph(
'by-implementation',
'Downloads by Python Implementation/Version',
self._stats.per_implementation_data,
'Implementation/Version'
)
self._generate_graph(
'by-system',
'Downloads by System Type',
self._stats.per_system_data,
'System'
)
self._generate_graph(
'by-country',
'Downloads by Country',
self._stats.per_country_data,
'Country'
)
self._generate_graph(
'by-distro',
'Downloads by Distro',
self._stats.per_distro_data,
'Distro'
)
self._generate_badges()
logger.info('Generating HTML')
html = self._generate_html()
html_path = os.path.join(self.output_dir, 'index.html')
with open(html_path, 'wb') as fh:
fh.write(html.encode('utf-8'))
logger.info('HTML report written to %s', html_path)
logger.info('Writing SVG badges')
for name, svg in self._badges.items():
path = os.path.join(self.output_dir, '%s.svg' % name)
with open(path, 'w') as fh:
fh.write(svg)
logger.info('%s badge written to: %s', name, path) | Generate all output types and write to disk. | entailment |
def datetime_format(desired_format, datetime_instance=None, *args, **kwargs):
"""
Replaces format style phrases (listed in the dt_exps dictionary)
with this datetime instance's information.
.. code :: python
reusables.datetime_format("Hey, it's {month-full} already!")
"Hey, it's March already!"
:param desired_format: string to add datetime details to
:param datetime_instance: datetime.datetime instance, defaults to 'now'
:param args: additional args to pass to str.format
:param kwargs: additional kwargs to pass to str format
:return: formatted string
"""
for strf, exp in datetime_regex.datetime.format.items():
desired_format = exp.sub(strf, desired_format)
if not datetime_instance:
datetime_instance = now()
return datetime_instance.strftime(desired_format.format(*args, **kwargs)) | Replaces format style phrases (listed in the dt_exps dictionary)
with this datetime instance's information.
.. code :: python
reusables.datetime_format("Hey, it's {month-full} already!")
"Hey, it's March already!"
:param desired_format: string to add datetime details to
:param datetime_instance: datetime.datetime instance, defaults to 'now'
:param args: additional args to pass to str.format
:param kwargs: additional kwargs to pass to str format
:return: formatted string | entailment |
def datetime_from_iso(iso_string):
"""
Create a DateTime object from an ISO string
.. code :: python
reusables.datetime_from_iso('2017-03-10T12:56:55.031863')
datetime.datetime(2017, 3, 10, 12, 56, 55, 31863)
:param iso_string: string of an ISO datetime
:return: DateTime object
"""
try:
assert datetime_regex.datetime.datetime.match(iso_string).groups()[0]
except (ValueError, AssertionError, IndexError, AttributeError):
raise TypeError("String is not in ISO format")
try:
return datetime.datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
return datetime.datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S") | Create a DateTime object from an ISO string
.. code :: python
reusables.datetime_from_iso('2017-03-10T12:56:55.031863')
datetime.datetime(2017, 3, 10, 12, 56, 55, 31863)
:param iso_string: string of an ISO datetime
:return: DateTime object | entailment |
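The parsing fallback (microseconds first, then whole seconds) can be exercised with the standard library alone; a minimal self-contained sketch:
import datetime

for iso in ("2017-03-10T12:56:55.031863", "2017-03-10T12:56:55"):
    try:
        parsed = datetime.datetime.strptime(iso, "%Y-%m-%dT%H:%M:%S.%f")
    except ValueError:
        # No fractional seconds in the string, fall back to the plain format
        parsed = datetime.datetime.strptime(iso, "%Y-%m-%dT%H:%M:%S")
    print(repr(parsed))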
def now(utc=False, tz=None):
"""
Get a current DateTime object. By default it is local.
.. code:: python
reusables.now()
# DateTime(2016, 12, 8, 22, 5, 2, 517000)
reusables.now().format("It's {24-hour}:{min}")
# "It's 22:05"
:param utc: bool, default False, UTC time not local
:param tz: TimeZone as specified by the datetime module
:return: reusables.DateTime
"""
return datetime.datetime.utcnow() if utc else datetime.datetime.now(tz=tz) | Get a current DateTime object. By default it is local.
.. code:: python
reusables.now()
# DateTime(2016, 12, 8, 22, 5, 2, 517000)
reusables.now().format("It's {24-hour}:{min}")
# "It's 22:05"
:param utc: bool, default False, UTC time not local
:param tz: TimeZone as specified by the datetime module
:return: reusables.DateTime | entailment |
def run(command, input=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
timeout=None, copy_local_env=False, **kwargs):
"""
Cross platform compatible subprocess with CompletedProcess return.
No formatting or encoding is performed on the output of subprocess, so its
output will appear the same on each version / interpreter as before.
.. code:: python
reusables.run('echo "hello world!', shell=True)
# CPython 3.6
# CompletedProcess(args='echo "hello world!', returncode=0,
# stdout=b'"hello world!\\r\\n', stderr=b'')
#
# PyPy 5.4 (Python 2.7.10)
# CompletedProcess(args='echo "hello world!', returncode=0L,
# stdout='"hello world!\\r\\n')
Timeout is only usable in Python 3.x, as it was not implemented before then;
a NotImplementedError will be raised if it is specified on a 2.x version of Python.
:param command: command to run, str if shell=True otherwise must be list
:param input: send something to `communicate`
:param stdout: PIPE or None
:param stderr: PIPE or None
:param timeout: max time to wait for command to complete
:param copy_local_env: Use all current ENV vars in the subprocess as well
:param kwargs: additional arguments to pass to Popen
:return: CompletedProcess class
"""
if copy_local_env:
# Copy local env first and overwrite with anything manually specified
env = os.environ.copy()
env.update(kwargs.pop('env', {}))  # pop so 'env' is not also passed via **kwargs
else:
env = kwargs.pop('env', None)  # pop so 'env' is not also passed via **kwargs
if sys.version_info >= (3, 5):
return subprocess.run(command, input=input, stdout=stdout,
stderr=stderr, timeout=timeout, env=env,
**kwargs)
# Created here instead of root level as it should never need to be
# manually created or referenced
class CompletedProcess(object):
"""A backwards compatible near clone of subprocess.CompletedProcess"""
def __init__(self, args, returncode, stdout=None, stderr=None):
self.args = args
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
def __repr__(self):
args = ['args={0!r}'.format(self.args),
'returncode={0!r}'.format(self.returncode),
'stdout={0!r}'.format(self.stdout) if self.stdout else '',
'stderr={0!r}'.format(self.stderr) if self.stderr else '']
return "{0}({1})".format(type(self).__name__,
', '.join(filter(None, args)))
def check_returncode(self):
if self.returncode:
if python_version < (2, 7):
raise subprocess.CalledProcessError(self.returncode,
self.args)
raise subprocess.CalledProcessError(self.returncode,
self.args,
self.stdout)
proc = subprocess.Popen(command, stdout=stdout, stderr=stderr,
env=env, **kwargs)
if PY3:
out, err = proc.communicate(input=input, timeout=timeout)
else:
if timeout:
raise NotImplementedError("Timeout is only available on Python 3")
out, err = proc.communicate(input=input)
return CompletedProcess(command, proc.returncode, out, err) | Cross platform compatible subprocess with CompletedProcess return.
No formatting or encoding is performed on the output of subprocess, so its
output will appear the same on each version / interpreter as before.
.. code:: python
reusables.run('echo "hello world!', shell=True)
# CPython 3.6
# CompletedProcess(args='echo "hello world!', returncode=0,
# stdout=b'"hello world!\\r\\n', stderr=b'')
#
# PyPy 5.4 (Python 2.7.10)
# CompletedProcess(args='echo "hello world!', returncode=0L,
# stdout='"hello world!\\r\\n')
Timeout is only usable in Python 3.x, as it was not implemented before then;
a NotImplementedError will be raised if it is specified on a 2.x version of Python.
:param command: command to run, str if shell=True otherwise must be list
:param input: send something to `communicate`
:param stdout: PIPE or None
:param stderr: PIPE or None
:param timeout: max time to wait for command to complete
:param copy_local_env: Use all current ENV vars in the subprocess as well
:param kwargs: additional arguments to pass to Popen
:return: CompletedProcess class | entailment |
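A hypothetical usage sketch, assuming the reusables package providing run() is installed; the command and its output are illustrative:
import reusables

result = reusables.run(["python", "--version"], copy_local_env=True)
result.check_returncode()              # raises CalledProcessError on a non-zero exit
print(result.returncode)               # 0
print(result.stdout or result.stderr)  # e.g. b'Python 3.6.0\n' (the stream used varies by interpreter)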
def run_in_pool(target, iterable, threaded=True, processes=4,
asynchronous=False, target_kwargs=None):
""" Run a set of iterables to a function in a Threaded or MP Pool.
.. code: python
def func(a):
return a + a
reusables.run_in_pool(func, [1,2,3,4,5])
# [2, 4, 6, 8, 10]
:param target: function to run
:param iterable: positional arg to pass to function
:param threaded: Threaded if True multiprocessed if False
:param processes: Number of workers
:param asynchronous: will do map_async if True
:param target_kwargs: Keyword arguments to set on the function as a partial
:return: pool results
"""
my_pool = pool.ThreadPool if threaded else pool.Pool
if target_kwargs:
target = partial(target, **target_kwargs)
p = my_pool(processes)
try:
results = (p.map_async(target, iterable) if asynchronous
else p.map(target, iterable))
finally:
p.close()
p.join()
return results | Run each item of an iterable through a function in a Threaded or MP Pool.
.. code: python
def func(a):
return a + a
reusables.run_in_pool(func, [1,2,3,4,5])
# [2, 4, 6, 8, 10]
:param target: function to run
:param iterable: positional arg to pass to function
:param threaded: Threaded if True multiprocessed if False
:param processes: Number of workers
:param asynchronous: will do map_async if True
:param target_kwargs: Keyword arguments to set on the function as a partial
:return: pool results | entailment |
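A hypothetical usage sketch showing target_kwargs being bound onto the target before the pool map, assuming the reusables package is installed:
import reusables

def scale(x, factor=1):
    return x * factor

# Threaded pool by default; factor is attached to the target via functools.partial
print(reusables.run_in_pool(scale, [1, 2, 3, 4, 5], target_kwargs={"factor": 3}))
# [3, 6, 9, 12, 15]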
def tree_view(dictionary, level=0, sep="| "):
"""
View a dictionary as a tree.
"""
return "".join(["{0}{1}\n{2}".format(sep * level, k,
tree_view(v, level + 1, sep=sep) if isinstance(v, dict)
else "") for k, v in dictionary.items()]) | View a dictionary as a tree. | entailment |
def to_dict(self, in_dict=None):
"""
Turn the Namespace and sub Namespaces back into a native
python dictionary.
:param in_dict: Do not use, for self recursion
:return: python dictionary of this Namespace
"""
in_dict = in_dict if in_dict else self
out_dict = dict()
for k, v in in_dict.items():
if isinstance(v, Namespace):
v = v.to_dict()
out_dict[k] = v
return out_dict | Turn the Namespace and sub Namespaces back into a native
python dictionary.
:param in_dict: Do not use, for self recursion
:return: python dictionary of this Namespace | entailment |
def list(self, item, default=None, spliter=",", strip=True, mod=None):
""" Return value of key as a list
:param item: key of value to transform
:param mod: function to map against list
:param default: value to return if item does not exist
:param spliter: character to split str on
:param strip: strip surrounding brackets and whitespace from the items
:return: list of items
"""
try:
item = self.__getattr__(item)
except AttributeError as err:
if default is not None:
return default
raise err
if strip:
item = item.lstrip("[").rstrip("]")
out = [x.strip() if strip else x for x in item.split(spliter)]
if mod:
return list(map(mod, out))
return out | Return value of key as a list
:param item: key of value to transform
:param mod: function to map against list
:param default: value to return if item does not exist
:param spliter: character to split str on
:param strip: strip surrounding brackets and whitespace from the items
:return: list of items | entailment |
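The splitting and stripping the method applies to a raw config value can be shown standalone (illustrative values; the Namespace lookup is omitted):
raw = "[alpha, beta, gamma]"
raw = raw.lstrip("[").rstrip("]")            # drop surrounding brackets
items = [x.strip() for x in raw.split(",")]  # split and strip whitespace
print(items)                                 # ['alpha', 'beta', 'gamma']
print(list(map(str.upper, items)))           # with a mod function applied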
def download(url, save_to_file=True, save_dir=".", filename=None,
block_size=64000, overwrite=False, quiet=False):
"""
Download a given URL to either file or memory
:param url: Full url (with protocol) of path to download
:param save_to_file: boolean if it should be saved to file or not
:param save_dir: location of saved file, default is current working dir
:param filename: filename to save as
:param block_size: download chunk size
:param overwrite: overwrite file if it already exists
:param quiet: boolean to turn off logging for function
:return: save location (or content if not saved to file)
"""
if save_to_file:
if not filename:
filename = safe_filename(url.split('/')[-1])
if not filename:
filename = "downloaded_at_{}.file".format(time.time())
save_location = os.path.abspath(os.path.join(save_dir, filename))
if os.path.exists(save_location) and not overwrite:
logger.error("File {0} already exists".format(save_location))
return False
else:
save_location = "memory"
try:
request = urlopen(url)
except ValueError as err:
if not quiet and "unknown url type" in str(err):
logger.error("Please make sure URL is formatted correctly and"
" starts with http:// or other protocol")
raise err
except Exception as err:
if not quiet:
logger.error("Could not download {0} - {1}".format(url, err))
raise err
try:
kb_size = int(request.headers["Content-Length"]) / 1024
except Exception as err:
if not quiet:
logger.debug("Could not determine file size - {0}".format(err))
file_size = "(unknown size)"
else:
file_size = "({0:.1f} {1})".format(*(kb_size, "KB") if kb_size < 9999
else (kb_size / 1024, "MB"))
if not quiet:
logger.info("Downloading {0} {1} to {2}".format(url, file_size,
save_location))
if save_to_file:
with open(save_location, "wb") as f:
while True:
buffer = request.read(block_size)
if not buffer:
break
f.write(buffer)
return save_location
else:
return request.read() | Download a given URL to either file or memory
:param url: Full url (with protocol) of path to download
:param save_to_file: boolean if it should be saved to file or not
:param save_dir: location of saved file, default is current working dir
:param filename: filename to save as
:param block_size: download chunk size
:param overwrite: overwrite file if it already exists
:param quiet: boolean to turn off logging for function
:return: save location (or content if not saved to file) | entailment |
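A hypothetical usage sketch, assuming download() is importable from the reusables package and the URL is reachable:
import reusables

# Save to disk; returns the absolute path, or False if the file already exists
path = reusables.download("http://example.com/", filename="example.html", overwrite=True)

# Keep the response body in memory instead of writing a file
content = reusables.download("http://example.com/", save_to_file=False)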
def url_to_ips(url, port=None, ipv6=False, connect_type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP, flags=0):
"""
Provide a list of IP addresses, uses `socket.getaddrinfo`
.. code:: python
reusables.url_to_ips("example.com", ipv6=True)
# ['2606:2800:220:1:248:1893:25c8:1946']
:param url: hostname to resolve to IP addresses
:param port: port to send to getaddrinfo
:param ipv6: Return IPv6 address if True, otherwise IPv4
:param connect_type: defaults to STREAM connection, can be 0 for all
:param proto: defaults to TCP, can be 0 for all
:param flags: additional flags to pass
:return: list of resolved IPs
"""
try:
results = socket.getaddrinfo(url, port,
(socket.AF_INET if not ipv6
else socket.AF_INET6),
connect_type,
proto,
flags)
except socket.gaierror:
logger.exception("Could not resolve hostname")
return []
return list(set([result[-1][0] for result in results])) | Provide a list of IP addresses, using `socket.getaddrinfo`
.. code:: python
reusables.url_to_ips("example.com", ipv6=True)
# ['2606:2800:220:1:248:1893:25c8:1946']
:param url: hostname to resolve to IP addresses
:param port: port to send to getaddrinfo
:param ipv6: Return IPv6 address if True, otherwise IPv4
:param connect_type: defaults to STREAM connection, can be 0 for all
:param proto: defaults to TCP, can be 0 for all
:param flags: additional flags to pass
:return: list of resolved IPs | entailment |
def ip_to_url(ip_addr):
"""
Resolve a hostname based on an IP address.
This is very limited and will
probably not return any results if it is a shared IP address or an
address with improperly set up DNS records.
.. code:: python
reusables.ip_to_url('93.184.216.34') # example.com
# None
reusables.ip_to_url('8.8.8.8')
# 'google-public-dns-a.google.com'
:param ip_addr: IP address to resolve to hostname
:return: string of hostname or None
"""
try:
return socket.gethostbyaddr(ip_addr)[0]
except (socket.gaierror, socket.herror):
logger.exception("Could not resolve hostname") | Resolve a hostname based off an IP address.
This is very limited and will
probably not return any results if it is a shared IP address or an
address with improperly set up DNS records.
.. code:: python
reusables.ip_to_url('93.184.216.34') # example.com
# None
reusables.ip_to_url('8.8.8.8')
# 'google-public-dns-a.google.com'
:param ip_addr: IP address to resolve to hostname
:return: string of hostname or None | entailment |
def start(self):
"""Create a background thread for httpd and serve 'forever'"""
self._process = threading.Thread(target=self._background_runner)
self._process.start() | Create a background thread for httpd and serve 'forever | entailment |
def get_stream_handler(stream=sys.stderr, level=logging.INFO,
log_format=log_formats.easy_read):
"""
Returns a configured stream handler to add to a logger.
:param stream: which stream to use, defaults to sys.stderr
:param level: logging level to set handler at
:param log_format: formatter to use
:return: stream handler
"""
sh = logging.StreamHandler(stream)
sh.setLevel(level)
sh.setFormatter(logging.Formatter(log_format))
return sh | Returns a configured stream handler to add to a logger.
:param stream: which stream to use, defaults to sys.stderr
:param level: logging level to set handler at
:param log_format: formatter to use
:return: stream handler | entailment |
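A hypothetical usage sketch attaching the handler to a named logger; the import path is assumed and an explicit %-style format string is passed instead of the module's log_formats default:
import logging
import sys

from reusables import get_stream_handler  # assumed import path

log = logging.getLogger("my_app")
log.setLevel(logging.DEBUG)
log.addHandler(get_stream_handler(stream=sys.stdout, level=logging.DEBUG,
                                  log_format="%(asctime)s %(levelname)s %(message)s"))
log.debug("stream handler attached")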