Please provide a description of the function:def sample_multinomial(N, p, size=None):
# ensure s is array
s = np.array([1]) if size is None else np.array([size]).flatten()
def take_samples(ps):
# we have to flatten to make apply_along_axis work.
return np.random.multinomial(N, ps, np.prod(s)).flatten()
# should have shape (prod(size)*p.shape[0], p.shape[1:])
samples = np.apply_along_axis(take_samples, 0, p)
# should have shape (size, p.shape)
samples = samples.reshape(np.concatenate([s, p.shape]))
# should have shape (p.shape, size)
samples = samples.transpose(np.concatenate(
[np.arange(s.ndim, p.ndim+s.ndim), np.arange(s.ndim)]
))
if size is None:
# get rid of trailing singleton dimension.
samples = samples[...,0]
return samples | [
"\n Draws fixed number of samples N from different\n multinomial distributions (with the same number dice sides).\n\n :param int N: How many samples to draw from each distribution.\n :param np.ndarray p: Probabilities specifying each distribution.\n Sum along axis 0 should be 1.\n :param size: Output shape. ``int`` or tuple of\n ``int``s. If the given shape is,\n e.g., ``(m, n, k)``, then m * n * k samples are drawn\n for each distribution.\n Default is None, in which case a single value\n is returned for each distribution.\n\n :rtype: np.ndarray\n :return: Array of shape ``(p.shape, size)`` or p.shape if\n size is ``None``.\n "
] |
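A minimal usage sketch (not part of the original source; assumes numpy is imported as ``np`` and ``sample_multinomial`` is in scope). With ``size=None`` the trailing singleton dimension is dropped, so the output shape matches ``p.shape`` and each distribution contributes counts summing to ``N``:

import numpy as np

# Two hypothetical 2-sided distributions, one per column of p;
# each column sums to 1 along axis 0, as the docstring requires.
p = np.array([[0.2, 0.5],
              [0.8, 0.5]])
draws = sample_multinomial(10, p)
print(draws.shape)        # (2, 2), the same as p.shape
print(draws.sum(axis=0))  # [10 10], each distribution drew N = 10 samples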
Please provide a description of the function:def outer_product(vec):
return (
np.dot(vec[:, np.newaxis], vec[np.newaxis, :])
if len(vec.shape) == 1 else
np.dot(vec, vec.T)
) | [
"\n Returns the outer product of a vector :math:`v`\n with itself, :math:`v v^\\T`.\n "
] |
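A quick check of the behaviour described above (a hedged sketch; assumes ``np`` and ``outer_product`` are in scope):

v = np.array([1.0, 2.0])
print(outer_product(v))  # [[1. 2.], [2. 4.]], the rank-one matrix v v^T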
Please provide a description of the function:def particle_meanfn(weights, locations, fn=None):
warnings.warn('particle_meanfn is deprecated, please use distributions.ParticleDistribution',
DeprecationWarning)
fn_vals = fn(locations) if fn is not None else locations
return np.sum(weights * fn_vals.transpose([1, 0]),
axis=1) | [
"\n Returns the mean of a function :math:`f` over model\n parameters.\n\n :param numpy.ndarray weights: Weights of each particle.\n :param numpy.ndarray locations: Locations of each\n particle.\n :param callable fn: Function of model parameters to\n take the mean of. If `None`, the identity function\n is assumed.\n "
] |
Please provide a description of the function:def particle_covariance_mtx(weights,locations):
# TODO: add shapes to docstring.
warnings.warn('particle_covariance_mtx is deprecated, please use distributions.ParticleDistribution',
DeprecationWarning)
# Find the mean model vector, shape (n_modelparams, ).
mu = particle_meanfn(weights, locations)
# Transpose the particle locations to have shape
# (n_modelparams, n_particles).
xs = locations.transpose([1, 0])
# Give a shorter name to the particle weights, shape (n_particles, ).
ws = weights
cov = (
# This sum is a reduction over the particle index, chosen to be
# axis=2. Thus, the sum represents an expectation value over the
# outer product $x . x^T$.
#
# All three factors have the particle index as the rightmost
# index, axis=2. Using the Einstein summation convention (ESC),
# we can reduce over the particle index easily while leaving
# the model parameter index to vary between the two factors
# of xs.
#
# This corresponds to evaluating A_{m,n} = w_{i} x_{m,i} x_{n,i}
# using the ESC, where A_{m,n} is the temporary array created.
np.einsum('i,mi,ni', ws, xs, xs)
# We finish by subtracting from the above expectation value
# the outer product $mu . mu^T$.
- np.dot(mu[..., np.newaxis], mu[np.newaxis, ...])
)
# The SMC approximation is not guaranteed to produce a
# positive-semidefinite covariance matrix. If a negative eigenvalue
# is produced, we should warn the caller of this.
assert np.all(np.isfinite(cov))
if not np.all(la.eig(cov)[0] >= 0):
warnings.warn('Numerical error in covariance estimation causing positive semidefinite violation.', ApproximationWarning)
return cov | [
"\n Returns an estimate of the covariance of a distribution\n represented by a given set of SMC particle.\n\n :param weights: An array containing the weights of each\n particle.\n :param location: An array containing the locations of\n each particle.\n :rtype: :class:`numpy.ndarray`, shape\n ``(n_modelparams, n_modelparams)``.\n :returns: An array containing the estimated covariance matrix.\n "
] |
Please provide a description of the function:def ellipsoid_volume(A=None, invA=None):
if invA is None and A is None:
raise ValueError("Must pass either inverse(A) or A.")
if invA is None and A is not None:
invA = la.inv(A)
# Find the unit sphere volume.
# http://en.wikipedia.org/wiki/Unit_sphere#General_area_and_volume_formulas
n = invA.shape[0]
Vn = (np.pi ** (n/2)) / gamma(1 + (n/2))
return Vn * la.det(sqrtm(invA)) | [
"\n Returns the volume of an ellipsoid given either its\n matrix or the inverse of its matrix.\n "
] |
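A sanity-check sketch (assumes the function's module imports, ``gamma`` and ``sqrtm``, are available): with ``A`` equal to the identity the ellipsoid is the unit sphere, so the two-dimensional volume should come out as pi.

import numpy as np

print(ellipsoid_volume(A=np.eye(2)))  # approximately 3.14159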
Please provide a description of the function:def mvee(points, tol=0.001):
# This function is a port of the matlab function by
# Nima Moshtagh found here:
# https://www.mathworks.com/matlabcentral/fileexchange/9542-minimum-volume-enclosing-ellipsoid
# with accompanying write-up here:
# https://www.researchgate.net/profile/Nima_Moshtagh/publication/254980367_MINIMUM_VOLUME_ENCLOSING_ELLIPSOIDS/links/54aab5260cf25c4c472f487a.pdf
N, d = points.shape
Q = np.zeros([N,d+1])
Q[:,0:d] = points[0:N,0:d]
Q[:,d] = np.ones([1,N])
Q = np.transpose(Q)
points = np.transpose(points)
count = 1
err = 1
u = (1/N) * np.ones(shape = (N,))
while err > tol:
X = np.dot(np.dot(Q, np.diag(u)), np.transpose(Q))
M = np.diag( np.dot(np.dot(np.transpose(Q), la.inv(X)),Q))
jdx = np.argmax(M)
step_size = (M[jdx] - d - 1)/((d+1)*(M[jdx] - 1))
new_u = (1 - step_size)*u
new_u[jdx] = new_u[jdx] + step_size
count = count + 1
err = la.norm(new_u - u)
u = new_u
U = np.diag(u)
c = np.dot(points,u)
A = (1/d) * la.inv(np.dot(np.dot(points,U), np.transpose(points)) - np.outer(c,c) )
return A, np.transpose(c) | [
"\n Returns the minimum-volume enclosing ellipse (MVEE)\n of a set of points, using the Khachiyan algorithm.\n "
] |
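A usage sketch under stated assumptions (``np`` and ``mvee`` in scope): the returned pair ``(A, c)`` describes the ellipsoid ``(x - c)^T A (x - c) <= 1``, so every input point should satisfy the inequality up to the optimizer tolerance.

np.random.seed(1)
pts = np.random.randn(50, 2)
A, c = mvee(pts, tol=1e-3)
y = pts - c
quad_form = np.einsum('ij,jk,ik->i', y, A, y)
print(np.all(quad_form <= 1 + 1e-2))  # True, within the solver tolerance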
Please provide a description of the function:def in_ellipsoid(x, A, c):
if x.ndim == 1:
y = c - x
return np.einsum('j,jl,l', y, np.linalg.inv(A), y) <= 1
else:
y = c[np.newaxis,:] - x
return np.einsum('ij,jl,il->i', y, np.linalg.inv(A), y) <= 1 | [
"\n Determines which of the points ``x`` are in the\n closed ellipsoid with shape matrix ``A`` centered at ``c``.\n For a single point ``x``, this is computed as\n\n .. math::\n (c-x)^T\\cdot A^{-1}\\cdot (c-x) \\leq 1\n\n :param np.ndarray x: Shape ``(n_points, dim)`` or ``n_points``.\n :param np.ndarray A: Shape ``(dim, dim)``, positive definite\n :param np.ndarray c: Shape ``(dim)``\n :return: `bool` or array of bools of length ``n_points``\n "
] |
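An illustrative sketch (same assumptions as above): with ``A`` the identity and ``c`` the origin, the test reduces to membership in the closed unit ball.

A = np.eye(2)
c = np.zeros(2)
x = np.array([[0.5, 0.5],
              [2.0, 0.0]])
print(in_ellipsoid(x, A, c))  # [ True False]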
Please provide a description of the function:def assert_sigfigs_equal(x, y, sigfigs=3):
# determine which power of 10 best describes x
xpow = np.floor(np.log10(x))
# now rescale so that 1 <= x < 10
x = x * 10**(- xpow)
# scale y by the same amount
y = y * 10**(- xpow)
# now test if abs(x-y) < 1.5 * 10**(-sigfigs)
assert_almost_equal(x, y, sigfigs) | [
"\n Tests if all elements in x and y\n agree up to a certain number of\n significant figures.\n\n :param np.ndarray x: Array of numbers.\n :param np.ndarray y: Array of numbers you want to\n be equal to ``x``.\n :param int sigfigs: How many significant\n figures you demand that they share.\n Default is 3.\n "
] |
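A hedged usage sketch (assumes ``np`` and the function are in scope): the two arrays below agree in their first three significant figures, so the assertion passes silently.

x = np.array([1.2345])
y = np.array([1.2349])
assert_sigfigs_equal(x, y, sigfigs=3)  # passes; sigfigs=5 would raise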
Please provide a description of the function:def format_uncertainty(value, uncertainty, scinotn_break=4):
if uncertainty == 0:
# Return the exact number, without the ± annotation as a fixed point
# number, since all digits matter.
# FIXME: this assumes a precision of 6; need to select that dynamically.
return "{0:f}".format(value)
else:
# Return a string of the form "0.00 \pm 0.01".
mag_unc = int(np.log10(np.abs(uncertainty)))
# Zero should be printed as a single digit; that is, as wide as str "1".
mag_val = int(np.log10(np.abs(value))) if value != 0 else 0
n_digits = max(mag_val - mag_unc, 0)
if abs(mag_val) < abs(mag_unc) and abs(mag_unc) > scinotn_break:
# We're formatting something close to zero, so rescale the uncertainty
# accordingly.
scale = 10**mag_unc
return r"({{0:0.{0}f}} \pm {{1:0.{0}f}}) \times 10^{{2}}".format(
n_digits
).format(
value / scale,
uncertainty / scale,
mag_unc
)
if abs(mag_val) <= scinotn_break:
return r"{{0:0.{n_digits}f}} \pm {{1:0.{n_digits}f}}".format(n_digits=n_digits).format(value, uncertianty)
else:
scale = 10**mag_val
return r"({{0:0.{0}f}} \pm {{1:0.{0}f}}) \times 10^{{2}}".format(
n_digits
).format(
value / scale,
uncertainty / scale,
mag_val
) | [
"\n Given a value and its uncertianty, format as a LaTeX string\n for pretty-printing.\n\n :param int scinotn_break: How many decimal points to print\n before breaking into scientific notation.\n "
] |
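A usage sketch (assumes the function is in scope): the number of printed digits follows the magnitude gap between the value and its uncertainty.

print(format_uncertainty(1.23456, 0.01))  # 1.23 \pm 0.01
print(format_uncertainty(1.23456, 0))     # 1.234560, exact values print fixed-point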
Please provide a description of the function:def compactspace(scale, n):
logit = logistic(scale=scale).ppf
compact_xs = np.linspace(0, 1, n + 2)[1:-1]
return logit(compact_xs) | [
"\n Returns points :math:`x` spaced in the open interval\n :math:`(-\\infty, \\infty)` by linearly spacing in the compactified\n coordinate :math:`s(x) = e^{-\\alpha x} / (1 + e^{-\\alpha x})^2`,\n where :math:`\\alpha` is a scale factor.\n "
] |
Please provide a description of the function:def to_simplex(y):
n = y.shape[-1]
# z are the stick breaking fractions in [0,1]
z = expit(y - np.log(n - np.arange(1, n+1)))
x = np.empty(y.shape)
x[..., 0] = z[..., 0]
x[..., 1:] = z[..., 1:] * (1 - z[..., :-1]).cumprod(axis=-1)
return x | [
"\n Interprets the last index of ``y`` as stick breaking fractions \n in logit space and returns a non-negative array of \n the same shape where the last dimension always sums to unity.\n \n A unit simplex is a list of non-negative numbers :math:`(x_1,...,x_K)`\n that sum to one, :math:`\\sum_{k=1}^K x_k=1`, for example, the \n probabilities of an K-sided die.\n It is sometimes desireable to parameterize this object with variables \n that are unconstrained and \"decorrelated\".\n To this end, we imagine :math:`\\vec{x}` as a partition of the unit \n stick :math:`[0,1]` with :math:`K-1` break points between \n :math:`K` successive intervals of respective lengths :math:`(x_1,...,x_K)`.\n Instead of storing the interval lengths, we start from the left-most break \n point and iteratively store the breaking fractions, :math:`z_k`, \n of the remaining stick.\n This gives the formula \n :math:`z_k=x_k / (1-\\sum_{k'=1}^{k-1}x_k)` with the convention \n :math:`x_0:=0`, \n which has an inverse formula :math:`x_k = z_k(1-z_{k-1})\\cdots(1-z_1)`.\n Note that :math:`z_K=1` since the last stick is not broken; this is the \n result of the redundant information imposed by :math:`\\sum_{k=1}^K x_k=1`.\n To unbound the parameters :math:`z_k` into the real line, \n we pass through the logit function, \n :math:`\\operatorname{logit}(p)=\\log\\frac{p}{1-p}`, \n to end up with the parameterization \n :math:`y_k=\\operatorname{logit}(z_k)+\\log(K-k)`, with the convention \n :math:`y_K=0`.\n The shift by :math:`\\log(K-k)` is largely asthetic and causes the \n uniform simplex :math:`\\vec{x}=(1/K,1/K,...,1/K)` to be mapped to \n :math:`\\vec{x}=(0,0,...,0)`.\n\n Inverse to :func:`from_simplex`.\n\n :param np.ndarray: Array of logit space stick breaking \n fractions along the last index.\n\n :rtype: ``np.ndarray``\n "
] |
Please provide a description of the function:def from_simplex(x):
n = x.shape[-1]
# z are the stick breaking fractions in [0,1]
# the last one is always 1, so don't worry about it
z = np.empty(shape=x.shape)
z[..., 0] = x[..., 0]
z[..., 1:-1] = x[..., 1:-1] / (1 - x[..., :-2].cumsum(axis=-1))
# now z are the logit-transformed breaking fractions
z[..., :-1] = logit(z[..., :-1]) - logit(1 / (n - np.arange(n-1, dtype=float)))
# set this to 0 manually to avoid subtracting inf-inf
z[..., -1] = 0
return z | [
"\n Inteprets the last index of x as unit simplices and returns a\n real array of the sampe shape in logit space.\n\n Inverse to :func:`to_simplex` ; see that function for more details.\n\n :param np.ndarray: Array of unit simplices along the last index.\n \n :rtype: ``np.ndarray``\n "
] |
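A round-trip sketch tying the two functions together (hedged; assumes ``np`` is in scope): ``to_simplex`` should produce non-negative coordinates summing to one, and ``from_simplex`` should invert it up to the ``y_K = 0`` convention. The ``errstate`` guard only silences the intentional ``log(0)`` in the last shift.

y = np.array([0.1, -0.3, 0.0, 0.0])  # last entry is 0 by convention
with np.errstate(divide='ignore'):
    x = to_simplex(y)
print(np.isclose(x.sum(), 1.0))         # True
print(np.allclose(from_simplex(x), y))  # True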
Please provide a description of the function:def join_struct_arrays(arrays):
# taken from http://stackoverflow.com/questions/5355744/numpy-joining-structured-arrays
sizes = np.array([a.itemsize for a in arrays])
offsets = np.r_[0, sizes.cumsum()]
shape = arrays[0].shape
joint = np.empty(shape + (offsets[-1],), dtype=np.uint8)
for a, size, offset in zip(arrays, sizes, offsets):
joint[...,offset:offset+size] = np.atleast_1d(a).view(np.uint8).reshape(shape + (size,))
dtype = sum((a.dtype.descr for a in arrays), [])
return joint.ravel().view(dtype) | [
"\n Takes a list of possibly structured arrays, concatenates their\n dtypes, and returns one big array with that dtype. Does the\n inverse of ``separate_struct_array``.\n\n :param list arrays: List of ``np.ndarray``s\n "
] |
Please provide a description of the function:def separate_struct_array(array, dtypes):
try:
offsets = np.cumsum([np.dtype(dtype).itemsize for dtype in dtypes])
except TypeError:
dtype_size = np.dtype(dtypes).itemsize
num_fields = int(array.nbytes / (array.size * dtype_size))
offsets = np.cumsum([dtype_size] * num_fields)
dtypes = [dtypes] * num_fields
offsets = np.concatenate([[0], offsets]).astype(int)
uint_array = array.view(np.uint8).reshape(array.shape + (-1,))
return [
uint_array[..., offsets[idx]:offsets[idx+1]].flatten().view(dtype)
for idx, dtype in enumerate(dtypes)
] | [
"\n Takes an array with a structured dtype, and separates it out into\n a list of arrays with dtypes coming from the input ``dtypes``.\n Does the inverse of ``join_struct_arrays``.\n\n :param np.ndarray array: Structured array.\n :param dtypes: List of ``np.dtype``, or just a ``np.dtype`` and the number of\n them is figured out automatically by counting bytes.\n "
] |
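A round-trip sketch for the pair of helpers above (hedged; assumes ``np`` is in scope): join two structured arrays, then split them back out by listing the component dtypes.

a = np.array([(1,), (2,), (3,)], dtype=[('x', np.int32)])
b = np.array([(1.5,), (2.5,), (3.5,)], dtype=[('y', np.float64)])
joined = join_struct_arrays([a, b])
print(joined['x'], joined['y'])  # [1 2 3] [1.5 2.5 3.5]
xs, ys = separate_struct_array(joined, [np.int32, np.float64])
print(xs, ys)                    # [1 2 3] [1.5 2.5 3.5]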
Please provide a description of the function:def sqrtm_psd(A, est_error=True, check_finite=True):
w, v = eigh(A, check_finite=check_finite)
mask = w <= 0
w[mask] = 0
np.sqrt(w, out=w)
A_sqrt = (v * w).dot(v.conj().T)
if est_error:
return A_sqrt, np.linalg.norm(np.dot(A_sqrt, A_sqrt) - A, 'fro')
else:
return A_sqrt | [
"\n Returns the matrix square root of a positive semidefinite matrix,\n truncating negative eigenvalues.\n "
] |
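A sanity-check sketch (assumes ``np`` and the function are in scope): the PSD square root of ``diag(4, 9)`` is ``diag(2, 3)``, and the returned Frobenius-norm error should be essentially zero.

A = np.diag([4.0, 9.0])
A_sqrt, err = sqrtm_psd(A)
print(A_sqrt)       # [[2. 0.], [0. 3.]]
print(err < 1e-10)  # True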
Please provide a description of the function:def decorate_init(init_decorator):
def class_decorator(cls):
cls.__init__ = init_decorator(cls.__init__)
return cls
return class_decorator | [
"\n Given a class definition and a decorator that acts on methods,\n applies that decorator to the class' __init__ method.\n Useful for decorating __init__ while still allowing __init__ to be\n inherited.\n "
] |
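A usage sketch with a hypothetical ``log_call`` decorator (for illustration only): the decorator wraps ``__init__`` at class-definition time, so subclasses inherit the wrapped initializer.

def log_call(fn):
    def wrapper(*args, **kwargs):
        print("init called")
        return fn(*args, **kwargs)
    return wrapper

@decorate_init(log_call)
class Foo(object):
    pass

Foo()  # prints "init called"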
Please provide a description of the function:def binom_est_error(p, N, hedge = float(0)):
# asymptotic np.sqrt(p * (1 - p) / N)
return np.sqrt(p*(1-p)/(N+2*hedge+1)) | [
"\n "
] |
Please provide a description of the function:def gell_mann_basis(dim):
# Start by making an empty array of the right shape to
# hold the matrices that we construct.
basis = np.zeros((dim**2, dim, dim), dtype=complex)
# The first matrix should be the identity.
basis[0, :, :] = np.eye(dim) / np.sqrt(dim)
# The next dim - 1 basis elements should be diagonal,
# with all but one element nonnegative.
for idx_basis in range(1, dim):
basis[idx_basis, :, :] = np.diag(np.concatenate([
np.ones((idx_basis, )),
[-idx_basis],
np.zeros((dim - idx_basis - 1, ))
])) / np.sqrt(idx_basis + idx_basis**2)
# Finally, we get the off-diagonal matrices.
# These rely on some index gymnastics I don't yet fully
# understand.
y_offset = dim * (dim - 1) // 2
for idx_i in range(1, dim):
for idx_j in range(idx_i):
idx_basis = (idx_i - 1) * (idx_i) // 2 + idx_j + dim
basis[idx_basis, [idx_i, idx_j], [idx_j, idx_i]] = 1 / np.sqrt(2)
basis[idx_basis + y_offset, [idx_i, idx_j], [idx_j, idx_i]] = [1j / np.sqrt(2), -1j / np.sqrt(2)]
return TomographyBasis(basis, [dim], r'\gamma', name='gell_mann_basis') | [
" \n Returns a :class:`~qinfer.tomography.TomographyBasis` on dim dimensions\n using the generalized Gell-Mann matrices.\n\n This implementation is based on a MATLAB-language implementation\n provided by Carlos Riofrío, Seth Merkel and Andrew Silberfarb.\n Used with permission.\n\n :param int dim: Dimension of the individual matrices making up\n the returned basis.\n :rtype: :class:`~qinfer.tomography.TomographyBasis`\n :return: A basis of ``dim * dim`` Gell-Mann matrices.\n "
] |
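A hedged sanity check (assumes the returned ``TomographyBasis`` exposes its matrices through a ``.data`` attribute, as ``tensor_product_basis`` below suggests): the generalized Gell-Mann matrices should be orthonormal under the Hilbert-Schmidt inner product.

import numpy as np

basis = gell_mann_basis(3)
flat = basis.data.reshape(9, -1)
gram = np.dot(flat.conj(), flat.T)   # gram[i, j] = Tr(B_i^dagger B_j)
print(np.allclose(gram, np.eye(9)))  # True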
Please provide a description of the function:def tensor_product_basis(*bases):
dim = np.prod([basis.data.shape[1] for basis in bases])
tp_basis = np.zeros((dim**2, dim, dim), dtype=complex)
for idx_factors, factors in enumerate(it.product(*[basis.data for basis in bases])):
tp_basis[idx_factors, :, :] = reduce(np.kron, factors)
return TomographyBasis(tp_basis,
sum((
factor.dims for factor in bases
), []),
list(map(
r"\otimes".join,
it.product(*[
basis.labels for basis in bases
])
))) | [
"\n Returns a TomographyBasis formed by the tensor\n product of two or more factor bases. Each basis element\n is the tensor product of basis elements from the underlying\n factors.\n "
] |
Please provide a description of the function:def pauli_basis(nq=1):
basis = tensor_product_basis(*[
TomographyBasis(
gell_mann_basis(2).data[[0, 2, 3, 1]],
[2],
[u'𝟙', r'\sigma_x', r'\sigma_y', r'\sigma_z']
)
] * nq)
basis._name = 'pauli_basis'
return basis | [
"\n Returns a TomographyBasis for the Pauli basis on ``nq``\n qubits.\n\n :param int nq: Number of qubits on which the returned\n basis is defined.\n "
] |
Please provide a description of the function:def state_to_modelparams(self, state):
basis = self.flat()
data = state.data.todense().view(np.ndarray).flatten()
# NB: assumes Hermitian state and basis!
return np.real(np.dot(basis.conj(), data)) | [
"\n Converts a QuTiP-represented state into a model parameter vector.\n\n :param qutip.Qobj state: State to be converted.\n :rtype: :class:`np.ndarray`\n :return: The representation of the given state in this basis,\n as a vector of real parameters.\n "
] |
Please provide a description of the function:def modelparams_to_state(self, modelparams):
if modelparams.ndim == 1:
qobj = qt.Qobj(
np.tensordot(modelparams, self.data, 1),
dims=[self.dims, self.dims]
)
if self.superrep is not None:
qobj.superrep = self.superrep
return qobj
else:
return list(map(self.modelparams_to_state, modelparams)) | [
"\n Converts one or more vectors of model parameters into\n QuTiP-represented states.\n\n :param np.ndarray modelparams: Array of shape\n ``(basis.dim ** 2, )`` or\n ``(n_states, basis.dim ** 2)`` containing\n states represented as model parameter vectors in this\n basis.\n :rtype: :class:`~qutip.Qobj` or `list` of :class:`~qutip.Qobj`\n instances.\n :return: The given states represented as :class:`~qutip.Qobj`\n instances.\n "
] |
Please provide a description of the function:def covariance_mtx_to_superop(self, mtx):
M = self.flat()
return qt.Qobj(
np.dot(np.dot(M.conj().T, mtx), M),
dims=[[self.dims] * 2] * 2
) | [
"\n Converts a covariance matrix to the corresponding\n superoperator, represented as a QuTiP Qobj\n with ``type=\"super\"``.\n "
] |
Please provide a description of the function:def rescaled_distance_mtx(p, q):
# TODO: check that models are actually the same!
p_locs = p.particle_locations if isinstance(p, qinfer.ParticleDistribution) else p
q_locs = q.particle_locations if isinstance(q, qinfer.ParticleDistribution) else q
Q = p.model.Q if isinstance(p, qinfer.smc.SMCUpdater) else 1
# Because the modelparam axis is last in each of the three cases, we're
# good as far as broadcasting goes.
delta = np.sqrt(Q) * (
p_locs[:, np.newaxis, :] -
q_locs[np.newaxis, :, :]
)
return np.sqrt(np.sum(delta**2, axis=-1)) | [
"\n Given two particle updaters for the same model, returns a matrix\n :math:`\\matr{d}` with elements\n\n .. math::\n \\matr{d}_{i,j} = \\left\\Vert \\sqrt{\\matr{Q}} \\cdot\n (\\vec{x}_{p, i} - \\vec{x}_{q, j}) \\right\\Vert_2,\n\n where :math:`\\matr{Q}` is the scale matrix of the model,\n :math:`\\vec{x}_{p,i}` is the :math:`i`th particle of ``p``, and where\n :math:`\\vec{x}_{q,i}` is the :math:`i`th particle of ``q`.\n\n :param qinfer.smc.SMCUpdater p: SMC updater for the distribution\n :math:`p(\\vec{x})`.\n :param qinfer.smc.SMCUpdater q: SMC updater for the distribution\n :math:`q(\\vec{x})`.\n\n Either or both of ``p`` or ``q`` can simply be the locations array for\n an :ref:`SMCUpdater`.\n "
] |
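A standalone sketch (hedged; assumes the function's module context, including the ``qinfer`` import, is available): with plain location arrays the ``isinstance`` branches fall through and ``Q = 1``, so the result is simply the matrix of pairwise Euclidean distances.

p_locs = np.array([[0.0, 0.0],
                   [1.0, 0.0]])
q_locs = np.array([[0.0, 0.0]])
print(rescaled_distance_mtx(p_locs, q_locs))  # [[0.], [1.]]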
Please provide a description of the function:def weighted_pairwise_distances(X, w, metric='euclidean', w_pow=0.5):
if sklearn is None:
raise ImportError("This function requires scikit-learn.")
base_metric = sklearn.metrics.pairwise.pairwise_distances(X, metric=metric)
N = w.shape[0]
w_matrix = outer_product(w) * N**2
return base_metric / (w_matrix ** w_pow) | [
"\n Given a feature matrix ``X`` with weights ``w``, calculates the modified\n distance metric :math:`\\tilde{d}(p, q) = d(p, q) / (w(p) w(q) N^2)^p`, where\n :math:`N` is the length of ``X``. This metric is such that \"heavy\" feature\n vectors are considered to be closer to each other than \"light\" feature\n vectors, and are hence correspondingly less likely to be considered part of\n the same cluster.\n "
] |
Please provide a description of the function:def _dist_kw_arg(self, k):
if self._dist_kw_args is not None:
return {
key:self._dist_kw_args[key][k,:]
for key in self._dist_kw_args.keys()
}
else:
return {} | [
"\n Returns a dictionary of keyword arguments\n for the k'th distribution.\n\n :param int k: Index of the distribution in question.\n :rtype: ``dict``\n "
] |
Please provide a description of the function:def sample(self, n=1):
cumsum_weights = np.cumsum(self.particle_weights)
return self.particle_locations[np.minimum(cumsum_weights.searchsorted(
np.random.random((n,)),
side='right'
), len(cumsum_weights) - 1)] | [
"\n Returns random samples from the current particle distribution according\n to particle weights.\n\n :param int n: The number of samples to draw.\n :return: The sampled model parameter vectors.\n :rtype: `~numpy.ndarray` of shape ``(n, updater.n_rvs)``.\n "
] |
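A standalone sketch of the inverse-CDF trick the method uses (an illustration, not the original API): indices are drawn in proportion to their weights by searching the cumulative sum.

import numpy as np

weights = np.array([0.1, 0.2, 0.7])
cumsum = np.cumsum(weights)
idxs = np.minimum(
    cumsum.searchsorted(np.random.random(1000), side='right'),
    len(cumsum) - 1)
print(np.bincount(idxs) / 1000.0)  # roughly [0.1 0.2 0.7]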
Please provide a description of the function:def est_meanfn(self, fn):
return np.einsum('i...,i...',
self.particle_weights, fn(self.particle_locations)
) | [
"\n Returns an the expectation value of a given function\n :math:`f` over the current particle distribution.\n\n Here, :math:`f` is represented by a function ``fn`` that is vectorized\n over particles, such that ``f(modelparams)`` has shape\n ``(n_particles, k)``, where ``n_particles = modelparams.shape[0]``, and\n where ``k`` is a positive integer.\n\n :param callable fn: Function implementing :math:`f` in a vectorized\n manner. (See above.)\n\n :rtype: :class:`numpy.ndarray`, shape ``(k, )``.\n :returns: An array containing the an estimate of the mean of :math:`f`.\n "
] |
Please provide a description of the function:def est_covariance_mtx(self, corr=False):
cov = self.particle_covariance_mtx(self.particle_weights,
self.particle_locations)
if corr:
dstd = np.sqrt(np.diag(cov))
cov /= (np.outer(dstd, dstd))
return cov | [
"\n Returns the full-rank covariance matrix of the current particle\n distribution.\n\n :param bool corr: If `True`, the covariance matrix is normalized\n by the outer product of the square root diagonal of the covariance matrix,\n i.e. the correlation matrix is returned instead.\n\n :rtype: :class:`numpy.ndarray`, shape\n ``(n_modelparams, n_modelparams)``.\n :returns: An array containing the estimated covariance matrix.\n "
] |
Please provide a description of the function:def est_entropy(self):
nz_weights = self.particle_weights[self.particle_weights > 0]
return -np.sum(np.log(nz_weights) * nz_weights) | [
"\n Estimates the entropy of the current particle distribution\n as :math:`-\\sum_i w_i \\log w_i` where :math:`\\{w_i\\}`\n is the set of particles with nonzero weight.\n "
] |
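A standalone check of the formula (hedged sketch): the entropy of a uniform weight vector of length ``N`` is ``log(N)``.

w = np.ones(4) / 4
nz_weights = w[w > 0]
print(-np.sum(np.log(nz_weights) * nz_weights))  # ~1.386
print(np.log(4))                                 # ~1.386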
Please provide a description of the function:def _kl_divergence(self, other_locs, other_weights, kernel=None, delta=1e-2):
if kernel is None:
kernel = st.norm(loc=0, scale=1).pdf
dist = rescaled_distance_mtx(self, other_locs) / delta
K = kernel(dist)
return -self.est_entropy() - (1 / delta) * np.sum(
self.particle_weights *
np.log(
np.sum(
other_weights * K,
axis=1 # Sum over the particles of ``other``.
)
),
axis=0 # Sum over the particles of ``self``.
) | [
"\n Finds the KL divergence between this and another particle\n distribution by using a kernel density estimator to smooth over the\n other distribution's particles.\n "
] |
Please provide a description of the function:def est_kl_divergence(self, other, kernel=None, delta=1e-2):
return self._kl_divergence(
other.particle_locations,
other.particle_weights,
kernel, delta
) | [
"\n Finds the KL divergence between this and another particle\n distribution by using a kernel density estimator to smooth over the\n other distribution's particles.\n\n :param SMCUpdater other:\n "
] |
Please provide a description of the function:def est_cluster_metric(self, cluster_opts=None):
wcv, bcv, tv = self.est_cluster_covs(cluster_opts)
return np.diag(bcv) / np.diag(tv) | [
"\n Returns an estimate of how much of the variance in the current posterior\n can be explained by a separation between *clusters*.\n "
] |
Please provide a description of the function:def est_credible_region(self, level=0.95, return_outside=False, modelparam_slice=None):
# which slice of modelparams to take
s_ = np.s_[modelparam_slice] if modelparam_slice is not None else np.s_[:]
mps = self.particle_locations[:, s_]
# Start by sorting the particles by weight.
# We do so by obtaining an array of indices `id_sort` such that
# `particle_weights[id_sort]` is in descending order.
id_sort = np.argsort(self.particle_weights)[::-1]
# Find the cummulative sum of the sorted weights.
cumsum_weights = np.cumsum(self.particle_weights[id_sort])
# Find all the indices where the sum is less than level.
# We first find id_cred such that
# `all(cumsum_weights[id_cred] <= level)`.
id_cred = cumsum_weights <= level
# By construction, by adding the next particle to id_cred, it must be
# true that `cumsum_weights[id_cred] >= level`, as required.
id_cred[np.sum(id_cred)] = True
# We now return a slice onto the particle_locations by first permuting
# the particles according to the sort order, then by selecting the
# credible particles.
if return_outside:
return (
mps[id_sort][id_cred],
mps[id_sort][np.logical_not(id_cred)]
)
else:
return mps[id_sort][id_cred] | [
"\n Returns an array containing particles inside a credible region of a\n given level, such that the described region has probability mass\n no less than the desired level.\n\n Particles in the returned region are selected by including the highest-\n weight particles first until the desired credibility level is reached.\n\n :param float level: Crediblity level to report.\n :param bool return_outside: If `True`, the return value is a tuple\n of the those particles within the credible region, and the rest\n of the posterior particle cloud.\n :param slice modelparam_slice: Slice over which model parameters\n to consider.\n\n :rtype: :class:`numpy.ndarray`, shape ``(n_credible, n_mps)``,\n where ``n_credible`` is the number of particles in the credible\n region and ``n_mps`` corresponds to the size of ``modelparam_slice``.\n If ``return_outside`` is ``True``, this method instead\n returns tuple ``(inside, outside)`` where ``inside`` is as\n described above, and ``outside`` has shape ``(n_particles-n_credible, n_mps)``.\n :return: An array of particles inside the estimated credible region. Or,\n if ``return_outside`` is ``True``, both the particles inside and the\n particles outside, as a tuple.\n "
] |
Please provide a description of the function:def region_est_hull(self, level=0.95, modelparam_slice=None):
points = self.est_credible_region(
level=level,
modelparam_slice=modelparam_slice
)
hull = ConvexHull(points)
return points[hull.simplices], points[u.uniquify(hull.vertices.flatten())] | [
"\n Estimates a credible region over models by taking the convex hull of\n a credible subset of particles.\n\n :param float level: The desired crediblity level (see\n :meth:`SMCUpdater.est_credible_region`).\n :param slice modelparam_slice: Slice over which model parameters\n to consider.\n\n :return: The tuple ``(faces, vertices)`` where ``faces`` describes all the\n vertices of all of the faces on the exterior of the convex hull, and\n ``vertices`` is a list of all vertices on the exterior of the\n convex hull.\n :rtype: ``faces`` is a ``numpy.ndarray`` with shape\n ``(n_face, n_mps, n_mps)`` and indeces ``(idx_face, idx_vertex, idx_mps)``\n where ``n_mps`` corresponds to the size of ``modelparam_slice``.\n ``vertices`` is an ``numpy.ndarray`` of shape ``(n_vertices, n_mps)``.\n "
] |
Please provide a description of the function:def region_est_ellipsoid(self, level=0.95, tol=0.0001, modelparam_slice=None):
_, vertices = self.region_est_hull(level=level, modelparam_slice=modelparam_slice)
A, centroid = u.mvee(vertices, tol)
return A, centroid | [
"\n Estimates a credible region over models by finding the minimum volume\n enclosing ellipse (MVEE) of a credible subset of particles.\n\n :param float level: The desired crediblity level (see\n :meth:`SMCUpdater.est_credible_region`).\n :param float tol: The allowed error tolerance in the MVEE optimization\n (see :meth:`~qinfer.utils.mvee`).\n :param slice modelparam_slice: Slice over which model parameters\n to consider.\n\n :return: A tuple ``(A, c)`` where ``A`` is the covariance\n matrix of the ellipsoid and ``c`` is the center.\n A point :math:`\\vec{x}` is in the ellipsoid whenever\n :math:`(\\vec{x}-\\vec{c})^{T}A^{-1}(\\vec{x}-\\vec{c})\\leq 1`.\n :rtype: ``A`` is ``np.ndarray`` of shape ``(n_mps,n_mps)`` and\n ``centroid`` is ``np.ndarray`` of shape ``(n_mps)``.\n ``n_mps`` corresponds to the size of ``param_slice``.\n "
] |
Please provide a description of the function:def in_credible_region(self, points, level=0.95, modelparam_slice=None, method='hpd-hull', tol=0.0001):
if method == 'pce':
s_ = np.s_[modelparam_slice] if modelparam_slice is not None else np.s_[:]
A = self.est_covariance_mtx()[s_, s_]
c = self.est_mean()[s_]
# chi-squared distribution gives correct level curve conversion
mult = st.chi2.ppf(level, c.size)
results = u.in_ellipsoid(points, mult * A, c)
elif method == 'hpd-mvee':
tol = 0.0001 if tol is None else tol
A, c = self.region_est_ellipsoid(level=level, tol=tol, modelparam_slice=modelparam_slice)
results = u.in_ellipsoid(points, np.linalg.inv(A), c)
elif method == 'hpd-hull':
# it would be more natural to call region_est_hull,
# but that function uses ConvexHull which has no
# easy way of determining if a point is interior.
# Here, Delaunay gives us access to all of the
# necessary simplices.
# this fills the convex hull with (n_mps+1)-dimensional
# simplices; the convex hull is an almost-everywhere
# disjoint union of these simplices
hull = Delaunay(self.est_credible_region(level=level, modelparam_slice=modelparam_slice))
# now we just check whether each of the given points are in
# any of the simplices. (http://stackoverflow.com/a/16898636/1082565)
results = hull.find_simplex(points) >= 0
return results | [
"\n Decides whether each of the points lie within a credible region\n of the current distribution.\n\n If ``tol`` is ``None``, the particles are tested directly against\n the convex hull object. If ``tol`` is a positive ``float``,\n particles are tested to be in the interior of the smallest\n enclosing ellipsoid of this convex hull, see\n :meth:`SMCUpdater.region_est_ellipsoid`.\n\n :param np.ndarray points: An ``np.ndarray`` of shape ``(n_mps)`` for\n a single point, or of shape ``(n_points, n_mps)`` for multiple points,\n where ``n_mps`` corresponds to the same dimensionality as ``param_slice``.\n :param float level: The desired crediblity level (see\n :meth:`SMCUpdater.est_credible_region`).\n :param str method: A string specifying which credible region estimator to\n use. One of ``'pce'``, ``'hpd-hull'`` or ``'hpd-mvee'`` (see below).\n :param float tol: The allowed error tolerance for those methods\n which require a tolerance (see :meth:`~qinfer.utils.mvee`).\n :param slice modelparam_slice: A slice describing which model parameters\n to consider in the credible region, effectively marginizing out the\n remaining parameters. By default, all model parameters are included.\n\n :return: A boolean array of shape ``(n_points, )`` specifying whether\n each of the points lies inside the confidence region.\n\n Methods\n ~~~~~~~\n\n The following values are valid for the ``method`` argument.\n\n - ``'pce'``: Posterior Covariance Ellipsoid.\n Computes the covariance\n matrix of the particle distribution marginalized over the excluded\n slices and uses the :math:`\\chi^2` distribution to determine\n how to rescale it such the the corresponding ellipsoid has\n the correct size. The ellipsoid is translated by the\n mean of the particle distribution. It is determined which\n of the ``points`` are on the interior.\n - ``'hpd-hull'``: High Posterior Density Convex Hull.\n See :meth:`SMCUpdater.region_est_hull`. Computes the\n HPD region resulting from the particle approximation, computes\n the convex hull of this, and it is determined which\n of the ``points`` are on the interior.\n - ``'hpd-mvee'``: High Posterior Density Minimum Volume Enclosing Ellipsoid.\n See :meth:`SMCUpdater.region_est_ellipsoid`\n and :meth:`~qinfer.utils.mvee`. Computes the\n HPD region resulting from the particle approximation, computes\n the convex hull of this, and determines the minimum enclosing\n ellipsoid. Deterimines which\n of the ``points`` are on the interior.\n "
] |
Please provide a description of the function:def sample(self, n=1):
samples = np.empty((n, self.n_rvs))
idxs_to_sample = np.arange(n)
iters = 0
while idxs_to_sample.size and iters < self._maxiters:
samples[idxs_to_sample] = self._dist.sample(len(idxs_to_sample))
idxs_to_sample = idxs_to_sample[np.nonzero(np.logical_not(
self._model.are_models_valid(samples[idxs_to_sample, :])
))[0]]
iters += 1
if idxs_to_sample.size:
raise RuntimeError("Did not successfully postselect within {} iterations.".format(self._maxiters))
return samples | [
"\n Returns one or more samples from this probability distribution.\n\n :param int n: Number of samples to return.\n :return numpy.ndarray: An array containing samples from the\n distribution of shape ``(n, d)``, where ``d`` is the number of\n random variables.\n "
] |
Please provide a description of the function:def iter_actions(self):
# pylint: disable=too-many-locals
# pylint: disable=invalid-name
ns = '{urn:schemas-upnp-org:service-1-0}'
# get the scpd body as bytes, and feed directly to elementtree
# which likes to receive bytes
scpd_body = requests.get(self.base_url + self.scpd_url).content
tree = XML.fromstring(scpd_body)
# parse the state variables to get the relevant variable types
vartypes = {}
srvStateTables = tree.findall('{}serviceStateTable'.format(ns))
for srvStateTable in srvStateTables:
statevars = srvStateTable.findall('{}stateVariable'.format(ns))
for state in statevars:
name = state.findtext('{}name'.format(ns))
datatype = state.findtext('{}dataType'.format(ns))
default = state.findtext('{}defaultValue'.format(ns))
value_list_elt = state.find('{}allowedValueList'.format(ns))
if value_list_elt is None:
value_list_elt = ()
value_list = [item.text for item in value_list_elt] or None
value_range_elt = state.find('{}allowedValueRange'.format(ns))
if value_range_elt is None:
value_range_elt = ()
value_range = [item.text for item in value_range_elt] or None
vartypes[name] = Vartype(datatype, default, value_list,
value_range)
# find all the actions
actionLists = tree.findall('{}actionList'.format(ns))
for actionList in actionLists:
actions = actionList.findall('{}action'.format(ns))
for i in actions:
action_name = i.findtext('{}name'.format(ns))
argLists = i.findall('{}argumentList'.format(ns))
for argList in argLists:
args_iter = argList.findall('{}argument'.format(ns))
in_args = []
out_args = []
for arg in args_iter:
arg_name = arg.findtext('{}name'.format(ns))
direction = arg.findtext('{}direction'.format(ns))
related_variable = arg.findtext(
'{}relatedStateVariable'.format(ns))
vartype = vartypes[related_variable]
if direction == "in":
in_args.append(Argument(arg_name, vartype))
else:
out_args.append(Argument(arg_name, vartype))
yield Action(action_name, in_args, out_args) | [
"Yield the service's actions with their arguments.\n\n Yields:\n `Action`: the next action.\n\n Each action is an Action namedtuple, consisting of action_name\n (a string), in_args (a list of Argument namedtuples consisting of name\n and argtype), and out_args (ditto), eg::\n\n Action(\n name='SetFormat',\n in_args=[\n Argument(name='DesiredTimeFormat', vartype=<Vartype>),\n Argument(name='DesiredDateFormat', vartype=<Vartype>)],\n out_args=[]\n )\n "
] |
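A hedged usage sketch (assumes the ``soco`` package and a Sonos zone reachable on the local network; ``discover`` may return ``None`` otherwise):

import soco

device = soco.discover().pop()  # assumes at least one zone was found
for action in device.avTransport.iter_actions():
    print(action.name, [arg.name for arg in action.in_args])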
Please provide a description of the function:def parse_event_xml(xml_event):
result = {}
tree = XML.fromstring(xml_event)
# property values are just under the propertyset, which
# uses this namespace
properties = tree.findall(
'{urn:schemas-upnp-org:event-1-0}property')
for prop in properties: # pylint: disable=too-many-nested-blocks
for variable in prop:
# Special handling for a LastChange event. For details on
# LastChange events, see
# http://upnp.org/specs/av/UPnP-av-RenderingControl-v1-Service.pdf
# and http://upnp.org/specs/av/UPnP-av-AVTransport-v1-Service.pdf
if variable.tag == "LastChange":
last_change_tree = XML.fromstring(
variable.text.encode('utf-8'))
# We assume there is only one InstanceID tag. This is true for
# Sonos, as far as we know.
# InstanceID can be in one of two namespaces, depending on
# whether we are looking at an avTransport event, a
# renderingControl event, or a Queue event
# (there, it is named QueueID)
instance = last_change_tree.find(
"{urn:schemas-upnp-org:metadata-1-0/AVT/}InstanceID")
if instance is None:
instance = last_change_tree.find(
"{urn:schemas-upnp-org:metadata-1-0/RCS/}InstanceID")
if instance is None:
instance = last_change_tree.find(
"{urn:schemas-sonos-com:metadata-1-0/Queue/}QueueID")
# Look at each variable within the LastChange event
for last_change_var in instance:
tag = last_change_var.tag
# Remove any namespaces from the tags
if tag.startswith('{'):
tag = tag.split('}', 1)[1]
# Un-camel case it
tag = camel_to_underscore(tag)
# Now extract the relevant value for the variable.
# The UPnP specs suggest that the value of any variable
# evented via a LastChange Event will be in the 'val'
# attribute, but audio related variables may also have a
# 'channel' attribute. In addition, it seems that Sonos
# sometimes uses a text value instead: see
# http://forums.sonos.com/showthread.php?t=34663
value = last_change_var.get('val')
if value is None:
value = last_change_var.text
# If DIDL metadata is returned, convert it to a music
# library data structure
if value.startswith('<DIDL-Lite'):
# Wrap any parsing exception in a SoCoFault, so the
# user can handle it
try:
didl = from_didl_string(value)
if not didl:
continue
value = didl[0]
except SoCoException as original_exception:
log.debug("Event contains illegal metadata"
"for '%s'.\n"
"Error message: '%s'\n"
"The result will be a SoCoFault.",
tag, str(original_exception))
event_parse_exception = EventParseException(
tag, value, original_exception
)
value = SoCoFault(event_parse_exception)
channel = last_change_var.get('channel')
if channel is not None:
if result.get(tag) is None:
result[tag] = {}
result[tag][channel] = value
else:
result[tag] = value
else:
result[camel_to_underscore(variable.tag)] = variable.text
return result | [
"Parse the body of a UPnP event.\n\n Args:\n xml_event (bytes): bytes containing the body of the event encoded\n with utf-8.\n\n Returns:\n dict: A dict with keys representing the evented variables. The\n relevant value will usually be a string representation of the\n variable's value, but may on occasion be:\n\n * a dict (eg when the volume changes, the value will itself be a\n dict containing the volume for each channel:\n :code:`{'Volume': {'LF': '100', 'RF': '100', 'Master': '36'}}`)\n * an instance of a `DidlObject` subclass (eg if it represents\n track metadata).\n * a `SoCoFault` (if a variable contains illegal metadata)\n\n Example:\n\n Run this code, and change your volume, tracks etc::\n\n from __future__ import print_function\n try:\n from queue import Empty\n except: # Py2.7\n from Queue import Empty\n\n import soco\n from pprint import pprint\n from soco.events import event_listener\n # pick a device at random\n device = soco.discover().pop()\n print (device.player_name)\n sub = device.renderingControl.subscribe()\n sub2 = device.avTransport.subscribe()\n\n while True:\n try:\n event = sub.events.get(timeout=0.5)\n pprint (event.variables)\n except Empty:\n pass\n try:\n event = sub2.events.get(timeout=0.5)\n pprint (event.variables)\n except Empty:\n pass\n\n except KeyboardInterrupt:\n sub.unsubscribe()\n sub2.unsubscribe()\n event_listener.stop()\n break\n "
] |
Please provide a description of the function:def unsubscribe(self):
# Trying to unsubscribe if already unsubscribed, or not yet
# subscribed, fails silently
if self._has_been_unsubscribed or not self.is_subscribed:
return
# Cancel any auto renew
self._auto_renew_thread_flag.set()
# Send an unsubscribe request like this:
# UNSUBSCRIBE publisher path HTTP/1.1
# HOST: publisher host:publisher port
# SID: uuid:subscription UUID
headers = {
'SID': self.sid
}
response = None
try:
response = requests.request(
'UNSUBSCRIBE',
self.service.base_url + self.service.event_subscription_url,
headers=headers,
timeout=3)
except requests.exceptions.RequestException:
pass
self.is_subscribed = False
self._timestamp = None
log.info(
"Unsubscribed from %s, sid: %s",
self.service.base_url + self.service.event_subscription_url,
self.sid)
# remove queue from event queues and sid to service mappings
with _subscriptions_lock:
try:
del _subscriptions[self.sid]
except KeyError:
pass
self._has_been_unsubscribed = True
# Ignore "412 Client Error: Precondition Failed for url:"
# from rebooted speakers.
if response and response.status_code != 412:
response.raise_for_status() | [
"Unsubscribe from the service's events.\n\n Once unsubscribed, a Subscription instance should not be reused\n "
] |
Please provide a description of the function:def play_mode(self, playmode):
playmode = playmode.upper()
if playmode not in PLAY_MODES.keys():
raise KeyError("'%s' is not a valid play mode" % playmode)
self.avTransport.SetPlayMode([
('InstanceID', 0),
('NewPlayMode', playmode)
]) | [
"Set the speaker's mode."
] |
Please provide a description of the function:def repeat(self, repeat):
shuffle = self.shuffle
self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, repeat)] | [
"Set the queue's repeat option"
] |
Please provide a description of the function:def cross_fade(self):
response = self.avTransport.GetCrossfadeMode([
('InstanceID', 0),
])
cross_fade_state = response['CrossfadeMode']
return bool(int(cross_fade_state)) | [
"bool: The speaker's cross fade state.\n\n True if enabled, False otherwise\n "
] |
Please provide a description of the function:def mute(self):
response = self.renderingControl.GetMute([
('InstanceID', 0),
('Channel', 'Master')
])
mute_state = response['CurrentMute']
return bool(int(mute_state)) | [
"bool: The speaker's mute state.\n\n True if muted, False otherwise.\n "
] |
Please provide a description of the function:def loudness(self):
response = self.renderingControl.GetLoudness([
('InstanceID', 0),
('Channel', 'Master'),
])
loudness = response["CurrentLoudness"]
return bool(int(loudness)) | [
"bool: The Sonos speaker's loudness compensation.\n\n True if on, False otherwise.\n\n Loudness is a complicated topic. You can find a nice summary about this\n feature here: http://forums.sonos.com/showthread.php?p=4698#post4698\n "
] |
Please provide a description of the function:def join(self, master):
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon:{0}'.format(master.uid)),
('CurrentURIMetaData', '')
])
self._zgs_cache.clear()
self._parse_zone_group_state() | [
"Join this speaker to another \"master\" speaker."
] |
Please provide a description of the function:def unjoin(self):
self.avTransport.BecomeCoordinatorOfStandaloneGroup([
('InstanceID', 0)
])
self._zgs_cache.clear()
self._parse_zone_group_state() | [
"Remove this speaker from a group.\n\n Seems to work ok even if you remove what was previously the group\n master from it's own group. If the speaker was not in a group also\n returns ok.\n "
] |
Please provide a description of the function:def set_sleep_timer(self, sleep_time_seconds):
# Note: A value of None for sleep_time_seconds is valid, and needs to
# be preserved distinctly separate from 0. 0 means go to sleep now,
# which will immediately start the sound tappering, and could be a
# useful feature, while None means cancel the current timer
try:
if sleep_time_seconds is None:
sleep_time = ''
else:
sleep_time = format(
datetime.timedelta(seconds=int(sleep_time_seconds))
)
self.avTransport.ConfigureSleepTimer([
('InstanceID', 0),
('NewSleepTimerDuration', sleep_time),
])
except SoCoUPnPException as err:
if 'Error 402 received' in str(err):
raise ValueError('invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None')
raise
except ValueError:
raise ValueError('invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None') | [
"Sets the sleep timer.\n\n Args:\n sleep_time_seconds (int or NoneType): How long to wait before\n turning off speaker in seconds, None to cancel a sleep timer.\n Maximum value of 86399\n\n Raises:\n SoCoException: Upon errors interacting with Sonos controller\n ValueError: Argument/Syntax errors\n\n "
] |
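A usage sketch (assumes ``device`` is a connected ``SoCo`` instance): set a 30-minute timer, then cancel it by passing ``None``.

device.set_sleep_timer(1800)  # fall asleep in 30 minutes
device.set_sleep_timer(None)  # cancel any running sleep timer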
Please provide a description of the function:def restore(self, fade=False):
try:
if self.is_coordinator:
self._restore_coordinator()
finally:
self._restore_volume(fade)
# Now everything is set, see if we need to be playing, stopped
# or paused ( only for coordinators)
if self.is_coordinator:
if self.transport_state == 'PLAYING':
self.device.play()
elif self.transport_state == 'STOPPED':
self.device.stop() | [
"Restore the state of a device to that which was previously saved.\n\n For coordinator devices restore everything. For slave devices\n only restore volume etc., not transport info (transport info\n comes from the slave's coordinator).\n\n Args:\n fade (bool): Whether volume should be faded up on restore.\n "
] |
Please provide a description of the function:def _restore_coordinator(self):
# Start by ensuring that the speaker is paused as we don't want
# things all rolling back when we are changing them, as this could
# include things like audio
transport_info = self.device.get_current_transport_info()
if transport_info is not None:
if transport_info['current_transport_state'] == 'PLAYING':
self.device.pause()
# Check if the queue should be restored
self._restore_queue()
# Reinstate what was playing
if self.is_playing_queue and self.playlist_position > 0:
# was playing from playlist
if self.playlist_position is not None:
# The position in the playlist returned by
# get_current_track_info starts at 1, but when
# playing from playlist, the index starts at 0
# if position > 0:
self.playlist_position -= 1
self.device.play_from_queue(self.playlist_position, False)
if self.track_position is not None:
if self.track_position != "":
self.device.seek(self.track_position)
# reinstate track, position, play mode, cross fade
# Need to make sure there is a proper track selected first
self.device.play_mode = self.play_mode
self.device.cross_fade = self.cross_fade
elif self.is_playing_cloud_queue:
# was playing a cloud queue started by Alexa
# No way yet to re-start this so prevent it throwing an error!
pass
else:
# was playing a stream (radio station, file, or nothing)
# reinstate uri and meta data
if self.media_uri != "":
self.device.play_uri(
self.media_uri, self.media_metadata, start=False) | [
"Do the coordinator-only part of the restore."
] |
Please provide a description of the function:def _restore_volume(self, fade):
self.device.mute = self.mute
# Can only change volume on a device with fixed volume set to False,
# otherwise we get a uPnP error, so check first. Checking requires a
# network command, but a device with fixed volume always reports a
# volume of 100, so only check for fixed volume when volume is 100.
if self.volume == 100:
fixed_vol = self.device.renderingControl.GetOutputFixed(
[('InstanceID', 0)])['CurrentFixed']
else:
fixed_vol = False
# now set volume if not fixed
if not fixed_vol:
self.device.bass = self.bass
self.device.treble = self.treble
self.device.loudness = self.loudness
if fade:
# if fade requested in restore
# set volume to 0 then fade up to saved volume (non blocking)
self.device.volume = 0
self.device.ramp_to_volume(self.volume)
else:
# set volume
self.device.volume = self.volume | [
"Reinstate volume.\n\n Args:\n fade (bool): Whether volume should be faded up on restore.\n "
] |
Please provide a description of the function:def _discover_thread(callback,
timeout,
include_invisible,
interface_addr):
def create_socket(interface_addr=None):
_sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# UPnP v1.0 requires a TTL of 4
_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL,
struct.pack("B", 4))
_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
if interface_addr is not None:
_sock.setsockopt(
socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
socket.inet_aton(interface_addr))
return _sock
# pylint: disable=invalid-name
PLAYER_SEARCH = dedent("""\
M-SEARCH * HTTP/1.1
HOST: 239.255.255.250:1900
MAN: "ssdp:discover"
MX: 1
ST: urn:schemas-upnp-org:device:ZonePlayer:1
""").encode('utf-8')
BCAST_ADDR = "255.255.255.255"
MCAST_GRP = "239.255.255.250"
MCAST_PORT = 1900
_sockets = {}
# Use the specified interface, if any
if interface_addr is not None:
try:
address = socket.inet_aton(interface_addr)
except socket.error:
raise ValueError("{0} is not a valid IP address string".format(
interface_addr))
_sockets[interface_addr] = create_socket(interface_addr)
_LOG.info("Sending discovery packets on default interface")
else:
# Find the local network addresses using ifaddr.
addresses = [
ip.ip
for adapter in ifaddr.get_adapters()
for ip in adapter.ips
if ip.is_IPv4
if ip.ip != "127.0.0.1"
]
# Create a socket for each unique address found, and one for the
# default multicast address
for address in addresses:
try:
_sockets[address] = create_socket(address)
except socket.error as e:
_LOG.warning("Can't make a discovery socket for %s: %s: %s",
address, e.__class__.__name__, e)
found_zones = set()
deadline = time.monotonic() + timeout
last_response = None
while not threading.current_thread().stopped():
time_left = deadline - time.monotonic()
if time_left < 0:
break
# Repeated sending, UDP is unreliable
if last_response is None or last_response < time.monotonic() - 1:
for _addr, _sock in _sockets.items():
try:
_LOG.info("Sending discovery packets on %s", _addr)
_sock.sendto(
really_utf8(PLAYER_SEARCH), (MCAST_GRP, MCAST_PORT))
_sock.sendto(
really_utf8(PLAYER_SEARCH), (BCAST_ADDR, MCAST_PORT))
except OSError:
_LOG.info("Discovery failed on %s", _addr)
response, _, _ = select.select(
list(_sockets.values()), [], [], min(1, time_left))
# Only Zone Players should respond, given the value of ST in the
# PLAYER_SEARCH message. However, to prevent misbehaved devices
# on the network disrupting the discovery process, we check that
# the response contains the "Sonos" string; otherwise we keep
# waiting for a correct response.
#
# Here is a sample response from a real Sonos device (actual numbers
# have been redacted):
# HTTP/1.1 200 OK
# CACHE-CONTROL: max-age = 1800
# EXT:
# LOCATION: http://***.***.***.***:1400/xml/device_description.xml
# SERVER: Linux UPnP/1.0 Sonos/26.1-76230 (ZPS3)
# ST: urn:schemas-upnp-org:device:ZonePlayer:1
# USN: uuid:RINCON_B8*************00::urn:schemas-upnp-org:device:
# ZonePlayer:1
# X-RINCON-BOOTSEQ: 3
# X-RINCON-HOUSEHOLD: Sonos_7O********************R7eU
for _sock in response:
last_response = time.monotonic()
data, addr = _sock.recvfrom(1024)
_LOG.debug(
'Received discovery response from %s: "%s"', addr, data
)
if b"Sonos" in data:
# pylint: disable=not-callable
zone = config.SOCO_CLASS(addr[0])
if zone not in found_zones:
if zone.is_visible or include_invisible:
found_zones.add(zone)
callback(zone) | [
" Discover Sonos zones on the local network. ",
" A helper function for creating a socket for discover purposes.\n\n Create and return a socket with appropriate options set for multicast.\n ",
"\\\n M-SEARCH * HTTP/1.1\n HOST: 239.255.255.250:1900\n MAN: \"ssdp:discover\"\n MX: 1\n ST: urn:schemas-upnp-org:device:ZonePlayer:1\n "
] |
Please provide a description of the function:def discover_thread(callback,
timeout=5,
include_invisible=False,
interface_addr=None):
thread = StoppableThread(
target=_discover_thread,
args=(callback, timeout, include_invisible, interface_addr))
thread.start()
return thread | [
" Return a started thread with a discovery callback. "
] |
Please provide a description of the function:def discover(timeout=5,
include_invisible=False,
interface_addr=None,
all_households=False):
found_zones = set()
first_response = None
def callback(zone):
nonlocal first_response
if first_response is None:
first_response = time.monotonic()
if include_invisible:
found_zones.update(zone.all_zones)
else:
found_zones.update(zone.visible_zones)
if not all_households:
thread.stop()
thread = discover_thread(
callback, timeout, include_invisible, interface_addr)
while thread.is_alive() and not thread.stopped():
if first_response is None:
thread.join(timeout=1)
else:
thread.join(timeout=first_response + 1 - time.monotonic())
thread.stop()
return found_zones or None | [
" Discover Sonos zones on the local network.\n\n Return a set of `SoCo` instances for each zone found.\n Include invisible zones (bridges and slave zones in stereo pairs if\n ``include_invisible`` is `True`. Will block for up to ``timeout`` seconds,\n after which return `None` if no zones found.\n\n Args:\n timeout (int, optional): block for this many seconds, at most.\n Defaults to 5.\n include_invisible (bool, optional): include invisible zones in the\n return set. Defaults to `False`.\n interface_addr (str or None): Discovery operates by sending UDP\n multicast datagrams. ``interface_addr`` is a string (dotted\n quad) representation of the network interface address to use as\n the source of the datagrams (i.e. it is a value for\n `socket.IP_MULTICAST_IF <socket>`). If `None` or not specified,\n all system interfaces will be tried. Defaults to `None`.\n all_households (bool, optional): wait for all replies to discover\n multiple households. If `False` or not specified, return only\n the first household found.\n Returns:\n set: a set of `SoCo` instances, one for each zone found, or else\n `None`.\n\n "
] |
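A hedged usage sketch (requires Sonos zones on the local network):

import soco

zones = soco.discover(timeout=5)
for zone in (zones or []):
    print(zone.player_name)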
Please provide a description of the function:def by_name(name):
devices = discover(all_households=True)
for device in (devices or []):
if device.player_name == name:
return device
return None | [
"Return a device by name.\n\n Args:\n name (str): The name of the device to return.\n\n Returns:\n :class:`~.SoCo`: The first device encountered among all zone with the\n given player name. If none are found `None` is returned.\n "
] |
Please provide a description of the function:def get_ascii(pid=None, name=None, pokemons=None, return_pokemons=False, message=None):
'''get_ascii will return ascii art for a pokemon based on a name or pid.
:param pid: the pokemon ID to return
:param name: the pokemon name to return
:param return_pokemons: return catches (default False)
:param message: add a message to the ascii
'''
pokemon = get_pokemon(name=name,pid=pid,pokemons=pokemons)
printme = message
if len(pokemon) > 0:
for pid,data in pokemon.items():
if message is None:
printme = data["name"].capitalize()
print("%s\n\n%s" % (data['ascii'],printme))
if return_pokemons:
return pokemon | [] |
Please provide a description of the function:def get_avatar(name, pokemons=None, print_screen=True, include_name=True):
'''get_avatar will return a unique pokemon for a specific avatar based on the hash
:param name: the name to look up
:param print_screen: if True, will print ascii to the screen (default True) and not return
:param include_name: if True, will add name (minus end of address after @) to avatar
'''
if pokemons is None:
pokemons = catch_em_all()
# The IDs are numbers between 1 and the max
number_pokemons = len(pokemons)
trainer = get_trainer(name)
pid = str(trainer % number_pokemons)
pokemon = get_pokemon(pid=pid,pokemons=pokemons)
avatar = pokemon[pid]["ascii"]
if include_name is True:
avatar = "%s\n\n%s" %(avatar,name.split("@")[0])
if print_screen is True:
print(avatar)
return avatar | [] |
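A sketch of generating an avatar; the email address is a placeholder:

# The same name always hashes to the same pokemon, so avatars are stable.
avatar = get_avatar("ash@example.com", print_screen=False)
print(avatar)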
Please provide a description of the function:def get_pokemon(pid=None,name=None,pokemons=None):
'''get_pokemon will return a pokemon with a specific ID, or if none is given,
will select randomly. First the pid will be used, then the name, then any filters.
    :param pid: the pokemon ID to return
    :param name: the pokemon name to return
    :param pokemons: the pokemons data structure
    '''
    if pokemons is None:
pokemons = catch_em_all()
# First see if we want to find a pokemon by name
if name is not None:
catches = lookup_pokemon(field="name",
value=name,
pokemons=pokemons)
if catches is not None:
return catches
print("We don't have a pokemon called %s" %name)
sys.exit(1)
# Next see if they want a random pokemon
if pid is None:
choices = list(pokemons.keys())
        # keep pid as a string, matching the (string) keys of the data structure
        pid = choice(choices)
# Retrieve the random, or user selected pokemon
if pid is not None and str(pid) in pokemons.keys():
return {pid:pokemons[str(pid)]}
else:
print("Cannot find pokemon with this criteria!") | [] |
Please provide a description of the function:def get_trainer(name):
'''return the unique id for a trainer, determined by the md5 sum
'''
name = name.lower()
return int(hashlib.md5(name.encode('utf-8')).hexdigest(), 16) % 10**8 | [] |
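Since the id is an md5 digest of the lowercased name reduced modulo 10**8, it is deterministic and case-insensitive; a quick check:

assert get_trainer("Ash") == get_trainer("ash")  # lowercased before hashing
print(get_trainer("ash"))                        # stable integer below 10**8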
Please provide a description of the function:def catch_em_all(data_file=None, return_names=False):
'''catch_em_all returns the entire database of pokemon, a base function for starting
    :param data_file: location of pokemons.json data file (not required)
    :param return_names: if True, return only a list of pokemon names (default False)
    '''
    if data_file is None:
data_file = "%s/database/pokemons.json" %(base)
pokemons = load_json(data_file)
if return_names is True:
names = []
for key,meta in pokemons.items():
names.append(meta['name'])
return names
return pokemons | [] |
Please provide a description of the function:def lookup_pokemon(field,value,pokemons=None):
'''lookup_pokemon will search a particular field (name) for a value. If no pokemons
data structure is provided, all will be used.
    :param field: the field to look up.
    :param value: the value to search for in the field
    :param pokemons: the pokemons data structure
    '''
    if pokemons is None:
pokemons = catch_em_all()
catches = {}
for pid,data in pokemons.items():
if isinstance(data[field],list):
for entry in data[field]:
found = search_entry(entry,value)
                if found:
catches[pid] = data
else:
found = search_entry(data[field],value)
            if found:
catches[pid] = data
if len(catches) > 0:
return catches
return None | [] |
Please provide a description of the function:def scale_image(image, new_width):
(original_width, original_height) = image.size
aspect_ratio = original_height/float(original_width)
new_height = int(aspect_ratio * new_width)
# This scales it wider than tall, since characters are biased
new_image = image.resize((new_width*2, new_height))
return new_image | [
"Resizes an image preserving the aspect ratio.\n "
] |
Please provide a description of the function:def map_pixels_to_ascii_chars(image, range_width=25):
pixels_in_image = list(image.getdata())
    # integer division: a float index would raise a TypeError on Python 3
    pixels_to_chars = [ASCII_CHARS[pixel_value // range_width] for pixel_value in
                       pixels_in_image]
return "".join(pixels_to_chars) | [
"Maps each pixel to an ascii char based on the range\n in which it lies.\n\n 0-255 is divided into 11 ranges of 25 pixels each.\n "
] |
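A sketch chaining the two image helpers; it assumes Pillow is installed and that ASCII_CHARS is the module-level palette (the list below is a stand-in):

from PIL import Image

ASCII_CHARS = ['#', '?', '%', '.', 'S', '+', '.', '*', ':', ',', '@']  # assumed palette

image = Image.open("pokemon.png")
image = scale_image(image, new_width=50)  # output is 100 chars wide (width doubled)
image = image.convert("L")                # grayscale, pixel values 0-255
chars = map_pixels_to_ascii_chars(image)
rows = [chars[i:i + 100] for i in range(0, len(chars), 100)]
print("\n".join(rows))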
Please provide a description of the function:def load_steps(working_dir=None, steps_dir=None, step_file=None,
step_list=None):
if steps_dir is not None:
step_files = glob.glob(os.path.join(steps_dir, '*.cwl'))
elif step_file is not None:
step_files = [step_file]
elif step_list is not None:
step_files = []
for path in step_list:
if os.path.isdir(path):
step_files += glob.glob(os.path.join(path, '*.cwl'))
else:
step_files.append(path)
else:
step_files = []
if working_dir is not None:
step_files = sort_loading_order(step_files)
steps = {}
for f in step_files:
if working_dir is not None:
# Copy file to working_dir
if not working_dir == os.path.dirname(f) and not is_url(f):
copied_file = os.path.join(working_dir, os.path.basename(f))
shutil.copy2(f, copied_file)
f = copied_file
# Create steps
try:
s = Step(f)
steps[s.name] = s
except (NotImplementedError, ValidationException,
PackedWorkflowException) as e:
logger.warning(e)
return steps | [
"Return a dictionary containing Steps read from file.\n\n Args:\n steps_dir (str, optional): path to directory containing CWL files.\n step_file (str, optional): path or http(s) url to a single CWL file.\n step_list (list, optional): a list of directories, urls or local file\n paths to CWL files or directories containing CWL files.\n\n Return:\n dict containing (name, Step) entries.\n\n "
] |
Please provide a description of the function:def load_yaml(filename):
with open(filename) as myfile:
content = myfile.read()
if "win" in sys.platform:
content = content.replace("\\", "/")
return yaml.safe_load(content) | [
"Return object in yaml file."
] |
Please provide a description of the function:def sort_loading_order(step_files):
tools = []
workflows = []
workflows_with_subworkflows = []
for f in step_files:
# assume that urls are tools
if f.startswith('http://') or f.startswith('https://'):
tools.append(f)
else:
obj = load_yaml(f)
if obj.get('class', '') == 'Workflow':
if 'requirements' in obj.keys():
subw = {'class': 'SubworkflowFeatureRequirement'}
if subw in obj['requirements']:
workflows_with_subworkflows.append(f)
else:
workflows.append(f)
else:
workflows.append(f)
else:
tools.append(f)
return tools + workflows + workflows_with_subworkflows | [
"Sort step files into correct loading order.\n\n The correct loading order is first tools, then workflows without\n subworkflows, and then workflows with subworkflows. This order is\n required to avoid error messages when a working directory is used.\n "
] |
Please provide a description of the function:def load_cwl(fname):
logger.debug('Loading CWL file "{}"'.format(fname))
# Fetching, preprocessing and validating cwl
# Older versions of cwltool
if legacy_cwltool:
try:
(document_loader, workflowobj, uri) = fetch_document(fname)
(document_loader, _, processobj, metadata, uri) = \
validate_document(document_loader, workflowobj, uri)
except TypeError:
from cwltool.context import LoadingContext, getdefault
from cwltool import workflow
from cwltool.resolver import tool_resolver
from cwltool.load_tool import resolve_tool_uri
loadingContext = LoadingContext()
loadingContext.construct_tool_object = getdefault(
loadingContext.construct_tool_object,
workflow.default_make_tool)
loadingContext.resolver = getdefault(loadingContext.resolver,
tool_resolver)
uri, tool_file_uri = resolve_tool_uri(
fname, resolver=loadingContext.resolver,
fetcher_constructor=loadingContext.fetcher_constructor)
document_loader, workflowobj, uri = fetch_document(
uri, resolver=loadingContext.resolver,
fetcher_constructor=loadingContext.fetcher_constructor)
document_loader, avsc_names, processobj, metadata, uri = \
validate_document(
document_loader, workflowobj, uri,
loadingContext.overrides_list, {},
enable_dev=loadingContext.enable_dev,
strict=loadingContext.strict,
preprocess_only=False,
fetcher_constructor=loadingContext.fetcher_constructor,
skip_schemas=False,
do_validate=loadingContext.do_validate)
# Recent versions of cwltool
else:
(loading_context, workflowobj, uri) = fetch_document(fname)
loading_context, uri = resolve_and_validate_document(loading_context,
workflowobj, uri)
document_loader = loading_context.loader
processobj = workflowobj
metadata = loading_context.metadata
return document_loader, processobj, metadata, uri | [
"Load and validate CWL file using cwltool\n "
] |
Please provide a description of the function:def set_input(self, p_name, value):
name = self.python_names.get(p_name)
if p_name is None or name not in self.get_input_names():
raise ValueError('Invalid input "{}"'.format(p_name))
self.step_inputs[name] = value | [
"Set a Step's input variable to a certain value.\n\n The value comes either from a workflow input or output of a previous\n step.\n\n Args:\n name (str): the name of the Step input\n value (str): the name of the output variable that provides the\n value for this input.\n\n Raises:\n ValueError: The name provided is not a valid input name for this\n Step.\n "
] |
Please provide a description of the function:def output_reference(self, name):
if name not in self.output_names:
raise ValueError('Invalid output "{}"'.format(name))
return Reference(step_name=self.name_in_workflow, output_name=name) | [
"Return a reference to the given output for use in an input\n of a next Step.\n\n For a Step named `echo` that has an output called `echoed`, the\n reference `echo/echoed` is returned.\n\n Args:\n name (str): the name of the Step output\n Raises:\n ValueError: The name provided is not a valid output name for this\n Step.\n "
] |
Please provide a description of the function:def _input_optional(inp):
if 'default' in inp.keys():
return True
typ = inp.get('type')
if isinstance(typ, six.string_types):
return typ.endswith('?')
elif isinstance(typ, dict):
        # TODO: handle the case where the input type is a dict
return False
elif isinstance(typ, list):
# The cwltool validation expands optional arguments to
# [u'null', <type>]
return bool(u'null' in typ)
else:
        raise ValueError('Invalid input "{}"'.format(inp.get('id')))
"Returns True if a step input parameter is optional.\n\n Args:\n inp (dict): a dictionary representation of an input.\n\n Raises:\n ValueError: The inp provided is not valid.\n "
] |
Please provide a description of the function:def to_obj(self, wd=False, pack=False, relpath=None):
obj = CommentedMap()
if pack:
obj['run'] = self.orig
elif relpath is not None:
if self.from_url:
obj['run'] = self.run
else:
obj['run'] = os.path.relpath(self.run, relpath)
elif wd:
if self.from_url:
obj['run'] = self.run
else:
obj['run'] = os.path.basename(self.run)
else:
obj['run'] = self.run
obj['in'] = self.step_inputs
obj['out'] = self.output_names
if self.is_scattered:
obj['scatter'] = self.scattered_inputs
# scatter_method is optional when scattering over a single variable
if self.scatter_method is not None:
obj['scatterMethod'] = self.scatter_method
return obj | [
"Return the step as an dict that can be written to a yaml file.\n\n Returns:\n dict: yaml representation of the step.\n "
] |
Please provide a description of the function:def list_inputs(self):
doc = []
for inp, typ in self.input_types.items():
if isinstance(typ, six.string_types):
typ = "'{}'".format(typ)
doc.append('{}: {}'.format(inp, typ))
return '\n'.join(doc) | [
"Return a string listing all the Step's input names and their types.\n\n The types are returned in a copy/pastable format, so if the type is\n `string`, `'string'` (with single quotes) is returned.\n\n Returns:\n str containing all input names and types.\n "
] |
Please provide a description of the function:def load(self, steps_dir=None, step_file=None, step_list=None):
self._closed()
self.steps_library.load(steps_dir=steps_dir, step_file=step_file,
step_list=step_list) | [
"Load CWL steps into the WorkflowGenerator's steps library.\n\n Adds steps (command line tools and workflows) to the\n ``WorkflowGenerator``'s steps library. These steps can be used to\n create workflows.\n\n Args:\n steps_dir (str): path to directory containing CWL files. All CWL in\n the directory are loaded.\n step_file (str): path to a file containing a CWL step that will be\n added to the steps library.\n "
] |
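A minimal sketch of filling the steps library, assuming the scriptcwl package layout; the paths and URL are placeholders:

from scriptcwl import WorkflowGenerator

with WorkflowGenerator() as wf:
    wf.load(steps_dir="cwl_steps/")  # every *.cwl file in the directory
    wf.load(step_file="https://example.org/steps/echo.cwl")  # or a single file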
Please provide a description of the function:def _has_requirements(self):
self._closed()
return any([self.has_workflow_step, self.has_scatter_requirement,
self.has_multiple_inputs]) | [
"Returns True if the workflow needs a requirements section.\n\n Returns:\n bool: True if the workflow needs a requirements section, False\n otherwise.\n "
] |
Please provide a description of the function:def inputs(self, name):
self._closed()
step = self._get_step(name, make_copy=False)
return step.list_inputs() | [
"List input names and types of a step in the steps library.\n\n Args:\n name (str): name of a step in the steps library.\n "
] |
Please provide a description of the function:def _add_step(self, step):
self._closed()
self.has_workflow_step = self.has_workflow_step or step.is_workflow
self.wf_steps[step.name_in_workflow] = step | [
"Add a step to the workflow.\n\n Args:\n step (Step): a step from the steps library.\n "
] |
Please provide a description of the function:def add_input(self, **kwargs):
self._closed()
def _get_item(args):
if not args:
raise ValueError("No parameter specified.")
item = args.popitem()
if args:
raise ValueError("Too many parameters, not clear what to do "
"with {}".format(kwargs))
return item
symbols = None
input_dict = CommentedMap()
if 'default' in kwargs:
input_dict['default'] = kwargs.pop('default')
if 'label' in kwargs:
input_dict['label'] = kwargs.pop('label')
if 'symbols' in kwargs:
symbols = kwargs.pop('symbols')
name, input_type = _get_item(kwargs)
if input_type == 'enum':
typ = CommentedMap()
typ['type'] = 'enum'
# make sure symbols is set
if symbols is None:
raise ValueError("Please specify the enum's symbols.")
# make sure symbols is not empty
if symbols == []:
raise ValueError("The enum's symbols cannot be empty.")
# make sure the symbols are a list
            if not isinstance(symbols, list):
raise ValueError('Symbols should be a list.')
# make sure symbols is a list of strings
symbols = [str(s) for s in symbols]
typ['symbols'] = symbols
input_dict['type'] = typ
else:
# Set the 'type' if we can't use simple notation (because there is
# a default value or a label)
if bool(input_dict):
input_dict['type'] = input_type
msg = '"{}" is already used as a workflow input. Please use a ' +\
'different name.'
if name in self.wf_inputs:
raise ValueError(msg.format(name))
# Add 'type' for complex input types, so the user doesn't have to do it
if isinstance(input_type, dict):
input_dict['type'] = input_type
# Make sure we can use the notation without 'type' if the input allows
# it.
if bool(input_dict):
self.wf_inputs[name] = input_dict
else:
self.wf_inputs[name] = input_type
return Reference(input_name=name) | [
"Add workflow input.\n\n Args:\n kwargs (dict): A dict with a `name: type` item\n and optionally a `default: value` item, where name is the\n name (id) of the workflow input (e.g., `dir_in`) and type is\n the type of the input (e.g., `'Directory'`).\n The type of input parameter can be learned from\n `step.inputs(step_name=input_name)`.\n\n Returns:\n inputname\n\n Raises:\n ValueError: No or multiple parameter(s) have been specified.\n ",
"Get a single item from args."
] |
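A sketch of the notations this method accepts, continuing a wf generator as above:

num = wf.add_input(num="int")                       # simple name=type notation
msg = wf.add_input(msg="string", default="hello")   # default forces expanded form
mode = wf.add_input(mode="enum",                    # enums need non-empty symbols
                    symbols=["fast", "thorough"],
                    default="fast")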
Please provide a description of the function:def add_outputs(self, **kwargs):
self._closed()
for name, source_name in kwargs.items():
obj = {}
obj['outputSource'] = source_name
obj['type'] = self.step_output_types[source_name]
self.wf_outputs[name] = obj | [
"Add workflow outputs.\n\n The output type is added automatically, based on the steps in the steps\n library.\n\n Args:\n kwargs (dict): A dict containing ``name=source name`` pairs.\n ``name`` is the name of the workflow output (e.g.,\n ``txt_files``) and source name is the name of the step that\n produced this output plus the output name (e.g.,\n ``saf-to-txt/out_files``).\n "
] |
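A sketch, assuming txt_out is an output reference returned by an earlier step call on wf:

# The output type is looked up in step_output_types automatically.
wf.add_outputs(txt_files=txt_out)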
Please provide a description of the function:def _get_step(self, name, make_copy=True):
self._closed()
s = self.steps_library.get_step(name)
if s is None:
msg = '"{}" not found in steps library. Please check your ' \
'spelling or load additional steps'
raise ValueError(msg.format(name))
if make_copy:
s = copy.deepcopy(s)
return s | [
"Return step from steps library.\n\n Optionally, the step returned is a deep copy from the step in the steps\n library, so additional information (e.g., about whether the step was\n scattered) can be stored in the copy.\n\n Args:\n name (str): name of the step in the steps library.\n make_copy (bool): whether a deep copy of the step should be\n returned or not (default: True).\n\n Returns:\n Step from steps library.\n\n Raises:\n ValueError: The requested step cannot be found in the steps\n library.\n "
] |
Please provide a description of the function:def to_obj(self, wd=False, pack=False, relpath=None):
self._closed()
obj = CommentedMap()
obj['cwlVersion'] = 'v1.0'
obj['class'] = 'Workflow'
try:
obj['doc'] = self.documentation
except (AttributeError, ValueError):
pass
try:
obj['label'] = self.label
except (AttributeError, ValueError):
pass
if self._has_requirements():
obj['requirements'] = []
if self.has_workflow_step:
obj['requirements'].append(
{'class': 'SubworkflowFeatureRequirement'})
if self.has_scatter_requirement:
obj['requirements'].append({'class': 'ScatterFeatureRequirement'})
if self.has_multiple_inputs:
obj['requirements'].append(
{'class': 'MultipleInputFeatureRequirement'})
obj['inputs'] = self.wf_inputs
obj['outputs'] = self.wf_outputs
steps_obj = CommentedMap()
for key in self.wf_steps:
steps_obj[key] = self.wf_steps[key].to_obj(relpath=relpath,
pack=pack,
wd=wd)
obj['steps'] = steps_obj
return obj | [
"Return the created workflow as a dict.\n\n The dict can be written to a yaml file.\n\n Returns:\n A yaml-compatible dict representing the workflow.\n "
] |
Please provide a description of the function:def to_script(self, wf_name='wf'):
self._closed()
script = []
# Workflow documentation
# if self.documentation:
# if is_multiline(self.documentation):
# print('doc = ')
# print('{}.set_documentation(doc)'.format(wf_name))
# else:
# print('{}.set_documentation(\'{}\')'.format(wf_name,
# self.documentation))
# Workflow inputs
params = []
returns = []
for name, typ in self.wf_inputs.items():
params.append('{}=\'{}\''.format(name, typ))
returns.append(name)
script.append('{} = {}.add_inputs({})'.format(
', '.join(returns), wf_name, ', '.join(params)))
# Workflow steps
returns = []
for name, step in self.wf_steps.items():
pyname = step.python_name
returns = ['{}_{}'.format(pyname, o) for o in step['out']]
params = ['{}={}'.format(name, python_name(param))
for name, param in step['in'].items()]
script.append('{} = {}.{}({})'.format(
', '.join(returns), wf_name, pyname, ', '.join(params)))
# Workflow outputs
params = []
for name, details in self.wf_outputs.items():
params.append('{}={}'.format(
name, python_name(details['outputSource'])))
script.append('{}.add_outputs({})'.format(wf_name, ', '.join(params)))
return '\n'.join(script) | [
"Generated and print the scriptcwl script for the currunt workflow.\n\n Args:\n wf_name (str): string used for the WorkflowGenerator object in the\n generated script (default: ``wf``).\n ",
"')\n # print(self.documentation)\n # print('"
] |
Please provide a description of the function:def _types_match(type1, type2):
if isinstance(type1, six.string_types) and \
isinstance(type2, six.string_types):
type1 = type1.rstrip('?')
type2 = type2.rstrip('?')
if type1 != type2:
return False
return True | [
"Returns False only if it can show that no value of type1\n can possibly match type2.\n\n Supports only a limited selection of types.\n "
] |
Please provide a description of the function:def validate(self):
# define tmpfile
(fd, tmpfile) = tempfile.mkstemp()
os.close(fd)
try:
# save workflow object to tmpfile,
# do not recursively call validate function
self.save(tmpfile, mode='abs', validate=False)
# load workflow from tmpfile
document_loader, processobj, metadata, uri = load_cwl(tmpfile)
finally:
# cleanup tmpfile
os.remove(tmpfile) | [
"Validate workflow object.\n\n This method currently validates the workflow object with the use of\n cwltool. It writes the workflow to a tmp CWL file, reads it, validates\n it and removes the tmp file again. By default, the workflow is written\n to file using absolute paths to the steps.\n "
] |
Please provide a description of the function:def _pack(self, fname, encoding):
(fd, tmpfile) = tempfile.mkstemp()
os.close(fd)
try:
self.save(tmpfile, mode='abs', validate=False)
document_loader, processobj, metadata, uri = load_cwl(tmpfile)
finally:
# cleanup tmpfile
os.remove(tmpfile)
with codecs.open(fname, 'wb', encoding=encoding) as f:
f.write(print_pack(document_loader, processobj, uri, metadata)) | [
"Save workflow with ``--pack`` option\n\n This means that al tools and subworkflows are included in the workflow\n file that is created. A packed workflow cannot be loaded and used in\n scriptcwl.\n "
] |
Please provide a description of the function:def save(self, fname, mode=None, validate=True, encoding='utf-8',
wd=False, inline=False, relative=False, pack=False):
self._closed()
if mode is None:
mode = 'abs'
if pack:
mode = 'pack'
elif wd:
mode = 'wd'
elif relative:
mode = 'rel'
msg = 'Using deprecated save method. Please save the workflow ' \
'with: wf.save(\'{}\', mode=\'{}\'). Redirecting to new ' \
'save method.'.format(fname, mode)
warnings.warn(msg, DeprecationWarning)
modes = ('rel', 'abs', 'wd', 'inline', 'pack')
if mode not in modes:
msg = 'Illegal mode "{}". Choose one of ({}).'\
.format(mode, ','.join(modes))
raise ValueError(msg)
if validate:
self.validate()
dirname = os.path.dirname(os.path.abspath(fname))
if not os.path.exists(dirname):
os.makedirs(dirname)
if mode == 'inline':
msg = ('Inline saving is deprecated. Please save the workflow '
'using mode=\'pack\'. Setting mode to pack.')
warnings.warn(msg, DeprecationWarning)
mode = 'pack'
if mode == 'rel':
relpath = dirname
save_yaml(fname=fname, wf=self, pack=False, relpath=relpath,
wd=False)
if mode == 'abs':
save_yaml(fname=fname, wf=self, pack=False, relpath=None,
wd=False)
if mode == 'pack':
self._pack(fname, encoding)
if mode == 'wd':
if self.get_working_dir() is None:
raise ValueError('Working directory not set.')
else:
# save in working_dir
bn = os.path.basename(fname)
wd_file = os.path.join(self.working_dir, bn)
save_yaml(fname=wd_file, wf=self, pack=False, relpath=None,
wd=True)
# and copy workflow file to other location (as though all steps
# are in the same directory as the workflow)
try:
shutil.copy2(wd_file, fname)
except shutil.Error:
pass | [
"Save the workflow to file.\n\n Save the workflow to a CWL file that can be run with a CWL runner.\n\n Args:\n fname (str): file to save the workflow to.\n mode (str): one of (rel, abs, wd, inline, pack)\n encoding (str): file encoding to use (default: ``utf-8``).\n "
] |
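The save modes in a sketch, derived from the branches above:

wf.save("run_me.cwl", mode="abs")   # absolute paths to the steps (default)
wf.save("run_me.cwl", mode="rel")   # paths relative to the output file
wf.save("packed.cwl", mode="pack")  # one self-contained file, like cwltool --pack
wf.save("wd.cwl", mode="wd")        # requires a working directory to be set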
Please provide a description of the function:def add_inputs(self, **kwargs):
        msg = ('The add_inputs() function is deprecated in favour of the '
               'add_input() function, redirecting...')
warnings.warn(msg, DeprecationWarning)
return self.add_input(**kwargs) | [
"Deprecated function, use add_input(self, **kwargs) instead.\n Add workflow input.\n\n Args:\n kwargs (dict): A dict with a `name: type` item\n and optionally a `default: value` item, where name is the\n name (id) of the workflow input (e.g., `dir_in`) and type is\n the type of the input (e.g., `'Directory'`).\n The type of input parameter can be learned from\n `step.inputs(step_name=input_name)`.\n\n Returns:\n inputname\n\n Raises:\n ValueError: No or multiple parameter(s) have been specified.\n "
] |
Please provide a description of the function:def str_presenter(dmpr, data):
if is_multiline(data):
return dmpr.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dmpr.represent_scalar('tag:yaml.org,2002:str', data) | [
"Return correct str_presenter to write multiple lines to a yaml field.\n\n\n Source: http://stackoverflow.com/a/33300001\n "
] |
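To make PyYAML use this presenter it has to be registered for str; a sketch:

import yaml

yaml.add_representer(str, str_presenter)

print(yaml.dump({"doc": "first line\nsecond line"}))
# doc: |-
#   first line
#   second line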
Please provide a description of the function:def build_grad_matrices(V, points):
# See <https://www.allanswered.com/post/lkbkm/#zxqgk>
mesh = V.mesh()
bbt = BoundingBoxTree()
bbt.build(mesh)
dofmap = V.dofmap()
el = V.element()
rows = []
cols = []
datax = []
datay = []
for i, xy in enumerate(points):
cell_id = bbt.compute_first_entity_collision(Point(*xy))
cell = Cell(mesh, cell_id)
coordinate_dofs = cell.get_vertex_coordinates()
rows.append([i, i, i])
cols.append(dofmap.cell_dofs(cell_id))
v = el.evaluate_basis_derivatives_all(1, xy, coordinate_dofs, cell_id)
v = v.reshape(3, 2)
datax.append(v[:, 0])
datay.append(v[:, 1])
rows = numpy.concatenate(rows)
cols = numpy.concatenate(cols)
datax = numpy.concatenate(datax)
datay = numpy.concatenate(datay)
m = len(points)
n = V.dim()
dx_matrix = sparse.csr_matrix((datax, (rows, cols)), shape=(m, n))
dy_matrix = sparse.csr_matrix((datay, (rows, cols)), shape=(m, n))
return dx_matrix, dy_matrix | [
"Build the sparse m-by-n matrices that map a coefficient set for a function in V\n to the values of dx and dy at a number m of points.\n "
] |
Please provide a description of the function:def apply_M(self, ax, ay):
jac = numpy.array(
[[self.dx.dot(ax), self.dy.dot(ax)], [self.dx.dot(ay), self.dy.dot(ay)]]
)
# jacs and J are of shape (2, 2, k). M must be of the same shape and
# contain the result of the k 2x2 dot products. Perhaps there's a
# dot() for this.
M = numpy.einsum("ijl,jkl->ikl", jac, self.J)
# M = numpy.array([
# [
# jac[0][0]*self.J[0][0] + jac[0][1]*self.J[1][0],
# jac[0][0]*self.J[0][1] + jac[0][1]*self.J[1][1],
# ],
# [
# jac[1][0]*self.J[0][0] + jac[1][1]*self.J[1][0],
# jac[1][0]*self.J[0][1] + jac[1][1]*self.J[1][1],
# ],
# ])
# One could use
#
# M = numpy.moveaxis(M, -1, 0)
# _, sigma, _ = numpy.linalg.svd(M)
#
# but computing the singular values explicitly via
# <https://scicomp.stackexchange.com/a/14103/3980> is faster and more
# explicit.
a = (M[0, 0] + M[1, 1]) / 2
b = (M[0, 0] - M[1, 1]) / 2
c = (M[1, 0] + M[0, 1]) / 2
d = (M[1, 0] - M[0, 1]) / 2
return a, b, c, d | [
"Linear operator that converts ax, ay to abcd.\n "
] |
Please provide a description of the function:def cost_min2(self, alpha):
n = self.V.dim()
ax = alpha[:n]
ay = alpha[n:]
# ml = pyamg.ruge_stuben_solver(self.L)
# # ml = pyamg.smoothed_aggregation_solver(self.L)
# print(ml)
# print()
# print(self.L)
# print()
# x = ml.solve(ax, tol=1e-10)
# print('residual: {}'.format(numpy.linalg.norm(ax - self.L*x)))
# print()
# print(ax)
# print()
# print(x)
# exit(1)
# x = sparse.linalg.spsolve(self.L, ax)
# print('residual: {}'.format(numpy.linalg.norm(ax - self.L*x)))
# exit(1)
q2, r2 = self.get_q2_r2(ax, ay)
Lax = self.L * ax
Lay = self.L * ay
out = [
0.5 * numpy.dot(Lax, Lax),
0.5 * numpy.dot(Lay, Lay),
0.5 * numpy.dot(q2 - 1, q2 - 1),
0.5 * numpy.dot(r2, r2),
]
if self.num_f_eval % 10000 == 0:
print("{:7d} {:e} {:e} {:e} {:e}".format(self.num_f_eval, *out))
self.num_f_eval += 1
return numpy.sum(out) | [
"Residual formulation, Hessian is a low-rank update of the identity.\n "
] |
Please provide a description of the function:def delta(a, b):
diff = a - b
return numpy.einsum("i...,i...->...", diff, diff) | [
"Computes the distances between two colors or color sets. The shape of\n `a` and `b` must be equal.\n "
] |
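A sketch of the squared-distance behaviour on two small color sets:

import numpy

a = numpy.array([[10.0, 20.0], [20.0, 30.0], [30.0, 40.0]])  # two colors as columns
b = numpy.array([[11.0, 20.0], [20.0, 30.0], [30.0, 40.0]])
print(delta(a, b))  # [1. 0.] -- squared Euclidean distance per column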
Please provide a description of the function:def plot_flat_gamut(
xy_to_2d=lambda xy: xy,
axes_labels=("x", "y"),
plot_rgb_triangle=True,
fill_horseshoe=True,
plot_planckian_locus=True,
):
observer = observers.cie_1931_2()
# observer = observers.cie_1964_10()
_plot_monochromatic(observer, xy_to_2d, fill_horseshoe=fill_horseshoe)
# plt.grid()
if plot_rgb_triangle:
_plot_rgb_triangle(xy_to_2d)
if plot_planckian_locus:
_plot_planckian_locus(observer, xy_to_2d)
plt.gca().set_aspect("equal")
# plt.legend()
plt.xlabel(axes_labels[0])
plt.ylabel(axes_labels[1])
return | [
"Show a flat color gamut, by default xy. There exists a chroma gamut for\n all color models which transform lines in XYZ to lines, and hence have a\n natural decomposition into lightness and chroma components. Also, the flat\n gamut is the same for every lightness value. Examples for color models with\n this property are CIELUV and IPT, examples for color models without are\n CIELAB and CIECAM02.\n "
] |
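A sketch of calling it directly; matplotlib does the rendering:

import matplotlib.pyplot as plt

plot_flat_gamut()  # xy horseshoe with the sRGB triangle and Planckian locus
plt.show()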
Please provide a description of the function:def plot_macadam(
ellipse_scaling=10,
plot_filter_positions=False,
plot_standard_deviations=False,
plot_rgb_triangle=True,
plot_mesh=True,
n=1,
xy_to_2d=lambda xy: xy,
axes_labels=("x", "y"),
):
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, "data/macadam1942/table3.yaml")) as f:
data = yaml.safe_load(f)
# if plot_filter_positions:
# with open(os.path.join(dir_path, 'data/macadam1942/table1.yaml')) as f:
# filters_xyz = yaml.safe_load(f)
# filters_xyz = {
# key: 100 * numpy.array(value) for key, value in filters_xyz.items()
# }
# for key, xyz in filters_xyz.items():
# x, y = xyz100_to_2d(xyz)
# plt.plot(x, y, 'xk')
# ax.annotate(key, (x, y))
# collect the ellipse centers and offsets
centers = []
offsets = []
for datak in data:
# collect ellipse points
_, _, _, _, delta_y_delta_x, delta_s = numpy.array(datak["data"]).T
offset = (
numpy.array([numpy.ones(delta_y_delta_x.shape[0]), delta_y_delta_x])
/ numpy.sqrt(1 + delta_y_delta_x ** 2)
* delta_s
)
if offset.shape[1] < 2:
continue
centers.append([datak["x"], datak["y"]])
offsets.append(numpy.column_stack([+offset, -offset]))
centers = numpy.array(centers)
_plot_ellipse_data(
centers,
offsets,
ellipse_scaling=ellipse_scaling,
xy_to_2d=xy_to_2d,
plot_mesh=plot_mesh,
n=n,
plot_rgb_triangle=plot_rgb_triangle,
)
return | [
"See <https://en.wikipedia.org/wiki/MacAdam_ellipse>,\n <https://doi.org/10.1364%2FJOSA.32.000247>.\n "
] |
Please provide a description of the function:def _get_xy_tree(xy, degree):
x, y = xy
tree = [numpy.array([numpy.ones(x.shape, dtype=int)])]
for d in range(degree):
tree.append(numpy.concatenate([tree[-1] * x, [tree[-1][-1] * y]]))
return tree | [
"Evaluates the entire tree of 2d mononomials.\n\n The return value is a list of arrays, where `out[k]` hosts the `2*k+1`\n values of the `k`th level of the tree\n\n (0, 0)\n (1, 0) (0, 1)\n (2, 0) (1, 1) (0, 2)\n ... ... ...\n "
] |
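A worked example of the first three levels, matching the exponent diagram in the docstring:

import numpy

x = numpy.array([2.0])
y = numpy.array([3.0])
for level in _get_xy_tree((x, y), degree=2):
    print(level.ravel())
# [1]           level 0: 1
# [2. 3.]       level 1: x, y
# [4. 6. 9.]    level 2: x^2, x*y, y^2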
Please provide a description of the function:def _get_dx_tree(xy, degree):
x, y = xy
# build smaller tree
one = numpy.array([numpy.ones(x.shape, dtype=int)])
tree = [one]
for d in range(1, degree):
tree.append(
numpy.concatenate(
[
# Integer division `//` would be nice here, but
# <https://github.com/sympy/sympy/issues/14542>.
[tree[-1][0] / d * (d + 1) * x],
tree[-1] * y,
]
)
)
# append zeros
zero = numpy.array([numpy.zeros(x.shape, dtype=int)])
tree = [zero] + [numpy.concatenate([t, zero]) for t in tree]
return tree | [
"\n 0\n 1*(0, 0) 0\n 2*(1, 0) 1*(0, 1) 0\n 3*(2, 0) 2*(1, 1) 1*(0, 2) 0\n ... ... ... ...\n "
] |
Please provide a description of the function:def jac(self, xy=None):
if xy is not None:
self.set_xy(xy)
ux = numpy.dot(self.ax, self.xy_list[: len(self.ax)])
vx = numpy.dot(self.bx, self.xy_list[: len(self.bx)])
uy = numpy.dot(self.ay, self.xy_list[: len(self.ay)])
vy = numpy.dot(self.by, self.xy_list[: len(self.by)])
ux_dx = numpy.dot(self.ax, self.dx_list[: len(self.ax)])
vx_dx = numpy.dot(self.bx, self.dx_list[: len(self.bx)])
uy_dx = numpy.dot(self.ay, self.dx_list[: len(self.ay)])
vy_dx = numpy.dot(self.by, self.dx_list[: len(self.by)])
ux_dy = numpy.dot(self.ax, self.dy_list[: len(self.ax)])
vx_dy = numpy.dot(self.bx, self.dy_list[: len(self.bx)])
uy_dy = numpy.dot(self.ay, self.dy_list[: len(self.ay)])
vy_dy = numpy.dot(self.by, self.dy_list[: len(self.by)])
jac = numpy.array(
[
[
(ux_dx * vx - vx_dx * ux) / vx ** 2,
(ux_dy * vx - vx_dy * ux) / vx ** 2,
],
[
(uy_dx * vy - vy_dx * uy) / vy ** 2,
(uy_dy * vy - vy_dy * uy) / vy ** 2,
],
]
)
return jac | [
"Get the Jacobian at (x, y).\n "
] |
Please provide a description of the function:def spectrum_to_xyz100(spectrum, observer):
lambda_o, data_o = observer
lambda_s, data_s = spectrum
# form the union of lambdas
lmbda = numpy.sort(numpy.unique(numpy.concatenate([lambda_o, lambda_s])))
# The technical document prescribes that the integration be performed over
# the wavelength range corresponding to the entire visible spectrum, 360 nm
# to 830 nm.
assert lmbda[0] < 361e-9
assert lmbda[-1] > 829e-9
# interpolate data
idata_o = numpy.array([numpy.interp(lmbda, lambda_o, dt) for dt in data_o])
# The technical report specifies the interpolation techniques, too:
# ```
# Use one of the four following methods to calculate needed but unmeasured
# values of phi(l), R(l) or tau(l) within the range of measurements:
# 1) the third-order polynomial interpolation (Lagrange) from the four
# neighbouring data points around the point to be interpolated, or
# 2) cubic spline interpolation formula, or
# 3) a fifth order polynomial interpolation formula from the six
# neighboring data points around the point to be interpolated, or
# 4) a Sprague interpolation (see Seve, 2003).
# ```
# Well, don't do that but simply use linear interpolation now. We only use
# the midpoint rule for integration anyways.
idata_s = numpy.interp(lmbda, lambda_s, data_s)
# step sizes
delta = numpy.zeros(len(lmbda))
diff = lmbda[1:] - lmbda[:-1]
delta[1:] += diff
delta[:-1] += diff
delta /= 2
values = numpy.dot(idata_o, idata_s * delta)
return values * 100 | [
"Computes the tristimulus values XYZ from a given spectrum for a given\n observer via\n\n X_i = int_lambda spectrum_i(lambda) * observer_i(lambda) dlambda.\n\n In section 7, the technical report CIE Standard Illuminants for\n Colorimetry, 1999, gives a recommendation on how to perform the\n computation.\n "
] |
Please provide a description of the function:def white_point(illuminant, observer=observers.cie_1931_2()):
values = spectrum_to_xyz100(illuminant, observer)
# normalize for relative luminance, Y=100
values /= values[1]
values *= 100
return values | [
"From <https://en.wikipedia.org/wiki/White_point>:\n The white point of an illuminant is the chromaticity of a white object\n under the illuminant.\n "
] |
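A sketch tying the last two helpers together, using the illuminant a() defined below; the printed values are the well-known illuminant A white point:

wp = white_point(a())  # CIE 1931 2-degree observer by default
print(wp)              # approximately [109.85, 100.0, 35.58]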
Please provide a description of the function:def a(interval=1.0e-9):
# https://en.wikipedia.org/wiki/Standard_illuminant#Illuminant_A
lmbda = numpy.arange(300e-9, 831e-9, interval)
c2 = 1.435e-2
color_temp = 2848
vals = (
100
* (560e-9 / lmbda) ** 5
* (
(numpy.exp(c2 / (color_temp * 560e-9)) - 1)
/ (numpy.exp(c2 / (color_temp * lmbda)) - 1)
)
)
return lmbda, vals | [
"CIE Standard Illuminants for Colorimetry, 1999:\n CIE standard illuminant A is intended to represent typical, domestic,\n tungsten-filament lighting. Its relative spectral power distribution is\n that of a Planckian radiator at a temperature of approximately 2856 K. CIE\n standard illuminant A should be used in all applications of colorimetry\n involving the use of incandescent lighting, unless there are specific\n reasons for using a different illuminant.\n "
] |