code | docstring
---|---|
def forward_substitution(matrix_l, matrix_b):
""" Forward substitution method for the solution of linear systems.
Solves the equation :math:`Ly = b` using forward substitution method
where :math:`L` is a lower triangular matrix and :math:`b` is a column matrix.
:param matrix_l: L, lower triangular matrix
:type matrix_l: list, tuple
:param matrix_b: b, column matrix
:type matrix_b: list, tuple
:return: y, column matrix
:rtype: list
"""
q = len(matrix_b)
matrix_y = [0.0 for _ in range(q)]
matrix_y[0] = float(matrix_b[0]) / float(matrix_l[0][0])
for i in range(1, q):
matrix_y[i] = float(matrix_b[i]) - sum([matrix_l[i][j] * matrix_y[j] for j in range(0, i)])
matrix_y[i] /= float(matrix_l[i][i])
return matrix_y | Forward substitution method for the solution of linear systems.
Solves the equation :math:`Ly = b` using forward substitution method
where :math:`L` is a lower triangular matrix and :math:`b` is a column matrix.
:param matrix_l: L, lower triangular matrix
:type matrix_l: list, tuple
:param matrix_b: b, column matrix
:type matrix_b: list, tuple
:return: y, column matrix
:rtype: list |
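A minimal usage sketch, assuming the forward_substitution function above is in scope; the 2x2 system is hypothetical and easy to verify by hand.
lower = [[2.0, 0.0],
         [1.0, 3.0]]
b = [4.0, 7.0]
y = forward_substitution(lower, b)
print(y)  # [2.0, 1.666...]: y0 = 4/2 = 2, y1 = (7 - 1*2)/3 = 5/3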
def _get_qvm_based_on_real_device(name: str, device: Device,
noisy: bool, connection: ForestConnection = None,
qvm_type: str = 'qvm'):
"""
A QVM based on a real device.
This is the most realistic QVM.
:param name: The full name of this QVM
:param device: The device from :py:func:`get_lattice`.
:param noisy: Whether to construct a noisy quantum computer by using the device's
associated noise model.
:param connection: An optional :py:class:`ForestConnection` object. If not specified,
the default values for URL endpoints will be used.
:return: A pre-configured QuantumComputer based on the named device.
"""
if noisy:
noise_model = device.noise_model
else:
noise_model = None
return _get_qvm_qc(name=name, connection=connection, device=device,
noise_model=noise_model, requires_executable=True,
qvm_type=qvm_type) | A QVM based on a real device.
This is the most realistic QVM.
:param name: The full name of this QVM
:param device: The device from :py:func:`get_lattice`.
:param noisy: Whether to construct a noisy quantum computer by using the device's
associated noise model.
:param connection: An optional :py:class:`ForestConnection` object. If not specified,
the default values for URL endpoints will be used.
:return: A pre-configured QuantumComputer based on the named device. |
def valuefrompostdata(self, postdata):
"""This parameter method searches the POST data and retrieves the values it needs. It does not set the value yet though, but simply returns it. Needs to be explicitly passed to parameter.set()"""
if self.id in postdata and postdata[self.id] != '':
return int(postdata[self.id])
else:
return None | This parameter method searches the POST data and retrieves the value it needs. It does not set the value; it simply returns it, and the result must be explicitly passed to parameter.set() |
def compute(cls, observation, prediction):
"""Compute a Cohen's D from an observation and a prediction."""
assert isinstance(observation, dict)
assert isinstance(prediction, dict)
p_mean = prediction['mean'] # Use the prediction's mean.
p_std = prediction['std']
o_mean = observation['mean']
o_std = observation['std']
try: # Try to pool taking samples sizes into account.
p_n = prediction['n']
o_n = observation['n']
s = (((p_n-1)*(p_std**2) + (o_n-1)*(o_std**2))/(p_n+o_n-2))**0.5
except KeyError: # If sample sizes are not available.
s = (p_std**2 + o_std**2)**0.5
value = (p_mean - o_mean)/s
value = utils.assert_dimensionless(value)
return CohenDScore(value) | Compute a Cohen's D from an observation and a prediction. |
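The pooled standard deviation used above can be checked with plain floats; this self-contained sketch with made-up numbers repeats the same arithmetic outside the CohenDScore/utils machinery.
p_mean, p_std, p_n = 12.0, 2.0, 30   # prediction statistics (hypothetical)
o_mean, o_std, o_n = 10.0, 3.0, 20   # observation statistics (hypothetical)
# pooled standard deviation, weighting each variance by its degrees of freedom
s = (((p_n - 1) * p_std**2 + (o_n - 1) * o_std**2) / (p_n + o_n - 2)) ** 0.5
d = (p_mean - o_mean) / s
print(round(s, 3), round(d, 3))  # 2.445 0.818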
def _line_wrapper(self,diffs):
"""Returns iterator that splits (wraps) mdiff text lines"""
# pull from/to data and flags from mdiff iterator
for fromdata,todata,flag in diffs:
# check for context separators and pass them through
if flag is None:
yield fromdata,todata,flag
continue
(fromline,fromtext),(toline,totext) = fromdata,todata
# for each from/to line split it at the wrap column to form
# list of text lines.
fromlist,tolist = [],[]
self._split_line(fromlist,fromline,fromtext)
self._split_line(tolist,toline,totext)
# yield from/to line in pairs inserting blank lines as
# necessary when one side has more wrapped lines
while fromlist or tolist:
if fromlist:
fromdata = fromlist.pop(0)
else:
fromdata = ('',' ')
if tolist:
todata = tolist.pop(0)
else:
todata = ('',' ')
yield fromdata,todata,flag | Returns iterator that splits (wraps) mdiff text lines |
def svd(g, svdcut=1e-12, wgts=False, add_svdnoise=False):
""" Apply SVD cuts to collection of |GVar|\s in ``g``.
Standard usage is, for example, ::
svdcut = ...
gmod = svd(g, svdcut=svdcut)
where ``g`` is an array of |GVar|\s or a dictionary containing |GVar|\s
and/or arrays of |GVar|\s. When ``svdcut>0``, ``gmod`` is
a copy of ``g`` whose |GVar|\s have been modified to make
their correlation matrix less singular than that of the
original ``g``: each eigenvalue ``eig`` of the correlation matrix is
replaced by ``max(eig, svdcut * max_eig)`` where ``max_eig`` is
the largest eigenvalue. This SVD cut, which is applied separately
to each block-diagonal sub-matrix of the correlation matrix,
increases the variance of the eigenmodes with eigenvalues smaller
than ``svdcut * max_eig``.
The modification of ``g``'s covariance matrix is implemented by adding
(to ``g``) a set of |GVar|\s with zero means::
gmod = g + gmod.svdcorrection
where ``gmod.svdcorrection`` is an array/dictionary
containing the |GVar|\s. If
parameter ``add_svdnoise=True``,
noise is included in ``gmod.svdcorrection``,
::
gmod.svdcorrection += gv.sample(gmod.svdcorrection),
before it is added to ``g``. The noise can be useful for testing fits
and other applications.
When ``svdcut`` is negative, eigenmodes of the correlation matrix
whose eigenvalues are smaller than ``|svdcut| * max_eig`` are dropped
from the new matrix and the corresponding components of ``g`` are
zeroed out (that is, replaced by 0(0)) in ``gmod``.
There is an additional parameter ``wgts`` in :func:`gvar.svd` whose
default value is ``False``. Setting ``wgts=1`` or ``wgts=-1`` instead
causes :func:`gvar.svd` to return a tuple ``(gmod, i_wgts)`` where
``gmod`` is the modified copy of ``g``, and ``i_wgts`` contains a
spectral decomposition of the covariance matrix corresponding to
the modified correlation matrix if ``wgts=1``, or a decomposition of its
inverse if ``wgts=-1``. The first entry ``i, wgts = i_wgts[0]`` specifies
the diagonal part of the matrix: ``i`` is a list of the indices in
``gmod.flat`` corresponding to diagonal elements, and ``wgts ** 2``
gives the corresponding matrix elements. The second and subsequent
entries, ``i, wgts = i_wgts[n]`` for ``n > 0``, each correspond
to block-diagonal sub-matrices, where ``i`` is the list of
indices corresponding to the block, and ``wgts[j]`` are eigenvectors of
the sub-matrix rescaled so that ::
numpy.sum(numpy.outer(wi, wi) for wi in wgts[j])
is the sub-matrix (``wgts=1``) or its inverse (``wgts=-1``).
To compute the inverse of the covariance matrix from ``i_wgts``,
for example, one could use code like::
gmod, i_wgts = svd(g, svdcut=svdcut, wgts=-1)
inv_cov = numpy.zeros((n, n), float)
i, wgts = i_wgts[0] # 1x1 sub-matrices
if len(i) > 0:
inv_cov[i, i] = numpy.array(wgts) ** 2
for i, wgts in i_wgts[1:]: # nxn sub-matrices (n>1)
for w in wgts:
inv_cov[i[:, None], i] += numpy.outer(w, w)
This sets ``inv_cov`` equal to the inverse of the covariance matrix of
the ``gmod``\s. Similarly, we can compute the expectation value,
``u.dot(inv_cov.dot(v))``, between two vectors (:mod:`numpy` arrays)
using::
result = 0.0
i, wgts = i_wgts[0] # 1x1 sub-matrices
if len(i) > 0:
result += numpy.sum((u[i] * wgts) * (v[i] * wgts))
for i, wgts in i_wgts[1:]: # nxn sub-matrices (n>1)
result += numpy.sum(wgts.dot(u[i]) * wgts.dot(v[i]))
where ``result`` is the desired expectation value.
Args:
g: An array of |GVar|\s or a dictionary whose values are
|GVar|\s and/or arrays of |GVar|\s.
svdcut (None or float): If positive, replace eigenvalues ``eig``
of the correlation matrix with ``max(eig, svdcut * max_eig)``
where ``max_eig`` is the largest eigenvalue; if negative, discard
eigenmodes with eigenvalues smaller than ``|svdcut| * max_eig``.
Note ``|svdcut| < 1``. Default is 1e-12.
wgts: Setting ``wgts=1`` causes :func:`gvar.svd` to compute
and return a spectral decomposition of the covariance matrix of
the modified |GVar|\s, ``gmod``. Setting ``wgts=-1`` results in
a decomposition of the inverse of the covariance matrix. The
default value is ``False``, in which case only ``gmod`` is returned.
add_svdnoise: If ``True``, noise is added to the SVD correction (see
above).
Returns:
A copy ``gmod`` of ``g`` whose correlation matrix is modified by
SVD cuts. If ``wgts`` is not ``False``,
a tuple ``(gmod, i_wgts)`` is returned where ``i_wgts``
contains a spectral decomposition of ``gmod``'s
covariance matrix or its inverse.
Data from the SVD analysis is stored in ``gmod``:
.. attribute:: gmod.svdcut
SVD cut used to create ``gmod``.
.. attribute:: gmod.dof
Number of independent degrees of freedom left after the
SVD cut. This is the same as the number initially unless
``svdcut < 0`` in which case it may be smaller.
.. attribute:: gmod.nmod
Number of modes whose eigenvalue was modified by the
SVD cut.
.. attribute:: gmod.nblocks
A dictionary where ``gmod.nblocks[s]`` contains the number of
block-diagonal ``s``-by-``s`` sub-matrices in the correlation
matrix.
.. attribute:: gmod.eigen_range
Ratio of the smallest to largest eigenvalue before SVD cuts are
applied (but after rescaling).
.. attribute:: gmod.logdet
Logarithm of the determinant of the covariance matrix after SVD
cuts are applied (excluding any omitted modes when
``svdcut < 0`` and any diagonal zero modes).
.. attribute:: gmod.svdcorrection
Array or dictionary containing the SVD corrections added to ``g``
to create ``gmod``: ``gmod = g + gmod.svdcorrection``.
"""
# replace g by a copy of g
if hasattr(g,'keys'):
is_dict = True
g = BufferDict(g)
else:
is_dict = False
class svdarray(numpy.ndarray):
def __new__(cls, inputarray):
obj = numpy.array(g).view(cls)
return obj
g = svdarray(g)
idx_bcov = evalcov_blocks(g.flat)
g.logdet = 0.0
svdcorrection = numpy.zeros(len(g.flat), object)
svdcorrection[:] = gvar(0, 0)
g.eigen_range = 1.
g.nmod = 0
if wgts is not False:
i_wgts = [([], [])] # 1st entry for all 1x1 blocks
lost_modes = 0
g.nblocks = {}
for idx, block_cov in idx_bcov:
g.nblocks[len(idx)] = g.nblocks.get(len(idx), 0) + 1
if len(idx) == 1:
i = idx[0]
if block_cov[0, 0] == 0:
g.logdet = numpy.inf
else:
g.logdet += numpy.log(block_cov[0, 0])
if wgts is not False:
i_wgts[0][0].append(i)
i_wgts[0][1].append(block_cov[0, 0] ** (wgts * 0.5))
else:
s = SVD(block_cov, svdcut=svdcut, rescale=True, compute_delta=True)
if s.D is not None:
g.logdet -= 2 * sum(numpy.log(di) for di in s.D)
g.logdet += sum(numpy.log(vali) for vali in s.val)
g.nmod += s.nmod
if s.delta is not None:
if add_svdnoise:
for vali, valorigi, veci in zip(s.val, s.valorig, s.vec):
if vali > valorigi:
# add next(raniter(s.delta)) to s.delta in svdcorrection
s.delta += (veci / s.D) * (
numpy.random.normal(0.0, (vali - valorigi) ** 0.5)
)
svdcorrection[idx] = s.delta
g.flat[idx] += s.delta
elif svdcut is not None and svdcut < 0:
newg = numpy.zeros(len(idx), object)
for veci in s.vec:
veci_D = veci / s.D
newg += veci_D * (veci.dot(s.D * g.flat[idx]))
lost_modes += len(idx) - len(s.vec)
g.flat[idx] = newg
if wgts is not False:
i_wgts.append(
(idx, [w for w in s.decomp(wgts)[::-1]])
)
if s.eigen_range < g.eigen_range:
g.eigen_range = s.eigen_range
g.nmod += lost_modes
g.dof = len(g.flat) - lost_modes
g.svdcut = svdcut
# repackage svdcorrection
if is_dict:
g.svdcorrection = BufferDict(g, buf=svdcorrection)
else:
g.svdcorrection = svdcorrection.reshape(g.shape)
##### for legacy code (don't use)
svd.dof = g.dof
svd.nmod = g.nmod
svd.eigen_range = g.eigen_range
svd.logdet = g.logdet
svd.correction = g.svdcorrection.flat[:]
svd.nblocks = g.nblocks
##### end of legacy code
# repack into numpy arrays
if wgts is not False:
tmp = []
for iw, wgts in i_wgts:
tmp.append(
(numpy.array(iw, numpy.intp), numpy.array(wgts, numpy.double))
)
i_wgts = tmp
return (g, i_wgts)
else:
return g | Apply SVD cuts to a collection of |GVar|\s in ``g``.
Standard usage is, for example, ::
svdcut = ...
gmod = svd(g, svdcut=svdcut)
where ``g`` is an array of |GVar|\s or a dictionary containing |GVar|\s
and/or arrays of |GVar|\s. When ``svdcut>0``, ``gmod`` is
a copy of ``g`` whose |GVar|\s have been modified to make
their correlation matrix less singular than that of the
original ``g``: each eigenvalue ``eig`` of the correlation matrix is
replaced by ``max(eig, svdcut * max_eig)`` where ``max_eig`` is
the largest eigenvalue. This SVD cut, which is applied separately
to each block-diagonal sub-matrix of the correlation matrix,
increases the variance of the eigenmodes with eigenvalues smaller
than ``svdcut * max_eig``.
The modification of ``g``'s covariance matrix is implemented by adding
(to ``g``) a set of |GVar|\s with zero means::
gmod = g + gmod.svdcorrection
where ``gmod.svdcorrection`` is an array/dictionary
containing the |GVar|\s. If
parameter ``add_svdnoise=True``,
noise is included in ``gmod.svdcorrection``,
::
gmod.svdcorrection += gv.sample(gmod.svdcorrection),
before it is added to ``g``. The noise can be useful for testing fits
and other applications.
When ``svdcut`` is negative, eigenmodes of the correlation matrix
whose eigenvalues are smaller than ``|svdcut| * max_eig`` are dropped
from the new matrix and the corresponding components of ``g`` are
zeroed out (that is, replaced by 0(0)) in ``gmod``.
There is an additional parameter ``wgts`` in :func:`gvar.svd` whose
default value is ``False``. Setting ``wgts=1`` or ``wgts=-1`` instead
causes :func:`gvar.svd` to return a tuple ``(gmod, i_wgts)`` where
``gmod`` is the modified copy of ``g``, and ``i_wgts`` contains a
spectral decomposition of the covariance matrix corresponding to
the modified correlation matrix if ``wgts=1``, or a decomposition of its
inverse if ``wgts=-1``. The first entry ``i, wgts = i_wgts[0]`` specifies
the diagonal part of the matrix: ``i`` is a list of the indices in
``gmod.flat`` corresponding to diagonal elements, and ``wgts ** 2``
gives the corresponding matrix elements. The second and subsequent
entries, ``i, wgts = i_wgts[n]`` for ``n > 0``, each correspond
to block-diagonal sub-matrices, where ``i`` is the list of
indices corresponding to the block, and ``wgts[j]`` are eigenvectors of
the sub-matrix rescaled so that ::
numpy.sum(numpy.outer(wi, wi) for wi in wgts[j])
is the sub-matrix (``wgts=1``) or its inverse (``wgts=-1``).
To compute the inverse of the covariance matrix from ``i_wgts``,
for example, one could use code like::
gmod, i_wgts = svd(g, svdcut=svdcut, wgts=-1)
inv_cov = numpy.zeros((n, n), float)
i, wgts = i_wgts[0] # 1x1 sub-matrices
if len(i) > 0:
inv_cov[i, i] = numpy.array(wgts) ** 2
for i, wgts in i_wgts[1:]: # nxn sub-matrices (n>1)
for w in wgts:
inv_cov[i[:, None], i] += numpy.outer(w, w)
This sets ``inv_cov`` equal to the inverse of the covariance matrix of
the ``gmod``\s. Similarly, we can compute the expectation value,
``u.dot(inv_cov.dot(v))``, between two vectors (:mod:`numpy` arrays)
using::
result = 0.0
i, wgts = i_wgts[0] # 1x1 sub-matrices
if len(i) > 0:
result += numpy.sum((u[i] * wgts) * (v[i] * wgts))
for i, wgts in i_wgts[1:]: # nxn sub-matrices (n>1)
result += numpy.sum(wgts.dot(u[i]) * wgts.dot(v[i]))
where ``result`` is the desired expectation value.
Args:
g: An array of |GVar|\s or a dictionary whose values are
|GVar|\s and/or arrays of |GVar|\s.
svdcut (None or float): If positive, replace eigenvalues ``eig``
of the correlation matrix with ``max(eig, svdcut * max_eig)``
where ``max_eig`` is the largest eigenvalue; if negative, discard
eigenmodes with eigenvalues smaller than ``|svdcut| * max_eig``.
Note ``|svdcut| < 1``. Default is 1e-12.
wgts: Setting ``wgts=1`` causes :func:`gvar.svd` to compute
and return a spectral decomposition of the covariance matrix of
the modified |GVar|\s, ``gmod``. Setting ``wgts=-1`` results in
a decomposition of the inverse of the covariance matrix. The
default value is ``False``, in which case only ``gmod`` is returned.
add_svdnoise: If ``True``, noise is added to the SVD correction (see
above).
Returns:
A copy ``gmod`` of ``g`` whose correlation matrix is modified by
SVD cuts. If ``wgts`` is not ``False``,
a tuple ``(gmod, i_wgts)`` is returned where ``i_wgts``
contains a spectral decomposition of ``gmod``'s
covariance matrix or its inverse.
Data from the SVD analysis is stored in ``gmod``:
.. attribute:: gmod.svdcut
SVD cut used to create ``gmod``.
.. attribute:: gmod.dof
Number of independent degrees of freedom left after the
SVD cut. This is the same as the number initially unless
``svdcut < 0`` in which case it may be smaller.
.. attribute:: gmod.nmod
Number of modes whose eigenvalue was modified by the
SVD cut.
.. attribute:: gmod.nblocks
A dictionary where ``gmod.nblocks[s]`` contains the number of
block-diagonal ``s``-by-``s`` sub-matrices in the correlation
matrix.
.. attribute:: gmod.eigen_range
Ratio of the smallest to largest eigenvalue before SVD cuts are
applied (but after rescaling).
.. attribute:: gmod.logdet
Logarithm of the determinant of the covariance matrix after SVD
cuts are applied (excluding any omitted modes when
``svdcut < 0`` and any diagonal zero modes).
.. attribute:: gmod.svdcorrection
Array or dictionary containing the SVD corrections added to ``g``
to create ``gmod``: ``gmod = g + gmod.svdcorrection``. |
def build_tensor_serving_input_receiver_fn(shape, dtype=tf.float32,
batch_size=1):
"""Returns a input_receiver_fn that can be used during serving.
This expects examples to come through as float tensors, and simply
wraps them as TensorServingInputReceivers.
Arguably, this should live in tf.estimator.export. Testing here first.
Args:
shape: list representing target size of a single example.
dtype: the expected datatype for the input example
batch_size: number of input tensors that will be passed for prediction
Returns:
A function that itself returns a TensorServingInputReceiver.
"""
def serving_input_receiver_fn():
# Prep a placeholder where the input example will be fed in
features = tf.placeholder(
dtype=dtype, shape=[batch_size] + shape, name='input_tensor')
return tf.estimator.export.TensorServingInputReceiver(
features=features, receiver_tensors=features)
return serving_input_receiver_fn | Returns an input_receiver_fn that can be used during serving.
This expects examples to come through as float tensors, and simply
wraps them as TensorServingInputReceivers.
Arguably, this should live in tf.estimator.export. Testing here first.
Args:
shape: list representing target size of a single example.
dtype: the expected datatype for the input example
batch_size: number of input tensors that will be passed for prediction
Returns:
A function that itself returns a TensorServingInputReceiver. |
def walk(self, basedir):
"""Walk all the directories of basedir except hidden directories
:param basedir: string, the directory to walk
:returns: generator, same as os.walk
"""
system_d = SitePackagesDir()
filter_system_d = system_d and os.path.commonprefix([system_d, basedir]) != system_d
for root, dirs, files in os.walk(basedir, topdown=True):
# ignore dot directories and private directories (start with underscore)
dirs[:] = [d for d in dirs if d[0] != '.' and d[0] != "_"]
if filter_system_d:
dirs[:] = [d for d in dirs if not d.startswith(system_d)]
yield root, dirs, files | Walk all the directories of basedir except hidden directories
:param basedir: string, the directory to walk
:returns: generator, same as os.walk |
def on_play_speed(self, *args):
"""Change the interval at which ``self.play`` is called to match my
current ``play_speed``.
"""
Clock.unschedule(self.play)
Clock.schedule_interval(self.play, 1.0 / self.play_speed) | Change the interval at which ``self.play`` is called to match my
current ``play_speed``. |
def NRTL(xs, taus, alphas):
r'''Calculates the activity coefficients of each species in a mixture
using the Non-Random Two-Liquid (NRTL) method, given their mole fractions,
dimensionless interaction parameters, and nonrandomness constants. Those
are normally correlated with temperature in some form, and need to be
calculated separately.
.. math::
\ln(\gamma_i)=\frac{\displaystyle\sum_{j=1}^{n}{x_{j}\tau_{ji}G_{ji}}}
{\displaystyle\sum_{k=1}^{n}{x_{k}G_{ki}}}+\sum_{j=1}^{n}
{\frac{x_{j}G_{ij}}{\displaystyle\sum_{k=1}^{n}{x_{k}G_{kj}}}}
{\left ({\tau_{ij}-\frac{\displaystyle\sum_{m=1}^{n}{x_{m}\tau_{mj}
G_{mj}}}{\displaystyle\sum_{k=1}^{n}{x_{k}G_{kj}}}}\right )}
G_{ij}=\text{exp}\left ({-\alpha_{ij}\tau_{ij}}\right )
Parameters
----------
xs : list[float]
Liquid mole fractions of each species, [-]
taus : list[list[float]]
Dimensionless interaction parameters of each compound with each other,
[-]
alphas : list[list[float]]
Nonrandomness constants of each compound interacting with each other, [-]
Returns
-------
gammas : list[float]
Activity coefficient for each species in the liquid mixture, [-]
Notes
-----
This model needs N^2 parameters.
One common temperature dependence of the nonrandomness constants is:
.. math::
\alpha_{ij}=c_{ij}+d_{ij}T
Most correlations for the interaction parameters include some of the terms
shown in the following form:
.. math::
\tau_{ij}=A_{ij}+\frac{B_{ij}}{T}+\frac{C_{ij}}{T^{2}}+D_{ij}
\ln{\left ({T}\right )}+E_{ij}T^{F_{ij}}
Examples
--------
Ethanol-water example, at 343.15 K and 1 MPa:
>>> NRTL(xs=[0.252, 0.748], taus=[[0, -0.178], [1.963, 0]],
... alphas=[[0, 0.2974],[.2974, 0]])
[1.9363183763514304, 1.1537609663170014]
References
----------
.. [1] Renon, Henri, and J. M. Prausnitz. "Local Compositions in
Thermodynamic Excess Functions for Liquid Mixtures." AIChE Journal 14,
no. 1 (1968): 135-144. doi:10.1002/aic.690140124.
.. [2] Gmehling, Jurgen, Barbel Kolbe, Michael Kleiber, and Jurgen Rarey.
Chemical Thermodynamics for Process Simulation. 1st edition. Weinheim:
Wiley-VCH, 2012.
'''
gammas = []
cmps = range(len(xs))
Gs = [[exp(-alphas[i][j]*taus[i][j]) for j in cmps] for i in cmps]
for i in cmps:
tn1, td1, total2 = 0., 0., 0.
for j in cmps:
# Term 1, numerator and denominator
tn1 += xs[j]*taus[j][i]*Gs[j][i]
td1 += xs[j]*Gs[j][i]
# Term 2
tn2 = xs[j]*Gs[i][j]
td2 = td3 = sum([xs[k]*Gs[k][j] for k in cmps])
tn3 = sum([xs[m]*taus[m][j]*Gs[m][j] for m in cmps])
total2 += tn2/td2*(taus[i][j] - tn3/td3)
gamma = exp(tn1/td1 + total2)
gammas.append(gamma)
return gammas | r'''Calculates the activity coefficients of each species in a mixture
using the Non-Random Two-Liquid (NRTL) method, given their mole fractions,
dimensionless interaction parameters, and nonrandomness constants. Those
are normally correlated with temperature in some form, and need to be
calculated separately.
.. math::
\ln(\gamma_i)=\frac{\displaystyle\sum_{j=1}^{n}{x_{j}\tau_{ji}G_{ji}}}
{\displaystyle\sum_{k=1}^{n}{x_{k}G_{ki}}}+\sum_{j=1}^{n}
{\frac{x_{j}G_{ij}}{\displaystyle\sum_{k=1}^{n}{x_{k}G_{kj}}}}
{\left ({\tau_{ij}-\frac{\displaystyle\sum_{m=1}^{n}{x_{m}\tau_{mj}
G_{mj}}}{\displaystyle\sum_{k=1}^{n}{x_{k}G_{kj}}}}\right )}
G_{ij}=\text{exp}\left ({-\alpha_{ij}\tau_{ij}}\right )
Parameters
----------
xs : list[float]
Liquid mole fractions of each species, [-]
taus : list[list[float]]
Dimensionless interaction parameters of each compound with each other,
[-]
alphas : list[list[float]]
Nonrandomness constants of each compound interacting with each other, [-]
Returns
-------
gammas : list[float]
Activity coefficient for each species in the liquid mixture, [-]
Notes
-----
This model needs N^2 parameters.
One common temperature dependence of the nonrandomness constants is:
.. math::
\alpha_{ij}=c_{ij}+d_{ij}T
Most correlations for the interaction parameters include some of the terms
shown in the following form:
.. math::
\tau_{ij}=A_{ij}+\frac{B_{ij}}{T}+\frac{C_{ij}}{T^{2}}+D_{ij}
\ln{\left ({T}\right )}+E_{ij}T^{F_{ij}}
Examples
--------
Ethanol-water example, at 343.15 K and 1 MPa:
>>> NRTL(xs=[0.252, 0.748], taus=[[0, -0.178], [1.963, 0]],
... alphas=[[0, 0.2974],[.2974, 0]])
[1.9363183763514304, 1.1537609663170014]
References
----------
.. [1] Renon, Henri, and J. M. Prausnitz. "Local Compositions in
Thermodynamic Excess Functions for Liquid Mixtures." AIChE Journal 14,
no. 1 (1968): 135-144. doi:10.1002/aic.690140124.
.. [2] Gmehling, Jurgen, Barbel Kolbe, Michael Kleiber, and Jurgen Rarey.
Chemical Thermodynamics for Process Simulation. 1st edition. Weinheim:
Wiley-VCH, 2012. |
def resolve_object(self, object_arg_name, resolver):
"""
A helper decorator to resolve object instance from arguments (e.g. identity).
Example:
>>> @namespace.route('/<int:user_id>')
... class MyResource(Resource):
... @namespace.resolve_object(
... object_arg_name='user',
... resolver=lambda kwargs: User.query.get_or_404(kwargs.pop('user_id'))
... )
... def get(self, user):
... # user is a User instance here
"""
def decorator(func_or_class):
if isinstance(func_or_class, type):
# Handle Resource classes decoration
# pylint: disable=protected-access
func_or_class._apply_decorator_to_methods(decorator)
return func_or_class
@wraps(func_or_class)
def wrapper(*args, **kwargs):
kwargs[object_arg_name] = resolver(kwargs)
return func_or_class(*args, **kwargs)
return wrapper
return decorator | A helper decorator to resolve object instance from arguments (e.g. identity).
Example:
>>> @namespace.route('/<int:user_id>')
... class MyResource(Resource):
... @namespace.resolve_object(
... object_arg_name='user',
... resolver=lambda kwargs: User.query.get_or_404(kwargs.pop('user_id'))
... )
... def get(self, user):
... # user is a User instance here |
def _get(self, uri, params={}):
"""
HTTP GET function
:param uri: REST endpoint
:param params: optional HTTP params to pass to the endpoint
:return: list of results (usually a list of dicts)
Example:
ret = cli.get('/search', params={ 'q': 'example.org' })
"""
if not uri.startswith(self.remote):
uri = '{}{}'.format(self.remote, uri)
return self._make_request(uri, params) | HTTP GET function
:param uri: REST endpoint
:param params: optional HTTP params to pass to the endpoint
:return: list of results (usually a list of dicts)
Example:
ret = cli.get('/search', params={ 'q': 'example.org' }) |
def _parse_csv_col_rules(self):
"""
splits the CSV line of the current format and puts the parts into
local class variables - mainly for testing, though this is
not the best method long term. (TODO - fix this)
"""
self.cols = self.csv_line.split(',')
self.table = self.extract_col(0)
self.column = self.extract_col(1)
self.data_type = self.extract_col(2)
self.aikif_map = self.extract_col(3)
self.aikif_map_name = self.extract_col(4)
self.extract = self.extract_col(5)
self.format = self.extract_col(6)
self.where = self.extract_col(7)
self.index = self.extract_col(8) | splits the CSV line of the current format and puts the parts into
local class variables - mainly for testing, though this is
not the best method long term. (TODO - fix this) |
def _join(segments):
"""simply list by joining adjacent segments."""
new = []
start = segments[0][0]
end = segments[0][1]
for i in range(len(segments)-1):
if segments[i+1][0] != segments[i][1]:
new.append((start, end))
start = segments[i+1][0]
end = segments[i+1][1]
new.append((start, end))
return new | Simplify a list by joining adjacent segments. |
def timestr_mod24(timestr: str) -> int:
"""
Given a GTFS HH:MM:SS time string, return a timestring in the same
format but with the hours taken modulo 24.
"""
try:
hours, mins, secs = [int(x) for x in timestr.split(":")]
hours %= 24
result = f"{hours:02d}:{mins:02d}:{secs:02d}"
except:
result = None
return result | Given a GTFS HH:MM:SS time string, return a timestring in the same
format but with the hours taken modulo 24. |
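A quick behavioural sketch, assuming the function above is in scope (GTFS allows hour values above 23 for trips that run past midnight).
print(timestr_mod24("26:15:00"))   # '02:15:00' -- 26 hours wraps around to 02
print(timestr_mod24("08:05:30"))   # '08:05:30' -- already within a day, unchanged
print(timestr_mod24("bad input"))  # None -- parsing fails, so the except branch returns None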
def write_template(fn, lang="python"):
"""
Write language-specific script template to file.
Arguments:
- fn(``string``) path to save the template to
- lang('python', 'bash') which programming language
"""
with open(fn, "wb") as fh:
if lang == "python":
fh.write(PY_TEMPLATE)
elif lang == "bash":
fh.write(SH_TEMPLATE) | Write language-specific script template to file.
Arguments:
- fn(``string``) path to save the template to
- lang('python', 'bash') which programming language |
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(RenameRelation, self).fix_config(options)
opt = "name"
if opt not in options:
options[opt] = "newname"
if opt not in self.help:
self.help[opt] = "The new relation name to use (string)."
return options | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict |
def get_link_name (self, tag, attrs, attr):
"""Parse attrs for link name. Return name of link."""
if tag == 'a' and attr == 'href':
# Look for name only up to MAX_NAMELEN characters
data = self.parser.peek(MAX_NAMELEN)
data = data.decode(self.parser.encoding, "ignore")
name = linkname.href_name(data)
if not name:
name = attrs.get_true('title', u'')
elif tag == 'img':
name = attrs.get_true('alt', u'')
if not name:
name = attrs.get_true('title', u'')
else:
name = u""
return name | Parse attrs for link name. Return name of link. |
def memory_read16(self, addr, num_halfwords, zone=None):
"""Reads memory from the target system in units of 16-bits.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to read from
num_halfwords (int): number of half words to read
zone (str): memory zone to read from
Returns:
List of halfwords read from the target system.
Raises:
JLinkException: if memory could not be read
"""
return self.memory_read(addr, num_halfwords, zone=zone, nbits=16) | Reads memory from the target system in units of 16-bits.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to read from
num_halfwords (int): number of half words to read
zone (str): memory zone to read from
Returns:
List of halfwords read from the target system.
Raises:
JLinkException: if memory could not be read |
def cli(env, identifier, name, all, note):
"""Capture one or all disks from a virtual server to a SoftLayer image."""
vsi = SoftLayer.VSManager(env.client)
vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
capture = vsi.capture(vs_id, name, all, note)
table = formatting.KeyValueTable(['name', 'value'])
table.align['name'] = 'r'
table.align['value'] = 'l'
table.add_row(['vs_id', capture['guestId']])
table.add_row(['date', capture['createDate'][:10]])
table.add_row(['time', capture['createDate'][11:19]])
table.add_row(['transaction', formatting.transaction_status(capture)])
table.add_row(['transaction_id', capture['id']])
table.add_row(['all_disks', all])
env.fout(table) | Capture one or all disks from a virtual server to a SoftLayer image. |
def _get_position(self, position, prev=False):
"""Return the next/previous position or raise IndexError."""
if position == self.POSITION_LOADING:
if prev:
raise IndexError('Reached last position')
else:
return self._conversation.events[0].id_
else:
ev = self._conversation.next_event(position, prev=prev)
if ev is None:
if prev:
return self.POSITION_LOADING
else:
raise IndexError('Reached first position')
else:
return ev.id_ | Return the next/previous position or raise IndexError. |
def store(self, deferred_result):
"""
Store a EventualResult.
Return an integer, a unique identifier that can be used to retrieve
the object.
"""
self._counter += 1
self._stored[self._counter] = deferred_result
return self._counter | Store an EventualResult.
Return an integer, a unique identifier that can be used to retrieve
the object. |
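A tiny self-contained illustration of the identifier scheme, using a hypothetical _Store stand-in rather than the real class: each stored object gets the next integer, which later retrieves it.
class _Store:
    def __init__(self):
        self._counter = 0
        self._stored = {}
    def store(self, deferred_result):
        # same logic as above: bump the counter and use it as the key
        self._counter += 1
        self._stored[self._counter] = deferred_result
        return self._counter

registry = _Store()
uid = registry.store("some EventualResult")
print(uid, registry._stored[uid])  # 1 some EventualResult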
def mean_abs_tree_shap(model, data):
""" mean(|TreeExplainer|)
color = red_blue_circle(0.25)
linestyle = solid
"""
def f(X):
v = TreeExplainer(model).shap_values(X)
if isinstance(v, list):
return [np.tile(np.abs(sv).mean(0), (X.shape[0], 1)) for sv in v]
else:
return np.tile(np.abs(v).mean(0), (X.shape[0], 1))
return f | mean(|TreeExplainer|)
color = red_blue_circle(0.25)
linestyle = solid |
def changelist_view(self, request, extra_context=None):
""" Get object currently tracked and add a button to get back to it """
extra_context = extra_context or {}
if 'object' in request.GET.keys():
value = request.GET['object'].split(':')
content_type = get_object_or_404(
ContentType,
id=value[0],
)
tracked_object = get_object_or_404(
content_type.model_class(),
id=value[1],
)
extra_context['tracked_object'] = tracked_object
extra_context['tracked_object_opts'] = tracked_object._meta
return super(TrackingEventAdmin, self).changelist_view(
request, extra_context) | Get object currently tracked and add a button to get back to it |
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
url = self.get_redirect_target(resp)
while url:
prepared_request = req.copy()
# Update history and keep track of redirects.
# resp.history must ignore the original request in this loop
hist.append(resp)
resp.history = hist[1:]
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
self.rebuild_method(prepared_request, resp)
# https://github.com/requests/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
# https://github.com/requests/requests/issues/3490
purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
merge_cookies(prepared_request._cookies, self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
rewindable = (
prepared_request._body_position is not None and
('Content-Length' in headers or 'Transfer-Encoding' in headers)
)
# Attempt to rewind consumed file-like object.
if rewindable:
rewind_body(prepared_request)
# Override the original request.
req = prepared_request
if yield_requests:
yield req
else:
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
# extract redirect url, if any, for the next loop
url = self.get_redirect_target(resp)
yield resp | Receives a Response. Returns a generator of Responses or Requests. |
def _initialize(self, chain, length):
"""Create an SQL table.
"""
if self._getfunc is None:
self._getfunc = self.db.model._funs_to_tally[self.name]
# Determine size
try:
self._shape = np.shape(self._getfunc())
except TypeError:
self._shape = None
self._vstr = ', '.join(var_str(self._shape))
# If the table already exists, exit now.
if chain != 0:
return
# Create the variable name strings.
vstr = ', '.join(v + ' FLOAT' for v in var_str(self._shape))
query = """CREATE TABLE IF NOT EXISTS [%s]
(recid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
trace int(5), %s)""" % (self.name, vstr)
self.db.cur.execute(query) | Create an SQL table. |
def _read_execute_info(path, parents):
"""Read the ExecuteInfo.txt file and return the base directory."""
path = os.path.join(path, "StarCraft II/ExecuteInfo.txt")
if os.path.exists(path):
with open(path, "rb") as f: # Binary because the game appends a '\0' :(.
for line in f:
parts = [p.strip() for p in line.decode("utf-8").split("=")]
if len(parts) == 2 and parts[0] == "executable":
exec_path = parts[1].replace("\\", "/") # For windows compatibility.
for _ in range(parents):
exec_path = os.path.dirname(exec_path)
return exec_path | Read the ExecuteInfo.txt file and return the base directory. |
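For illustration (hypothetical path): if ExecuteInfo.txt contains "executable = C:\Program Files (x86)\StarCraft II\Versions\Base75689\SC2_x64.exe" and the caller passes parents=3, the backslashes are first replaced and the three os.path.dirname calls strip SC2_x64.exe, Base75689 and Versions, so the function returns "C:/Program Files (x86)/StarCraft II".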
def Clift(Re):
r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_ as described in [2]_.
.. math::
C_D = \left\{ \begin{array}{ll}
\frac{24}{Re} + \frac{3}{16} & \mbox{if $Re < 0.01$}\\
\frac{24}{Re}(1 + 0.1315Re^{0.82 - 0.05\log Re}) & \mbox{if $0.01 < Re < 20$}\\
\frac{24}{Re}(1 + 0.1935Re^{0.6305}) & \mbox{if $20 < Re < 260$}\\
10^{1.6435 - 1.1242\log Re + 0.1558[\log Re]^2} & \mbox{if $260 < Re < 1500$}\\
10^{-2.4571 + 2.5558\log Re - 0.9295[\log Re]^2 + 0.1049[\log Re]^3} & \mbox{if $1500 < Re < 12000$}\\
10^{-1.9181 + 0.6370\log Re - 0.0636[\log Re]^2} & \mbox{if $12000 < Re < 44000$}\\
10^{-4.3390 + 1.5809\log Re - 0.1546[\log Re]^2} & \mbox{if $44000 < Re < 338000$}\\
29.78 - 5.3\log Re & \mbox{if $338000 < Re < 400000$}\\
0.19\log Re - 0.49 & \mbox{if $400000 < Re < 1000000$}\end{array} \right.
Parameters
----------
Re : float
Particle Reynolds number of the sphere using the surrounding fluid
density and viscosity, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 1E6.
Examples
--------
>>> Clift(200)
0.7756342422322543
References
----------
.. [1] R. Clift, J.R. Grace, M.E. Weber, Bubbles, Drops, and Particles,
Academic, New York, 1978.
.. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045.
'''
if Re < 0.01:
return 24./Re + 3/16.
elif Re < 20:
return 24./Re*(1 + 0.1315*Re**(0.82 - 0.05*log10(Re)))
elif Re < 260:
return 24./Re*(1 + 0.1935*Re**(0.6305))
elif Re < 1500:
return 10**(1.6435 - 1.1242*log10(Re) + 0.1558*(log10(Re))**2)
elif Re < 12000:
return 10**(-2.4571 + 2.5558*log10(Re) - 0.9295*(log10(Re))**2 + 0.1049*log10(Re)**3)
elif Re < 44000:
return 10**(-1.9181 + 0.6370*log10(Re) - 0.0636*(log10(Re))**2)
elif Re < 338000:
return 10**(-4.3390 + 1.5809*log10(Re) - 0.1546*(log10(Re))**2)
elif Re < 400000:
return 29.78 - 5.3*log10(Re)
else:
return 0.19*log10(Re) - 0.49 | r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_ as described in [2]_.
.. math::
C_D = \left\{ \begin{array}{ll}
\frac{24}{Re} + \frac{3}{16} & \mbox{if $Re < 0.01$}\\
\frac{24}{Re}(1 + 0.1315Re^{0.82 - 0.05\log Re}) & \mbox{if $0.01 < Re < 20$}\\
\frac{24}{Re}(1 + 0.1935Re^{0.6305}) & \mbox{if $20 < Re < 260$}\\
10^{1.6435 - 1.1242\log Re + 0.1558[\log Re]^2} & \mbox{if $260 < Re < 1500$}\\
10^{-2.4571 + 2.5558\log Re - 0.9295[\log Re]^2 + 0.1049[\log Re]^3} & \mbox{if $1500 < Re < 12000$}\\
10^{-1.9181 + 0.6370\log Re - 0.0636[\log Re]^2} & \mbox{if $12000 < Re < 44000$}\\
10^{-4.3390 + 1.5809\log Re - 0.1546[\log Re]^2} & \mbox{if $44000 < Re < 338000$}\\
29.78 - 5.3\log Re & \mbox{if $338000 < Re < 400000$}\\
0.19\log Re - 0.49 & \mbox{if $400000 < Re < 1000000$}\end{array} \right.
Parameters
----------
Re : float
Particle Reynolds number of the sphere using the surrounding fluid
density and viscosity, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 1E6.
Examples
--------
>>> Clift(200)
0.7756342422322543
References
----------
.. [1] R. Clift, J.R. Grace, M.E. Weber, Bubbles, Drops, and Particles,
Academic, New York, 1978.
.. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045. |
def list_clusters(self):
"""List the clusters in this instance.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_list_clusters_on_instance]
:end-before: [END bigtable_list_clusters_on_instance]
:rtype: tuple
:returns:
(clusters, failed_locations), where 'clusters' is list of
:class:`google.cloud.bigtable.instance.Cluster`, and
'failed_locations' is a list of locations which could not
be resolved.
"""
resp = self._client.instance_admin_client.list_clusters(self.name)
clusters = [Cluster.from_pb(cluster, self) for cluster in resp.clusters]
return clusters, resp.failed_locations | List the clusters in this instance.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_list_clusters_on_instance]
:end-before: [END bigtable_list_clusters_on_instance]
:rtype: tuple
:returns:
(clusters, failed_locations), where 'clusters' is list of
:class:`google.cloud.bigtable.instance.Cluster`, and
'failed_locations' is a list of locations which could not
be resolved. |
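A hedged usage sketch; the project and instance ids are placeholders, and an admin-enabled client is assumed.
from google.cloud import bigtable

client = bigtable.Client(project="my-project", admin=True)
instance = client.instance("my-instance")
clusters, failed_locations = instance.list_clusters()
for cluster in clusters:
    print(cluster.name)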
def scaleToSeconds(requestContext, seriesList, seconds):
"""
Takes one metric or a wildcard seriesList and returns "value per seconds",
where seconds is the last argument to this function.
Useful in conjunction with the derivative or integral functions if you want
to normalize the result to a known resolution for arbitrary retentions.
"""
for series in seriesList:
series.name = "scaleToSeconds(%s,%d)" % (series.name, seconds)
series.pathExpression = series.name
factor = seconds * 1.0 / series.step
for i, value in enumerate(series):
series[i] = safeMul(value, factor)
return seriesList | Takes one metric or a wildcard seriesList and returns "value per seconds",
where seconds is the last argument to this function.
Useful in conjunction with the derivative or integral functions if you want
to normalize the result to a known resolution for arbitrary retentions. |
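Worked example (hypothetical numbers): for a series with step=60 (one datapoint per minute), calling scaleToSeconds with seconds=1 gives factor = 1/60, so a value of 120 units per minute becomes 2 units per second; with seconds=3600 the factor is 60 and the same value becomes 7200 units per hour.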
def get_pull_request_query(self, queries, repository_id, project=None):
"""GetPullRequestQuery.
[Preview API] This API is used to find what pull requests are related to a given commit. It can be used to either find the pull request that created a particular merge commit or it can be used to find all pull requests that have ever merged a particular commit. The input is a list of queries which each contain a list of commits. For each commit that you search against, you will get back a dictionary of commit -> pull requests.
:param :class:`<GitPullRequestQuery> <azure.devops.v5_1.git.models.GitPullRequestQuery>` queries: The list of queries to perform.
:param str repository_id: ID of the repository.
:param str project: Project ID or project name
:rtype: :class:`<GitPullRequestQuery> <azure.devops.v5_1.git.models.GitPullRequestQuery>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
content = self._serialize.body(queries, 'GitPullRequestQuery')
response = self._send(http_method='POST',
location_id='b3a6eebe-9cf0-49ea-b6cb-1a4c5f5007b0',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('GitPullRequestQuery', response) | GetPullRequestQuery.
[Preview API] This API is used to find what pull requests are related to a given commit. It can be used to either find the pull request that created a particular merge commit or it can be used to find all pull requests that have ever merged a particular commit. The input is a list of queries which each contain a list of commits. For each commit that you search against, you will get back a dictionary of commit -> pull requests.
:param :class:`<GitPullRequestQuery> <azure.devops.v5_1.git.models.GitPullRequestQuery>` queries: The list of queries to perform.
:param str repository_id: ID of the repository.
:param str project: Project ID or project name
:rtype: :class:`<GitPullRequestQuery> <azure.devops.v5_1.git.models.GitPullRequestQuery>` |
def punchcard(self, branch='master', limit=None, days=None, by=None, normalize=None, ignore_globs=None,
include_globs=None):
"""
Returns a pandas DataFrame containing all of the data for a punchcard.
* day_of_week
* hour_of_day
* author / committer
* lines
* insertions
* deletions
* net
:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param days: (optional, default=None) number of days to return, if limit is None
:param by: (optional, default=None) agg by options, None for no aggregation (just a high level punchcard), or 'committer', 'author'
:param normalize: (optional, default=None) if an integer, returns the data normalized to max value of that (for plotting)
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
"""
ch = self.commit_history(
branch=branch,
limit=limit,
days=days,
ignore_globs=ignore_globs,
include_globs=include_globs
)
# add in the date fields
ch['day_of_week'] = ch.index.map(lambda x: x.weekday())
ch['hour_of_day'] = ch.index.map(lambda x: x.hour)
aggs = ['hour_of_day', 'day_of_week']
if by is not None:
aggs.append(by)
punch_card = ch.groupby(aggs).agg({
'lines': np.sum,
'insertions': np.sum,
'deletions': np.sum,
'net': np.sum
})
punch_card.reset_index(inplace=True)
# normalize all cols
if normalize is not None:
for col in ['lines', 'insertions', 'deletions', 'net']:
punch_card[col] = (punch_card[col] / punch_card[col].sum()) * normalize
return punch_card | Returns a pandas DataFrame containing all of the data for a punchcard.
* day_of_week
* hour_of_day
* author / committer
* lines
* insertions
* deletions
* net
:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param days: (optional, default=None) number of days to return, if limit is None
:param by: (optional, default=None) agg by options, None for no aggregation (just a high level punchcard), or 'committer', 'author'
:param normalize: (optional, default=None) if an integer, returns the data normalized to max value of that (for plotting)
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame |
def __draw_cmp(self, obj1, obj2):
"""Defines how our drawable objects should be sorted"""
if obj1.draw_order > obj2.draw_order:
return 1
elif obj1.draw_order < obj2.draw_order:
return -1
else:
return 0 | Defines how our drawable objects should be sorted |
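This is a Python 2 style cmp function; under Python 3, where sort() no longer accepts a cmp argument, it would typically be wrapped with functools.cmp_to_key or replaced by a key function. A self-contained sketch with a hypothetical Sprite class:
from functools import cmp_to_key

def draw_cmp(obj1, obj2):
    # same ordering rule as __draw_cmp above, expressed without Python 2's cmp()
    return (obj1.draw_order > obj2.draw_order) - (obj1.draw_order < obj2.draw_order)

class Sprite:
    def __init__(self, draw_order):
        self.draw_order = draw_order

sprites = [Sprite(3), Sprite(1), Sprite(2)]
sprites.sort(key=cmp_to_key(draw_cmp))    # wrap the comparator for Python 3
sprites.sort(key=lambda s: s.draw_order)  # or, more idiomatically, sort on the attribute
print([s.draw_order for s in sprites])    # [1, 2, 3]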
def destroy_iam(app='', env='dev', **_):
"""Destroy IAM Resources.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment, i.e. dev, stage, prod.
Returns:
True upon successful completion.
"""
session = boto3.Session(profile_name=env)
client = session.client('iam')
generated = get_details(env=env, app=app)
generated_iam = generated.iam()
app_details = collections.namedtuple('AppDetails', generated_iam.keys())
details = app_details(**generated_iam)
LOG.debug('Application details: %s', details)
resource_action(
client,
action='remove_user_from_group',
log_format='Removed user from group: %(UserName)s ~> %(GroupName)s',
GroupName=details.group,
UserName=details.user)
resource_action(client, action='delete_user', log_format='Destroyed user: %(UserName)s', UserName=details.user)
resource_action(client, action='delete_group', log_format='Destroyed group: %(GroupName)s', GroupName=details.group)
resource_action(
client,
action='remove_role_from_instance_profile',
log_format='Destroyed Instance Profile from Role: '
'%(InstanceProfileName)s ~> %(RoleName)s',
InstanceProfileName=details.profile,
RoleName=details.role)
resource_action(
client,
action='delete_instance_profile',
log_format='Destroyed Instance Profile: %(InstanceProfileName)s',
InstanceProfileName=details.profile)
role_policies = []
try:
role_policies = resource_action(
client,
action='list_role_policies',
log_format='Found Role Policies for %(RoleName)s.',
RoleName=details.role)['PolicyNames']
except TypeError:
LOG.info('Role %s not found.', details.role)
for policy in role_policies:
resource_action(
client,
action='delete_role_policy',
log_format='Removed Inline Policy from Role: '
'%(PolicyName)s ~> %(RoleName)s',
RoleName=details.role,
PolicyName=policy)
attached_role_policies = []
try:
attached_role_policies = resource_action(
client,
action='list_attached_role_policies',
log_format='Found attached Role Polices for %(RoleName)s.',
RoleName=details.role)['AttachedPolicies']
except TypeError:
LOG.info('Role %s not found.', details.role)
for policy in attached_role_policies:
resource_action(
client,
action='detach_role_policy',
log_format='Detached Policy from Role: '
'%(PolicyArn)s ~> %(RoleName)s',
RoleName=details.role,
PolicyArn=policy['PolicyArn'])
resource_action(client, action='delete_role', log_format='Destroyed Role: %(RoleName)s', RoleName=details.role) | Destroy IAM Resources.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment, i.e. dev, stage, prod.
Returns:
True upon successful completion. |
def undersampling(X, y, cost_mat=None, per=0.5):
"""Under-sampling.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
y : array-like of shape = [n_samples]
Ground truth (correct) labels.
cost_mat : array-like of shape = [n_samples, 4], optional (default=None)
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
per: float, optional (default = 0.5)
Percentage of the minority class in the under-sampled data
"""
n_samples = X.shape[0]
#TODO: allow y different from (0, 1)
num_y1 = y.sum()
num_y0 = n_samples - num_y1
filter_rand = np.random.rand(int(num_y1 + num_y0))
#TODO: rewrite in a more readable way
if num_y1 < num_y0:
num_y0_new = num_y1 * 1.0 / per - num_y1
num_y0_new_per = num_y0_new * 1.0 / num_y0
filter_0 = np.logical_and(y == 0, filter_rand <= num_y0_new_per)
filter_ = np.nonzero(np.logical_or(y == 1, filter_0))[0]
else:
num_y1_new = num_y0 * 1.0 / per - num_y0
num_y1_new_per = num_y1_new * 1.0 / num_y1
filter_1 = np.logical_and(y == 1, filter_rand <= num_y1_new_per)
filter_ = np.nonzero(np.logical_or(y == 0, filter_1))[0]
X_u = X[filter_, :]
y_u = y[filter_]
if not cost_mat is None:
cost_mat_u = cost_mat[filter_, :]
return X_u, y_u, cost_mat_u
else:
return X_u, y_u | Under-sampling.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
y : array-like of shape = [n_samples]
Ground truth (correct) labels.
cost_mat : array-like of shape = [n_samples, 4], optional (default=None)
Cost matrix of the classification problem
Where the columns represent the costs of: false positives, false negatives,
true positives and true negatives, for each example.
per: float, optional (default = 0.5)
Percentage of the minority class in the under-sampled data |
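A minimal usage sketch with made-up data; it assumes the undersampling function above is in scope and numpy is imported as np, as the module expects.
import numpy as np

X = np.random.rand(1000, 3)
y = np.concatenate([np.ones(100), np.zeros(900)])   # 10% minority class
X_u, y_u = undersampling(X, y, per=0.5)
print(X_u.shape, round(y_u.mean(), 2))  # roughly (200, 3) and 0.5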
def respond_from_question(self, question, user_question, importance):
"""Copy the answer given in `question` to the logged in user's
profile.
:param question: A :class:`~.Question` instance to copy.
:param user_question: An instance of :class:`~.UserQuestion` that
corresponds to the same question as `question`.
This is needed to retrieve the answer id from
the question text answer on question.
:param importance: The importance to assign to the response to the
answered question.
"""
option_index = user_question.answer_text_to_option[
question.their_answer
].id
self.respond(question.id, [option_index], [option_index], importance) | Copy the answer given in `question` to the logged in user's
profile.
:param question: A :class:`~.Question` instance to copy.
:param user_question: An instance of :class:`~.UserQuestion` that
corresponds to the same question as `question`.
This is needed to retrieve the answer id from
the question text answer on question.
:param importance: The importance to assign to the response to the
answered question. |
def unpublish(namespace, name, version, registry=None):
''' Try to unpublish a recently published version. Return any errors that
occur.
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/versions/%s' % (
registry,
namespace,
name,
version
)
headers = _headersForRegistry(registry)
response = requests.delete(url, headers=headers)
response.raise_for_status()
return None | Try to unpublish a recently published version. Return any errors that
occur. |
def solarzenithangle(time: datetime, glat: float, glon: float, alt_m: float) -> tuple:
"""
Input:
time: scalar or array of datetime
"""
time = totime(time)
obs = EarthLocation(lat=glat*u.deg, lon=glon*u.deg, height=alt_m*u.m)
times = Time(time, scale='ut1')
sun = get_sun(times)
sunobs = sun.transform_to(AltAz(obstime=times, location=obs))
return 90 - sunobs.alt.degree, sun, sunobs | Input:
time: scalar or array of datetime |
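A usage sketch, assuming the astropy helpers the function relies on are importable and the function itself is in scope; the location and date are arbitrary examples.
from datetime import datetime

sza, sun, sunobs = solarzenithangle(
    datetime(2020, 6, 21, 18, 0, 0),   # roughly local noon in Colorado, given in UTC
    glat=40.0, glon=-105.0, alt_m=1600.0)
print(sza)  # solar zenith angle in degrees: 0 at the zenith, 90 at the horizon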
def is_ancestor_of_bin(self, id_, bin_id):
"""Tests if an ``Id`` is an ancestor of a bin.
arg: id (osid.id.Id): an ``Id``
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if this ``id`` is an ancestor of
``bin_id,`` ``false`` otherwise
raise: NotFound - ``bin_id`` is not found
raise: NullArgument - ``id`` or ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_ancestor_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=bin_id)
return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=bin_id) | Tests if an ``Id`` is an ancestor of a bin.
arg: id (osid.id.Id): an ``Id``
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if this ``id`` is an ancestor of
``bin_id,`` ``false`` otherwise
raise: NotFound - ``bin_id`` is not found
raise: NullArgument - ``id`` or ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``. |
def report_question(self, concern, pub_name, ext_name, question_id):
"""ReportQuestion.
[Preview API] Flags a concern with an existing question for an extension.
:param :class:`<Concern> <azure.devops.v5_1.gallery.models.Concern>` concern: User reported concern with a question for the extension.
:param str pub_name: Name of the publisher who published the extension.
:param str ext_name: Name of the extension.
:param long question_id: Identifier of the question to be updated for the extension.
:rtype: :class:`<Concern> <azure.devops.v5_1.gallery.models.Concern>`
"""
route_values = {}
if pub_name is not None:
route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str')
if ext_name is not None:
route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str')
if question_id is not None:
route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
content = self._serialize.body(concern, 'Concern')
response = self._send(http_method='POST',
location_id='784910cd-254a-494d-898b-0728549b2f10',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Concern', response) | ReportQuestion.
[Preview API] Flags a concern with an existing question for an extension.
:param :class:`<Concern> <azure.devops.v5_1.gallery.models.Concern>` concern: User reported concern with a question for the extension.
:param str pub_name: Name of the publisher who published the extension.
:param str ext_name: Name of the extension.
:param long question_id: Identifier of the question to be updated for the extension.
:rtype: :class:`<Concern> <azure.devops.v5_1.gallery.models.Concern>` |
def spearmanr(x, y):
"""
Michiel de Hoon's library (available in BioPython or standalone as
PyCluster) returns Spearman rsb which does include a tie correction.
>>> x = [5.05, 6.75, 3.21, 2.66]
>>> y = [1.65, 26.5, -5.93, 7.96]
>>> z = [1.65, 2.64, 2.64, 6.95]
>>> round(spearmanr(x, y), 4)
0.4
>>> round(spearmanr(x, z), 4)
-0.6325
"""
from scipy import stats
if not x or not y:
return 0
corr, pvalue = stats.spearmanr(x, y)
return corr | Michiel de Hoon's library (available in BioPython or standalone as
PyCluster) returns Spearman rsb which does include a tie correction.
>>> x = [5.05, 6.75, 3.21, 2.66]
>>> y = [1.65, 26.5, -5.93, 7.96]
>>> z = [1.65, 2.64, 2.64, 6.95]
>>> round(spearmanr(x, y), 4)
0.4
>>> round(spearmanr(x, z), 4)
-0.6325 |
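Since the wrapper delegates to scipy, the doctest values can be reproduced directly with scipy.stats.spearmanr, which applies the tie correction the docstring mentions:

from scipy import stats

x = [5.05, 6.75, 3.21, 2.66]
y = [1.65, 26.5, -5.93, 7.96]
z = [1.65, 2.64, 2.64, 6.95]          # z contains a tie
corr_xy, _ = stats.spearmanr(x, y)
corr_xz, _ = stats.spearmanr(x, z)
print(round(corr_xy, 4), round(corr_xz, 4))   # 0.4 -0.6325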
def form_user_label_matrix(user_twitter_list_keywords_gen, id_to_node, max_number_of_labels):
"""
Forms the user-label matrix to be used in multi-label classification.
Input: - user_twitter_list_keywords_gen:
- id_to_node: A Twitter id to node map as a python dictionary.
Outputs: - user_label_matrix: A user-to-label matrix in scipy sparse matrix format.
- annotated_nodes: A numpy array containing graph nodes.
- label_to_lemma: A python dictionary that maps a numerical label to a string topic lemma.
- lemma_to_keyword: A python dictionary that maps a lemma to the original keyword.
"""
user_label_matrix, annotated_nodes, label_to_lemma, node_to_lemma_tokeywordbag = form_user_term_matrix(user_twitter_list_keywords_gen,
id_to_node,
None)
# write_terms_and_frequencies("/home/georgerizos/Documents/term_matrix.txt", user_label_matrix, label_to_lemma)
user_label_matrix, annotated_nodes, label_to_lemma = filter_user_term_matrix(user_label_matrix,
annotated_nodes,
label_to_lemma,
max_number_of_labels)
# write_terms_and_frequencies("/home/georgerizos/Documents/label_matrix.txt", user_label_matrix, label_to_lemma)
lemma_to_keyword = form_lemma_tokeyword_map(annotated_nodes, node_to_lemma_tokeywordbag)
return user_label_matrix, annotated_nodes, label_to_lemma, lemma_to_keyword | Forms the user-label matrix to be used in multi-label classification.
Input: - user_twitter_list_keywords_gen:
- id_to_node: A Twitter id to node map as a python dictionary.
Outputs: - user_label_matrix: A user-to-label matrix in scipy sparse matrix format.
- annotated_nodes: A numpy array containing graph nodes.
- label_to_lemma: A python dictionary that maps a numerical label to a string topic lemma.
- lemma_to_keyword: A python dictionary that maps a lemma to the original keyword. |
def _mapping_to_tuple_pairs(d):
"""
Convert a mapping object (such as a dictionary) to tuple pairs,
using its keys and values to generate the pairs and then generating
all possible combinations between those
e.g. {1: (1,2,3)} -> (((1, 1),), ((1, 2),), ((1, 3),))
"""
# order the keys, this will prevent different implementations of Python,
# return different results from the same dictionary since the order of
# iteration depends on it
t = []
ord_keys = sorted(d.keys())
for k in ord_keys:
t.append(_product(k, d[k]))
return tuple(product(*t)) | Convert a mapping object (such as a dictionary) to tuple pairs,
using its keys and values to generate the pairs and then generating
all possible combinations between those
e.g. {1: (1,2,3)} -> (((1, 1),), ((1, 2),), ((1, 3),)) |
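The `_product` helper is not shown in this snippet; a self-contained sketch of the same idea using only itertools reproduces the docstring example:

from itertools import product

def mapping_to_tuple_pairs(d):
    # pair every key with each of its values, then take the cartesian product
    # over keys (sorted, so the result is deterministic across runs)
    pairs_per_key = [tuple((k, v) for v in d[k]) for k in sorted(d)]
    return tuple(product(*pairs_per_key))

print(mapping_to_tuple_pairs({1: (1, 2, 3)}))   # (((1, 1),), ((1, 2),), ((1, 3),))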
def set_record(self, record, **kw):
"""
check the record is valid and set keys in the dict
parameters
----------
record: string
Dict representing a record or a string representing a FITS header
card
"""
if isstring(record):
card = FITSCard(record)
self.update(card)
self.verify()
else:
if isinstance(record, FITSRecord):
self.update(record)
elif isinstance(record, dict):
if 'name' in record and 'value' in record:
self.update(record)
elif 'card_string' in record:
self.set_record(record['card_string'])
else:
raise ValueError('record must have name,value fields '
'or a card_string field')
else:
raise ValueError("record must be a string card or "
"dictionary or FITSRecord") | check the record is valid and set keys in the dict
parameters
----------
record: string
Dict representing a record or a string representing a FITS header
card |
def assert_strong_password(username, password, old_password=None):
"""Raises ValueError if the password isn't strong.
Returns the password otherwise."""
# test the length
try:
minlength = settings.MIN_PASSWORD_LENGTH
except AttributeError:
minlength = 12
if len(password) < minlength:
raise ValueError(
"Password must be at least %s characters long" % minlength)
if username is not None and username in password:
raise ValueError("Password contains username")
return _assert_password(password, old_password) | Raises ValueError if the password isn't strong.
Returns the password otherwise. |
def libvlc_audio_set_format(mp, format, rate, channels):
'''Set decoded audio format.
This only works in combination with L{libvlc_audio_set_callbacks}(),
and is mutually exclusive with L{libvlc_audio_set_format_callbacks}().
@param mp: the media player.
@param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32").
@param rate: sample rate (expressed in Hz).
@param channels: channels count.
@version: LibVLC 2.0.0 or later.
'''
f = _Cfunctions.get('libvlc_audio_set_format', None) or \
_Cfunction('libvlc_audio_set_format', ((1,), (1,), (1,), (1,),), None,
None, MediaPlayer, ctypes.c_char_p, ctypes.c_uint, ctypes.c_uint)
return f(mp, format, rate, channels) | Set decoded audio format.
This only works in combination with L{libvlc_audio_set_callbacks}(),
and is mutually exclusive with L{libvlc_audio_set_format_callbacks}().
@param mp: the media player.
@param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32").
@param rate: sample rate (expressed in Hz).
@param channels: channels count.
@version: LibVLC 2.0.0 or later. |
def event_update_status(self, event_id, status, scores=[], account=None, **kwargs):
""" Update the status of an event. This needs to be **proposed**.
:param str event_id: Id of the event to update
:param str status: Event status
:param list scores: List of strings that represent the scores of a
match (defaults to [])
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
"""
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account)
event = Event(event_id)
# Do not try to update the status if it doesn't change on the chain
if event["status"] == status:
status = None
op = operations.Event_update_status(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"event_id": event["id"],
"status": status,
"scores": scores,
"prefix": self.prefix,
}
)
return self.finalizeOp(op, account["name"], "active", **kwargs) | Update the status of an event. This needs to be **proposed**.
:param str event_id: Id of the event to update
:param str status: Event status
:param list scores: List of strings that represent the scores of a
match (defaults to [])
:param str account: (optional) the account to allow access
to (defaults to ``default_account``) |
def _get_first_urn(self, urn):
""" Provisional route for GetFirstUrn request
:param urn: URN to filter the resource
:param inv: Inventory Identifier
:return: GetFirstUrn response
"""
urn = URN(urn)
subreference = None
textId = urn.upTo(URN.NO_PASSAGE)
if urn.reference is not None:
subreference = str(urn.reference)
firstId = self.resolver.getTextualNode(textId=textId, subreference=subreference).firstId
r = render_template(
"cts/GetFirstUrn.xml",
firstId=firstId,
full_urn=textId,
request_urn=str(urn)
)
return r, 200, {"content-type": "application/xml"} | Provisional route for GetFirstUrn request
:param urn: URN to filter the resource
:param inv: Inventory Identifier
:return: GetFirstUrn response |
def _startMqtt(self):
"""
The client start method. Starts the thread for the MQTT Client
and publishes the connected message.
"""
LOGGER.info('Connecting to MQTT... {}:{}'.format(self._server, self._port))
try:
# self._mqttc.connect_async(str(self._server), int(self._port), 10)
self._mqttc.connect_async('{}'.format(self._server), int(self._port), 10)
self._mqttc.loop_forever()
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
LOGGER.error("MQTT Connection error: {}".format(message), exc_info=True) | The client start method. Starts the thread for the MQTT Client
and publishes the connected message. |
def get(self, request, provider=None):
"""prepare the social friend model"""
# Get the social auth connections
if USING_ALLAUTH:
self.social_auths = request.user.socialaccount_set.all()
else:
self.social_auths = request.user.social_auth.all()
self.social_friend_lists = []
# if the user did not connect any social accounts, no need to continue
if self.social_auths.count() == 0:
if REDIRECT_IF_NO_ACCOUNT:
return HttpResponseRedirect(REDIRECT_URL)
return super(FriendListView, self).get(request)
# for each social network, get or create social_friend_list
self.social_friend_lists = SocialFriendList.objects.get_or_create_with_social_auths(self.social_auths)
return super(FriendListView, self).get(request) | prepare the social friend model |
def rates_angles(fk_candidate_observations):
"""
:param fk_candidate_observations: name of the fk*reals.astrom file to check against Object.planted
"""
detections = fk_candidate_observations.get_sources()
for detection in detections:
measures = detection.get_readings()
for measure in measures:
    pass  # per-measure processing not included in this snippet
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--astrom-filename', default=None, help="Give the astrom file directly instead of looking-up "
"using the field/ccd naming scheme.")
parser.add_argument('--reals', action='store_true', default=False)
parser.add_argument('--type', choices=['o', 'p', 's'], help="Which type of image.", default='s')
parser.add_argument('--measure3', default='vos:OSSOS/measure3/2013B-L_redo/')
parser.add_argument('--dbimages', default=None)
parser.add_argument('--dry-run', action='store_true', default=False)
parser.add_argument('--force', action='store_true', default=False)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
prefix = 'fk'
ext = args.reals and 'reals' or 'cands'
storage.MEASURE3 = args.measure3
if args.dbimages is not None:
storage.DBIMAGES = args.dbimages
astrom.DATASET_ROOT = args.dbimages
astrom_uri = storage.get_cands_uri(args.field,
ccd=args.ccd,
version=args.type,
prefix=prefix,
ext="measure3.{}.astrom".format(ext))
if args.astrom_filename is None:
astrom_filename = os.path.basename(astrom_uri)
else:
astrom_filename = args.astrom_filename
if not os.access(astrom_filename, os.F_OK):
astrom_filename = os.path.dirname(astrom_uri) + "/" + astrom_filename
# Load the list of astrometric observations that will be looked at.
fk_candidate_observations = astrom.parse(astrom_filename) | :param fk_candidate_observations: name of the fk*reals.astrom file to check against Object.planted |
def keep_mask(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the non-important features set to their mean.
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features for each test explanation
X_test_tmp = X_test.copy()
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
mean_vals = X_train.mean(0)
for i in range(len(y_test)):
if nkeep[i] < X_test.shape[1]:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_test_tmp[i,ordering[nkeep[i]:]] = mean_vals[ordering[nkeep[i]:]]
yp_masked_test = trained_model.predict(X_test_tmp)
return metric(y_test, yp_masked_test) | The model is revaluated for each test sample with the non-important features set to their mean. |
def logpdf(self, mu):
"""
Log PDF for t prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- log(p(mu))
"""
if self.transform is not None:
mu = self.transform(mu)
return ss.t.logpdf(mu, df=self.df0, loc=self.loc0, scale=self.scale0) | Log PDF for t prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- log(p(mu)) |
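Stripped of the class machinery, evaluating this prior is a single scipy call; for example with df=3, loc=0, scale=1:

from scipy import stats

# log p(mu) for a Student-t prior with 3 degrees of freedom, location 0, scale 1
print(stats.t.logpdf(0.5, df=3, loc=0.0, scale=1.0))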
def linear_elasticity(grid, spacing=None, E=1e5, nu=0.3, format=None):
"""Linear elasticity problem discretizes with Q1 finite elements on a regular rectangular grid.
Parameters
----------
grid : tuple
length 2 tuple of grid sizes, e.g. (10, 10)
spacing : tuple
length 2 tuple of grid spacings, e.g. (1.0, 0.1)
E : float
Young's modulus
nu : float
Poisson's ratio
format : string
Format of the returned sparse matrix (eg. 'csr', 'bsr', etc.)
Returns
-------
A : csr_matrix
FE Q1 stiffness matrix
B : array
rigid body modes
See Also
--------
linear_elasticity_p1
Notes
-----
- only 2d for now
Examples
--------
>>> from pyamg.gallery import linear_elasticity
>>> A, B = linear_elasticity((4, 4))
References
----------
.. [1] J. Alberty, C. Carstensen, S. A. Funken, and R. Klose
"Matlab implementation of the finite element method in elasticity"
Computing, Volume 69, Issue 3 (November 2002) Pages: 239 - 263
http://www.math.hu-berlin.de/~cc/
"""
if len(grid) == 2:
return q12d(grid, spacing=spacing, E=E, nu=nu, format=format)
else:
raise NotImplementedError('no support for grid=%s' % str(grid)) | Linear elasticity problem discretized with Q1 finite elements on a regular rectangular grid.
Parameters
----------
grid : tuple
length 2 tuple of grid sizes, e.g. (10, 10)
spacing : tuple
length 2 tuple of grid spacings, e.g. (1.0, 0.1)
E : float
Young's modulus
nu : float
Poisson's ratio
format : string
Format of the returned sparse matrix (eg. 'csr', 'bsr', etc.)
Returns
-------
A : csr_matrix
FE Q1 stiffness matrix
B : array
rigid body modes
See Also
--------
linear_elasticity_p1
Notes
-----
- only 2d for now
Examples
--------
>>> from pyamg.gallery import linear_elasticity
>>> A, B = linear_elasticity((4, 4))
References
----------
.. [1] J. Alberty, C. Carstensen, S. A. Funken, and R. Klose
"Matlab implementation of the finite element method in elasticity"
Computing, Volume 69, Issue 3 (November 2002) Pages: 239 - 263
http://www.math.hu-berlin.de/~cc/ |
def get_meta_image_url(request, image):
"""
Resize an image for metadata tags, and return an absolute URL to it.
"""
rendition = image.get_rendition(filter='original')
return request.build_absolute_uri(rendition.url) | Resize an image for metadata tags, and return an absolute URL to it. |
def StartAFF4Flow(args=None,
runner_args=None,
parent_flow=None,
sync=True,
token=None,
**kwargs):
"""The main factory function for creating and executing a new flow.
Args:
args: An arg protocol buffer which is an instance of the required flow's
args_type class attribute.
runner_args: an instance of FlowRunnerArgs() protocol buffer which is used
to initialize the runner for this flow.
parent_flow: A parent flow or None if this is a top level flow.
sync: If True, the Start method of this flow will be called inline.
Otherwise we schedule the starting of this flow on another worker.
token: Security credentials token identifying the user.
**kwargs: If args or runner_args are not specified, we construct these
protobufs from these keywords.
Returns:
the session id of the flow.
Raises:
RuntimeError: Unknown or invalid parameters were provided.
"""
# Build the runner args from the keywords.
if runner_args is None:
runner_args = rdf_flow_runner.FlowRunnerArgs()
FilterArgsFromSemanticProtobuf(runner_args, kwargs)
# Is the required flow a known flow?
try:
flow_cls = registry.AFF4FlowRegistry.FlowClassByName(runner_args.flow_name)
except ValueError:
stats_collector_instance.Get().IncrementCounter(
"grr_flow_invalid_flow_count")
raise RuntimeError("Unable to locate flow %s" % runner_args.flow_name)
# If no token is specified, raise.
if not token:
raise access_control.UnauthorizedAccess("A token must be specified.")
# For the flow itself we use a supervisor token.
token = token.SetUID()
# Extend the expiry time of this token indefinitely. Python on Windows only
# supports dates up to the year 3000.
token.expiry = rdfvalue.RDFDatetime.FromHumanReadable("2997-01-01")
if flow_cls.category and not runner_args.client_id:
raise RuntimeError("Flow with category (user-visible flow) has to be "
"started on a client, but runner_args.client_id "
"is missing.")
# We create an anonymous AFF4 object first, The runner will then generate
# the appropriate URN.
flow_obj = aff4.FACTORY.Create(None, flow_cls, token=token)
# Now parse the flow args into the new object from the keywords.
if args is None:
args = flow_obj.args_type()
FilterArgsFromSemanticProtobuf(args, kwargs)
# Check that the flow args are valid.
args.Validate()
# Store the flow args.
flow_obj.args = args
flow_obj.runner_args = runner_args
# At this point we should exhaust all the keyword args. If any are left
# over, we do not know what to do with them so raise.
if kwargs:
raise type_info.UnknownArg("Unknown parameters to StartAFF4Flow: %s" %
kwargs)
# Create a flow runner to run this flow with.
if parent_flow:
parent_runner = parent_flow.runner
else:
parent_runner = None
runner = flow_obj.CreateRunner(
parent_runner=parent_runner, runner_args=runner_args)
logging.info(u"Scheduling %s(%s) on %s", flow_obj.urn, runner_args.flow_name,
runner_args.client_id)
if sync:
# Just run the first state inline. NOTE: Running synchronously means
# that this runs on the thread that starts the flow. The advantage is
# that the Start method can raise any errors immediately.
flow_obj.Start()
else:
# Running Asynchronously: Schedule the start method on another worker.
runner.CallState(next_state="Start")
# The flow does not need to actually remain running.
if not flow_obj.outstanding_requests:
flow_obj.Terminate()
flow_obj.Close()
# Publish an audit event, only for top level flows.
if parent_flow is None:
events.Events.PublishEvent(
"Audit",
rdf_events.AuditEvent(
user=token.username,
action="RUN_FLOW",
flow_name=runner_args.flow_name,
urn=flow_obj.urn,
client=runner_args.client_id),
token=token)
return flow_obj.urn | The main factory function for creating and executing a new flow.
Args:
args: An arg protocol buffer which is an instance of the required flow's
args_type class attribute.
runner_args: an instance of FlowRunnerArgs() protocol buffer which is used
to initialize the runner for this flow.
parent_flow: A parent flow or None if this is a top level flow.
sync: If True, the Start method of this flow will be called inline.
Otherwise we schedule the starting of this flow on another worker.
token: Security credentials token identifying the user.
**kwargs: If args or runner_args are not specified, we construct these
protobufs from these keywords.
Returns:
the session id of the flow.
Raises:
RuntimeError: Unknown or invalid parameters were provided. |
def _handle_chat_name(self, data):
"""Handle user name changes"""
self.room.user.nick = data
self.conn.enqueue_data("user", self.room.user) | Handle user name changes |
def get_PSD(self, NPerSegment=1000000, window="hann", timeStart=None, timeEnd=None, override=False):
"""
Extracts the power spectral density (PSD) from the data.
Parameters
----------
NPerSegment : int, optional
Length of each segment used in scipy.welch
default = 1000000
window : str or tuple or array_like, optional
Desired window to use. See get_window for a list of windows
and required parameters. If window is array_like it will be
used directly as the window and its length will be used for
nperseg.
default = "hann"
Returns
-------
freqs : ndarray
Array containing the frequencies at which the PSD has been
calculated
PSD : ndarray
Array containing the value of the PSD at the corresponding
frequency value in V**2/Hz
"""
if timeStart is None and timeEnd is None:
freqs, PSD = calc_PSD(self.voltage, self.SampleFreq, NPerSegment=NPerSegment)
self.PSD = PSD
self.freqs = freqs
else:
if timeStart is None:
timeStart = self.timeStart
if timeEnd is None:
timeEnd = self.timeEnd
time = self.time.get_array()
StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
if EndIndex == len(time) - 1:
EndIndex = EndIndex + 1 # so that it does not remove the last element
freqs, PSD = calc_PSD(self.voltage[StartIndex:EndIndex], self.SampleFreq, NPerSegment=NPerSegment)
if override == True:
self.freqs = freqs
self.PSD = PSD
return freqs, PSD | Extracts the power spectral density (PSD) from the data.
Parameters
----------
NPerSegment : int, optional
Length of each segment used in scipy.welch
default = 1000000
window : str or tuple or array_like, optional
Desired window to use. See get_window for a list of windows
and required parameters. If window is array_like it will be
used directly as the window and its length will be used for
nperseg.
default = "hann"
Returns
-------
freqs : ndarray
Array containing the frequencies at which the PSD has been
calculated
PSD : ndarray
Array containing the value of the PSD at the corresponding
frequency value in V**2/Hz |
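`calc_PSD` is not shown here; assuming it wraps Welch's method as the docstring suggests, a minimal scipy.signal sketch of that kind of PSD estimate (with a made-up test signal) is:

import numpy as np
from scipy.signal import welch

fs = 10_000.0                                   # sample rate in Hz
t = np.arange(0, 1.0, 1.0 / fs)
x = np.sin(2 * np.pi * 50 * t) + 0.1 * np.random.randn(t.size)
freqs, psd = welch(x, fs=fs, window='hann', nperseg=4096)   # PSD in (units of x)**2 per Hz
print(freqs[np.argmax(psd)])                    # peak near 50 Hz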
def action_draft(self):
"""Set a change request as draft"""
for rec in self:
if not rec.state == 'cancelled':
raise UserError(
_('You need to cancel it before reopening.'))
if not (rec.am_i_owner or rec.am_i_approver):
raise UserError(
_('You are not authorized to do this.\r\n'
'Only owners or approvers can reopen Change Requests.'))
rec.write({'state': 'draft'}) | Set a change request as draft |
def extract(data, items, out_dir=None):
"""Extract germline calls for the given sample, if tumor only.
"""
if vcfutils.get_paired_phenotype(data):
if len(items) == 1:
germline_vcf = _remove_prioritization(data["vrn_file"], data, out_dir)
germline_vcf = vcfutils.bgzip_and_index(germline_vcf, data["config"])
data["vrn_file_plus"] = {"germline": germline_vcf}
return data | Extract germline calls for the given sample, if tumor only. |
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""on_failure
http://docs.celeryproject.org/en/latest/userguide/tasks.html#task-inheritance
:param exc: exception
:param task_id: task id
:param args: arguments passed into task
:param kwargs: keyword arguments passed into task
:param einfo: exception info
"""
use_exc = str(exc)
log.error(("{} FAIL - exc={} "
"args={} kwargs={}")
.format(
self.log_label,
use_exc,
args,
kwargs)) | on_failure
http://docs.celeryproject.org/en/latest/userguide/tasks.html#task-inheritance
:param exc: exception
:param task_id: task id
:param args: arguments passed into task
:param kwargs: keyword arguments passed into task
:param einfo: exception info |
def anonymous_user_required(*decorator_args, msg=None, category=None, redirect_url=None):
"""
Decorator requiring that there is no user currently logged in.
Aborts with ``HTTP 403: Forbidden`` if there is an authenticated user.
"""
def wrapper(fn):
@wraps(fn)
def decorated(*args, **kwargs):
if current_user.is_authenticated:
if request.is_json:
abort(HTTPStatus.FORBIDDEN)
else:
if msg:
flash(msg, category)
return redirect('SECURITY_POST_LOGIN_REDIRECT_ENDPOINT',
override=redirect_url)
return fn(*args, **kwargs)
return decorated
if decorator_args and callable(decorator_args[0]):
return wrapper(decorator_args[0])
return wrapper | Decorator requiring that there is no user currently logged in.
Aborts with ``HTTP 403: Forbidden`` if there is an authenticated user. |
def check(self, **kwargs):
"""
In addition to parent class' checks, also ensure that MULTITENANT_STATICFILES_DIRS
is a tuple or a list.
"""
errors = super().check(**kwargs)
multitenant_staticfiles_dirs = settings.MULTITENANT_STATICFILES_DIRS
if not isinstance(multitenant_staticfiles_dirs, (list, tuple)):
errors.append(
Error(
"Your MULTITENANT_STATICFILES_DIRS setting is not a tuple or list.",
hint="Perhaps you forgot a trailing comma?",
)
)
return errors | In addition to parent class' checks, also ensure that MULTITENANT_STATICFILES_DIRS
is a tuple or a list. |
def draft_pick(self):
"""Returns when in the draft the player was picked.
:returns: TODO
"""
doc = self.get_main_doc()
try:
p_tags = doc('div#meta p')
draft_p_tag = next(p for p in p_tags.items() if p.text().lower().startswith('draft'))
draft_pick = int(re.search(r'(\d+)\w{,3}\s+?overall', draft_p_tag.text()).group(1))
return draft_pick
except Exception as e:
return None | Returns when in the draft the player was picked.
:returns: TODO |
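The parsing step reduces to one regular expression over the draft paragraph; a small self-contained check against a sample meta line (made up for illustration) shows what it extracts:

import re

text = 'Draft: Cleveland Cavaliers, 1st round (1st pick, 1st overall), 2003 NBA Draft'
match = re.search(r'(\d+)\w{,3}\s+?overall', text)
print(int(match.group(1)))   # 1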
def log_train_metric(period, auto_reset=False):
"""Callback to log the training evaluation result every period.
Parameters
----------
period : int
The number of batch to log the training evaluation metric.
auto_reset : bool
Reset the metric after each log.
Returns
-------
callback : function
The callback function that can be passed as iter_epoch_callback to fit.
"""
def _callback(param):
"""The checkpoint function."""
if param.nbatch % period == 0 and param.eval_metric is not None:
name_value = param.eval_metric.get_name_value()
for name, value in name_value:
logging.info('Iter[%d] Batch[%d] Train-%s=%f',
param.epoch, param.nbatch, name, value)
if auto_reset:
param.eval_metric.reset_local()
return _callback | Callback to log the training evaluation result every period.
Parameters
----------
period : int
The number of batch to log the training evaluation metric.
auto_reset : bool
Reset the metric after each log.
Returns
-------
callback : function
The callback function that can be passed as iter_epoch_callback to fit. |
def get_attributes(file, *, attributes=None, mime_type=None,
force_document=False, voice_note=False, video_note=False,
supports_streaming=False):
"""
Get a list of attributes for the given file and
the mime type as a tuple ([attribute], mime_type).
"""
# Note: ``file.name`` works for :tl:`InputFile` and some `IOBase` streams
name = file if isinstance(file, str) else getattr(file, 'name', 'unnamed')
if mime_type is None:
mime_type = mimetypes.guess_type(name)[0]
attr_dict = {types.DocumentAttributeFilename:
types.DocumentAttributeFilename(os.path.basename(name))}
if is_audio(file):
m = _get_metadata(file)
if m:
attr_dict[types.DocumentAttributeAudio] = \
types.DocumentAttributeAudio(
voice=voice_note,
title=m.get('title') if m.has('title') else None,
performer=m.get('author') if m.has('author') else None,
duration=int(m.get('duration').seconds
if m.has('duration') else 0)
)
if not force_document and is_video(file):
m = _get_metadata(file)
if m:
doc = types.DocumentAttributeVideo(
round_message=video_note,
w=m.get('width') if m.has('width') else 0,
h=m.get('height') if m.has('height') else 0,
duration=int(m.get('duration').seconds
if m.has('duration') else 0),
supports_streaming=supports_streaming
)
else:
doc = types.DocumentAttributeVideo(
0, 1, 1, round_message=video_note,
supports_streaming=supports_streaming)
attr_dict[types.DocumentAttributeVideo] = doc
if voice_note:
if types.DocumentAttributeAudio in attr_dict:
attr_dict[types.DocumentAttributeAudio].voice = True
else:
attr_dict[types.DocumentAttributeAudio] = \
types.DocumentAttributeAudio(0, voice=True)
# Now override the attributes if any. As we have a dict of
# {cls: instance}, we can override any class with the list
# of attributes provided by the user easily.
if attributes:
for a in attributes:
attr_dict[type(a)] = a
# Ensure we have a mime type, any; but it cannot be None
# 'The "octet-stream" subtype is used to indicate that a body
# contains arbitrary binary data.'
if not mime_type:
mime_type = 'application/octet-stream'
return list(attr_dict.values()), mime_type | Get a list of attributes for the given file and
the mime type as a tuple ([attribute], mime_type). |
def delimited_file(
self,
hdfs_dir,
schema,
name=None,
database=None,
delimiter=',',
na_rep=None,
escapechar=None,
lineterminator=None,
external=True,
persist=False,
):
"""
Interpret delimited text files (CSV / TSV / etc.) as an Ibis table. See
`parquet_file` for more exposition on what happens under the hood.
Parameters
----------
hdfs_dir : string
HDFS directory name containing delimited text files
schema : ibis Schema
name : string, default None
Name for temporary or persistent table; otherwise random one
generated
database : string
Database to create the (possibly temporary) table in
delimiter : length-1 string, default ','
Pass None if there is no delimiter
escapechar : length-1 string
Character used to escape special characters
lineterminator : length-1 string
Character used to delimit lines
external : boolean, default True
Create table as EXTERNAL (data will not be deleted on drop). Note that
if persist=False and external=False, whatever data you reference will
be deleted
persist : boolean, default False
If True, do not delete the table upon garbage collection of ibis
table object
Returns
-------
delimited_table : ImpalaTable
"""
name, database = self._get_concrete_table_path(
name, database, persist=persist
)
stmt = ddl.CreateTableDelimited(
name,
hdfs_dir,
schema,
database=database,
delimiter=delimiter,
external=external,
na_rep=na_rep,
lineterminator=lineterminator,
escapechar=escapechar,
)
self._execute(stmt)
return self._wrap_new_table(name, database, persist) | Interpret delimited text files (CSV / TSV / etc.) as an Ibis table. See
`parquet_file` for more exposition on what happens under the hood.
Parameters
----------
hdfs_dir : string
HDFS directory name containing delimited text files
schema : ibis Schema
name : string, default None
Name for temporary or persistent table; otherwise random one
generated
database : string
Database to create the (possibly temporary) table in
delimiter : length-1 string, default ','
Pass None if there is no delimiter
escapechar : length-1 string
Character used to escape special characters
lineterminator : length-1 string
Character used to delimit lines
external : boolean, default True
Create table as EXTERNAL (data will not be deleted on drop). Note that
if persist=False and external=False, whatever data you reference will
be deleted
persist : boolean, default False
If True, do not delete the table upon garbage collection of ibis
table object
Returns
-------
delimited_table : ImpalaTable |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document') and self.document is not None:
_dict['document'] = self.document
if hasattr(self, 'targets') and self.targets is not None:
_dict['targets'] = self.targets
return _dict | Return a json dictionary representing this model. |
def ImportConfig(filename, config):
"""Reads an old config file and imports keys and user accounts."""
sections_to_import = ["PrivateKeys"]
entries_to_import = [
"Client.executable_signing_public_key", "CA.certificate",
"Frontend.certificate"
]
options_imported = 0
old_config = grr_config.CONFIG.MakeNewConfig()
old_config.Initialize(filename)
for entry in old_config.raw_data:
try:
section = entry.split(".")[0]
if section in sections_to_import or entry in entries_to_import:
config.Set(entry, old_config.Get(entry))
print("Imported %s." % entry)
options_imported += 1
except Exception as e: # pylint: disable=broad-except
print("Exception during import of %s: %s" % (entry, e))
return options_imported | Reads an old config file and imports keys and user accounts. |
def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> IGraphNode:
"""
Returns a new `IGraphNode` instance with the given index and name.
Arguments:
index (int): The index of the node to create.
name (str): The name of the node to create.
external_id (Optional[str]): The external ID of the node.
"""
return IGraphNode(graph=self._graph, index=index, name=name, external_id=external_id) | Returns a new `IGraphNode` instance with the given index and name.
Arguments:
index (int): The index of the node to create.
name (str): The name of the node to create.
external_id (Optional[str]): The external ID of the node. |
async def info(self, fields: Iterable[str] = None) -> dict:
'''
Returns the keypair's information such as resource limits.
:param fields: Additional per-agent query fields to fetch.
.. versionadded:: 18.12
'''
if fields is None:
fields = (
'access_key', 'secret_key',
'is_active', 'is_admin',
)
q = 'query {' \
' keypair {' \
' $fields' \
' }' \
'}'
q = q.replace('$fields', ' '.join(fields))
rqst = Request(self.session, 'POST', '/admin/graphql')
rqst.set_json({
'query': q,
})
async with rqst.fetch() as resp:
data = await resp.json()
return data['keypair'] | Returns the keypair's information such as resource limits.
:param fields: Additional per-agent query fields to fetch.
.. versionadded:: 18.12 |
def from_points(cls, iterable_of_points):
"""
Creates a MultiPoint from an iterable collection of `pyowm.utils.geo.Point` instances
:param iterable_of_points: iterable whose items are `pyowm.utils.geo.Point` instances
:type iterable_of_points: iterable
:return: a *MultiPoint* instance
"""
return MultiPoint([(p.lon, p.lat) for p in iterable_of_points]) | Creates a MultiPoint from an iterable collection of `pyowm.utils.geo.Point` instances
:param iterable_of_points: iterable whose items are `pyowm.utils.geo.Point` instances
:type iterable_of_points: iterable
:return: a *MultiPoint* instance |
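The classmethod only needs objects exposing `.lon` and `.lat`; a stand-in namedtuple makes the (lon, lat) tuples it builds easy to see:

from collections import namedtuple

Point = namedtuple('Point', ['lon', 'lat'])     # stand-in for pyowm's Point
points = [Point(20.8, 30.9), Point(1.2, 0.4)]
coords = [(p.lon, p.lat) for p in points]       # what from_points hands to MultiPoint
print(coords)                                   # [(20.8, 30.9), (1.2, 0.4)]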
def es_version(self, url):
"""Get Elasticsearch version.
Get the version of Elasticsearch. This is useful because
Elasticsearch and Kibiter are paired (same major version for 5, 6).
:param url: Elasticsearch url hosting Kibiter indices
:returns: major version, as string
"""
try:
res = self.grimoire_con.get(url)
res.raise_for_status()
major = res.json()['version']['number'].split(".")[0]
except Exception:
logger.error("Error retrieving Elasticsearch version: " + url)
raise
return major | Get Elasticsearch version.
Get the version of Elasticsearch. This is useful because
Elasticsearch and Kibiter are paired (same major version for 5, 6).
:param url: Elasticsearch url hosting Kibiter indices
:returns: major version, as string |
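Outside the class, the same check is a single GET against the cluster root; a sketch assuming an Elasticsearch instance is reachable at `url`:

import requests

def es_major_version(url):
    # the cluster root returns JSON with the server version at version.number
    res = requests.get(url)
    res.raise_for_status()
    return res.json()['version']['number'].split('.')[0]

# es_major_version('http://localhost:9200')   # e.g. '6', depending on the cluster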
def get_iam_policy(self):
"""Return the current IAM policy as a json-serialized string"""
checker = AwsLimitChecker()
policy = checker.get_required_iam_policy()
return json.dumps(policy, sort_keys=True, indent=2) | Return the current IAM policy as a json-serialized string |
def default_project(self, value):
"""
Setter for **self.__default_project** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, \
"'{0}' attribute: '{1}' type is not 'unicode'!".format("default_project", value)
self.__default_project = value | Setter for **self.__default_project** attribute.
:param value: Attribute value.
:type value: unicode |
def ConsumeCommentOrTrailingComment(self):
"""Consumes a comment, returns a 2-tuple (trailing bool, comment str)."""
# Tokenizer initializes _previous_line and _previous_column to 0. As the
# tokenizer starts, it looks like there is a previous token on the line.
just_started = self._line == 0 and self._column == 0
before_parsing = self._previous_line
comment = self.ConsumeComment()
# A trailing comment is a comment on the same line as the previous token.
trailing = (self._previous_line == before_parsing
and not just_started)
return trailing, comment | Consumes a comment, returns a 2-tuple (trailing bool, comment str). |
def run(self):
"""Method representing the process’s activity."""
random.seed(self.seed)
np.random.seed(self.np_seed)
if not isinstance(self, multiprocessing.Process):
# Calling mxnet methods in a subprocess will raise an exception if
# mxnet is built with GPU support
# https://github.com/apache/incubator-mxnet/issues/4659
mx.random.seed(self.mx_seed)
# Startup - Master waits for this
try:
stream_iter = iter(self.stream)
self._errorq.put(None)
except Exception as e: # pylint: disable=broad-except
tb = traceback.format_exc()
self._errorq.put((e, tb))
# Async work
while True:
try: # Check control queue
c = self._controlq.get(False)
if c is None:
break
else:
raise RuntimeError('Got unexpected control code {}'.format(repr(c)))
except queue.Empty:
pass
except RuntimeError as e:
tb = traceback.format_exc()
self._errorq.put((e, tb))
self._dataq.put(None)
try:
data = next(stream_iter)
error = None
except Exception as e: # pylint: disable=broad-except
tb = traceback.format_exc()
error = (e, tb)
data = None
finally:
self._errorq.put(error)
self._dataq.put(data) | Method representing the process’s activity. |
def process_frame(self, f, frame_str):
"""
:param Frame f: Frame object
:param bytes frame_str: Raw frame content
"""
frame_type = f.cmd.lower()
if frame_type in ['disconnect']:
return
if frame_type == 'send':
frame_type = 'message'
f.cmd = 'MESSAGE'
if frame_type in ['connected', 'message', 'receipt', 'error', 'heartbeat']:
if frame_type == 'message':
if f.headers['destination'] not in self.subscriptions.values():
return
(f.headers, f.body) = self.notify('before_message', f.headers, f.body)
self.notify(frame_type, f.headers, f.body)
if 'receipt' in f.headers:
receipt_frame = Frame('RECEIPT', {'receipt-id': f.headers['receipt']})
lines = convert_frame(receipt_frame)
self.send(encode(pack(lines)))
log.debug("Received frame: %r, headers=%r, body=%r", f.cmd, f.headers, f.body) | :param Frame f: Frame object
:param bytes frame_str: Raw frame content |
def rmd_options_to_metadata(options):
"""
Parse rmd options and return a metadata dictionary
:param options:
:return:
"""
options = re.split(r'\s|,', options, 1)
if len(options) == 1:
language = options[0]
chunk_options = []
else:
language, others = options
language = language.rstrip(' ,')
others = others.lstrip(' ,')
chunk_options = parse_rmd_options(others)
language = 'R' if language == 'r' else language
metadata = {}
for i, opt in enumerate(chunk_options):
name, value = opt
if i == 0 and name == '':
metadata['name'] = value
continue
else:
if update_metadata_from_rmd_options(name, value, metadata):
continue
try:
metadata[name] = _py_logical_values(value)
continue
except RLogicalValueError:
metadata[name] = value
for name in metadata:
try_eval_metadata(metadata, name)
if ('active' in metadata or metadata.get('run_control', {}).get('frozen') is True) and 'eval' in metadata:
del metadata['eval']
return metadata.get('language') or language, metadata | Parse rmd options and return a metadata dictionary
:param options:
:return: |
def add_file(self, name, filename, compress_hint=True):
"""Saves the actual file in the store.
``compress_hint`` suggests whether the file should be compressed
before transfer
Works like :meth:`add_stream`, but ``filename`` is the name of
an existing file in the filesystem.
"""
return self.add_stream(name, open(filename, 'rb')) | Saves the actual file in the store.
``compress_hint`` suggests whether the file should be compressed
before transfer
Works like :meth:`add_stream`, but ``filename`` is the name of
an existing file in the filesystem. |
def zone_absent(domain, profile):
'''
Ensures a record is absent.
:param domain: Zone name, i.e. the domain name
:type domain: ``str``
:param profile: The profile key
:type profile: ``str``
'''
zones = __salt__['libcloud_dns.list_zones'](profile)
matching_zone = [z for z in zones if z['domain'] == domain]
if not matching_zone:
return state_result(True, 'Zone already absent', domain)
else:
result = __salt__['libcloud_dns.delete_zone'](matching_zone[0]['id'], profile)
return state_result(result, 'Deleted zone', domain) | Ensures a record is absent.
:param domain: Zone name, i.e. the domain name
:type domain: ``str``
:param profile: The profile key
:type profile: ``str`` |
def view_cancel_edit(name=None):
"""Cancel the edition of an existing page.
Then render the last modification status
.. note:: this is a bottle view
if no page name is given, do nothing (it may leave some .tmp. files in
the directory).
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
if name is None:
return redirect('/')
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
reset_to_last_commit()
return redirect('/' + name)
else:
return abort(404) | Cancel the edition of an existing page.
Then render the last modification status
.. note:: this is a bottle view
if no page name is given, do nothing (it may leave some .tmp. files in
the directory).
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object |
def timezone(self, value=0.0):
"""Corresponds to IDD Field `timezone` Time relative to GMT.
Args:
value (float): value for IDD Field `timezone`
Unit: hr - not on standard units list???
Default value: 0.0
value >= -12.0
value <= 12.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `timezone`'.format(value))
if value < -12.0:
raise ValueError('value need to be greater or equal -12.0 '
'for field `timezone`')
if value > 12.0:
raise ValueError('value need to be smaller 12.0 '
'for field `timezone`')
self._timezone = value | Corresponds to IDD Field `timezone` Time relative to GMT.
Args:
value (float): value for IDD Field `timezone`
Unit: hr - not on standard units list???
Default value: 0.0
value >= -12.0
value <= 12.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value |
def view_structure(self, only_chains=None, opacity=1.0, recolor=False, gui=False):
"""Use NGLviewer to display a structure in a Jupyter notebook
Args:
only_chains (str, list): Chain ID or IDs to display
opacity (float): Opacity of the structure
recolor (bool): If structure should be cleaned and recolored to silver
gui (bool): If the NGLview GUI should show up
Returns:
NGLviewer object
"""
# TODO: show_structure_file does not work for MMTF files - need to check for that and load accordingly
if ssbio.utils.is_ipynb():
import nglview as nv
else:
raise EnvironmentError('Unable to display structure - not running in a Jupyter notebook environment')
if not self.structure_file:
raise ValueError("Structure file not loaded")
only_chains = ssbio.utils.force_list(only_chains)
to_show_chains = '( '
for c in only_chains:
to_show_chains += ':{} or'.format(c)
to_show_chains = to_show_chains.strip(' or ')
to_show_chains += ' )'
if self.file_type == 'mmtf' or self.file_type == 'mmtf.gz':
view = nv.NGLWidget()
view.add_component(self.structure_path)
else:
view = nv.show_structure_file(self.structure_path, gui=gui)
if recolor:
view.clear_representations()
if only_chains:
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
else:
view.add_cartoon(selection='protein', color='silver', opacity=opacity)
elif only_chains:
view.clear_representations()
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
return view | Use NGLviewer to display a structure in a Jupyter notebook
Args:
only_chains (str, list): Chain ID or IDs to display
opacity (float): Opacity of the structure
recolor (bool): If structure should be cleaned and recolored to silver
gui (bool): If the NGLview GUI should show up
Returns:
NGLviewer object |
def upload_feature_value_file(self, mapobject_type_name, plate_name,
well_name, well_pos_y, well_pos_x, tpoint, filename, index_col):
'''Uploads feature values for the given
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>` at the
specified :class:`Site <tmlib.models.site.Site>`.
Parameters
----------
mapobject_type_name: str
type of the segmented objects
plate_name: str
name of the plate
well_name: str
name of the well
well_pos_y: int
y-position of the site relative to the well grid
well_pos_x: int
x-position of the site relative to the well grid
tpoint: int
zero-based time point index
filename: str
path to the file on disk
index_col: str
column name containing the object labels
See also
--------
:func:`tmserver.api.feature.add_feature_values`
:class:`tmlib.models.feature.FeatureValues`
'''
logger.info('upload feature value file "%s"', filename)
if not filename.lower().endswith('csv'):
raise IOError('Filename must have "csv" extension.')
filename = os.path.expanduser(os.path.expandvars(filename))
data = pd.read_csv(filename, index_col=index_col)
self._upload_feature_values(
mapobject_type_name, plate_name, well_name, well_pos_y, well_pos_x,
tpoint, data
) | Uploads feature values for the given
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>` at the
specified :class:`Site <tmlib.models.site.Site>`.
Parameters
----------
mapobject_type_name: str
type of the segmented objects
plate_name: str
name of the plate
well_name: str
name of the well
well_pos_y: int
y-position of the site relative to the well grid
well_pos_x: int
x-position of the site relative to the well grid
tpoint: int
zero-based time point index
filename: str
path to the file on disk
index_col: str
column name containing the object labels
See also
--------
:func:`tmserver.api.feature.add_feature_values`
:class:`tmlib.models.feature.FeatureValues` |
def answer (self, headers, **options):
"""
Places a call or sends an IM, Twitter, or SMS message. To start a call, use the Session API to tell Tropo to launch your code.
Arguments: headers is a String.
Argument: **options is a set of optional keyword arguments.
See https://www.tropo.com/docs/webapi/answer
"""
self._steps.append(Answer (headers, **options).obj) | Places a call or sends an IM, Twitter, or SMS message. To start a call, use the Session API to tell Tropo to launch your code.
Arguments: headers is a String.
Argument: **options is a set of optional keyword arguments.
See https://www.tropo.com/docs/webapi/answer |
def semilocal_linear_trend_transition_matrix(autoregressive_coef):
"""Build the transition matrix for a semi-local linear trend model."""
# We want to write the following 2 x 2 matrix:
# [[1., 1., ], # level(t+1) = level(t) + slope(t)
# [0., ar_coef], # slope(t+1) = ar_coef * slope(t)
# but it's slightly tricky to properly incorporate the batch shape of
# autoregressive_coef. E.g., if autoregressive_coef has shape [4,6], we want
# to return shape [4, 6, 2, 2]. We do this by breaking the matrix into its
# fixed entries, written explicitly, and then the autoregressive_coef part
# which we add in after using a mask to broadcast to the correct matrix shape.
fixed_entries = tf.constant(
[[1., 1.],
[0., 0.]],
dtype=autoregressive_coef.dtype)
autoregressive_coef_mask = tf.constant([[0., 0.],
[0., 1.]],
dtype=autoregressive_coef.dtype)
bottom_right_entry = (autoregressive_coef[..., tf.newaxis, tf.newaxis] *
autoregressive_coef_mask)
return tf.linalg.LinearOperatorFullMatrix(
fixed_entries + bottom_right_entry) | Build the transition matrix for a semi-local linear trend model. |
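The broadcasting trick described in the comments is easier to see in a plain NumPy sketch (not the TFP code itself):

import numpy as np

ar_coef = np.array([0.5, 0.9])                 # batch of 2 autoregressive coefficients
fixed_entries = np.array([[1., 1.],
                          [0., 0.]])
mask = np.array([[0., 0.],
                 [0., 1.]])
mats = fixed_entries + ar_coef[..., None, None] * mask   # shape (2, 2, 2)
print(mats[0])   # [[1.  1. ] [0.  0.5]]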
def __handle_changed_state(self, state):
"""
we need to pack a struct with the following five numbers:
tv_sec, tv_usec, ev_type, code, value
then write it using __write_to_character_device
seconds, microseconds, ev_type, code, value
time we just use now
ev_type we look up
code we look up
value is 0 or 1 for the buttons
axis value is maybe the same as Linux? Hope so!
"""
timeval = self.__get_timeval()
events = self.__get_button_events(state, timeval)
events.extend(self.__get_axis_events(state, timeval))
if events:
self.__write_to_character_device(events, timeval) | we need to pack a struct with the following five numbers:
tv_sec, tv_usec, ev_type, code, value
then write it using __write_to_character_device
seconds, microseconds, ev_type, code, value
time we just use now
ev_type we look up
code we look up
value is 0 or 1 for the buttons
axis value is maybe the same as Linux? Hope so! |
def update_title_to_proceeding(self):
"""Move title info from 245 to 111 proceeding style."""
titles = record_get_field_instances(self.record,
tag="245")
for title in titles:
subs = field_get_subfields(title)
new_subs = []
if "a" in subs:
new_subs.append(("a", subs['a'][0]))
if "b" in subs:
new_subs.append(("c", subs['b'][0]))
record_add_field(self.record,
tag="111",
subfields=new_subs)
record_delete_fields(self.record, tag="245")
record_delete_fields(self.record, tag="246") | Move title info from 245 to 111 proceeding style. |
def popup(self, title, callfn, initialdir=None):
"""Let user select a directory."""
super(DirectorySelection, self).popup(title, callfn, initialdir) | Let user select a directory. |
def after_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. This function
is only executed after each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(self.name, []).append(f))
return f | Like :meth:`Flask.after_request` but for a blueprint. This function
is only executed after each request that is handled by a function of
that blueprint. |
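Standard Flask usage of the blueprint-level hook, in a minimal app written for illustration:

from flask import Blueprint, Flask

bp = Blueprint('api', __name__)

@bp.after_request
def add_api_header(response):
    # runs only after requests handled by views registered on this blueprint
    response.headers['X-API-Version'] = '1'
    return response

app = Flask(__name__)
app.register_blueprint(bp)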
def load_directory(self, directory, ext=None):
"""Load RiveScript documents from a directory.
:param str directory: The directory of RiveScript documents to load
replies from.
:param []str ext: List of file extensions to consider as RiveScript
documents. The default is ``[".rive", ".rs"]``.
"""
self._say("Loading from directory: " + directory)
if ext is None:
# Use the default extensions - .rive is preferable.
ext = ['.rive', '.rs']
elif type(ext) == str:
# Backwards compatibility for ext being a string value.
ext = [ext]
if not os.path.isdir(directory):
self._warn("Error: " + directory + " is not a directory.")
return
for root, subdirs, files in os.walk(directory):
for file in files:
for extension in ext:
if file.lower().endswith(extension):
# Load this file.
self.load_file(os.path.join(root, file))
break | Load RiveScript documents from a directory.
:param str directory: The directory of RiveScript documents to load
replies from.
:param []str ext: List of file extensions to consider as RiveScript
documents. The default is ``[".rive", ".rs"]``. |
def _preprocess(df):
"""
given a DataFrame where records are stored row-wise, rearrange it
such that records are stored column-wise.
"""
df = df.stack()
df.index.rename(["id", "time"], inplace=True) # .reset_index()
df.name = "value"
df = df.reset_index()
return df | given a DataFrame where records are stored row-wise, rearrange it
such that records are stored column-wise. |
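A tiny pandas example shows the row-wise to column-wise reshaping the helper performs:

import pandas as pd

wide = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], index=['a', 'b'], columns=[0, 1])
long = wide.stack()
long.index.rename(['id', 'time'], inplace=True)
long.name = 'value'
print(long.reset_index())
#   id  time  value
# 0  a     0    1.0
# 1  a     1    2.0
# 2  b     0    3.0
# 3  b     1    4.0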
def receive_loop_with_callback(self, queue_name, callback):
"""
Process incoming messages with callback until close is called.
:param queue_name: str: name of the queue to poll
:param callback: func(ch, method, properties, body) called with data when data arrives
:return:
"""
self.connect()
channel = self.create_channel(queue_name)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback, queue=queue_name)
channel.start_consuming() | Process incoming messages with callback until close is called.
:param queue_name: str: name of the queue to poll
:param callback: func(ch, method, properties, body) called with data when data arrives
:return: |
def install_python_package(self, arch, name=None, env=None, is_dir=True):
'''Automate the installation of a Python package (or a cython
package where the cython components are pre-built).'''
if env is None:
env = self.get_recipe_env(arch)
with current_directory(self.get_build_dir(arch.arch)):
hostpython = sh.Command(self.ctx.hostpython)
shprint(hostpython, 'setup.py', 'build_ext', '--static-libpq',
_env=env)
shprint(hostpython, 'setup.py', 'install', '-O2',
'--root={}'.format(self.ctx.get_python_install_dir()),
'--install-lib=.', _env=env) | Automate the installation of a Python package (or a cython
package where the cython components are pre-built). |
def _generate_main_scripts(self):
"""
Include the scripts used by solutions.
"""
head = self.parser.find('head').first_result()
if head is not None:
common_functions_script = self.parser.find(
'#'
+ AccessibleEventImplementation.ID_SCRIPT_COMMON_FUNCTIONS
).first_result()
if common_functions_script is None:
common_functions_file = open(
os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)
))),
'js',
'common.js'
),
'r'
)
common_functions_content = common_functions_file.read()
common_functions_file.close()
common_functions_script = self.parser.create_element('script')
common_functions_script.set_attribute(
'id',
AccessibleEventImplementation.ID_SCRIPT_COMMON_FUNCTIONS
)
common_functions_script.set_attribute(
'type',
'text/javascript'
)
common_functions_script.append_text(common_functions_content)
head.prepend_element(common_functions_script)
if (
self.parser.find(
'#'
+ AccessibleEventImplementation.ID_SCRIPT_EVENT_LISTENER
).first_result() is None
):
event_listener_file = open(
os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)
))),
'js',
'eventlistener.js'
),
'r'
)
event_listener_script_content = event_listener_file.read()
event_listener_file.close()
script = self.parser.create_element('script')
script.set_attribute(
'id',
AccessibleEventImplementation.ID_SCRIPT_EVENT_LISTENER
)
script.set_attribute('type', 'text/javascript')
script.append_text(event_listener_script_content)
common_functions_script.insert_after(script)
local = self.parser.find('body').first_result()
if local is not None:
self.script_list = self.parser.find(
'#'
+ AccessibleEventImplementation.ID_LIST_IDS_SCRIPT
).first_result()
if self.script_list is None:
self.script_list = self.parser.create_element('script')
self.script_list.set_attribute(
'id',
AccessibleEventImplementation.ID_LIST_IDS_SCRIPT
)
self.script_list.set_attribute('type', 'text/javascript')
self.script_list.append_text('var activeElements = [];')
self.script_list.append_text('var hoverElements = [];')
self.script_list.append_text('var dragElements = [];')
self.script_list.append_text('var dropElements = [];')
local.append_element(self.script_list)
if self.parser.find(
'#'
+ AccessibleEventImplementation.ID_FUNCTION_SCRIPT_FIX
).first_result() is None:
include_file = open(
os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)
))),
'js',
'include.js'
),
'r'
)
local_include_script_content = include_file.read()
include_file.close()
script_function = self.parser.create_element('script')
script_function.set_attribute(
'id',
AccessibleEventImplementation.ID_FUNCTION_SCRIPT_FIX
)
script_function.set_attribute('type', 'text/javascript')
script_function.append_text(local_include_script_content)
local.append_element(script_function)
self.main_script_added = True | Include the scripts used by solutions. |
def translate(self, dx, dy):
"""
Move the polygons from one place to another
Parameters
----------
dx : number
distance to move in the x-direction
dy : number
distance to move in the y-direction
Returns
-------
out : ``PolygonSet``
This object.
"""
vec = numpy.array((dx, dy))
self.polygons = [points + vec for points in self.polygons]
return self | Move the polygons from one place to another
Parameters
----------
dx : number
distance to move in the x-direction
dy : number
distance to move in the y-direction
Returns
-------
out : ``PolygonSet``
This object. |
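The translation is a plain NumPy broadcast over each polygon's vertex array; for a single triangle:

import numpy as np

polygons = [np.array([[0., 0.], [1., 0.], [1., 1.]])]   # one triangle
vec = np.array((2.0, -1.0))                             # dx=2, dy=-1
moved = [points + vec for points in polygons]
print(moved[0])   # [[ 2. -1.] [ 3. -1.] [ 3.  0.]]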
def project(self, **kwargs: Dict[str, Any]) -> Union[Hist, Dict[str, Hist]]:
""" Perform the requested projection(s).
Note:
All cuts on the original histograms will be reset when this function is completed.
Args:
kwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...)
Returns:
The projected histogram(s). The projected histograms are also stored in ``output_observable``.
"""
if self.single_observable_projection:
return self._project_single_observable(**kwargs)
else:
return self._project_dict(**kwargs) | Perform the requested projection(s).
Note:
All cuts on the original histograms will be reset when this function is completed.
Args:
kwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...)
Returns:
The projected histogram(s). The projected histograms are also stored in ``output_observable``. |
def pretty_exe_doc(program, parser, stack=1, under='-'):
"""
Takes the name of a script and a parser that will give the help message for it.
The module that called this function will then add a header to the docstring
of the script, followed immediately by the help message generated
by the OptionParser
:param str program: Name of the program that we want to make the header
:param optparse.OptionParser parser: Either a parser or a callable with no arguments
that will give the desired parser
:param int stack: How far up the stack to get the docstring to change
:param str under: The character you want for the program underline
"""
if os.path.basename(sys.argv[0]) == 'sphinx-build':
# Get the calling module
mod = inspect.getmodule(inspect.stack()[stack][0])
# Get parser
_parser = parser() if '__call__' in dir(parser) else parser
# Make the parser use the correct program
_parser.set_usage(mod.__usage__.replace('%prog', program))
# Modify docs by adding a header and usage
mod.__doc__ = '\n'.join(['', program, under * len(program), '::', ''] +
[' %s' % l for l in _parser.format_help().split('\n')]) + \
mod.__doc__ | Takes the name of a script and a parser that will give the help message for it.
The module that called this function will then add a header to the docstring
of the script, followed immediately by the help message generated
by the OptionParser
:param str program: Name of the program that we want to make the header
:param optparse.OptionParser parser: Either a parser or a callable with no arguments
that will give the desired parser
:param int stack: How far up the stack to get the docstring to change
:param str under: The character you want for the program underline |
def _setup_process_environment(self, env):
"""
Sets up the process environment.
"""
environ = self._process.processEnvironment()
if env is None:
env = {}
for k, v in os.environ.items():
environ.insert(k, v)
for k, v in env.items():
environ.insert(k, v)
if sys.platform != 'win32':
environ.insert('TERM', 'xterm')
environ.insert('LINES', '24')
environ.insert('COLUMNS', '450')
environ.insert('PYTHONUNBUFFERED', '1')
environ.insert('QT_LOGGING_TO_CONSOLE', '1')
return environ | Sets up the process environment. |