code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k)
---|---|---|
async def wait_for_election_success(cls):
"""Await this function if your cluster must have a leader"""
if cls.leader is None:
cls.leader_future = asyncio.Future(loop=cls.loop)
await cls.leader_future | Await this function if your cluster must have a leader | Below is the instruction that describes the task:
### Input:
Await this function if your cluster must have a leader
### Response:
async def wait_for_election_success(cls):
"""Await this function if your cluster must have a leader"""
if cls.leader is None:
cls.leader_future = asyncio.Future(loop=cls.loop)
await cls.leader_future |
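The coroutine above only returns because some other part of the cluster code resolves `cls.leader_future` once an election succeeds. A minimal, self-contained sketch of that pairing; the `Cluster` class and `_on_leader_elected` callback below are hypothetical stand-ins, not part of the original code:
import asyncio

class Cluster:
    leader = None
    leader_future = None

    @classmethod
    async def wait_for_election_success(cls):
        # Same pattern as above: park the caller on a Future until a leader exists.
        if cls.leader is None:
            cls.leader_future = asyncio.get_running_loop().create_future()
            await cls.leader_future

    @classmethod
    def _on_leader_elected(cls, leader):
        cls.leader = leader
        if cls.leader_future is not None and not cls.leader_future.done():
            cls.leader_future.set_result(leader)

async def main():
    # Simulate an election that completes shortly after we start waiting.
    asyncio.get_running_loop().call_later(0.01, Cluster._on_leader_elected, "node-1")
    await Cluster.wait_for_election_success()
    print("leader:", Cluster.leader)   # leader: node-1

asyncio.run(main())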
def alias_repository(self, repository_id, alias_id):
"""Adds an ``Id`` to a ``Repository`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Repository`` is determined by the
provider. The new ``Id`` is an alias to the primary ``Id``. If
the alias is a pointer to another repository, it is reassigned
to the given repository ``Id``.
arg: repository_id (osid.id.Id): the ``Id`` of a
``Repository``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is in use as a primary
``Id``
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.alias_bin_template
if self._catalog_session is not None:
return self._catalog_session.alias_catalog(catalog_id=repository_id, alias_id=alias_id)
self._alias_id(primary_id=repository_id, equivalent_id=alias_id) | Adds an ``Id`` to a ``Repository`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Repository`` is determined by the
provider. The new ``Id`` is an alias to the primary ``Id``. If
the alias is a pointer to another repository, it is reassigned
to the given repository ``Id``.
arg: repository_id (osid.id.Id): the ``Id`` of a
``Repository``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is in use as a primary
``Id``
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Adds an ``Id`` to a ``Repository`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Repository`` is determined by the
provider. The new ``Id`` is an alias to the primary ``Id``. If
the alias is a pointer to another repository, it is reassigned
to the given repository ``Id``.
arg: repository_id (osid.id.Id): the ``Id`` of a
``Repository``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is in use as a primary
``Id``
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def alias_repository(self, repository_id, alias_id):
"""Adds an ``Id`` to a ``Repository`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Repository`` is determined by the
provider. The new ``Id`` is an alias to the primary ``Id``. If
the alias is a pointer to another repository, it is reassigned
to the given repository ``Id``.
arg: repository_id (osid.id.Id): the ``Id`` of a
``Repository``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is in use as a primary
``Id``
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.alias_bin_template
if self._catalog_session is not None:
return self._catalog_session.alias_catalog(catalog_id=repository_id, alias_id=alias_id)
self._alias_id(primary_id=repository_id, equivalent_id=alias_id) |
def get_managed_policies(group, **conn):
"""Get a list of the managed policy names that are attached to the group."""
managed_policies = list_attached_group_managed_policies(group['GroupName'], **conn)
managed_policy_names = []
for policy in managed_policies:
managed_policy_names.append(policy['PolicyName'])
return managed_policy_names | Get a list of the managed policy names that are attached to the group. | Below is the instruction that describes the task:
### Input:
Get a list of the managed policy names that are attached to the group.
### Response:
def get_managed_policies(group, **conn):
"""Get a list of the managed policy names that are attached to the group."""
managed_policies = list_attached_group_managed_policies(group['GroupName'], **conn)
managed_policy_names = []
for policy in managed_policies:
managed_policy_names.append(policy['PolicyName'])
return managed_policy_names |
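For context, `list_attached_group_managed_policies` wraps IAM's `ListAttachedGroupPolicies` call, whose attached-policy entries carry `PolicyName` and `PolicyArn` keys. An illustrative sketch of the data the loop reduces (values made up):
managed_policies = [
    {"PolicyName": "ReadOnlyAccess", "PolicyArn": "arn:aws:iam::aws:policy/ReadOnlyAccess"},
    {"PolicyName": "TeamS3Access", "PolicyArn": "arn:aws:iam::123456789012:policy/TeamS3Access"},
]
print([p["PolicyName"] for p in managed_policies])   # ['ReadOnlyAccess', 'TeamS3Access']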
def delete(self, template_id, session):
'''taobao.delivery.template.delete Delete a shipping fee template
Deletes the template specified by the user-supplied template ID'''
request = TOPRequest('taobao.delivery.template.delete')
request['template_id'] = template_id
self.create(self.execute(request, session), fields=['complete', ])
return self.complete | taobao.delivery.template.delete Delete a shipping fee template
Deletes the template specified by the user-supplied template ID | Below is the instruction that describes the task:
### Input:
taobao.delivery.template.delete Delete a shipping fee template
Deletes the template specified by the user-supplied template ID
### Response:
def delete(self, template_id, session):
'''taobao.delivery.template.delete Delete a shipping fee template
Deletes the template specified by the user-supplied template ID'''
request = TOPRequest('taobao.delivery.template.delete')
request['template_id'] = template_id
self.create(self.execute(request, session), fields=['complete', ])
return self.complete |
def set_parent(self, new_site):
"""
Set self.site as either an empty string, or with a new Site.
"""
if new_site:
if not isinstance(new_site, Site):
raise Exception
self.site = new_site
self.propagate_data()
return new_site | Set self.site as either an empty string, or with a new Site. | Below is the instruction that describes the task:
### Input:
Set self.site as either an empty string, or with a new Site.
### Response:
def set_parent(self, new_site):
"""
Set self.site as either an empty string, or with a new Site.
"""
if new_site:
if not isinstance(new_site, Site):
raise Exception
self.site = new_site
self.propagate_data()
return new_site |
def add_blank_row(self, label):
"""
Add a blank row with only an index value to self.df.
This is done inplace.
"""
col_labels = self.df.columns
blank_item = pd.Series({}, index=col_labels, name=label)
# use .loc to add in place (append won't do that)
self.df.loc[blank_item.name] = blank_item
return self.df | Add a blank row with only an index value to self.df.
This is done inplace. | Below is the instruction that describes the task:
### Input:
Add a blank row with only an index value to self.df.
This is done inplace.
### Response:
def add_blank_row(self, label):
"""
Add a blank row with only an index value to self.df.
This is done inplace.
"""
col_labels = self.df.columns
blank_item = pd.Series({}, index=col_labels, name=label)
# use .loc to add in place (append won't do that)
self.df.loc[blank_item.name] = blank_item
return self.df |
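A short standalone illustration of the same idea on a plain DataFrame (the original works on `self.df`); note that assigning an all-NaN row upcasts integer columns to float:
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=["x", "y"])
blank = pd.Series({}, index=df.columns, name="z", dtype="float64")
df.loc[blank.name] = blank        # in place: appends a row labelled "z" filled with NaN
print(df.index.tolist())          # ['x', 'y', 'z']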
def hierarchy_cycles(rdf, fix=False):
"""Check if the graph contains skos:broader cycles and optionally break these.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing any skos:broader that overlaps
with skos:broaderTransitive.
"""
top_concepts = sorted(rdf.subject_objects(SKOS.hasTopConcept))
status = {}
for cs, root in top_concepts:
_hierarchy_cycles_visit(
rdf, root, None, fix, status=status)
# double check that all concepts were actually visited in the search,
# and visit remaining ones if necessary
recheck_top_concepts = False
for conc in sorted(rdf.subjects(RDF.type, SKOS.Concept)):
if conc not in status:
recheck_top_concepts = True
_hierarchy_cycles_visit(
rdf, conc, None, fix, status=status)
return recheck_top_concepts | Check if the graph contains skos:broader cycles and optionally break these.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing any skos:broader that overlaps
with skos:broaderTransitive. | Below is the instruction that describes the task:
### Input:
Check if the graph contains skos:broader cycles and optionally break these.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing any skos:broader that overlaps
with skos:broaderTransitive.
### Response:
def hierarchy_cycles(rdf, fix=False):
"""Check if the graph contains skos:broader cycles and optionally break these.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing any skos:broader that overlaps
with skos:broaderTransitive.
"""
top_concepts = sorted(rdf.subject_objects(SKOS.hasTopConcept))
status = {}
for cs, root in top_concepts:
_hierarchy_cycles_visit(
rdf, root, None, fix, status=status)
# double check that all concepts were actually visited in the search,
# and visit remaining ones if necessary
recheck_top_concepts = False
for conc in sorted(rdf.subjects(RDF.type, SKOS.Concept)):
if conc not in status:
recheck_top_concepts = True
_hierarchy_cycles_visit(
rdf, conc, None, fix, status=status)
return recheck_top_concepts |
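A hedged usage sketch of the checker above; the file name is hypothetical, and it assumes the surrounding module (with its `SKOS`/`RDF` namespaces and `_hierarchy_cycles_visit` helper) is importable:
from rdflib import Graph

rdf = Graph()
rdf.parse("vocab.ttl", format="turtle")   # hypothetical SKOS vocabulary in Turtle

# With fix=True, skos:broader triples that close a cycle are removed from the graph.
recheck = hierarchy_cycles(rdf, fix=True)
if recheck:
    print("some concepts were not reachable from any skos:hasTopConcept root")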
def temp_directory(*args, **kwargs):
"""
Context manager returns a path created by mkdtemp and cleans it up afterwards.
"""
path = tempfile.mkdtemp(*args, **kwargs)
try:
yield path
finally:
shutil.rmtree(path) | Context manager returns a path created by mkdtemp and cleans it up afterwards. | Below is the instruction that describes the task:
### Input:
Context manager returns a path created by mkdtemp and cleans it up afterwards.
### Response:
def temp_directory(*args, **kwargs):
"""
Context manager returns a path created by mkdtemp and cleans it up afterwards.
"""
path = tempfile.mkdtemp(*args, **kwargs)
try:
yield path
finally:
shutil.rmtree(path) |
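As written, the generator cannot be used directly in a `with` statement; in its module it is presumably decorated with `contextlib.contextmanager`. A self-contained equivalent with a usage example:
import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def temp_directory(*args, **kwargs):
    path = tempfile.mkdtemp(*args, **kwargs)
    try:
        yield path
    finally:
        shutil.rmtree(path)

with temp_directory(prefix="demo-") as path:
    open(os.path.join(path, "scratch.txt"), "w").close()
    print(os.path.isdir(path))    # True
print(os.path.isdir(path))        # False: removed when the block exits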
def as_url(cls, api=None, name_prefix='', url_prefix=''):
""" Generate url for resource.
:return RegexURLPattern: Django URL
"""
url_prefix = url_prefix and "%s/" % url_prefix
name_prefix = name_prefix and "%s-" % name_prefix
url_regex = '^%s%s/?$' % (
url_prefix, cls._meta.url_regex.lstrip('^').rstrip('/$'))
url_regex = url_regex.replace('//', '/')
url_name = '%s%s' % (name_prefix, cls._meta.url_name)
return url(url_regex, cls.as_view(api=api), name=url_name) | Generate url for resource.
:return RegexURLPattern: Django URL | Below is the instruction that describes the task:
### Input:
Generate url for resource.
:return RegexURLPattern: Django URL
### Response:
def as_url(cls, api=None, name_prefix='', url_prefix=''):
""" Generate url for resource.
:return RegexURLPattern: Django URL
"""
url_prefix = url_prefix and "%s/" % url_prefix
name_prefix = name_prefix and "%s-" % name_prefix
url_regex = '^%s%s/?$' % (
url_prefix, cls._meta.url_regex.lstrip('^').rstrip('/$'))
url_regex = url_regex.replace('//', '/')
url_name = '%s%s' % (name_prefix, cls._meta.url_name)
return url(url_regex, cls.as_view(api=api), name=url_name) |
def parse_range_list(ranges):
"""Split a string like 2,3-5,8,9-11 into a list of integers. This is
intended to ease adding command-line options for dealing with affinity.
"""
if not ranges:
return []
parts = ranges.split(',')
out = []
for part in parts:
fields = part.split('-', 1)
if len(fields) == 2:
start = int(fields[0])
end = int(fields[1])
out.extend(range(start, end + 1))
else:
out.append(int(fields[0]))
return out | Split a string like 2,3-5,8,9-11 into a list of integers. This is
intended to ease adding command-line options for dealing with affinity. | Below is the instruction that describes the task:
### Input:
Split a string like 2,3-5,8,9-11 into a list of integers. This is
intended to ease adding command-line options for dealing with affinity.
### Response:
def parse_range_list(ranges):
"""Split a string like 2,3-5,8,9-11 into a list of integers. This is
intended to ease adding command-line options for dealing with affinity.
"""
if not ranges:
return []
parts = ranges.split(',')
out = []
for part in parts:
fields = part.split('-', 1)
if len(fields) == 2:
start = int(fields[0])
end = int(fields[1])
out.extend(range(start, end + 1))
else:
out.append(int(fields[0]))
return out |
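A doctest-style illustration of the behaviour described above:
>>> parse_range_list("2,3-5,8,9-11")
[2, 3, 4, 5, 8, 9, 10, 11]
>>> parse_range_list("")
[]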
def set_host(ip, alias):
'''
Set the host entry in the hosts file for the given ip, this will overwrite
any previous entry for the given ip
.. versionchanged:: 2016.3.0
If ``alias`` does not include any host names (it is the empty
string or contains only whitespace), all entries for the given
IP address are removed.
CLI Example:
.. code-block:: bash
salt '*' hosts.set_host <ip> <alias>
'''
hfn = _get_or_create_hostfile()
ovr = False
if not os.path.isfile(hfn):
return False
# Make sure future calls to _list_hosts() will re-read the file
__context__.pop('hosts._list_hosts', None)
line_to_add = salt.utils.stringutils.to_bytes(
ip + '\t\t' + alias + os.linesep
)
# support removing a host entry by providing an empty string
if not alias.strip():
line_to_add = b''
with salt.utils.files.fopen(hfn, 'rb') as fp_:
lines = fp_.readlines()
for ind, _ in enumerate(lines):
tmpline = lines[ind].strip()
if not tmpline:
continue
if tmpline.startswith(b'#'):
continue
comps = tmpline.split()
if comps[0] == salt.utils.stringutils.to_bytes(ip):
if not ovr:
lines[ind] = line_to_add
ovr = True
else: # remove other entries
lines[ind] = b''
linesep_bytes = salt.utils.stringutils.to_bytes(os.linesep)
if not ovr:
# make sure there is a newline
if lines and not lines[-1].endswith(linesep_bytes):
lines[-1] += linesep_bytes
line = line_to_add
lines.append(line)
with salt.utils.files.fopen(hfn, 'wb') as ofile:
ofile.writelines(lines)
return True | Set the host entry in the hosts file for the given ip, this will overwrite
any previous entry for the given ip
.. versionchanged:: 2016.3.0
If ``alias`` does not include any host names (it is the empty
string or contains only whitespace), all entries for the given
IP address are removed.
CLI Example:
.. code-block:: bash
salt '*' hosts.set_host <ip> <alias> | Below is the instruction that describes the task:
### Input:
Set the host entry in the hosts file for the given ip, this will overwrite
any previous entry for the given ip
.. versionchanged:: 2016.3.0
If ``alias`` does not include any host names (it is the empty
string or contains only whitespace), all entries for the given
IP address are removed.
CLI Example:
.. code-block:: bash
salt '*' hosts.set_host <ip> <alias>
### Response:
def set_host(ip, alias):
'''
Set the host entry in the hosts file for the given ip, this will overwrite
any previous entry for the given ip
.. versionchanged:: 2016.3.0
If ``alias`` does not include any host names (it is the empty
string or contains only whitespace), all entries for the given
IP address are removed.
CLI Example:
.. code-block:: bash
salt '*' hosts.set_host <ip> <alias>
'''
hfn = _get_or_create_hostfile()
ovr = False
if not os.path.isfile(hfn):
return False
# Make sure future calls to _list_hosts() will re-read the file
__context__.pop('hosts._list_hosts', None)
line_to_add = salt.utils.stringutils.to_bytes(
ip + '\t\t' + alias + os.linesep
)
# support removing a host entry by providing an empty string
if not alias.strip():
line_to_add = b''
with salt.utils.files.fopen(hfn, 'rb') as fp_:
lines = fp_.readlines()
for ind, _ in enumerate(lines):
tmpline = lines[ind].strip()
if not tmpline:
continue
if tmpline.startswith(b'#'):
continue
comps = tmpline.split()
if comps[0] == salt.utils.stringutils.to_bytes(ip):
if not ovr:
lines[ind] = line_to_add
ovr = True
else: # remove other entries
lines[ind] = b''
linesep_bytes = salt.utils.stringutils.to_bytes(os.linesep)
if not ovr:
# make sure there is a newline
if lines and not lines[-1].endswith(linesep_bytes):
lines[-1] += linesep_bytes
line = line_to_add
lines.append(line)
with salt.utils.files.fopen(hfn, 'wb') as ofile:
ofile.writelines(lines)
return True |
def init_with_context(self, context):
"""
Initialize the menu.
"""
# Apply the include/exclude patterns:
listitems = self._visible_models(context['request'])
# Convert to a similar data structure like the dashboard icons have.
# This allows sorting the items identically.
models = [
{'name': model._meta.model_name,
'app_name': model._meta.app_label,
'title': capfirst(model._meta.verbose_name_plural),
'url': self._get_admin_change_url(model, context)
}
for model, perms in listitems if self.is_item_visible(model, perms)
]
# Sort models.
sort_cms_models(models)
# Convert to items
for model in models:
self.children.append(items.MenuItem(title=model['title'], url=model['url'])) | Initialize the menu. | Below is the instruction that describes the task:
### Input:
Initialize the menu.
### Response:
def init_with_context(self, context):
"""
Initialize the menu.
"""
# Apply the include/exclude patterns:
listitems = self._visible_models(context['request'])
# Convert to a similar data structure like the dashboard icons have.
# This allows sorting the items identically.
models = [
{'name': model._meta.model_name,
'app_name': model._meta.app_label,
'title': capfirst(model._meta.verbose_name_plural),
'url': self._get_admin_change_url(model, context)
}
for model, perms in listitems if self.is_item_visible(model, perms)
]
# Sort models.
sort_cms_models(models)
# Convert to items
for model in models:
self.children.append(items.MenuItem(title=model['title'], url=model['url'])) |
def optimize(exp_rets, covs):
"""
Return parameters for portfolio optimization.
Parameters
----------
exp_rets : ndarray
Vector of expected returns for each investment.
covs : ndarray
Covariance matrix for the given investments.
Returns
---------
a : ndarray
The first vector (to be combined with target return as scalar)
in the linear equation for optimal weights.
b : ndarray
The second (constant) vector in the linear equation for
optimal weights.
least_risk_ret : int
The return achieved on the portfolio that combines the given
equities so as to achieve the lowest possible risk.
Notes
---------
* The length of `exp_rets` must match the number of rows
and columns in the `covs` matrix.
* The weights for an optimal portfolio with expected return
`ret` is given by the formula `w = ret * a + b` where `a`
and `b` are the vectors returned here. The weights `w` for
the portfolio with lowest risk are given by `w = least_risk_ret * a + b`.
* An exception will be raised if the covariance matrix
is singular or if each prospective investment has the
same expected return.
"""
_cov_inv = np.linalg.inv(covs)
# unit vector
_u = np.ones((len(exp_rets)))
# compute some dot products one time only
_u_cov_inv = _u.dot(_cov_inv)
_rets_cov_inv = exp_rets.dot(_cov_inv)
# helper matrix for deriving Lagrange multipliers
_m = np.empty((2, 2))
_m[0, 0] = _rets_cov_inv.dot(exp_rets)
_m[0, 1] = _u_cov_inv.dot(exp_rets)
_m[1, 0] = _rets_cov_inv.dot(_u)
_m[1, 1] = _u_cov_inv.dot(_u)
# compute values to return
_m_inv = np.linalg.inv(_m)
a = _m_inv[0, 0] * _rets_cov_inv + _m_inv[1, 0] * _u_cov_inv
b = _m_inv[0, 1] * _rets_cov_inv + _m_inv[1, 1] * _u_cov_inv
least_risk_ret = _m[0, 1] / _m[1, 1]
return a, b, least_risk_ret | Return parameters for portfolio optimization.
Parameters
----------
exp_rets : ndarray
Vector of expected returns for each investment.
covs : ndarray
Covariance matrix for the given investments.
Returns
---------
a : ndarray
The first vector (to be combined with target return as scalar)
in the linear equation for optimal weights.
b : ndarray
The second (constant) vector in the linear equation for
optimal weights.
least_risk_ret : int
The return achieved on the portfolio that combines the given
equities so as to achieve the lowest possible risk.
Notes
---------
* The length of `exp_rets` must match the number of rows
and columns in the `covs` matrix.
* The weights for an optimal portfolio with expected return
`ret` is given by the formula `w = ret * a + b` where `a`
and `b` are the vectors returned here. The weights `w` for
the portfolio with lowest risk are given by `w = least_risk_ret * a + b`.
* An exception will be raised if the covariance matrix
is singular or if each prospective investment has the
same expected return. | Below is the instruction that describes the task:
### Input:
Return parameters for portfolio optimization.
Parameters
----------
exp_rets : ndarray
Vector of expected returns for each investment.
covs : ndarray
Covariance matrix for the given investments.
Returns
---------
a : ndarray
The first vector (to be combined with target return as scalar)
in the linear equation for optimal weights.
b : ndarray
The second (constant) vector in the linear equation for
optimal weights.
least_risk_ret : int
The return achieved on the portfolio that combines the given
equities so as to achieve the lowest possible risk.
Notes
---------
* The length of `exp_rets` must match the number of rows
and columns in the `covs` matrix.
* The weights for an optimal portfolio with expected return
`ret` is given by the formula `w = ret * a + b` where `a`
and `b` are the vectors returned here. The weights `w` for
the portfolio with lowest risk are given by `w = least_risk_ret * a + b`.
* An exception will be raised if the covariance matrix
is singular or if each prospective investment has the
same expected return.
### Response:
def optimize(exp_rets, covs):
"""
Return parameters for portfolio optimization.
Parameters
----------
exp_rets : ndarray
Vector of expected returns for each investment.
covs : ndarray
Covariance matrix for the given investments.
Returns
---------
a : ndarray
The first vector (to be combined with target return as scalar)
in the linear equation for optimal weights.
b : ndarray
The second (constant) vector in the linear equation for
optimal weights.
least_risk_ret : int
The return achieved on the portfolio that combines the given
equities so as to achieve the lowest possible risk.
Notes
---------
* The length of `exp_rets` must match the number of rows
and columns in the `covs` matrix.
* The weights for an optimal portfolio with expected return
`ret` is given by the formula `w = ret * a + b` where `a`
and `b` are the vectors returned here. The weights `w` for
the portfolio with lowest risk are given by `w = least_risk_ret * a + b`.
* An exception will be raised if the covariance matrix
is singular or if each prospective investment has the
same expected return.
"""
_cov_inv = np.linalg.inv(covs)
# unit vector
_u = np.ones((len(exp_rets)))
# compute some dot products one time only
_u_cov_inv = _u.dot(_cov_inv)
_rets_cov_inv = exp_rets.dot(_cov_inv)
# helper matrix for deriving Lagrange multipliers
_m = np.empty((2, 2))
_m[0, 0] = _rets_cov_inv.dot(exp_rets)
_m[0, 1] = _u_cov_inv.dot(exp_rets)
_m[1, 0] = _rets_cov_inv.dot(_u)
_m[1, 1] = _u_cov_inv.dot(_u)
# compute values to return
_m_inv = np.linalg.inv(_m)
a = _m_inv[0, 0] * _rets_cov_inv + _m_inv[1, 0] * _u_cov_inv
b = _m_inv[0, 1] * _rets_cov_inv + _m_inv[1, 1] * _u_cov_inv
least_risk_ret = _m[0, 1] / _m[1, 1]
return a, b, least_risk_ret |
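A hedged usage sketch with made-up numbers (three assets, an 8% target return); by construction the weights `w = ret * a + b` sum to one and deliver the requested expected return:
import numpy as np

exp_rets = np.array([0.05, 0.08, 0.12])
covs = np.array([[0.04, 0.01, 0.00],
                 [0.01, 0.09, 0.02],
                 [0.00, 0.02, 0.16]])

a, b, least_risk_ret = optimize(exp_rets, covs)

target = 0.08
w = target * a + b                # optimal weights for the target return
w_min = least_risk_ret * a + b    # weights of the minimum-risk portfolio
print(round(w.sum(), 6), round(w.dot(exp_rets), 6))   # 1.0 0.08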
def _ite(f, g, h):
"""Return node that results from recursively applying ITE(f, g, h)."""
# ITE(f, 1, 0) = f
if g is BDDNODEONE and h is BDDNODEZERO:
return f
# ITE(f, 0, 1) = f'
elif g is BDDNODEZERO and h is BDDNODEONE:
return _neg(f)
# ITE(1, g, h) = g
elif f is BDDNODEONE:
return g
# ITE(0, g, h) = h
elif f is BDDNODEZERO:
return h
# ITE(f, g, g) = g
elif g is h:
return g
else:
# ITE(f, g, h) = ITE(x, ITE(fx', gx', hx'), ITE(fx, gx, hx))
root = min(node.root for node in (f, g, h) if node.root > 0)
npoint0 = {root: BDDNODEZERO}
npoint1 = {root: BDDNODEONE}
fv0, gv0, hv0 = [_restrict(node, npoint0) for node in (f, g, h)]
fv1, gv1, hv1 = [_restrict(node, npoint1) for node in (f, g, h)]
return _bddnode(root, _ite(fv0, gv0, hv0), _ite(fv1, gv1, hv1)) | Return node that results from recursively applying ITE(f, g, h). | Below is the instruction that describes the task:
### Input:
Return node that results from recursively applying ITE(f, g, h).
### Response:
def _ite(f, g, h):
"""Return node that results from recursively applying ITE(f, g, h)."""
# ITE(f, 1, 0) = f
if g is BDDNODEONE and h is BDDNODEZERO:
return f
# ITE(f, 0, 1) = f'
elif g is BDDNODEZERO and h is BDDNODEONE:
return _neg(f)
# ITE(1, g, h) = g
elif f is BDDNODEONE:
return g
# ITE(0, g, h) = h
elif f is BDDNODEZERO:
return h
# ITE(f, g, g) = g
elif g is h:
return g
else:
# ITE(f, g, h) = ITE(x, ITE(fx', gx', hx'), ITE(fx, gx, hx))
root = min(node.root for node in (f, g, h) if node.root > 0)
npoint0 = {root: BDDNODEZERO}
npoint1 = {root: BDDNODEONE}
fv0, gv0, hv0 = [_restrict(node, npoint0) for node in (f, g, h)]
fv1, gv1, hv1 = [_restrict(node, npoint1) for node in (f, g, h)]
return _bddnode(root, _ite(fv0, gv0, hv0), _ite(fv1, gv1, hv1)) |
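The recursive case is Shannon expansion around the top variable. Independent of the BDD-node machinery above, a small self-contained check of that identity over all Boolean functions of one variable:
from itertools import product

def ite(f, g, h):
    # Plain-Boolean if-then-else: ITE(f, g, h) = (f AND g) OR (NOT f AND h)
    return (f and g) or (not f and h)

funcs = [lambda x: False, lambda x: True, lambda x: x, lambda x: not x]
for f, g, h in product(funcs, repeat=3):
    for x in (False, True):
        lhs = ite(f(x), g(x), h(x))
        # Expand around x: positive cofactors when x=1, negative cofactors when x=0.
        rhs = ite(x, ite(f(True), g(True), h(True)), ite(f(False), g(False), h(False)))
        assert lhs == rhs
print("ITE(f, g, h) == ITE(x, ITE(fx, gx, hx), ITE(fx', gx', hx')) verified")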
def replace_csi_driver(self, name, body, **kwargs):
"""
replace the specified CSIDriver
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_csi_driver(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CSIDriver (required)
:param V1beta1CSIDriver body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1CSIDriver
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_csi_driver_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_csi_driver_with_http_info(name, body, **kwargs)
return data | replace the specified CSIDriver
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_csi_driver(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CSIDriver (required)
:param V1beta1CSIDriver body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1CSIDriver
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
replace the specified CSIDriver
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_csi_driver(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CSIDriver (required)
:param V1beta1CSIDriver body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1CSIDriver
If the method is called asynchronously,
returns the request thread.
### Response:
def replace_csi_driver(self, name, body, **kwargs):
"""
replace the specified CSIDriver
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_csi_driver(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CSIDriver (required)
:param V1beta1CSIDriver body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1CSIDriver
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_csi_driver_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_csi_driver_with_http_info(name, body, **kwargs)
return data |
def load_manuf(filename):
"""Load manuf file from Wireshark.
param:
- filename: the file to load the manuf file from"""
manufdb = ManufDA(_name=filename)
with open(filename, "rb") as fdesc:
for line in fdesc:
try:
line = line.strip()
if not line or line.startswith(b"#"):
continue
parts = line.split(None, 2)
oui, shrt = parts[:2]
lng = parts[2].lstrip(b"#").strip() if len(parts) > 2 else ""
lng = lng or shrt
manufdb[oui] = plain_str(shrt), plain_str(lng)
except Exception:
log_loading.warning("Couldn't parse one line from [%s] [%r]",
filename, line, exc_info=True)
return manufdb | Load manuf file from Wireshark.
param:
- filename: the file to load the manuf file from | Below is the instruction that describes the task:
### Input:
Load manuf file from Wireshark.
param:
- filename: the file to load the manuf file from
### Response:
def load_manuf(filename):
"""Load manuf file from Wireshark.
param:
- filename: the file to load the manuf file from"""
manufdb = ManufDA(_name=filename)
with open(filename, "rb") as fdesc:
for line in fdesc:
try:
line = line.strip()
if not line or line.startswith(b"#"):
continue
parts = line.split(None, 2)
oui, shrt = parts[:2]
lng = parts[2].lstrip(b"#").strip() if len(parts) > 2 else ""
lng = lng or shrt
manufdb[oui] = plain_str(shrt), plain_str(lng)
except Exception:
log_loading.warning("Couldn't parse one line from [%s] [%r]",
filename, line, exc_info=True)
return manufdb |
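For reference, a manuf entry is an OUI, a short vendor name, and an optional long name after a `#`. A sample line (not taken from a real file) showing how the split above works:
line = b"00:00:0C\tCisco\t# Cisco Systems, Inc"
parts = line.split(None, 2)
oui, shrt = parts[:2]
lng = parts[2].lstrip(b"#").strip() if len(parts) > 2 else b""
print(oui, shrt, lng or shrt)     # b'00:00:0C' b'Cisco' b'Cisco Systems, Inc'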
def get_bytes_from_blob(val) -> bytes:
""" 不同数据库从blob拿出的数据有所差别,有的是memoryview有的是bytes """
if isinstance(val, bytes):
return val
elif isinstance(val, memoryview):
return val.tobytes()
else:
raise TypeError('invalid type for get bytes') | Data read from a blob differs across databases: some return memoryview and some return bytes | Below is the instruction that describes the task:
### Input:
Data read from a blob differs across databases: some return memoryview and some return bytes
### Response:
def get_bytes_from_blob(val) -> bytes:
""" 不同数据库从blob拿出的数据有所差别,有的是memoryview有的是bytes """
if isinstance(val, bytes):
return val
elif isinstance(val, memoryview):
return val.tobytes()
else:
raise TypeError('invalid type for get bytes') |
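A doctest-style illustration of the two accepted input types:
>>> get_bytes_from_blob(b"\x01\x02")
b'\x01\x02'
>>> get_bytes_from_blob(memoryview(b"\x01\x02"))
b'\x01\x02'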
def parent(self, resource):
"""Set parent resource
:param resource: parent resource
:type resource: Resource
:raises ResourceNotFound: resource not found on the API
"""
resource.check()
self['parent_type'] = resource.type
self['parent_uuid'] = resource.uuid | Set parent resource
:param resource: parent resource
:type resource: Resource
:raises ResourceNotFound: resource not found on the API | Below is the instruction that describes the task:
### Input:
Set parent resource
:param resource: parent resource
:type resource: Resource
:raises ResourceNotFound: resource not found on the API
### Response:
def parent(self, resource):
"""Set parent resource
:param resource: parent resource
:type resource: Resource
:raises ResourceNotFound: resource not found on the API
"""
resource.check()
self['parent_type'] = resource.type
self['parent_uuid'] = resource.uuid |
def _copy(master_fd, master_read=_read, stdin_read=_read):
"""Parent copy loop.
Copies
pty master -> standard output (master_read)
standard input -> pty master (stdin_read)"""
fds = [master_fd, STDIN_FILENO]
while True:
rfds, wfds, xfds = select(fds, [], [])
if master_fd in rfds:
data = master_read(master_fd)
if not data: # Reached EOF.
return
else:
os.write(STDOUT_FILENO, data)
if STDIN_FILENO in rfds:
data = stdin_read(STDIN_FILENO)
if not data:
fds.remove(STDIN_FILENO)
else:
_writen(master_fd, data) | Parent copy loop.
Copies
pty master -> standard output (master_read)
standard input -> pty master (stdin_read) | Below is the instruction that describes the task:
### Input:
Parent copy loop.
Copies
pty master -> standard output (master_read)
standard input -> pty master (stdin_read)
### Response:
def _copy(master_fd, master_read=_read, stdin_read=_read):
"""Parent copy loop.
Copies
pty master -> standard output (master_read)
standard input -> pty master (stdin_read)"""
fds = [master_fd, STDIN_FILENO]
while True:
rfds, wfds, xfds = select(fds, [], [])
if master_fd in rfds:
data = master_read(master_fd)
if not data: # Reached EOF.
return
else:
os.write(STDOUT_FILENO, data)
if STDIN_FILENO in rfds:
data = stdin_read(STDIN_FILENO)
if not data:
fds.remove(STDIN_FILENO)
else:
_writen(master_fd, data) |
def _generate_examples(self, label_images):
"""Generate example for each image in the dict."""
for label, image_paths in label_images.items():
for image_path in image_paths:
yield {
"image": image_path,
"label": label,
} | Generate example for each image in the dict. | Below is the instruction that describes the task:
### Input:
Generate example for each image in the dict.
### Response:
def _generate_examples(self, label_images):
"""Generate example for each image in the dict."""
for label, image_paths in label_images.items():
for image_path in image_paths:
yield {
"image": image_path,
"label": label,
} |
def bind_license(self, license_item_id=None):
"""
Auto bind license, uses dynamic if POS is not found
:param str license_item_id: license id
:raises LicenseError: binding license failed, possibly no licenses
:return: None
"""
params = {'license_item_id': license_item_id}
self.make_request(
LicenseError,
method='create',
resource='bind',
params=params) | Auto bind license, uses dynamic if POS is not found
:param str license_item_id: license id
:raises LicenseError: binding license failed, possibly no licenses
:return: None | Below is the instruction that describes the task:
### Input:
Auto bind license, uses dynamic if POS is not found
:param str license_item_id: license id
:raises LicenseError: binding license failed, possibly no licenses
:return: None
### Response:
def bind_license(self, license_item_id=None):
"""
Auto bind license, uses dynamic if POS is not found
:param str license_item_id: license id
:raises LicenseError: binding license failed, possibly no licenses
:return: None
"""
params = {'license_item_id': license_item_id}
self.make_request(
LicenseError,
method='create',
resource='bind',
params=params) |
def _super_pprint(obj, p, cycle):
"""The pprint for the super type."""
p.begin_group(8, '<super: ')
p.pretty(obj.__self_class__)
p.text(',')
p.breakable()
p.pretty(obj.__self__)
p.end_group(8, '>') | The pprint for the super type. | Below is the instruction that describes the task:
### Input:
The pprint for the super type.
### Response:
def _super_pprint(obj, p, cycle):
"""The pprint for the super type."""
p.begin_group(8, '<super: ')
p.pretty(obj.__self_class__)
p.text(',')
p.breakable()
p.pretty(obj.__self__)
p.end_group(8, '>') |
def encrypt(self, sa, esp, key):
"""
Encrypt an ESP packet
@param sa: the SecurityAssociation associated with the ESP packet.
@param esp: an unencrypted _ESPPlain packet with valid padding
@param key: the secret key used for encryption
@return: a valid ESP packet encrypted with this algorithm
"""
data = esp.data_for_encryption()
if self.cipher:
mode_iv = self._format_mode_iv(algo=self, sa=sa, iv=esp.iv)
cipher = self.new_cipher(key, mode_iv)
encryptor = cipher.encryptor()
if self.is_aead:
aad = struct.pack('!LL', esp.spi, esp.seq)
encryptor.authenticate_additional_data(aad)
data = encryptor.update(data) + encryptor.finalize()
data += encryptor.tag[:self.icv_size]
else:
data = encryptor.update(data) + encryptor.finalize()
return ESP(spi=esp.spi, seq=esp.seq, data=esp.iv + data) | Encrypt an ESP packet
@param sa: the SecurityAssociation associated with the ESP packet.
@param esp: an unencrypted _ESPPlain packet with valid padding
@param key: the secret key used for encryption
@return: a valid ESP packet encrypted with this algorithm | Below is the instruction that describes the task:
### Input:
Encrypt an ESP packet
@param sa: the SecurityAssociation associated with the ESP packet.
@param esp: an unencrypted _ESPPlain packet with valid padding
@param key: the secret key used for encryption
@return: a valid ESP packet encrypted with this algorithm
### Response:
def encrypt(self, sa, esp, key):
"""
Encrypt an ESP packet
@param sa: the SecurityAssociation associated with the ESP packet.
@param esp: an unencrypted _ESPPlain packet with valid padding
@param key: the secret key used for encryption
@return: a valid ESP packet encrypted with this algorithm
"""
data = esp.data_for_encryption()
if self.cipher:
mode_iv = self._format_mode_iv(algo=self, sa=sa, iv=esp.iv)
cipher = self.new_cipher(key, mode_iv)
encryptor = cipher.encryptor()
if self.is_aead:
aad = struct.pack('!LL', esp.spi, esp.seq)
encryptor.authenticate_additional_data(aad)
data = encryptor.update(data) + encryptor.finalize()
data += encryptor.tag[:self.icv_size]
else:
data = encryptor.update(data) + encryptor.finalize()
return ESP(spi=esp.spi, seq=esp.seq, data=esp.iv + data) |
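A standalone sketch of the AEAD branch using the `cryptography` package's higher-level `AESGCM` helper instead of the lower-level `Cipher`/encryptor API used above; the SPI and sequence number play the role of the additional authenticated data, and the values are made up:
import os
import struct
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

key = AESGCM.generate_key(bit_length=128)
nonce = os.urandom(12)                      # per-packet IV/nonce
aad = struct.pack('!LL', 0x1000, 1)         # SPI and sequence number, as above
ciphertext = AESGCM(key).encrypt(nonce, b"payload", aad)   # auth tag is appended
assert AESGCM(key).decrypt(nonce, ciphertext, aad) == b"payload"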
def get_status(self, response, finished=False):
"""Given the stdout from the command returned by :meth:`cmd_status`,
return one of the status code defined in :mod:`clusterjob.status`"""
for line in response.split("\n"):
if line.strip() in self.status_mapping:
return self.status_mapping[line.strip()]
return None | Given the stdout from the command returned by :meth:`cmd_status`,
return one of the status code defined in :mod:`clusterjob.status` | Below is the instruction that describes the task:
### Input:
Given the stdout from the command returned by :meth:`cmd_status`,
return one of the status code defined in :mod:`clusterjob.status`
### Response:
def get_status(self, response, finished=False):
"""Given the stdout from the command returned by :meth:`cmd_status`,
return one of the status code defined in :mod:`clusterjob.status`"""
for line in response.split("\n"):
if line.strip() in self.status_mapping:
return self.status_mapping[line.strip()]
return None |
def reduce_chunk(func, array):
"""Reduce with `func`, chunk by chunk, the passed pytable `array`.
"""
res = []
for slice in iter_chunk_slice(array.shape[-1], array.chunkshape[-1]):
res.append(func(array[..., slice]))
return func(res) | Reduce with `func`, chunk by chunk, the passed pytable `array`. | Below is the instruction that describes the task:
### Input:
Reduce with `func`, chunk by chunk, the passed pytable `array`.
### Response:
def reduce_chunk(func, array):
"""Reduce with `func`, chunk by chunk, the passed pytable `array`.
"""
res = []
for slice in iter_chunk_slice(array.shape[-1], array.chunkshape[-1]):
res.append(func(array[..., slice]))
return func(res) |
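A usage sketch with plain arrays standing in for pytables chunk slices: the reducer runs once per chunk and then once more over the list of partial results, so it must nest correctly (`np.sum` and `np.max` do; `np.mean` over unequal chunks would not):
import numpy as np

chunks = [np.arange(0, 5), np.arange(5, 12)]    # stand-ins for two chunk slices
partial = [np.sum(c) for c in chunks]           # per-chunk reductions
print(np.sum(partial), np.sum(np.arange(12)))   # 66 66 -- same as reducing in one pass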
def parse(readDataInstance):
"""
Returns a new L{DosHeader} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{DosHeader} object.
@rtype: L{DosHeader}
@return: A new L{DosHeader} object.
"""
dosHdr = DosHeader()
dosHdr.e_magic.value = readDataInstance.readWord()
dosHdr.e_cblp.value = readDataInstance.readWord()
dosHdr.e_cp.value = readDataInstance.readWord()
dosHdr.e_crlc.value = readDataInstance.readWord()
dosHdr.e_cparhdr.value = readDataInstance.readWord()
dosHdr.e_minalloc.value = readDataInstance.readWord()
dosHdr.e_maxalloc.value = readDataInstance.readWord()
dosHdr.e_ss.value = readDataInstance.readWord()
dosHdr.e_sp.value = readDataInstance.readWord()
dosHdr.e_csum.value = readDataInstance.readWord()
dosHdr.e_ip.value = readDataInstance.readWord()
dosHdr.e_cs.value = readDataInstance.readWord()
dosHdr.e_lfarlc.value = readDataInstance.readWord()
dosHdr.e_ovno.value = readDataInstance.readWord()
dosHdr.e_res = datatypes.Array(datatypes.TYPE_WORD)
for i in range(4):
dosHdr.e_res.append(datatypes.WORD(readDataInstance.readWord()))
dosHdr.e_oemid.value = readDataInstance.readWord()
dosHdr.e_oeminfo.value = readDataInstance.readWord()
dosHdr.e_res2 = datatypes.Array(datatypes.TYPE_WORD)
for i in range (10):
dosHdr.e_res2.append(datatypes.WORD(readDataInstance.readWord()))
dosHdr.e_lfanew.value = readDataInstance.readDword()
return dosHdr | Returns a new L{DosHeader} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{DosHeader} object.
@rtype: L{DosHeader}
@return: A new L{DosHeader} object. | Below is the instruction that describes the task:
### Input:
Returns a new L{DosHeader} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{DosHeader} object.
@rtype: L{DosHeader}
@return: A new L{DosHeader} object.
### Response:
def parse(readDataInstance):
"""
Returns a new L{DosHeader} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{DosHeader} object.
@rtype: L{DosHeader}
@return: A new L{DosHeader} object.
"""
dosHdr = DosHeader()
dosHdr.e_magic.value = readDataInstance.readWord()
dosHdr.e_cblp.value = readDataInstance.readWord()
dosHdr.e_cp.value = readDataInstance.readWord()
dosHdr.e_crlc.value = readDataInstance.readWord()
dosHdr.e_cparhdr.value = readDataInstance.readWord()
dosHdr.e_minalloc.value = readDataInstance.readWord()
dosHdr.e_maxalloc.value = readDataInstance.readWord()
dosHdr.e_ss.value = readDataInstance.readWord()
dosHdr.e_sp.value = readDataInstance.readWord()
dosHdr.e_csum.value = readDataInstance.readWord()
dosHdr.e_ip.value = readDataInstance.readWord()
dosHdr.e_cs.value = readDataInstance.readWord()
dosHdr.e_lfarlc.value = readDataInstance.readWord()
dosHdr.e_ovno.value = readDataInstance.readWord()
dosHdr.e_res = datatypes.Array(datatypes.TYPE_WORD)
for i in range(4):
dosHdr.e_res.append(datatypes.WORD(readDataInstance.readWord()))
dosHdr.e_oemid.value = readDataInstance.readWord()
dosHdr.e_oeminfo.value = readDataInstance.readWord()
dosHdr.e_res2 = datatypes.Array(datatypes.TYPE_WORD)
for i in range (10):
dosHdr.e_res2.append(datatypes.WORD(readDataInstance.readWord()))
dosHdr.e_lfanew.value = readDataInstance.readDword()
return dosHdr |
def accept(self):
"""Method invoked when OK button is clicked."""
output_path = self.output_path_line_edit.text()
if not output_path:
display_warning_message_box(
self,
tr('Empty Output Path'),
tr('Output path can not be empty'))
return
try:
self.convert_metadata()
except MetadataConversionError as e:
display_warning_message_box(
self,
tr('Metadata Conversion Failed'),
str(e))
return
if not os.path.exists(output_path):
display_warning_message_box(
self,
tr('Metadata Conversion Failed'),
tr('Result file is not found.'))
return
display_success_message_bar(
tr('Metadata Conversion Success'),
tr('You can find your copied layer with metadata version 3.5 in '
'%s' % output_path),
iface_object=self.iface
)
super(MetadataConverterDialog, self).accept() | Method invoked when OK button is clicked. | Below is the instruction that describes the task:
### Input:
Method invoked when OK button is clicked.
### Response:
def accept(self):
"""Method invoked when OK button is clicked."""
output_path = self.output_path_line_edit.text()
if not output_path:
display_warning_message_box(
self,
tr('Empty Output Path'),
tr('Output path can not be empty'))
return
try:
self.convert_metadata()
except MetadataConversionError as e:
display_warning_message_box(
self,
tr('Metadata Conversion Failed'),
str(e))
return
if not os.path.exists(output_path):
display_warning_message_box(
self,
tr('Metadata Conversion Failed'),
tr('Result file is not found.'))
return
display_success_message_bar(
tr('Metadata Conversion Success'),
tr('You can find your copied layer with metadata version 3.5 in '
'%s' % output_path),
iface_object=self.iface
)
super(MetadataConverterDialog, self).accept() |
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``NDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or Context
The destination array or context.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray``, then the return value
and ``other`` will point to the same ``NDArray``.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.zeros((2,3), mx.gpu(0))
>>> z = x.copyto(y)
>>> z is y
True
>>> y.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.copyto(mx.gpu(0))
<NDArray 2x3 @gpu(0)>
"""
if isinstance(other, NDArray):
if other.handle is self.handle:
warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
return False
return _internal._copyto(self, out=other)
elif isinstance(other, Context):
hret = NDArray(_new_alloc_handle(self.shape, other, True, self.dtype))
return _internal._copyto(self, out=hret)
else:
raise TypeError('copyto does not support type ' + str(type(other))) | Copies the value of this array to another array.
If ``other`` is a ``NDArray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``NDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or Context
The destination array or context.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray``, then the return value
and ``other`` will point to the same ``NDArray``.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.zeros((2,3), mx.gpu(0))
>>> z = x.copyto(y)
>>> z is y
True
>>> y.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.copyto(mx.gpu(0))
<NDArray 2x3 @gpu(0)> | Below is the instruction that describes the task:
### Input:
Copies the value of this array to another array.
If ``other`` is a ``NDArray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``NDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or Context
The destination array or context.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray``, then the return value
and ``other`` will point to the same ``NDArray``.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.zeros((2,3), mx.gpu(0))
>>> z = x.copyto(y)
>>> z is y
True
>>> y.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.copyto(mx.gpu(0))
<NDArray 2x3 @gpu(0)>
### Response:
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``NDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or Context
The destination array or context.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray``, then the return value
and ``other`` will point to the same ``NDArray``.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.zeros((2,3), mx.gpu(0))
>>> z = x.copyto(y)
>>> z is y
True
>>> y.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.copyto(mx.gpu(0))
<NDArray 2x3 @gpu(0)>
"""
if isinstance(other, NDArray):
if other.handle is self.handle:
warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
return False
return _internal._copyto(self, out=other)
elif isinstance(other, Context):
hret = NDArray(_new_alloc_handle(self.shape, other, True, self.dtype))
return _internal._copyto(self, out=hret)
else:
raise TypeError('copyto does not support type ' + str(type(other))) |
def insert(self, inst):
"""Insert a vdata or a vgroup in the vgroup.
Args::
inst vdata or vgroup instance to add
Returns::
index of the inserted vdata or vgroup (0 based)
C library equivalent : Vinsert
"""
if isinstance(inst, VD):
id = inst._id
elif isinstance(inst, VG):
id = inst._id
else:
raise HDF4Error("insrt: bad argument")
index = _C.Vinsert(self._id, id)
_checkErr('insert', index, "cannot insert in vgroup")
return index | Insert a vdata or a vgroup in the vgroup.
Args::
inst vdata or vgroup instance to add
Returns::
index of the inserted vdata or vgroup (0 based)
C library equivalent : Vinsert | Below is the instruction that describes the task:
### Input:
Insert a vdata or a vgroup in the vgroup.
Args::
inst vdata or vgroup instance to add
Returns::
index of the inserted vdata or vgroup (0 based)
C library equivalent : Vinsert
### Response:
def insert(self, inst):
"""Insert a vdata or a vgroup in the vgroup.
Args::
inst vdata or vgroup instance to add
Returns::
index of the inserted vdata or vgroup (0 based)
C library equivalent : Vinsert
"""
if isinstance(inst, VD):
id = inst._id
elif isinstance(inst, VG):
id = inst._id
else:
raise HDF4Error("insrt: bad argument")
index = _C.Vinsert(self._id, id)
_checkErr('insert', index, "cannot insert in vgroup")
return index |
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False,
failback=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minions initialization
phase (for example from the minions main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
# Run masters discovery over SSDP. This may modify the whole configuration,
# depending of the networking and sets of masters.
# if we are using multimaster, discovery can only happen at start time
# because MinionManager handles it. by eval_master time the minion doesn't
# know about other siblings currently running
if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'):
self._discover_masters()
# check if master_type was altered from its default
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover or distributed is set, master has to be of type list
elif opts['master_type'] in ('failover', 'distributed'):
if isinstance(opts['master'], list):
log.info(
'Got list of available master addresses: %s',
opts['master']
)
if opts['master_type'] == 'distributed':
master_len = len(opts['master'])
if master_len > 1:
secondary_masters = opts['master'][1:]
master_idx = crc32(opts['id']) % master_len
try:
preferred_masters = opts['master']
preferred_masters[0] = opts['master'][master_idx]
preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
opts['master'] = preferred_masters
log.info('Distributed to the master at \'%s\'.', opts['master'][0])
except (KeyError, AttributeError, TypeError):
log.warning('Failed to distribute to a specific master.')
else:
log.warning('master_type = distributed needs more than 1 master.')
if opts['master_shuffle']:
log.warning(
'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor '
'of \'random_master\'. Please update your minion config file.'
)
opts['random_master'] = opts['master_shuffle']
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'%s\'', opts['master'])
# if failed=True, the minion was previously connected
# we're probably called from the minions main-event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
if failback:
# failback list of masters to original config
opts['master'] = opts['master_list']
else:
log.info(
'Moving possibly failed master %s to the end of '
'the list of masters', opts['master']
)
if opts['master'] in opts['local_masters']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# If failover is set, minion have to failover on DNS errors instead of retry DNS resolve.
# See issue 21082 for details
if opts['retry_dns'] and opts['master_type'] == 'failover':
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# FIXME: if SMinion don't define io_loop, it can't switch master see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
last_exc = None
opts['master_uri_list'] = []
opts['local_masters'] = copy.copy(opts['master'])
# shuffle the masters and then loop through them
if opts['random_master']:
# master_failback is only used when master_type is set to failover
if opts['master_type'] == 'failover' and opts['master_failback']:
secondary_masters = opts['local_masters'][1:]
shuffle(secondary_masters)
opts['local_masters'][1:] = secondary_masters
else:
shuffle(opts['local_masters'])
# This sits outside of the connection loop below because it needs to set
# up a list of master URIs regardless of which masters are available
# to connect _to_. This is primarily used for masterless mode, when
# we need a list of master URIs to fire calls back to.
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = copy.copy(opts['local_masters'])
self.opts = opts
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
try:
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
if exc.strerror.startswith('Could not access'):
msg = (
'Failed to initiate connection with Master '
'%s: check ownership/permissions. Error '
'message: %s', opts['master'], exc
)
else:
msg = ('Master %s could not be reached, trying '
'next master (if any)', opts['master'])
log.info(msg)
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
self.opts['master'] = copy.copy(self.opts['local_masters'])
log.error(
'No master could be reached or all masters '
'denied the minion\'s connection attempt.'
)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
if opts['random_master']:
log.warning('random_master is True but there is only one master specified. Ignoring.')
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
try:
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not zmq:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
del self.opts['detect_mode']
break
else:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc | Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minion's initialization
phase (for example from the minion's main event loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters. | Below is the the instruction that describes the task:
### Input:
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minion's initialization
phase (for example from the minion's main event loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
### Response:
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False,
failback=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minion's initialization
phase (for example from the minion's main event loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
# Run masters discovery over SSDP. This may modify the whole configuration,
# depending of the networking and sets of masters.
# if we are using multimaster, discovery can only happen at start time
# because MinionManager handles it. by eval_master time the minion doesn't
# know about other siblings currently running
if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'):
self._discover_masters()
# check if master_type was altered from its default
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover or distributed is set, master has to be of type list
elif opts['master_type'] in ('failover', 'distributed'):
if isinstance(opts['master'], list):
log.info(
'Got list of available master addresses: %s',
opts['master']
)
if opts['master_type'] == 'distributed':
master_len = len(opts['master'])
if master_len > 1:
secondary_masters = opts['master'][1:]
master_idx = crc32(opts['id']) % master_len
try:
preferred_masters = opts['master']
preferred_masters[0] = opts['master'][master_idx]
preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
opts['master'] = preferred_masters
log.info('Distributed to the master at \'%s\'.', opts['master'][0])
except (KeyError, AttributeError, TypeError):
log.warning('Failed to distribute to a specific master.')
else:
log.warning('master_type = distributed needs more than 1 master.')
if opts['master_shuffle']:
log.warning(
'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor '
'of \'random_master\'. Please update your minion config file.'
)
opts['random_master'] = opts['master_shuffle']
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'%s\'', opts['master'])
# if failed=True, the minion was previously connected
# we're probably called from the minions main-event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
if failback:
# failback list of masters to original config
opts['master'] = opts['master_list']
else:
log.info(
'Moving possibly failed master %s to the end of '
'the list of masters', opts['master']
)
if opts['master'] in opts['local_masters']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# If failover is set, minions have to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns'] and opts['master_type'] == 'failover':
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# FIXME: if SMinion doesn't define io_loop, it can't switch master; see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
last_exc = None
opts['master_uri_list'] = []
opts['local_masters'] = copy.copy(opts['master'])
# shuffle the masters and then loop through them
if opts['random_master']:
# master_failback is only used when master_type is set to failover
if opts['master_type'] == 'failover' and opts['master_failback']:
secondary_masters = opts['local_masters'][1:]
shuffle(secondary_masters)
opts['local_masters'][1:] = secondary_masters
else:
shuffle(opts['local_masters'])
# This sits outside of the connection loop below because it needs to set
# up a list of master URIs regardless of which masters are available
# to connect _to_. This is primarily used for masterless mode, when
# we need a list of master URIs to fire calls back to.
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = copy.copy(opts['local_masters'])
self.opts = opts
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
try:
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
if exc.strerror.startswith('Could not access'):
msg = (
'Failed to initiate connection with Master '
'%s: check ownership/permissions. Error '
'message: %s', opts['master'], exc
)
else:
msg = ('Master %s could not be reached, trying '
'next master (if any)', opts['master'])
log.info(msg)
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
self.opts['master'] = copy.copy(self.opts['local_masters'])
log.error(
'No master could be reached or all masters '
'denied the minion\'s connection attempt.'
)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
if opts['random_master']:
log.warning('random_master is True but there is only one master specified. Ignoring.')
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
try:
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not zmq:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
del self.opts['detect_mode']
break
else:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc |
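The core of the failover handling above is a simple reordering: the master that just failed is rotated to the end of the candidate list so the minion tries the remaining masters first. Below is a minimal standalone sketch of that reordering, assuming a plain list of hostnames rather than a full Salt opts dictionary; the names rotate_failed_master and masters are illustrative only.
import copy

def rotate_failed_master(masters, failed_master):
    # Mirror of the failover branch above: keep every other master in
    # its original order and move the possibly failed one to the end.
    if failed_master not in masters:
        return copy.copy(masters)
    reordered = [m for m in masters if m != failed_master]
    reordered.append(failed_master)
    return reordered

# Example: the minion lost its connection to 'master1'.
print(rotate_failed_master(['master1', 'master2', 'master3'], 'master1'))
# ['master2', 'master3', 'master1']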
def swo_supported_speeds(self, cpu_speed, num_speeds=3):
"""Retrives a list of SWO speeds supported by both the target and the
connected J-Link.
The supported speeds are returned in order from highest to lowest.
Args:
self (JLink): the ``JLink`` instance
cpu_speed (int): the target's CPU speed in Hz
num_speeds (int): the number of compatible speeds to return
Returns:
A list of compatible SWO speeds in Hz in order from highest to lowest.
"""
buf_size = num_speeds
buf = (ctypes.c_uint32 * buf_size)()
res = self._dll.JLINKARM_SWO_GetCompatibleSpeeds(cpu_speed, 0, buf, buf_size)
if res < 0:
raise errors.JLinkException(res)
return list(buf)[:res] | Retrieves a list of SWO speeds supported by both the target and the
connected J-Link.
The supported speeds are returned in order from highest to lowest.
Args:
self (JLink): the ``JLink`` instance
cpu_speed (int): the target's CPU speed in Hz
num_speeds (int): the number of compatible speeds to return
Returns:
A list of compatible SWO speeds in Hz in order from highest to lowest. | Below is the the instruction that describes the task:
### Input:
Retrieves a list of SWO speeds supported by both the target and the
connected J-Link.
The supported speeds are returned in order from highest to lowest.
Args:
self (JLink): the ``JLink`` instance
cpu_speed (int): the target's CPU speed in Hz
num_speeds (int): the number of compatible speeds to return
Returns:
A list of compatible SWO speeds in Hz in order from highest to lowest.
### Response:
def swo_supported_speeds(self, cpu_speed, num_speeds=3):
"""Retrives a list of SWO speeds supported by both the target and the
connected J-Link.
The supported speeds are returned in order from highest to lowest.
Args:
self (JLink): the ``JLink`` instance
cpu_speed (int): the target's CPU speed in Hz
num_speeds (int): the number of compatible speeds to return
Returns:
A list of compatible SWO speeds in Hz in order from highest to lowest.
"""
buf_size = num_speeds
buf = (ctypes.c_uint32 * buf_size)()
res = self._dll.JLINKARM_SWO_GetCompatibleSpeeds(cpu_speed, 0, buf, buf_size)
if res < 0:
raise errors.JLinkException(res)
return list(buf)[:res] |
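A minimal usage sketch for the method above, assuming the pylink package and an already attached J-Link probe; the device name 'STM32F407VE' and the 72 MHz CPU frequency are illustrative assumptions, not values implied by the API.
import pylink

jlink = pylink.JLink()
jlink.open()                    # open the first connected J-Link
jlink.connect('STM32F407VE')    # assumed target device name

cpu_hz = 72000000               # assumed target CPU frequency in Hz
for hz in jlink.swo_supported_speeds(cpu_hz, num_speeds=3):
    print('supported SWO speed: %d Hz' % hz)   # highest to lowest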
def payment_end(self, account, wallet):
"""
End a payment session. Marks the account as available for use in a
payment session.
:param account: Account to mark available
:type account: str
:param wallet: Wallet to end payment session for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.payment_end(
... account="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000",
... wallet="FFFD1BAEC8EC20814BBB9059B393051AAA8380F9B5A2E6B2489A277D81789EEE"
... )
True
"""
account = self._process_value(account, 'account')
wallet = self._process_value(wallet, 'wallet')
payload = {"account": account, "wallet": wallet}
resp = self.call('payment_end', payload)
return resp == {} | End a payment session. Marks the account as available for use in a
payment session.
:param account: Account to mark available
:type account: str
:param wallet: Wallet to end payment session for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.payment_end(
... account="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000",
... wallet="FFFD1BAEC8EC20814BBB9059B393051AAA8380F9B5A2E6B2489A277D81789EEE"
... )
True | Below is the the instruction that describes the task:
### Input:
End a payment session. Marks the account as available for use in a
payment session.
:param account: Account to mark available
:type account: str
:param wallet: Wallet to end payment session for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.payment_end(
... account="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000",
... wallet="FFFD1BAEC8EC20814BBB9059B393051AAA8380F9B5A2E6B2489A277D81789EEE"
... )
True
### Response:
def payment_end(self, account, wallet):
"""
End a payment session. Marks the account as available for use in a
payment session.
:param account: Account to mark available
:type account: str
:param wallet: Wallet to end payment session for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.payment_end(
... account="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000",
... wallet="FFFD1BAEC8EC20814BBB9059B393051AAA8380F9B5A2E6B2489A277D81789EEE"
... )
True
"""
account = self._process_value(account, 'account')
wallet = self._process_value(wallet, 'wallet')
payload = {"account": account, "wallet": wallet}
resp = self.call('payment_end', payload)
return resp == {} |
def wait(self):
'''
Waits for the current application to become idle or for a window update event to occur.
Usage:
d.wait.idle(timeout=1000)
d.wait.update(timeout=1000, package_name="com.android.settings")
'''
@param_to_property(action=["idle", "update"])
def _wait(action, timeout=1000, package_name=None):
if timeout / 1000 + 5 > int(os.environ.get("JSONRPC_TIMEOUT", 90)):
http_timeout = timeout / 1000 + 5
else:
http_timeout = int(os.environ.get("JSONRPC_TIMEOUT", 90))
if action == "idle":
return self.server.jsonrpc_wrap(timeout=http_timeout).waitForIdle(timeout)
elif action == "update":
return self.server.jsonrpc_wrap(timeout=http_timeout).waitForWindowUpdate(package_name, timeout)
return _wait | Waits for the current application to become idle or for a window update event to occur.
Usage:
d.wait.idle(timeout=1000)
d.wait.update(timeout=1000, package_name="com.android.settings") | Below is the the instruction that describes the task:
### Input:
Waits for the current application to become idle or for a window update event to occur.
Usage:
d.wait.idle(timeout=1000)
d.wait.update(timeout=1000, package_name="com.android.settings")
### Response:
def wait(self):
'''
Waits for the current application to become idle or for a window update event to occur.
Usage:
d.wait.idle(timeout=1000)
d.wait.update(timeout=1000, package_name="com.android.settings")
'''
@param_to_property(action=["idle", "update"])
def _wait(action, timeout=1000, package_name=None):
if timeout / 1000 + 5 > int(os.environ.get("JSONRPC_TIMEOUT", 90)):
http_timeout = timeout / 1000 + 5
else:
http_timeout = int(os.environ.get("JSONRPC_TIMEOUT", 90))
if action == "idle":
return self.server.jsonrpc_wrap(timeout=http_timeout).waitForIdle(timeout)
elif action == "update":
return self.server.jsonrpc_wrap(timeout=http_timeout).waitForWindowUpdate(package_name, timeout)
return _wait |
def close(self):
"""
Closes the connection to the bridge.
"""
self.is_closed = True
self.is_ready = False
self._command_queue.put(None) | Closes the connection to the bridge. | Below is the the instruction that describes the task:
### Input:
Closes the connection to the bridge.
### Response:
def close(self):
"""
Closes the connection to the bridge.
"""
self.is_closed = True
self.is_ready = False
self._command_queue.put(None) |
def section(self, section):
"""Creates a section block
Args:
section (str or :class:`Section`): name of section or object
Returns:
self for chaining
"""
if not isinstance(self._container, ConfigUpdater):
raise ValueError("Sections can only be added at section level!")
if isinstance(section, str):
# create a new section
section = Section(section, container=self._container)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
if section.name in [block.name for block in self._container
if isinstance(block, Section)]:
raise DuplicateSectionError(section.name)
self._container.structure.insert(self._idx, section)
self._idx += 1
return self | Creates a section block
Args:
section (str or :class:`Section`): name of section or object
Returns:
self for chaining | Below is the the instruction that describes the task:
### Input:
Creates a section block
Args:
section (str or :class:`Section`): name of section or object
Returns:
self for chaining
### Response:
def section(self, section):
"""Creates a section block
Args:
section (str or :class:`Section`): name of section or object
Returns:
self for chaining
"""
if not isinstance(self._container, ConfigUpdater):
raise ValueError("Sections can only be added at section level!")
if isinstance(section, str):
# create a new section
section = Section(section, container=self._container)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
if section.name in [block.name for block in self._container
if isinstance(block, Section)]:
raise DuplicateSectionError(section.name)
self._container.structure.insert(self._idx, section)
self._idx += 1
return self |
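A short usage sketch, assuming the configupdater package this builder pattern comes from; the add_before entry point and the section names are assumptions made only for the demo.
from configupdater import ConfigUpdater

updater = ConfigUpdater()
updater.read_string('[existing]\nkey = value\n')

# Insert a new [metadata] section block before [existing] using the
# builder's section() method shown above.
updater['existing'].add_before.section('metadata')
print(updater)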
def merge_insert(ins_chunks, doc):
""" doc is the already-handled document (as a list of text chunks);
here we add <ins>ins_chunks</ins> to the end of that. """
# Though we don't throw away unbalanced_start or unbalanced_end
# (we assume there is accompanying markup later or earlier in the
# document), we only put <ins> around the balanced portion.
unbalanced_start, balanced, unbalanced_end = split_unbalanced(ins_chunks)
doc.extend(unbalanced_start)
if doc and not doc[-1].endswith(' '):
# Fix up the case where the word before the insert didn't end with
# a space
doc[-1] += ' '
doc.append('<ins>')
if balanced and balanced[-1].endswith(' '):
# We move space outside of </ins>
balanced[-1] = balanced[-1][:-1]
doc.extend(balanced)
doc.append('</ins> ')
doc.extend(unbalanced_end) | doc is the already-handled document (as a list of text chunks);
here we add <ins>ins_chunks</ins> to the end of that. | Below is the the instruction that describes the task:
### Input:
doc is the already-handled document (as a list of text chunks);
here we add <ins>ins_chunks</ins> to the end of that.
### Response:
def merge_insert(ins_chunks, doc):
""" doc is the already-handled document (as a list of text chunks);
here we add <ins>ins_chunks</ins> to the end of that. """
# Though we don't throw away unbalanced_start or unbalanced_end
# (we assume there is accompanying markup later or earlier in the
# document), we only put <ins> around the balanced portion.
unbalanced_start, balanced, unbalanced_end = split_unbalanced(ins_chunks)
doc.extend(unbalanced_start)
if doc and not doc[-1].endswith(' '):
# Fix up the case where the word before the insert didn't end with
# a space
doc[-1] += ' '
doc.append('<ins>')
if balanced and balanced[-1].endswith(' '):
# We move space outside of </ins>
balanced[-1] = balanced[-1][:-1]
doc.extend(balanced)
doc.append('</ins> ')
doc.extend(unbalanced_end) |
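A small demonstration of the wrapping behaviour, using the merge_insert defined above together with a simplified stand-in for split_unbalanced that treats every chunk as balanced (the real helper also separates out unbalanced markup).
def split_unbalanced(chunks):
    # Simplified stand-in used only for this demo.
    return [], list(chunks), []

doc = ['Hello']
merge_insert(['brave ', 'new '], doc)
doc.append('world')
print(''.join(doc))
# Hello <ins>brave new</ins> world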
def parse_devices(self, json):
"""Parse result from API."""
result = []
for json_device in json:
license_plate = json_device['EquipmentHeader']['SerialNumber']
device = Device(self, license_plate)
device.update_from_json(json_device)
result.append(device)
return result | Parse result from API. | Below is the the instruction that describes the task:
### Input:
Parse result from API.
### Response:
def parse_devices(self, json):
"""Parse result from API."""
result = []
for json_device in json:
license_plate = json_device['EquipmentHeader']['SerialNumber']
device = Device(self, license_plate)
device.update_from_json(json_device)
result.append(device)
return result |
def generate(env):
"""Add Builders and construction variables for Borland ilink to an
Environment."""
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
env['LINK'] = '$CC'
env['LINKFLAGS'] = SCons.Util.CLVar('')
env['LINKCOM'] = '$LINK -q $LINKFLAGS -e$TARGET $SOURCES $LIBS'
env['LIBDIRPREFIX']=''
env['LIBDIRSUFFIX']=''
env['LIBLINKPREFIX']=''
env['LIBLINKSUFFIX']='$LIBSUFFIX' | Add Builders and construction variables for Borland ilink to an
Environment. | Below is the the instruction that describes the task:
### Input:
Add Builders and construction variables for Borland ilink to an
Environment.
### Response:
def generate(env):
"""Add Builders and construction variables for Borland ilink to an
Environment."""
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
env['LINK'] = '$CC'
env['LINKFLAGS'] = SCons.Util.CLVar('')
env['LINKCOM'] = '$LINK -q $LINKFLAGS -e$TARGET $SOURCES $LIBS'
env['LIBDIRPREFIX']=''
env['LIBDIRSUFFIX']=''
env['LIBLINKPREFIX']=''
env['LIBLINKSUFFIX']='$LIBSUFFIX' |
def forward(self, inputs, begin_state=None): # pylint: disable=arguments-differ
"""Defines the forward computation. Arguments can be either
:py:class:`NDArray` or :py:class:`Symbol`.
Parameters
-----------
inputs : NDArray
input tensor with shape `(sequence_length, batch_size)`
when `layout` is "TNC".
begin_state : list
initial recurrent state tensor with length equal to num_layers-1.
the initial state with shape `(num_layers, batch_size, num_hidden)`
Returns
--------
out: NDArray
output tensor with shape `(sequence_length, batch_size, input_size)`
when `layout` is "TNC".
out_states: list
output recurrent state tensor with length equal to num_layers-1.
the state with shape `(num_layers, batch_size, num_hidden)`
encoded_raw: list
The list of last output of the model's encoder.
the shape of last encoder's output `(sequence_length, batch_size, num_hidden)`
encoded_dropped: list
The list of last output with dropout of the model's encoder.
the shape of last encoder's dropped output `(sequence_length, batch_size, num_hidden)`
"""
encoded = self.embedding(inputs)
if not begin_state:
begin_state = self.begin_state(batch_size=inputs.shape[1])
encoded_raw = []
encoded_dropped = []
encoded, state = self.encoder(encoded, begin_state)
encoded_raw.append(encoded)
if self._dropout:
encoded = nd.Dropout(encoded, p=self._dropout, axes=(0,))
out = self.decoder(encoded)
return out, state, encoded_raw, encoded_dropped | Defines the forward computation. Arguments can be either
:py:class:`NDArray` or :py:class:`Symbol`.
Parameters
-----------
inputs : NDArray
input tensor with shape `(sequence_length, batch_size)`
when `layout` is "TNC".
begin_state : list
initial recurrent state tensor with length equal to num_layers-1.
the initial state with shape `(num_layers, batch_size, num_hidden)`
Returns
--------
out: NDArray
output tensor with shape `(sequence_length, batch_size, input_size)`
when `layout` is "TNC".
out_states: list
output recurrent state tensor with length equal to num_layers-1.
the state with shape `(num_layers, batch_size, num_hidden)`
encoded_raw: list
The list of last output of the model's encoder.
the shape of last encoder's output `(sequence_length, batch_size, num_hidden)`
encoded_dropped: list
The list of last output with dropout of the model's encoder.
the shape of last encoder's dropped output `(sequence_length, batch_size, num_hidden)` | Below is the the instruction that describes the task:
### Input:
Defines the forward computation. Arguments can be either
:py:class:`NDArray` or :py:class:`Symbol`.
Parameters
-----------
inputs : NDArray
input tensor with shape `(sequence_length, batch_size)`
when `layout` is "TNC".
begin_state : list
initial recurrent state tensor with length equal to num_layers-1.
the initial state with shape `(num_layers, batch_size, num_hidden)`
Returns
--------
out: NDArray
output tensor with shape `(sequence_length, batch_size, input_size)`
when `layout` is "TNC".
out_states: list
output recurrent state tensor with length equal to num_layers-1.
the state with shape `(num_layers, batch_size, num_hidden)`
encoded_raw: list
The list of last output of the model's encoder.
the shape of last encoder's output `(sequence_length, batch_size, num_hidden)`
encoded_dropped: list
The list of last output with dropout of the model's encoder.
the shape of last encoder's dropped output `(sequence_length, batch_size, num_hidden)`
### Response:
def forward(self, inputs, begin_state=None): # pylint: disable=arguments-differ
"""Defines the forward computation. Arguments can be either
:py:class:`NDArray` or :py:class:`Symbol`.
Parameters
-----------
inputs : NDArray
input tensor with shape `(sequence_length, batch_size)`
when `layout` is "TNC".
begin_state : list
initial recurrent state tensor with length equal to num_layers-1.
the initial state with shape `(num_layers, batch_size, num_hidden)`
Returns
--------
out: NDArray
output tensor with shape `(sequence_length, batch_size, input_size)`
when `layout` is "TNC".
out_states: list
output recurrent state tensor with length equal to num_layers-1.
the state with shape `(num_layers, batch_size, num_hidden)`
encoded_raw: list
The list of last output of the model's encoder.
the shape of last encoder's output `(sequence_length, batch_size, num_hidden)`
encoded_dropped: list
The list of last output with dropout of the model's encoder.
the shape of last encoder's dropped output `(sequence_length, batch_size, num_hidden)`
"""
encoded = self.embedding(inputs)
if not begin_state:
begin_state = self.begin_state(batch_size=inputs.shape[1])
encoded_raw = []
encoded_dropped = []
encoded, state = self.encoder(encoded, begin_state)
encoded_raw.append(encoded)
if self._dropout:
encoded = nd.Dropout(encoded, p=self._dropout, axes=(0,))
out = self.decoder(encoded)
return out, state, encoded_raw, encoded_dropped |
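A self-contained toy that follows the same forward contract (embedding, then RNN encoder, then dense decoder, all in 'TNC' layout), assuming MXNet Gluon is available; the vocabulary, embedding and hidden sizes are arbitrary values chosen only to illustrate the tensor shapes.
import numpy as np
import mxnet as mx
from mxnet import nd
from mxnet.gluon import nn, rnn

vocab_size, embed_size, hidden_size = 100, 16, 32
embedding = nn.Embedding(vocab_size, embed_size)
encoder = rnn.LSTM(hidden_size)               # default layout is 'TNC'
decoder = nn.Dense(vocab_size, flatten=False)
for block in (embedding, encoder, decoder):
    block.initialize()

seq_len, batch_size = 5, 2
inputs = nd.array(np.random.randint(0, vocab_size, size=(seq_len, batch_size)))
encoded = embedding(inputs)                        # (T, N, embed_size)
begin_state = encoder.begin_state(batch_size=batch_size)
encoded, state = encoder(encoded, begin_state)     # (T, N, hidden_size)
out = decoder(encoded)                             # (T, N, vocab_size)
print(out.shape)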
def get_as_nullable_boolean(self, key):
"""
Converts map element into a boolean or returns None if conversion is not possible
:param key: an index of element to get.
:return: boolean value of the element or None if conversion is not supported.
"""
value = self.get(key)
return BooleanConverter.to_nullable_boolean(value) | Converts map element into a boolean or returns None if conversion is not possible
:param key: an index of element to get.
:return: boolean value of the element or None if conversion is not supported. | Below is the the instruction that describes the task:
### Input:
Converts map element into a boolean or returns None if conversion is not possible
:param key: an index of element to get.
:return: boolean value of the element or None if conversion is not supported.
### Response:
def get_as_nullable_boolean(self, key):
"""
Converts map element into a boolean or returns None if conversion is not possible
:param key: an index of element to get.
:return: boolean value of the element or None if conversion is not supported.
"""
value = self.get(key)
return BooleanConverter.to_nullable_boolean(value) |
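For illustration, a minimal stand-in for BooleanConverter.to_nullable_boolean showing the intended behaviour (recognised truthy/falsy values convert, everything else yields None); the accepted spellings here are assumptions, not the library's exact list.
def to_nullable_boolean(value):
    # Return True/False when the value is recognisable, None otherwise.
    if isinstance(value, bool):
        return value
    text = str(value).strip().lower()
    if text in ('1', 'true', 't', 'yes', 'y'):
        return True
    if text in ('0', 'false', 'f', 'no', 'n'):
        return False
    return None

print(to_nullable_boolean('yes'))    # True
print(to_nullable_boolean('maybe'))  # None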
def get(self):
"""
Reloads the measurements from the backing store.
:return: 200 if success.
"""
try:
self._measurementController.reloadCompletedMeasurements()
return None, 200
except:
logger.exception("Failed to reload measurements")
return str(sys.exc_info()), 500 | Reloads the measurements from the backing store.
:return: 200 if success. | Below is the the instruction that describes the task:
### Input:
Reloads the measurements from the backing store.
:return: 200 if success.
### Response:
def get(self):
"""
Reloads the measurements from the backing store.
:return: 200 if success.
"""
try:
self._measurementController.reloadCompletedMeasurements()
return None, 200
except:
logger.exception("Failed to reload measurements")
return str(sys.exc_info()), 500 |
def login(self, username=None, password=None, **kwargs):
"""Login to a reddit site.
**DEPRECATED**. Will be removed in a future version of PRAW.
https://www.reddit.com/comments/2ujhkr/
https://www.reddit.com/comments/37e2mv/
Look for username first in parameter, then praw.ini and finally if both
were empty get it from stdin. Look for password in parameter, then
praw.ini (but only if username matches that in praw.ini) and finally
if they both are empty get it with getpass. Add the variables ``user``
(username) and ``pswd`` (password) to your praw.ini file to allow for
auto-login.
A successful login will overwrite any existing authentication.
"""
if password and not username:
raise Exception('Username must be provided when password is.')
user = username or self.config.user
if not user:
sys.stdout.write('Username: ')
sys.stdout.flush()
user = sys.stdin.readline().strip()
pswd = None
else:
pswd = password or self.config.pswd
if not pswd:
import getpass
pswd = getpass.getpass('Password for {0}: '.format(user)
.encode('ascii', 'ignore'))
data = {'passwd': pswd,
'user': user}
self.clear_authentication()
self.request_json(self.config['login'], data=data)
# Update authentication settings
self._authentication = True
self.user = self.get_redditor(user)
self.user.__class__ = objects.LoggedInRedditor | Login to a reddit site.
**DEPRECATED**. Will be removed in a future version of PRAW.
https://www.reddit.com/comments/2ujhkr/
https://www.reddit.com/comments/37e2mv/
Look for username first in parameter, then praw.ini and finally if both
were empty get it from stdin. Look for password in parameter, then
praw.ini (but only if username matches that in praw.ini) and finally
if they both are empty get it with getpass. Add the variables ``user``
(username) and ``pswd`` (password) to your praw.ini file to allow for
auto-login.
A successful login will overwrite any existing authentication. | Below is the the instruction that describes the task:
### Input:
Login to a reddit site.
**DEPRECATED**. Will be removed in a future version of PRAW.
https://www.reddit.com/comments/2ujhkr/
https://www.reddit.com/comments/37e2mv/
Look for username first in parameter, then praw.ini and finally if both
were empty get it from stdin. Look for password in parameter, then
praw.ini (but only if username matches that in praw.ini) and finally
if they both are empty get it with getpass. Add the variables ``user``
(username) and ``pswd`` (password) to your praw.ini file to allow for
auto-login.
A successful login will overwrite any existing authentication.
### Response:
def login(self, username=None, password=None, **kwargs):
"""Login to a reddit site.
**DEPRECATED**. Will be removed in a future version of PRAW.
https://www.reddit.com/comments/2ujhkr/
https://www.reddit.com/comments/37e2mv/
Look for username first in parameter, then praw.ini and finally if both
were empty get it from stdin. Look for password in parameter, then
praw.ini (but only if username matches that in praw.ini) and finally
if they both are empty get it with getpass. Add the variables ``user``
(username) and ``pswd`` (password) to your praw.ini file to allow for
auto-login.
A successful login will overwrite any existing authentication.
"""
if password and not username:
raise Exception('Username must be provided when password is.')
user = username or self.config.user
if not user:
sys.stdout.write('Username: ')
sys.stdout.flush()
user = sys.stdin.readline().strip()
pswd = None
else:
pswd = password or self.config.pswd
if not pswd:
import getpass
pswd = getpass.getpass('Password for {0}: '.format(user)
.encode('ascii', 'ignore'))
data = {'passwd': pswd,
'user': user}
self.clear_authentication()
self.request_json(self.config['login'], data=data)
# Update authentication settings
self._authentication = True
self.user = self.get_redditor(user)
self.user.__class__ = objects.LoggedInRedditor |
def c_module_relocs(self):
"""Build relocation for the module variable."""
if self.opts.no_structs or self.opts.windll:
return '', ''
x86 = reloc_var(
self.name, self._c_struct_names()[1],
self.opts.reloc_delta,
self._c_uses_pointer()
)
x64 = '{0} *{1} = &_{1};\n'.format(
self._c_struct_names()[1], self.name
) if self._c_uses_pointer() else ''
return x86, x64 | Build relocation for the module variable. | Below is the the instruction that describes the task:
### Input:
Build relocation for the module variable.
### Response:
def c_module_relocs(self):
"""Build relocation for the module variable."""
if self.opts.no_structs or self.opts.windll:
return '', ''
x86 = reloc_var(
self.name, self._c_struct_names()[1],
self.opts.reloc_delta,
self._c_uses_pointer()
)
x64 = '{0} *{1} = &_{1};\n'.format(
self._c_struct_names()[1], self.name
) if self._c_uses_pointer() else ''
return x86, x64 |
def finalize(self, process_row = None):
"""
Restore the LigolwSegmentList objects to the XML tables in
preparation for output. All segments from all segment
lists are inserted into the tables in time order, but this
is NOT behaviour external applications should rely on.
This is done simply in the belief that it might assist in
constructing well balanced indexed databases from the
resulting files. If that proves not to be the case, or for
some reason this behaviour proves inconvenient to preserve,
then it might be discontinued without notice. You've been
warned.
"""
if process_row is not None:
process_id = process_row.process_id
elif self.process is not None:
process_id = self.process.process_id
else:
raise ValueError("must supply a process row to .__init__()")
#
# ensure ID generators are synchronized with table contents
#
self.segment_def_table.sync_next_id()
self.segment_table.sync_next_id()
self.segment_sum_table.sync_next_id()
#
# put all segment lists in time order
#
self.sort()
#
# generator function to convert segments into row objects,
# each paired with the table to which the row is to be
# appended
#
def row_generator(segs, target_table, process_id, segment_def_id):
id_column = target_table.next_id.column_name
for seg in segs:
row = target_table.RowType()
row.segment = seg
row.process_id = process_id
row.segment_def_id = segment_def_id
setattr(row, id_column, target_table.get_next_id())
if 'comment' in target_table.validcolumns:
row.comment = None
yield row, target_table
#
# populate the segment_definer table from the list of
# LigolwSegmentList objects and construct a matching list
# of table row generators. empty ourselves to prevent this
# process from being repeated
#
row_generators = []
while self:
ligolw_segment_list = self.pop()
segment_def_row = self.segment_def_table.RowType()
segment_def_row.process_id = process_id
segment_def_row.segment_def_id = self.segment_def_table.get_next_id()
segment_def_row.instruments = ligolw_segment_list.instruments
segment_def_row.name = ligolw_segment_list.name
segment_def_row.version = ligolw_segment_list.version
segment_def_row.comment = ligolw_segment_list.comment
self.segment_def_table.append(segment_def_row)
row_generators.append(row_generator(ligolw_segment_list.valid, self.segment_sum_table, process_id, segment_def_row.segment_def_id))
row_generators.append(row_generator(ligolw_segment_list.active, self.segment_table, process_id, segment_def_row.segment_def_id))
#
# populate segment and segment_summary tables by pulling
# rows from the generators in time order
#
for row, target_table in iterutils.inorder(*row_generators):
target_table.append(row) | Restore the LigolwSegmentList objects to the XML tables in
preparation for output. All segments from all segment
lists are inserted into the tables in time order, but this
is NOT behaviour external applications should rely on.
This is done simply in the belief that it might assist in
constructing well balanced indexed databases from the
resulting files. If that proves not to be the case, or for
some reason this behaviour proves inconvenient to preserve,
then it might be discontinued without notice. You've been
warned. | Below is the the instruction that describes the task:
### Input:
Restore the LigolwSegmentList objects to the XML tables in
preparation for output. All segments from all segment
lists are inserted into the tables in time order, but this
is NOT behaviour external applications should rely on.
This is done simply in the belief that it might assist in
constructing well balanced indexed databases from the
resulting files. If that proves not to be the case, or for
some reason this behaviour proves inconvenient to preserve,
then it might be discontinued without notice. You've been
warned.
### Response:
def finalize(self, process_row = None):
"""
Restore the LigolwSegmentList objects to the XML tables in
preparation for output. All segments from all segment
lists are inserted into the tables in time order, but this
is NOT behaviour external applications should rely on.
This is done simply in the belief that it might assist in
constructing well balanced indexed databases from the
resulting files. If that proves not to be the case, or for
some reason this behaviour proves inconvenient to preserve,
then it might be discontinued without notice. You've been
warned.
"""
if process_row is not None:
process_id = process_row.process_id
elif self.process is not None:
process_id = self.process.process_id
else:
raise ValueError("must supply a process row to .__init__()")
#
# ensure ID generators are synchronized with table contents
#
self.segment_def_table.sync_next_id()
self.segment_table.sync_next_id()
self.segment_sum_table.sync_next_id()
#
# put all segment lists in time order
#
self.sort()
#
# generator function to convert segments into row objects,
# each paired with the table to which the row is to be
# appended
#
def row_generator(segs, target_table, process_id, segment_def_id):
id_column = target_table.next_id.column_name
for seg in segs:
row = target_table.RowType()
row.segment = seg
row.process_id = process_id
row.segment_def_id = segment_def_id
setattr(row, id_column, target_table.get_next_id())
if 'comment' in target_table.validcolumns:
row.comment = None
yield row, target_table
#
# populate the segment_definer table from the list of
# LigolwSegmentList objects and construct a matching list
# of table row generators. empty ourselves to prevent this
# process from being repeated
#
row_generators = []
while self:
ligolw_segment_list = self.pop()
segment_def_row = self.segment_def_table.RowType()
segment_def_row.process_id = process_id
segment_def_row.segment_def_id = self.segment_def_table.get_next_id()
segment_def_row.instruments = ligolw_segment_list.instruments
segment_def_row.name = ligolw_segment_list.name
segment_def_row.version = ligolw_segment_list.version
segment_def_row.comment = ligolw_segment_list.comment
self.segment_def_table.append(segment_def_row)
row_generators.append(row_generator(ligolw_segment_list.valid, self.segment_sum_table, process_id, segment_def_row.segment_def_id))
row_generators.append(row_generator(ligolw_segment_list.active, self.segment_table, process_id, segment_def_row.segment_def_id))
#
# populate segment and segment_summary tables by pulling
# rows from the generators in time order
#
for row, target_table in iterutils.inorder(*row_generators):
target_table.append(row) |
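The time-ordered merge of several per-list row generators can be pictured with heapq.merge standing in for iterutils.inorder; the (time, payload) tuples below are illustrative only.
import heapq

# Each iterator yields rows already sorted by time, mirroring the
# per-segment-list generators built in finalize() above.
active = iter([(1, 'active segment'), (5, 'active segment')])
summary = iter([(2, 'summary segment'), (3, 'summary segment')])

# heapq.merge pulls from all generators and yields rows in global
# time order, which is what keeps the output tables time-sorted.
for t, payload in heapq.merge(active, summary):
    print(t, payload)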
def _negotiateHandler(self, request):
"""
Negotiate a handler based on the content types acceptable to the
client.
:rtype: 2-`tuple` of `twisted.web.iweb.IResource` and `bytes`
:return: Pair of a resource and the content type.
"""
accept = _parseAccept(request.requestHeaders.getRawHeaders('Accept'))
for contentType in accept.keys():
handler = self._acceptHandlers.get(contentType.lower())
if handler is not None:
return handler, handler.contentType
if self._fallback:
handler = self._handlers[0]
return handler, handler.contentType
return NotAcceptable(), None | Negotiate a handler based on the content types acceptable to the
client.
:rtype: 2-`tuple` of `twisted.web.iweb.IResource` and `bytes`
:return: Pair of a resource and the content type. | Below is the the instruction that describes the task:
### Input:
Negotiate a handler based on the content types acceptable to the
client.
:rtype: 2-`tuple` of `twisted.web.iweb.IResource` and `bytes`
:return: Pair of a resource and the content type.
### Response:
def _negotiateHandler(self, request):
"""
Negotiate a handler based on the content types acceptable to the
client.
:rtype: 2-`tuple` of `twisted.web.iweb.IResource` and `bytes`
:return: Pair of a resource and the content type.
"""
accept = _parseAccept(request.requestHeaders.getRawHeaders('Accept'))
for contentType in accept.keys():
handler = self._acceptHandlers.get(contentType.lower())
if handler is not None:
return handler, handler.contentType
if self._fallback:
handler = self._handlers[0]
return handler, handler.contentType
return NotAcceptable(), None |
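A framework-free sketch of the same negotiation, assuming the parsed Accept header is already an ordered sequence of content types; the helper name negotiate and the handler strings are illustrative.
def negotiate(accepted_types, handlers, fallback=None):
    # Return the first handler whose content type the client accepts,
    # falling back to a default handler when one is configured.
    for content_type in accepted_types:
        handler = handlers.get(content_type.lower())
        if handler is not None:
            return handler, content_type
    if fallback is not None:
        return fallback, None
    return None, None

handlers = {'application/json': 'json_handler', 'text/html': 'html_handler'}
print(negotiate(['text/html', 'application/json'], handlers))
# ('html_handler', 'text/html')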
def filter_with_schema(self, model=None, context=None):
""" Perform model filtering with schema """
if model is None or self.schema is None:
return
self._schema.filter(
model=model,
context=context if self.use_context else None
) | Perform model filtering with schema | Below is the the instruction that describes the task:
### Input:
Perform model filtering with schema
### Response:
def filter_with_schema(self, model=None, context=None):
""" Perform model filtering with schema """
if model is None or self.schema is None:
return
self._schema.filter(
model=model,
context=context if self.use_context else None
) |
def get_bnum(opts, minions, quiet):
'''
Return the active number of minions to maintain
'''
partition = lambda x: float(x) / 100.0 * len(minions)
try:
if '%' in opts['batch']:
res = partition(float(opts['batch'].strip('%')))
if res < 1:
return int(math.ceil(res))
else:
return int(res)
else:
return int(opts['batch'])
except ValueError:
if not quiet:
salt.utils.stringutils.print_cli('Invalid batch data sent: {0}\nData must be in the '
'form of %10, 10% or 3'.format(opts['batch'])) | Return the active number of minions to maintain | Below is the the instruction that describes the task:
### Input:
Return the active number of minions to maintain
### Response:
def get_bnum(opts, minions, quiet):
'''
Return the active number of minions to maintain
'''
partition = lambda x: float(x) / 100.0 * len(minions)
try:
if '%' in opts['batch']:
res = partition(float(opts['batch'].strip('%')))
if res < 1:
return int(math.ceil(res))
else:
return int(res)
else:
return int(opts['batch'])
except ValueError:
if not quiet:
salt.utils.stringutils.print_cli('Invalid batch data sent: {0}\nData must be in the '
'form of %10, 10% or 3'.format(opts['batch'])) |
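The three accepted batch formats reduce to a small piece of arithmetic; here is a standalone sketch of the same rules (the function name batch_size is illustrative).
import math

def batch_size(batch, minion_count):
    # '%10' and '10%' mean a percentage of connected minions,
    # a bare number means an absolute count (same rules as get_bnum).
    if '%' in batch:
        res = float(batch.strip('%')) / 100.0 * minion_count
        return int(math.ceil(res)) if res < 1 else int(res)
    return int(batch)

print(batch_size('10%', 25))  # 2
print(batch_size('%10', 5))   # 1 (rounded up so at least one minion runs)
print(batch_size('3', 25))    # 3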
def sendSomeData(self, howMany):
"""
Send some DATA commands to my peer(s) to relay some data.
@param howMany: an int, the number of chunks to send out.
"""
# print 'sending some data', howMany
if self.transport is None:
return
peer = self.transport.getQ2QPeer()
while howMany > 0:
# sort transloads so that the least-frequently-serviced ones will
# come first
tloads = [
(findin(tl.name, self.sentTransloads),
tl) for tl in self.nexus.transloadsForPeer(peer)]
tloads.sort()
tloads = [tl for (idx, tl) in tloads if tl.peerNeedsData(peer)]
if not tloads:
break
wasHowMany = howMany
for myTransload in tloads:
# move this transload to the end so it will be sorted last next
# time.
name = myTransload.name
if name in self.sentTransloads:
self.sentTransloads.remove(name)
self.sentTransloads.append(name)
knowledge = myTransload.peers[peer]
chunkNumber, chunkData = myTransload.selectOptimalChunk(peer)
if chunkNumber is None:
continue
peerToIntroduce = knowledge.selectPeerToIntroduce(
myTransload.peers.keys())
if peerToIntroduce is not None:
self.introduce(myTransload.name, peerToIntroduce)
self.data(name, chunkNumber, chunkData)
# Don't re-send that chunk again unless they explicitly tell us
# they need it for some reason
knowledge.mask[chunkNumber] = 1
howMany -= 1
if howMany <= 0:
break
if wasHowMany == howMany:
# couldn't find anything to send.
break | Send some DATA commands to my peer(s) to relay some data.
@param howMany: an int, the number of chunks to send out. | Below is the the instruction that describes the task:
### Input:
Send some DATA commands to my peer(s) to relay some data.
@param howMany: an int, the number of chunks to send out.
### Response:
def sendSomeData(self, howMany):
"""
Send some DATA commands to my peer(s) to relay some data.
@param howMany: an int, the number of chunks to send out.
"""
# print 'sending some data', howMany
if self.transport is None:
return
peer = self.transport.getQ2QPeer()
while howMany > 0:
# sort transloads so that the least-frequently-serviced ones will
# come first
tloads = [
(findin(tl.name, self.sentTransloads),
tl) for tl in self.nexus.transloadsForPeer(peer)]
tloads.sort()
tloads = [tl for (idx, tl) in tloads if tl.peerNeedsData(peer)]
if not tloads:
break
wasHowMany = howMany
for myTransload in tloads:
# move this transload to the end so it will be sorted last next
# time.
name = myTransload.name
if name in self.sentTransloads:
self.sentTransloads.remove(name)
self.sentTransloads.append(name)
knowledge = myTransload.peers[peer]
chunkNumber, chunkData = myTransload.selectOptimalChunk(peer)
if chunkNumber is None:
continue
peerToIntroduce = knowledge.selectPeerToIntroduce(
myTransload.peers.keys())
if peerToIntroduce is not None:
self.introduce(myTransload.name, peerToIntroduce)
self.data(name, chunkNumber, chunkData)
# Don't re-send that chunk again unless they explicitly tell us
# they need it for some reason
knowledge.mask[chunkNumber] = 1
howMany -= 1
if howMany <= 0:
break
if wasHowMany == howMany:
# couldn't find anything to send.
break |
def install_virtualenv(parser_args):
""" Installs virtual environment """
python_version = '.'.join(str(v) for v in sys.version_info[:2])
sys.stdout.write('Installing Python {0} virtualenv into {1} \n'.format(python_version, VE_ROOT))
if sys.version_info < (3, 3):
install_virtualenv_p2(VE_ROOT, python_version)
else:
install_virtualenv_p3(VE_ROOT, python_version) | Installs virtual environment | Below is the the instruction that describes the task:
### Input:
Installs virtual environment
### Response:
def install_virtualenv(parser_args):
""" Installs virtual environment """
python_version = '.'.join(str(v) for v in sys.version_info[:2])
sys.stdout.write('Installing Python {0} virtualenv into {1} \n'.format(python_version, VE_ROOT))
if sys.version_info < (3, 3):
install_virtualenv_p2(VE_ROOT, python_version)
else:
install_virtualenv_p3(VE_ROOT, python_version) |
def get_groups(self, condition=None, page_size=1000):
"""Return an iterator over all groups in this device cloud account
Optionally, a condition can be specified to limit the number of
groups returned.
Examples::
# Get all groups and print information about them
for group in dc.devicecore.get_groups():
print group
# Iterate over all devices which are in a group with a specific
# ID.
group = dc.devicecore.get_groups(group_id == 123)[0]
for device in dc.devicecore.get_devices(group_path == group.get_path()):
print device.get_mac()
:param condition: A condition to use when filtering the results set. If
unspecified, all groups will be returned.
:param int page_size: The number of results to fetch in a
single page. In general, the default will suffice.
:returns: Generator over the groups in this device cloud account. No
guarantees about the order of results is provided and child links
between nodes will not be populated.
"""
query_kwargs = {}
if condition is not None:
query_kwargs["condition"] = condition.compile()
for group_data in self._conn.iter_json_pages("/ws/Group", page_size=page_size, **query_kwargs):
yield Group.from_json(group_data) | Return an iterator over all groups in this device cloud account
Optionally, a condition can be specified to limit the number of
groups returned.
Examples::
# Get all groups and print information about them
for group in dc.devicecore.get_groups():
print group
# Iterate over all devices which are in a group with a specific
# ID.
group = dc.devicecore.get_groups(group_id == 123)[0]
for device in dc.devicecore.get_devices(group_path == group.get_path()):
print device.get_mac()
:param condition: A condition to use when filtering the results set. If
unspecified, all groups will be returned.
:param int page_size: The number of results to fetch in a
single page. In general, the default will suffice.
:returns: Generator over the groups in this device cloud account. No
guarantees about the order of results is provided and child links
between nodes will not be populated. | Below is the the instruction that describes the task:
### Input:
Return an iterator over all groups in this device cloud account
Optionally, a condition can be specified to limit the number of
groups returned.
Examples::
# Get all groups and print information about them
for group in dc.devicecore.get_groups():
print group
# Iterate over all devices which are in a group with a specific
# ID.
group = dc.devicecore.get_groups(group_id == 123)[0]
for device in dc.devicecore.get_devices(group_path == group.get_path()):
print device.get_mac()
:param condition: A condition to use when filtering the results set. If
unspecified, all groups will be returned.
:param int page_size: The number of results to fetch in a
single page. In general, the default will suffice.
:returns: Generator over the groups in this device cloud account. No
guarantees about the order of results is provided and child links
between nodes will not be populated.
### Response:
def get_groups(self, condition=None, page_size=1000):
"""Return an iterator over all groups in this device cloud account
Optionally, a condition can be specified to limit the number of
groups returned.
Examples::
# Get all groups and print information about them
for group in dc.devicecore.get_groups():
print group
# Iterate over all devices which are in a group with a specific
# ID.
group = dc.devicecore.get_groups(group_id == 123)[0]
for device in dc.devicecore.get_devices(group_path == group.get_path()):
print device.get_mac()
:param condition: A condition to use when filtering the results set. If
unspecified, all groups will be returned.
:param int page_size: The number of results to fetch in a
single page. In general, the default will suffice.
:returns: Generator over the groups in this device cloud account. No
guarantees about the order of results is provided and child links
between nodes will not be populated.
"""
query_kwargs = {}
if condition is not None:
query_kwargs["condition"] = condition.compile()
for group_data in self._conn.iter_json_pages("/ws/Group", page_size=page_size, **query_kwargs):
yield Group.from_json(group_data) |
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True | Close the application and all installed plugins. | Below is the the instruction that describes the task:
### Input:
Close the application and all installed plugins.
### Response:
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True |
def add_path_object(self, *args):
"""
Add custom path objects
:type: path_object: static_bundle.paths.AbstractPath
"""
for obj in args:
obj.bundle = self
self.files.append(obj) | Add custom path objects
:type: path_object: static_bundle.paths.AbstractPath | Below is the the instruction that describes the task:
### Input:
Add custom path objects
:type: path_object: static_bundle.paths.AbstractPath
### Response:
def add_path_object(self, *args):
"""
Add custom path objects
:type: path_object: static_bundle.paths.AbstractPath
"""
for obj in args:
obj.bundle = self
self.files.append(obj) |
def part_search(self, part_query):
'''
handles the part lookup/search for the given part query
part_query: part string to search as product name
outputs result on stdout
'''
limit = 100
results = self._e.parts_search(q=part_query,
limit=limit)
start = 0
hits = results[0]['hits']
if hits == 0:
print("No result")
return ReturnValues.NO_RESULTS
print("Searched for: '{}'".format(results[0]['request']['q']))
def show_result(r):
print(' → {:30} {:30} {}'.format(
r['item']['mpn'], r['item']['manufacturer']['name'], r['snippet']
))
for r in results[1]:
show_result(r)
while hits - limit > limit:
start += limit
hits -= limit
results = self._e.parts_search(q=part_query, limit=limit,
start=start)
for r in results[1]:
show_result(r)
if hits-limit > 0:
start += limit
hits -= limit
results = self._e.parts_search(q=part_query,
limit=hits,
start=start)
for r in results[1]:
show_result(r)
return ReturnValues.OK | handles the part lookup/search for the given part query
part_query: part string to search as product name
outputs result on stdout | Below is the instruction that describes the task:
### Input:
handles the part lookup/search for the given part query
part_query: part string to search as product name
outputs result on stdout
### Response:
def part_search(self, part_query):
'''
handles the part lookup/search for the given part query
part_query: part string to search as product name
outputs result on stdout
'''
limit = 100
results = self._e.parts_search(q=part_query,
limit=limit)
start = 0
hits = results[0]['hits']
if hits == 0:
print("No result")
return ReturnValues.NO_RESULTS
print("Searched for: '{}'".format(results[0]['request']['q']))
def show_result(r):
print(' → {:30} {:30} {}'.format(
r['item']['mpn'], r['item']['manufacturer']['name'], r['snippet']
))
for r in results[1]:
show_result(r)
while hits - limit > limit:
start += limit
hits -= limit
results = self._e.parts_search(q=part_query, limit=limit,
start=start)
for r in results[1]:
show_result(r)
if hits-limit > 0:
start += limit
hits -= limit
results = self._e.parts_search(q=part_query,
limit=hits,
start=start)
for r in results[1]:
show_result(r)
return ReturnValues.OK |
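The paging arithmetic above (one full first page, full middle pages while more than `limit` results remain, then a trailing partial page) can be checked in isolation; the sketch below is hypothetical and only mirrors the window logic, with a generator standing in for the repeated `parts_search` calls:
def iter_pages(total_hits, limit=100):
    # Yields (start, count) windows in the same order the loop above issues requests.
    start, hits = 0, total_hits
    yield start, limit                 # first request always asks for a full page
    while hits - limit > limit:
        start += limit
        hits -= limit
        yield start, limit             # full middle pages
    if hits - limit > 0:
        start += limit
        hits -= limit
        yield start, hits              # trailing partial page
print(list(iter_pages(230)))           # [(0, 100), (100, 100), (200, 30)]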
def parse_gcs_url(gsurl):
"""
Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a
tuple containing the corresponding bucket and blob.
"""
parsed_url = urlparse(gsurl)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket name')
else:
bucket = parsed_url.netloc
blob = parsed_url.path.strip('/')
return bucket, blob | Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a
tuple containing the corresponding bucket and blob. | Below is the instruction that describes the task:
### Input:
Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a
tuple containing the corresponding bucket and blob.
### Response:
def parse_gcs_url(gsurl):
"""
Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a
tuple containing the corresponding bucket and blob.
"""
parsed_url = urlparse(gsurl)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket name')
else:
bucket = parsed_url.netloc
blob = parsed_url.path.strip('/')
return bucket, blob |
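A quick usage sketch (pure string handling, so it runs without any cloud credentials; it only assumes the function above is in scope):
bucket, blob = parse_gcs_url('gs://my-bucket/path/to/object.csv')
print(bucket)   # my-bucket
print(blob)     # path/to/object.csv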
def read_data(self, **kwargs):
'''
Read the datafile specified in Sample.datafile and
return the resulting object.
Does NOT assign the data to self.data
It's advised not to use this method, but instead to access
the data through the FCMeasurement.data attribute.
'''
meta, data = parse_fcs(self.datafile, **kwargs)
return data | Read the datafile specified in Sample.datafile and
return the resulting object.
Does NOT assign the data to self.data
It's advised not to use this method, but instead to access
the data through the FCMeasurement.data attribute. | Below is the instruction that describes the task:
### Input:
Read the datafile specified in Sample.datafile and
return the resulting object.
Does NOT assign the data to self.data
It's advised not to use this method, but instead to access
the data through the FCMeasurement.data attribute.
### Response:
def read_data(self, **kwargs):
'''
Read the datafile specified in Sample.datafile and
return the resulting object.
Does NOT assign the data to self.data
It's advised not to use this method, but instead to access
the data through the FCMeasurement.data attribute.
'''
meta, data = parse_fcs(self.datafile, **kwargs)
return data |
def _allocate_channel(self):
"""
Allocate a new AMQP channel.
Raises:
NoFreeChannels: If this connection has reached its maximum number of channels.
"""
try:
channel = yield self.channel()
except pika.exceptions.NoFreeChannels:
raise NoFreeChannels()
_std_log.debug("Created AMQP channel id %d", channel.channel_number)
if self._confirms:
yield channel.confirm_delivery()
defer.returnValue(channel) | Allocate a new AMQP channel.
Raises:
NoFreeChannels: If this connection has reached its maximum number of channels. | Below is the instruction that describes the task:
### Input:
Allocate a new AMQP channel.
Raises:
NoFreeChannels: If this connection has reached its maximum number of channels.
### Response:
def _allocate_channel(self):
"""
Allocate a new AMQP channel.
Raises:
NoFreeChannels: If this connection has reached its maximum number of channels.
"""
try:
channel = yield self.channel()
except pika.exceptions.NoFreeChannels:
raise NoFreeChannels()
_std_log.debug("Created AMQP channel id %d", channel.channel_number)
if self._confirms:
yield channel.confirm_delivery()
defer.returnValue(channel) |
def clear_history(vcs):
"""Clear (committed) test run history from this project.
Args:
vcs (easyci.vcs.base.Vcs)
"""
evidence_path = _get_committed_history_path(vcs)
if os.path.exists(evidence_path):
os.remove(evidence_path) | Clear (committed) test run history from this project.
Args:
vcs (easyci.vcs.base.Vcs) | Below is the instruction that describes the task:
### Input:
Clear (committed) test run history from this project.
Args:
vcs (easyci.vcs.base.Vcs)
### Response:
def clear_history(vcs):
"""Clear (committed) test run history from this project.
Args:
vcs (easyci.vcs.base.Vcs)
"""
evidence_path = _get_committed_history_path(vcs)
if os.path.exists(evidence_path):
os.remove(evidence_path) |
def __write_data(self, idx):
"""
Write out the measurement tables found in paleoData and chronData
:return:
"""
pair = self.noaa_data_sorted["Data"][idx]
# Run once for each pair (paleo+chron) of tables that was gathered earlier.
# for idx, pair in enumerate(self.noaa_data_sorted["Data"]):
lst_pc = ["chron", "paleo"]
# loop once for paleo, once for chron
for pc in lst_pc:
# safeguard in case the table is an empty set.
table = pair[pc]
if pc == "paleo":
self.__write_variables_1(table)
self.__write_divider()
self.__write_columns(pc, table)
elif pc == "chron":
# self.__write_variables_1(table)
self.__write_columns(pc, table)
self.__write_divider(nl=False)
return | Write out the measurement tables found in paleoData and chronData
:return: | Below is the instruction that describes the task:
### Input:
Write out the measurement tables found in paleoData and chronData
:return:
### Response:
def __write_data(self, idx):
"""
Write out the measurement tables found in paleoData and chronData
:return:
"""
pair = self.noaa_data_sorted["Data"][idx]
# Run once for each pair (paleo+chron) of tables that was gathered earlier.
# for idx, pair in enumerate(self.noaa_data_sorted["Data"]):
lst_pc = ["chron", "paleo"]
# loop once for paleo, once for chron
for pc in lst_pc:
# safeguard in case the table is an empty set.
table = pair[pc]
if pc == "paleo":
self.__write_variables_1(table)
self.__write_divider()
self.__write_columns(pc, table)
elif pc == "chron":
# self.__write_variables_1(table)
self.__write_columns(pc, table)
self.__write_divider(nl=False)
return |
def parse_port(port_obj, owner):
'''Create a port object of the correct type.
The correct port object type is chosen based on the port.port_type
property of port_obj.
@param port_obj The CORBA PortService object to wrap.
@param owner The owner of this port. Should be a Component object or None.
@return The created port object.
'''
profile = port_obj.get_port_profile()
props = utils.nvlist_to_dict(profile.properties)
if props['port.port_type'] == 'DataInPort':
return DataInPort(port_obj, owner)
elif props['port.port_type'] == 'DataOutPort':
return DataOutPort(port_obj, owner)
elif props['port.port_type'] == 'CorbaPort':
return CorbaPort(port_obj, owner)
else:
return Port(port_obj, owner) | Create a port object of the correct type.
The correct port object type is chosen based on the port.port_type
property of port_obj.
@param port_obj The CORBA PortService object to wrap.
@param owner The owner of this port. Should be a Component object or None.
@return The created port object. | Below is the instruction that describes the task:
### Input:
Create a port object of the correct type.
The correct port object type is chosen based on the port.port_type
property of port_obj.
@param port_obj The CORBA PortService object to wrap.
@param owner The owner of this port. Should be a Component object or None.
@return The created port object.
### Response:
def parse_port(port_obj, owner):
'''Create a port object of the correct type.
The correct port object type is chosen based on the port.port_type
property of port_obj.
@param port_obj The CORBA PortService object to wrap.
@param owner The owner of this port. Should be a Component object or None.
@return The created port object.
'''
profile = port_obj.get_port_profile()
props = utils.nvlist_to_dict(profile.properties)
if props['port.port_type'] == 'DataInPort':
return DataInPort(port_obj, owner)
elif props['port.port_type'] == 'DataOutPort':
return DataOutPort(port_obj, owner)
elif props['port.port_type'] == 'CorbaPort':
return CorbaPort(port_obj, owner)
else:
return Port(port_obj, owner) |
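The `elif` chain above is a plain type dispatch; as a hedged alternative sketch, the same mapping can be expressed as a lookup table (the class names and the `utils.nvlist_to_dict` helper are the ones referenced above, not new APIs):
_PORT_CLASSES = {
    'DataInPort': DataInPort,
    'DataOutPort': DataOutPort,
    'CorbaPort': CorbaPort,
}

def parse_port_via_table(port_obj, owner):
    # Same behaviour as parse_port above, with Port as the fallback class.
    props = utils.nvlist_to_dict(port_obj.get_port_profile().properties)
    return _PORT_CLASSES.get(props['port.port_type'], Port)(port_obj, owner)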
def get_bundle_for_cn(cn, relation_name=None):
"""Extract certificates for the given cn.
:param cn: str Canonical Name on certificate.
:param relation_name: str Relation to check for certificates down.
:returns: Dictionary of certificate data,
:rtype: dict.
"""
entries = get_requests_for_local_unit(relation_name)
cert_bundle = {}
for entry in entries:
for _cn, bundle in entry['certs'].items():
if _cn == cn:
cert_bundle = {
'cert': bundle['cert'],
'key': bundle['key'],
'chain': entry['chain'],
'ca': entry['ca']}
break
if cert_bundle:
break
return cert_bundle | Extract certificates for the given cn.
:param cn: str Canonical Name on certificate.
:param relation_name: str Relation to check for certificates down.
:returns: Dictionary of certificate data,
:rtype: dict. | Below is the instruction that describes the task:
### Input:
Extract certificates for the given cn.
:param cn: str Canonical Name on certificate.
:param relation_name: str Relation to check for certificates down.
:returns: Dictionary of certificate data,
:rtype: dict.
### Response:
def get_bundle_for_cn(cn, relation_name=None):
"""Extract certificates for the given cn.
:param cn: str Canonical Name on certificate.
:param relation_name: str Relation to check for certificates down.
:returns: Dictionary of certificate data,
:rtype: dict.
"""
entries = get_requests_for_local_unit(relation_name)
cert_bundle = {}
for entry in entries:
for _cn, bundle in entry['certs'].items():
if _cn == cn:
cert_bundle = {
'cert': bundle['cert'],
'key': bundle['key'],
'chain': entry['chain'],
'ca': entry['ca']}
break
if cert_bundle:
break
return cert_bundle |
def check_hints(self, ds):
'''
Checks for potentially mislabeled metadata and makes suggestions for how to correct
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
'''
ret_val = []
ret_val.extend(self._check_hint_bounds(ds))
return ret_val | Checks for potentially mislabeled metadata and makes suggestions for how to correct
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results | Below is the instruction that describes the task:
### Input:
Checks for potentially mislabeled metadata and makes suggestions for how to correct
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
### Response:
def check_hints(self, ds):
'''
Checks for potentially mislabeled metadata and makes suggestions for how to correct
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
'''
ret_val = []
ret_val.extend(self._check_hint_bounds(ds))
return ret_val |
def preparse(output_format):
""" Do any special processing of a template, and return the result.
"""
try:
return templating.preparse(output_format, lambda path: os.path.join(config.config_dir, "templates", path))
except ImportError as exc:
if "tempita" in str(exc):
raise error.UserError("To be able to use Tempita templates, install the 'tempita' package (%s)\n"
" Possibly USING THE FOLLOWING COMMAND:\n"
" %s/easy_install tempita" % (exc, os.path.dirname(sys.executable)))
raise
except IOError as exc:
raise error.LoggableError("Cannot read template: {}".format(exc)) | Do any special processing of a template, and return the result. | Below is the instruction that describes the task:
### Input:
Do any special processing of a template, and return the result.
### Response:
def preparse(output_format):
""" Do any special processing of a template, and return the result.
"""
try:
return templating.preparse(output_format, lambda path: os.path.join(config.config_dir, "templates", path))
except ImportError as exc:
if "tempita" in str(exc):
raise error.UserError("To be able to use Tempita templates, install the 'tempita' package (%s)\n"
" Possibly USING THE FOLLOWING COMMAND:\n"
" %s/easy_install tempita" % (exc, os.path.dirname(sys.executable)))
raise
except IOError as exc:
raise error.LoggableError("Cannot read template: {}".format(exc)) |
def gen(sc, asset, expire):
'''
Database population function.
What we are doing here is trying to interpret the output of plugin ID 20811
and use that information to help populate the database with individualized
entries of the software that is installed on the host. This information will
later be used to build the report.
'''
# The following regex patterns are used to pull out the needed fields from
# Plugin ID 20811
redate = re.compile(r'\[installed on (\d{4})/(\d{1,2})/(\d{1,2})\]')
reinvdate = re.compile(r'\[installed on (\d{1,2})/(\d{1,2})/(\d{4})\]')
rever = re.compile(r'\[version (.*?)\]')
resw = re.compile(r'^([\w\s\.\(\-\)\+]*)')
s = Session()
ts = datetime.datetime.now()
for vuln in sc.analysis(('pluginID','=','20811,22869'),
('asset', '=', {'id': str(asset)}),
tool='vulndetails'):
# First we need to get the host information...
nh = False
host = s.query(Host).filter_by(ip=vuln['ip']).first()
if not host:
host = Host()
nh = True
hdata = sc.analysis(('ip', '=', vuln['ip']),tool='sumip')[0]
host.ip = vuln['ip']
host.name = vuln['netbiosName']
host.cpe = hdata['osCPE']
host.dns = hdata['dnsName']
host.asset_id = asset
if nh:
s.add(host)
else:
s.merge(host)
s.commit()
sys.stdout.write('%4d\t%-16s\t%-40s' % (host.id, host.ip, host.dns))
sys.stdout.flush()
if vuln['pluginID'] == '22869':
if 'CentOS Linux system' in vuln['pluginText'] or 'Red Hat Linux system' in vuln['pluginText']:
software = re.findall(' ([a-zA-Z0-9\.\-]*)\|',vuln['pluginText'])
for item in software:
entry = Entry()
entry.name = item
entry.timestamp = ts
entry.host_id = host.id
s.add(entry)
s.commit()
elif 'SunOS 5.10' in vuln['pluginText']:
software = re.findall('Patch: ([^ ]*)', vuln['pluginText'])
for item in software:
entry = Entry()
entry.name = item
entry.timestamp = ts
entry.host_id = host.id
s.add(entry)
s.commit()
elif 'Solaris 11 system' in vuln['pluginText']:
software = re.findall('([\w\/]+)\W+([0-9\.\-]+).*\n',vuln['pluginText'])
for item in software:
entry = Entry()
entry.name = item[0]
entry.version = item[1]
entry.timestamp = ts
entry.host_id = host.id
s.add(entry)
s.commit()
elif 'Mac OS X system' in vuln['pluginText']:
software = re.findall(' ([a-zA-Z0-9\.\-\_]*\.pkg)\n',vuln['pluginText'])
for item in software:
entry = Entry()
entry.name = item
entry.timestamp = ts
entry.host_id = host.id
s.add(entry)
s.commit()
else:
sys.stdout.write('\t[NO FORMATTER]')
sys.stdout.flush()
if vuln['pluginID'] == '20811':
software = False
patches = False
sw = None
nh = False
s.commit()
for line in vuln['pluginText'].split('\n'):
if '</plugin_output>' in line:
continue
if line == u'The following software are installed on the remote host :':
software = True
patches = False
continue
if line == u'The following updates are installed :':
patches = True
continue
if software and line != '':
names = resw.findall(line)
vers = rever.findall(line)
dates = redate.findall(line)
new = Entry()
if len(names) > 0: new.name = names[0].strip()
if len(vers) > 0: new.version = vers[0]
try:
if len(dates) > 0:
date = datetime.date(year=int(dates[0][0]),
month=int(dates[0][1]),
day=int(dates[0][2]))
new.date = date
else:
dates = reinvdate.findall(line)
if len(dates) > 0:
date = datetime.date(year=int(dates[0][2]),
month=int(dates[0][0]),
day=int(dates[0][1]))
new.date = date
except:
pass
if patches:
if line[:2] != ' ':
sw = line.strip(':').strip()
continue
else:
new.name = '%s (%s)' % (new.name, sw)
new.timestamp = ts
new.host_id = host.id
s.add(new)
s.commit()
sys.stdout.write('\tdone\n')
sys.stdout.flush()
s.commit()
# Now to expire the old data out...
exp = datetime.datetime.now() - datetime.timedelta(days=expire)
print exp
# First to delete the aged out entries
for entry in s.query(Entry).filter(Entry.timestamp < exp).all():
s.delete(entry)
s.commit()
# Next to delete any hosts that we aren't pulling info for anymore...
for host in s.query(Host).all():
if len(host.entries) == 0:
s.delete(host)
s.commit()
s.close() | Database population function.
What we are doing here is trying to interpret the output of plugin ID 20811
and use that information to help populate the database with individualized
entries of the software that is installed on the host. This information will
later be used to build the report. | Below is the instruction that describes the task:
### Input:
Database population function.
What we are doing here is trying to interpret the output of plugin ID 20811
and use that information to help populate the database with individualized
entries of the software that is installed on the host. This information will
later be used to build the report.
### Response:
def gen(sc, asset, expire):
'''
Database population function.
What we are doing here is trying to interpret the output of plugin ID 20811
and use that information to help populate the database with individualized
entries of the software that is installed on the host. This information will
later be used to build the report.
'''
# The following regex patterns are used to pull out the needed fields from
# Plugin ID 20811
redate = re.compile(r'\[installed on (\d{4})/(\d{1,2})/(\d{1,2})\]')
reinvdate = re.compile(r'\[installed on (\d{1,2})/(\d{1,2})/(\d{4})\]')
rever = re.compile(r'\[version (.*?)\]')
resw = re.compile(r'^([\w\s\.\(\-\)\+]*)')
s = Session()
ts = datetime.datetime.now()
for vuln in sc.analysis(('pluginID','=','20811,22869'),
('asset', '=', {'id': str(asset)}),
tool='vulndetails'):
# First we need to get the host information...
nh = False
host = s.query(Host).filter_by(ip=vuln['ip']).first()
if not host:
host = Host()
nh = True
hdata = sc.analysis(('ip', '=', vuln['ip']),tool='sumip')[0]
host.ip = vuln['ip']
host.name = vuln['netbiosName']
host.cpe = hdata['osCPE']
host.dns = hdata['dnsName']
host.asset_id = asset
if nh:
s.add(host)
else:
s.merge(host)
s.commit()
sys.stdout.write('%4d\t%-16s\t%-40s' % (host.id, host.ip, host.dns))
sys.stdout.flush()
if vuln['pluginID'] == '22869':
if 'CentOS Linux system' in vuln['pluginText'] or 'Red Hat Linux system' in vuln['pluginText']:
software = re.findall(' ([a-zA-Z0-9\.\-]*)\|',vuln['pluginText'])
for item in software:
entry = Entry()
entry.name = item
entry.timestamp = ts
entry.host_id = host.id
s.add(entry)
s.commit()
elif 'SunOS 5.10' in vuln['pluginText']:
software = re.findall('Patch: ([^ ]*)', vuln['pluginText'])
for item in software:
entry = Entry()
entry.name = item
entry.timestamp = ts
entry.host_id = host.id
s.add(entry)
s.commit()
elif 'Solaris 11 system' in vuln['pluginText']:
software = re.findall('([\w\/]+)\W+([0-9\.\-]+).*\n',vuln['pluginText'])
for item in software:
entry = Entry()
entry.name = item[0]
entry.version = item[1]
entry.timestamp = ts
entry.host_id = host.id
s.add(entry)
s.commit()
elif 'Mac OS X system' in vuln['pluginText']:
software = re.findall(' ([a-zA-Z0-9\.\-\_]*\.pkg)\n',vuln['pluginText'])
for item in software:
entry = Entry()
entry.name = item
entry.timestamp = ts
entry.host_id = host.id
s.add(entry)
s.commit()
else:
sys.stdout.write('\t[NO FORMATTER]')
sys.stdout.flush()
if vuln['pluginID'] == '20811':
software = False
patches = False
sw = None
nh = False
s.commit()
for line in vuln['pluginText'].split('\n'):
if '</plugin_output>' in line:
continue
if line == u'The following software are installed on the remote host :':
software = True
patches = False
continue
if line == u'The following updates are installed :':
patches = True
continue
if software and line != '':
names = resw.findall(line)
vers = rever.findall(line)
dates = redate.findall(line)
new = Entry()
if len(names) > 0: new.name = names[0].strip()
if len(vers) > 0: new.version = vers[0]
try:
if len(dates) > 0:
date = datetime.date(year=int(dates[0][0]),
month=int(dates[0][1]),
day=int(dates[0][2]))
new.date = date
else:
dates = reinvdate.findall(line)
if len(dates) > 0:
date = datetime.date(year=int(dates[0][2]),
month=int(dates[0][0]),
day=int(dates[0][1]))
new.date = date
except:
pass
if patches:
if line[:2] != ' ':
sw = line.strip(':').strip()
continue
else:
new.name = '%s (%s)' % (new.name, sw)
new.timestamp = ts
new.host_id = host.id
s.add(new)
s.commit()
sys.stdout.write('\tdone\n')
sys.stdout.flush()
s.commit()
# Now to expire the old data out...
exp = datetime.datetime.now() - datetime.timedelta(days=expire)
print exp
# First to delete the aged out entries
for entry in s.query(Entry).filter(Entry.timestamp < exp).all():
s.delete(entry)
s.commit()
# Next to delete any hosts that we aren't pulling info for anymore...
for host in s.query(Host).all():
if len(host.entries) == 0:
s.delete(host)
s.commit()
s.close() |
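The three regular expressions defined at the top of `gen` can be sanity-checked against a single made-up line of plugin 20811 output; this sketch is illustrative only and does not touch SecurityCenter:
import re

redate = re.compile(r'\[installed on (\d{4})/(\d{1,2})/(\d{1,2})\]')
rever = re.compile(r'\[version (.*?)\]')
resw = re.compile(r'^([\w\s\.\(\-\)\+]*)')

line = 'Example Viewer 7 [version 7.1.2] [installed on 2015/03/09]'
print(resw.findall(line)[0].strip())   # Example Viewer 7
print(rever.findall(line))             # ['7.1.2']
print(redate.findall(line))            # [('2015', '03', '09')]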
def recognize(
self,
config,
audio,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Performs synchronous speech recognition: receive results after all audio
has been sent and processed.
Example:
>>> from google.cloud import speech_v1p1beta1
>>> from google.cloud.speech_v1p1beta1 import enums
>>>
>>> client = speech_v1p1beta1.SpeechClient()
>>>
>>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
>>> sample_rate_hertz = 44100
>>> language_code = 'en-US'
>>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code}
>>> uri = 'gs://bucket_name/file_name.flac'
>>> audio = {'uri': uri}
>>>
>>> response = client.recognize(config, audio)
Args:
config (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionConfig]): *Required* Provides information to the recognizer that specifies how to
process the request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionConfig`
audio (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionAudio]): *Required* The audio data to be recognized.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionAudio`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.speech_v1p1beta1.types.RecognizeResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "recognize" not in self._inner_api_calls:
self._inner_api_calls[
"recognize"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.recognize,
default_retry=self._method_configs["Recognize"].retry,
default_timeout=self._method_configs["Recognize"].timeout,
client_info=self._client_info,
)
request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio)
return self._inner_api_calls["recognize"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Performs synchronous speech recognition: receive results after all audio
has been sent and processed.
Example:
>>> from google.cloud import speech_v1p1beta1
>>> from google.cloud.speech_v1p1beta1 import enums
>>>
>>> client = speech_v1p1beta1.SpeechClient()
>>>
>>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
>>> sample_rate_hertz = 44100
>>> language_code = 'en-US'
>>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code}
>>> uri = 'gs://bucket_name/file_name.flac'
>>> audio = {'uri': uri}
>>>
>>> response = client.recognize(config, audio)
Args:
config (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionConfig]): *Required* Provides information to the recognizer that specifies how to
process the request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionConfig`
audio (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionAudio]): *Required* The audio data to be recognized.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionAudio`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.speech_v1p1beta1.types.RecognizeResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | Below is the instruction that describes the task:
### Input:
Performs synchronous speech recognition: receive results after all audio
has been sent and processed.
Example:
>>> from google.cloud import speech_v1p1beta1
>>> from google.cloud.speech_v1p1beta1 import enums
>>>
>>> client = speech_v1p1beta1.SpeechClient()
>>>
>>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
>>> sample_rate_hertz = 44100
>>> language_code = 'en-US'
>>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code}
>>> uri = 'gs://bucket_name/file_name.flac'
>>> audio = {'uri': uri}
>>>
>>> response = client.recognize(config, audio)
Args:
config (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionConfig]): *Required* Provides information to the recognizer that specifies how to
process the request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionConfig`
audio (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionAudio]): *Required* The audio data to be recognized.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionAudio`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.speech_v1p1beta1.types.RecognizeResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def recognize(
self,
config,
audio,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Performs synchronous speech recognition: receive results after all audio
has been sent and processed.
Example:
>>> from google.cloud import speech_v1p1beta1
>>> from google.cloud.speech_v1p1beta1 import enums
>>>
>>> client = speech_v1p1beta1.SpeechClient()
>>>
>>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
>>> sample_rate_hertz = 44100
>>> language_code = 'en-US'
>>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code}
>>> uri = 'gs://bucket_name/file_name.flac'
>>> audio = {'uri': uri}
>>>
>>> response = client.recognize(config, audio)
Args:
config (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionConfig]): *Required* Provides information to the recognizer that specifies how to
process the request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionConfig`
audio (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionAudio]): *Required* The audio data to be recognized.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionAudio`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.speech_v1p1beta1.types.RecognizeResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "recognize" not in self._inner_api_calls:
self._inner_api_calls[
"recognize"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.recognize,
default_retry=self._method_configs["Recognize"].retry,
default_timeout=self._method_configs["Recognize"].timeout,
client_info=self._client_info,
)
request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio)
return self._inner_api_calls["recognize"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
def get_options(self, section='default', opt_keys=None, vars=None):
"""
Get all options for a section. If ``opt_keys`` is given return
only options with those keys.
"""
vars = vars if vars else self.default_vars
conf = self.parser
opts = {}
if opt_keys is None:
if conf is None:
opt_keys = {}
else:
if not self.robust or conf.has_section(section):
opt_keys = conf.options(section)
else:
opt_keys = {}
else:
logger.debug('conf: %s' % conf)
copts = conf.options(section) if conf else {}
opt_keys = set(opt_keys).intersection(set(copts))
for option in opt_keys:
logger.debug(f'option: {option}, vars: {vars}')
opts[option] = conf.get(section, option, vars=vars)
return opts | Get all options for a section. If ``opt_keys`` is given return
only options with those keys. | Below is the instruction that describes the task:
### Input:
Get all options for a section. If ``opt_keys`` is given return
only options with those keys.
### Response:
def get_options(self, section='default', opt_keys=None, vars=None):
"""
Get all options for a section. If ``opt_keys`` is given return
only options with those keys.
"""
vars = vars if vars else self.default_vars
conf = self.parser
opts = {}
if opt_keys is None:
if conf is None:
opt_keys = {}
else:
if not self.robust or conf.has_section(section):
opt_keys = conf.options(section)
else:
opt_keys = {}
else:
logger.debug('conf: %s' % conf)
copts = conf.options(section) if conf else {}
opt_keys = set(opt_keys).intersection(set(copts))
for option in opt_keys:
logger.debug(f'option: {option}, vars: {vars}')
opts[option] = conf.get(section, option, vars=vars)
return opts |
def set_executing(on: bool):
"""
Toggle whether or not the current thread is executing a step file. This
will only apply when the current thread is a CauldronThread. This function
has no effect when run on a Main thread.
:param on:
Whether or not the thread should be annotated as executing a step file.
"""
my_thread = threading.current_thread()
if isinstance(my_thread, threads.CauldronThread):
my_thread.is_executing = on | Toggle whether or not the current thread is executing a step file. This
will only apply when the current thread is a CauldronThread. This function
has no effect when run on a Main thread.
:param on:
Whether or not the thread should be annotated as executing a step file. | Below is the instruction that describes the task:
### Input:
Toggle whether or not the current thread is executing a step file. This
will only apply when the current thread is a CauldronThread. This function
has no effect when run on a Main thread.
:param on:
Whether or not the thread should be annotated as executing a step file.
### Response:
def set_executing(on: bool):
"""
Toggle whether or not the current thread is executing a step file. This
will only apply when the current thread is a CauldronThread. This function
has no effect when run on a Main thread.
:param on:
Whether or not the thread should be annotated as executing a step file.
"""
my_thread = threading.current_thread()
if isinstance(my_thread, threads.CauldronThread):
my_thread.is_executing = on |
def redshift(distance, **kwargs):
r"""Returns the redshift associated with the given luminosity distance.
If the requested cosmology is one of the pre-defined ones in
:py:attr:`astropy.cosmology.parameters.available`, :py:class:`DistToZ` is
used to provide a fast interpolation. This takes a few seconds to setup
on the first call.
Parameters
----------
distance : float
The luminosity distance, in Mpc.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The redshift corresponding to the given distance.
"""
cosmology = get_cosmology(**kwargs)
try:
z = _d2zs[cosmology.name](distance)
except KeyError:
# not a standard cosmology, call the redshift function
z = _redshift(distance, cosmology=cosmology)
return z | r"""Returns the redshift associated with the given luminosity distance.
If the requested cosmology is one of the pre-defined ones in
:py:attr:`astropy.cosmology.parameters.available`, :py:class:`DistToZ` is
used to provide a fast interpolation. This takes a few seconds to setup
on the first call.
Parameters
----------
distance : float
The luminosity distance, in Mpc.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The redshift corresponding to the given distance. | Below is the instruction that describes the task:
### Input:
r"""Returns the redshift associated with the given luminosity distance.
If the requested cosmology is one of the pre-defined ones in
:py:attr:`astropy.cosmology.parameters.available`, :py:class:`DistToZ` is
used to provide a fast interpolation. This takes a few seconds to setup
on the first call.
Parameters
----------
distance : float
The luminosity distance, in Mpc.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The redshift corresponding to the given distance.
### Response:
def redshift(distance, **kwargs):
r"""Returns the redshift associated with the given luminosity distance.
If the requested cosmology is one of the pre-defined ones in
:py:attr:`astropy.cosmology.parameters.available`, :py:class:`DistToZ` is
used to provide a fast interpolation. This takes a few seconds to setup
on the first call.
Parameters
----------
distance : float
The luminosity distance, in Mpc.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The redshift corresponding to the given distance.
"""
cosmology = get_cosmology(**kwargs)
try:
z = _d2zs[cosmology.name](distance)
except KeyError:
# not a standard cosmology, call the redshift function
z = _redshift(distance, cosmology=cosmology)
return z |
def ListMappedNetworkDrives():
'''
On Windows, returns a list of mapped network drives
:return: tuple(string, string, bool)
For each mapped network drive, return a 3-value tuple:
- the local drive
- the remote path
- True if the mapping is enabled (warning: not reliable)
'''
if sys.platform != 'win32':
raise NotImplementedError
drives_list = []
netuse = _CallWindowsNetCommand(['use'])
for line in netuse.split(EOL_STYLE_WINDOWS):
match = re.match("(\w*)\s+(\w:)\s+(.+)", line.rstrip())
if match:
drives_list.append((match.group(2), match.group(3), match.group(1) == 'OK'))
return drives_list | On Windows, returns a list of mapped network drives
:return: tuple(string, string, bool)
For each mapped network drive, return a 3-value tuple:
- the local drive
- the remote path
- True if the mapping is enabled (warning: not reliable) | Below is the instruction that describes the task:
### Input:
On Windows, returns a list of mapped network drives
:return: tuple(string, string, bool)
For each mapped network drive, return a 3-value tuple:
- the local drive
- the remote path
- True if the mapping is enabled (warning: not reliable)
### Response:
def ListMappedNetworkDrives():
'''
On Windows, returns a list of mapped network drives
:return: tuple(string, string, bool)
For each mapped network drive, return a 3-value tuple:
- the local drive
- the remote path
- True if the mapping is enabled (warning: not reliable)
'''
if sys.platform != 'win32':
raise NotImplementedError
drives_list = []
netuse = _CallWindowsNetCommand(['use'])
for line in netuse.split(EOL_STYLE_WINDOWS):
match = re.match("(\w*)\s+(\w:)\s+(.+)", line.rstrip())
if match:
drives_list.append((match.group(2), match.group(3), match.group(1) == 'OK'))
return drives_list |
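The `net use` parsing can be exercised on a canned line without being on Windows; the sample text below is made up for illustration:
import re

sample = r"OK           Z:        \\fileserver\projects   Microsoft Windows Network"
m = re.match(r"(\w*)\s+(\w:)\s+(.+)", sample.rstrip())
if m:
    # local drive, remote path, and whether the status column reads 'OK'
    print(m.group(2), m.group(3), m.group(1) == 'OK')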
def birthdays_subcommand(vcard_list, parsable):
"""Print birthday contact table.
:param vcard_list: the vcards to search for matching entries which should
be printed
:type vcard_list: list of carddav_object.CarddavObject
:param parsable: machine readable output: columns divided by tabulator (\t)
:type parsable: bool
:returns: None
:rtype: None
"""
# filter out contacts without a birthday date
vcard_list = [
vcard for vcard in vcard_list if vcard.get_birthday() is not None]
# sort by date (month and day)
# The sort function should work for strings and datetime objects. All
# strings will be sorted before any datetime objects.
vcard_list.sort(
key=lambda x: (x.get_birthday().month, x.get_birthday().day)
if isinstance(x.get_birthday(), datetime.datetime)
else (0, 0, x.get_birthday()))
# add to string list
birthday_list = []
for vcard in vcard_list:
date = vcard.get_birthday()
if parsable:
if config.display_by_name() == "first_name":
birthday_list.append("%04d.%02d.%02d\t%s"
% (date.year, date.month, date.day,
vcard.get_first_name_last_name()))
else:
birthday_list.append("%04d.%02d.%02d\t%s"
% (date.year, date.month, date.day,
vcard.get_last_name_first_name()))
else:
if config.display_by_name() == "first_name":
birthday_list.append("%s\t%s"
% (vcard.get_first_name_last_name(),
vcard.get_formatted_birthday()))
else:
birthday_list.append("%s\t%s"
% (vcard.get_last_name_first_name(),
vcard.get_formatted_birthday()))
if birthday_list:
if parsable:
print('\n'.join(birthday_list))
else:
list_birthdays(birthday_list)
else:
if not parsable:
print("Found no birthdays")
sys.exit(1) | Print birthday contact table.
:param vcard_list: the vcards to search for matching entries which should
be printed
:type vcard_list: list of carddav_object.CarddavObject
:param parsable: machine readable output: columns divided by tabulator (\t)
:type parsable: bool
:returns: None
:rtype: None | Below is the instruction that describes the task:
### Input:
Print birthday contact table.
:param vcard_list: the vcards to search for matching entries which should
be printed
:type vcard_list: list of carddav_object.CarddavObject
:param parsable: machine readable output: columns divided by tabulator (\t)
:type parsable: bool
:returns: None
:rtype: None
### Response:
def birthdays_subcommand(vcard_list, parsable):
"""Print birthday contact table.
:param vcard_list: the vcards to search for matching entries which should
be printed
:type vcard_list: list of carddav_object.CarddavObject
:param parsable: machine readable output: columns divided by tabulator (\t)
:type parsable: bool
:returns: None
:rtype: None
"""
# filter out contacts without a birthday date
vcard_list = [
vcard for vcard in vcard_list if vcard.get_birthday() is not None]
# sort by date (month and day)
# The sort function should work for strings and datetime objects. All
# strings will be sorted before any datetime objects.
vcard_list.sort(
key=lambda x: (x.get_birthday().month, x.get_birthday().day)
if isinstance(x.get_birthday(), datetime.datetime)
else (0, 0, x.get_birthday()))
# add to string list
birthday_list = []
for vcard in vcard_list:
date = vcard.get_birthday()
if parsable:
if config.display_by_name() == "first_name":
birthday_list.append("%04d.%02d.%02d\t%s"
% (date.year, date.month, date.day,
vcard.get_first_name_last_name()))
else:
birthday_list.append("%04d.%02d.%02d\t%s"
% (date.year, date.month, date.day,
vcard.get_last_name_first_name()))
else:
if config.display_by_name() == "first_name":
birthday_list.append("%s\t%s"
% (vcard.get_first_name_last_name(),
vcard.get_formatted_birthday()))
else:
birthday_list.append("%s\t%s"
% (vcard.get_last_name_first_name(),
vcard.get_formatted_birthday()))
if birthday_list:
if parsable:
print('\n'.join(birthday_list))
else:
list_birthdays(birthday_list)
else:
if not parsable:
print("Found no birthdays")
sys.exit(1) |
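The mixed string/datetime sort key used above is worth seeing in isolation: datetimes sort by (month, day), while plain strings get a (0, 0, value) key and therefore sort first. A minimal self-contained sketch with made-up values:
import datetime

birthdays = [datetime.datetime(1990, 7, 14), "circa 1950", datetime.datetime(1985, 3, 2)]
birthdays.sort(key=lambda x: (x.month, x.day)
               if isinstance(x, datetime.datetime) else (0, 0, x))
print(birthdays)   # 'circa 1950' first, then March 2, then July 14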
def up(self):
"""Moves the layer up in the stacking order.
"""
i = self.index()
if i != None:
del self.canvas.layers[i]
i = min(len(self.canvas.layers), i+1)
self.canvas.layers.insert(i, self) | Moves the layer up in the stacking order. | Below is the instruction that describes the task:
### Input:
Moves the layer up in the stacking order.
### Response:
def up(self):
"""Moves the layer up in the stacking order.
"""
i = self.index()
if i != None:
del self.canvas.layers[i]
i = min(len(self.canvas.layers), i+1)
self.canvas.layers.insert(i, self) |
def _get_license_description(license_code):
""" Gets the body for a license based on a license code """
req = requests.get("{base_url}/licenses/{license_code}".format(
base_url=BASE_URL, license_code=license_code), headers=_HEADERS)
if req.status_code == requests.codes.ok:
s = req.json()["body"]
search_curly = re.search(r'\{(.*)\}', s)
search_square = re.search(r'\[(.*)\]', s)
license = ""
replace_string = '{year} {name}'.format(year=date.today().year,
name=_get_config_name())
if search_curly:
license = re.sub(r'\{(.+)\}', replace_string, s)
elif search_square:
license = re.sub(r'\[(.+)\]', replace_string, s)
else:
license = s
return license
else:
print(Fore.RED + 'No such license. Please check again.'),
print(Style.RESET_ALL),
sys.exit() | Gets the body for a license based on a license code | Below is the instruction that describes the task:
### Input:
Gets the body for a license based on a license code
### Response:
def _get_license_description(license_code):
""" Gets the body for a license based on a license code """
req = requests.get("{base_url}/licenses/{license_code}".format(
base_url=BASE_URL, license_code=license_code), headers=_HEADERS)
if req.status_code == requests.codes.ok:
s = req.json()["body"]
search_curly = re.search(r'\{(.*)\}', s)
search_square = re.search(r'\[(.*)\]', s)
license = ""
replace_string = '{year} {name}'.format(year=date.today().year,
name=_get_config_name())
if search_curly:
license = re.sub(r'\{(.+)\}', replace_string, s)
elif search_square:
license = re.sub(r'\[(.+)\]', replace_string, s)
else:
license = s
return license
else:
print(Fore.RED + 'No such license. Please check again.'),
print(Style.RESET_ALL),
sys.exit() |
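The placeholder substitution above can be tried on a snippet of license text; the sample body and name below are made up, and the behaviour shown (one greedy replacement spanning all bracketed placeholders) follows the regexes used in the function:
import re
from datetime import date

body = "Copyright (c) [year] [fullname]"
replace_string = '{year} {name}'.format(year=date.today().year, name="Jane Doe")
if re.search(r'\{(.*)\}', body):
    body = re.sub(r'\{(.+)\}', replace_string, body)
elif re.search(r'\[(.*)\]', body):
    body = re.sub(r'\[(.+)\]', replace_string, body)
print(body)   # e.g. "Copyright (c) 2025 Jane Doe"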
def safe_split_text(text: str, length: int = MAX_MESSAGE_LENGTH) -> typing.List[str]:
"""
Split long text
:param text:
:param length:
:return:
"""
# TODO: More informative description
temp_text = text
parts = []
while temp_text:
if len(temp_text) > length:
try:
split_pos = temp_text[:length].rindex(' ')
except ValueError:
split_pos = length
if split_pos < length // 4 * 3:
split_pos = length
parts.append(temp_text[:split_pos])
temp_text = temp_text[split_pos:].lstrip()
else:
parts.append(temp_text)
break
return parts | Split long text
:param text:
:param length:
:return: | Below is the instruction that describes the task:
### Input:
Split long text
:param text:
:param length:
:return:
### Response:
def safe_split_text(text: str, length: int = MAX_MESSAGE_LENGTH) -> typing.List[str]:
"""
Split long text
:param text:
:param length:
:return:
"""
# TODO: More informative description
temp_text = text
parts = []
while temp_text:
if len(temp_text) > length:
try:
split_pos = temp_text[:length].rindex(' ')
except ValueError:
split_pos = length
if split_pos < length // 4 * 3:
split_pos = length
parts.append(temp_text[:split_pos])
temp_text = temp_text[split_pos:].lstrip()
else:
parts.append(temp_text)
break
return parts |
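A short usage sketch showing the word-boundary behaviour (it assumes the function above is in scope; the tiny `length` is only for demonstration):
parts = safe_split_text("one two three four five six seven", length=10)
print(parts)   # ['one two', 'three four', 'five six', 'seven']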
def iter_get(
self,
name=None,
group=None,
index=None,
raster=None,
samples_only=False,
raw=False,
):
""" iterator over a channel
This is useful in case of large files with a small number of channels.
If the *raster* keyword argument is not *None* the output is
interpolated accordingly
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
raw : bool
return channel samples without applying the conversion rule; default
`False`
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
data = self._load_data(grp)
for fragment in data:
yield self.get(
group=gp_nr,
index=ch_nr,
raster=raster,
samples_only=samples_only,
data=fragment,
raw=raw,
) | iterator over a channel
This is useful in case of large files with a small number of channels.
If the *raster* keyword argument is not *None* the output is
interpolated accordingly
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
raw : bool
return channel samples without applying the conversion rule; default
`False` | Below is the instruction that describes the task:
### Input:
iterator over a channel
This is usefull in case of large files with a small number of channels.
If the *raster* keyword argument is not *None* the output is
interpolated accordingly
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
raw : bool
return channel samples without applying the conversion rule; default
`False`
### Response:
def iter_get(
self,
name=None,
group=None,
index=None,
raster=None,
samples_only=False,
raw=False,
):
""" iterator over a channel
This is useful in case of large files with a small number of channels.
If the *raster* keyword argument is not *None* the output is
interpolated accordingly
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
raw : bool
return channel samples without applying the conversion rule; default
`False`
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
data = self._load_data(grp)
for fragment in data:
yield self.get(
group=gp_nr,
index=ch_nr,
raster=raster,
samples_only=samples_only,
data=fragment,
raw=raw,
) |
def enabled(name):
'''
Ensure an Apache conf is enabled.
name
Name of the Apache conf
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
is_enabled = __salt__['apache.check_conf_enabled'](name)
if not is_enabled:
if __opts__['test']:
msg = 'Apache conf {0} is set to be enabled.'.format(name)
ret['comment'] = msg
ret['changes']['old'] = None
ret['changes']['new'] = name
ret['result'] = None
return ret
status = __salt__['apache.a2enconf'](name)['Status']
if isinstance(status, six.string_types) and 'enabled' in status:
ret['result'] = True
ret['changes']['old'] = None
ret['changes']['new'] = name
else:
ret['result'] = False
ret['comment'] = 'Failed to enable {0} Apache conf'.format(name)
if isinstance(status, six.string_types):
ret['comment'] = ret['comment'] + ' ({0})'.format(status)
return ret
else:
ret['comment'] = '{0} already enabled.'.format(name)
return ret | Ensure an Apache conf is enabled.
name
Name of the Apache conf | Below is the instruction that describes the task:
### Input:
Ensure an Apache conf is enabled.
name
Name of the Apache conf
### Response:
def enabled(name):
'''
Ensure an Apache conf is enabled.
name
Name of the Apache conf
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
is_enabled = __salt__['apache.check_conf_enabled'](name)
if not is_enabled:
if __opts__['test']:
msg = 'Apache conf {0} is set to be enabled.'.format(name)
ret['comment'] = msg
ret['changes']['old'] = None
ret['changes']['new'] = name
ret['result'] = None
return ret
status = __salt__['apache.a2enconf'](name)['Status']
if isinstance(status, six.string_types) and 'enabled' in status:
ret['result'] = True
ret['changes']['old'] = None
ret['changes']['new'] = name
else:
ret['result'] = False
ret['comment'] = 'Failed to enable {0} Apache conf'.format(name)
if isinstance(status, six.string_types):
ret['comment'] = ret['comment'] + ' ({0})'.format(status)
return ret
else:
ret['comment'] = '{0} already enabled.'.format(name)
return ret |
def select_graphic_rendition(self, *attrs):
"""Set display attributes.
:param list attrs: a list of display attributes to set.
"""
replace = {}
# Fast path for resetting all attributes.
if not attrs or attrs == (0, ):
self.cursor.attrs = self.default_char
return
else:
attrs = list(reversed(attrs))
while attrs:
attr = attrs.pop()
if attr == 0:
# Reset all attributes.
replace.update(self.default_char._asdict())
elif attr in g.FG_ANSI:
replace["fg"] = g.FG_ANSI[attr]
elif attr in g.BG:
replace["bg"] = g.BG_ANSI[attr]
elif attr in g.TEXT:
attr = g.TEXT[attr]
replace[attr[1:]] = attr.startswith("+")
elif attr in g.FG_AIXTERM:
replace.update(fg=g.FG_AIXTERM[attr], bold=True)
elif attr in g.BG_AIXTERM:
replace.update(bg=g.BG_AIXTERM[attr], bold=True)
elif attr in (g.FG_256, g.BG_256):
key = "fg" if attr == g.FG_256 else "bg"
try:
n = attrs.pop()
if n == 5: # 256.
m = attrs.pop()
replace[key] = g.FG_BG_256[m]
elif n == 2: # 24bit.
# This is somewhat non-standard but is nonetheless
# supported in quite a few terminals. See discussion
# here https://gist.github.com/XVilka/8346728.
replace[key] = "{0:02x}{1:02x}{2:02x}".format(
attrs.pop(), attrs.pop(), attrs.pop())
except IndexError:
pass
self.cursor.attrs = self.cursor.attrs._replace(**replace) | Set display attributes.
:param list attrs: a list of display attributes to set. | Below is the instruction that describes the task:
### Input:
Set display attributes.
:param list attrs: a list of display attributes to set.
### Response:
def select_graphic_rendition(self, *attrs):
"""Set display attributes.
:param list attrs: a list of display attributes to set.
"""
replace = {}
# Fast path for resetting all attributes.
if not attrs or attrs == (0, ):
self.cursor.attrs = self.default_char
return
else:
attrs = list(reversed(attrs))
while attrs:
attr = attrs.pop()
if attr == 0:
# Reset all attributes.
replace.update(self.default_char._asdict())
elif attr in g.FG_ANSI:
replace["fg"] = g.FG_ANSI[attr]
elif attr in g.BG:
replace["bg"] = g.BG_ANSI[attr]
elif attr in g.TEXT:
attr = g.TEXT[attr]
replace[attr[1:]] = attr.startswith("+")
elif attr in g.FG_AIXTERM:
replace.update(fg=g.FG_AIXTERM[attr], bold=True)
elif attr in g.BG_AIXTERM:
replace.update(bg=g.BG_AIXTERM[attr], bold=True)
elif attr in (g.FG_256, g.BG_256):
key = "fg" if attr == g.FG_256 else "bg"
try:
n = attrs.pop()
if n == 5: # 256.
m = attrs.pop()
replace[key] = g.FG_BG_256[m]
elif n == 2: # 24bit.
# This is somewhat non-standard but is nonetheless
# supported in quite a few terminals. See discussion
# here https://gist.github.com/XVilka/8346728.
replace[key] = "{0:02x}{1:02x}{2:02x}".format(
attrs.pop(), attrs.pop(), attrs.pop())
except IndexError:
pass
self.cursor.attrs = self.cursor.attrs._replace(**replace) |
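The extended-colour branch above pops extra parameters off the attribute list; a self-contained sketch of just that arithmetic (constants inlined so it runs without pyte):
# SGR 38;2;R;G;B selects a 24-bit foreground (48;2;... the background);
# the handler formats the three components as a lowercase hex string.
attrs = [38, 2, 255, 128, 0]       # orange foreground
key = "fg" if attrs[0] == 38 else "bg"
marker, r, g, b = attrs[1:5]
assert marker == 2                  # 2 -> truecolor, 5 -> 256-colour index
print(key, "{0:02x}{1:02x}{2:02x}".format(r, g, b))   # fg ff8000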
def relu(inplace:bool=False, leaky:float=None):
"Return a relu activation, maybe `leaky` and `inplace`."
return nn.LeakyReLU(inplace=inplace, negative_slope=leaky) if leaky is not None else nn.ReLU(inplace=inplace) | Return a relu activation, maybe `leaky` and `inplace`. | Below is the instruction that describes the task:
### Input:
Return a relu activation, maybe `leaky` and `inplace`.
### Response:
def relu(inplace:bool=False, leaky:float=None):
"Return a relu activation, maybe `leaky` and `inplace`."
return nn.LeakyReLU(inplace=inplace, negative_slope=leaky) if leaky is not None else nn.ReLU(inplace=inplace) |
def resume_reading(self):
"""
Called by the client protocol to resume the receiving end.
The protocol's ``frame_received()`` method will be called once
again if some data is available for reading.
"""
# Clear the read pause status
self._recv_paused = False
# Call resume_reading() on the transport
self._transport.resume_reading()
# If there's data in the receive buffer, pass it on to the
# client protocol
if self._recv_buf:
self.data_received(b'') | Called by the client protocol to resume the receiving end.
The protocol's ``frame_received()`` method will be called once
again if some data is available for reading. | Below is the instruction that describes the task:
### Input:
Called by the client protocol to resume the receiving end.
The protocol's ``frame_received()`` method will be called once
again if some data is available for reading.
### Response:
def resume_reading(self):
"""
Called by the client protocol to resume the receiving end.
The protocol's ``frame_received()`` method will be called once
again if some data is available for reading.
"""
# Clear the read pause status
self._recv_paused = False
# Call resume_reading() on the transport
self._transport.resume_reading()
# If there's data in the receive buffer, pass it on to the
# client protocol
if self._recv_buf:
self.data_received(b'') |
def write_byte_data(self, address, register, value):
"""
SMBus Read Byte: i2c_smbus_read_byte_data()
============================================
This reads a single byte from a device, from a designated register.
The register is specified through the Comm byte.
S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA
"""
return self.smbus.write_byte_data(address, register, value) | SMBus Read Byte: i2c_smbus_read_byte_data()
============================================
This reads a single byte from a device, from a designated register.
The register is specified through the Comm byte.
S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
        Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA | Below is the instruction that describes the task:
### Input:
SMBus Read Byte: i2c_smbus_read_byte_data()
============================================
This reads a single byte from a device, from a designated register.
The register is specified through the Comm byte.
S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA
### Response:
def write_byte_data(self, address, register, value):
"""
SMBus Read Byte: i2c_smbus_read_byte_data()
============================================
This reads a single byte from a device, from a designated register.
The register is specified through the Comm byte.
S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA
"""
return self.smbus.write_byte_data(address, register, value) |
def read_scans(sdmfile, bdfdir=''):
""" Use sdmpy to get all scans and info needed for rtpipe as dict """
sdm = getsdm(sdmfile, bdfdir=bdfdir)
scandict = {}
skippedscans = []
for scan in sdm.scans():
scannum = int(scan.idx)
scandict[scannum] = {}
intentstr = ' '.join(scan.intents)
src = scan.source
scandict[scannum]['source'] = src
scandict[scannum]['intent'] = intentstr
# bdf specific properties
try:
startmjd = scan.bdf.startTime
nints = scan.bdf.numIntegration
interval = scan.bdf.get_integration(0).interval
endmjd = startmjd + (nints*interval)/(24*3600)
bdfstr = scan.bdf.fname
        except (AttributeError, IOError):
skippedscans.append(scannum)
else:
scandict[scannum]['startmjd'] = startmjd
scandict[scannum]['endmjd'] = endmjd
scandict[scannum]['duration'] = endmjd-startmjd
scandict[scannum]['nints'] = nints
scandict[scannum]['bdfstr'] = bdfstr
# clear reference to nonexistent BDFs (either bad or not in standard locations)
if (not os.path.exists(scandict[scannum]['bdfstr'])) or ('X1' in bdfstr):
scandict[scannum]['bdfstr'] = None
logger.debug('Invalid bdf for %d of %s' % (scannum, sdmfile) )
if skippedscans:
logger.warn('No BDF found for scans {0}'.format(skippedscans))
    return scandict | Use sdmpy to get all scans and info needed for rtpipe as dict | Below is the instruction that describes the task:
### Input:
Use sdmpy to get all scans and info needed for rtpipe as dict
### Response:
def read_scans(sdmfile, bdfdir=''):
""" Use sdmpy to get all scans and info needed for rtpipe as dict """
sdm = getsdm(sdmfile, bdfdir=bdfdir)
scandict = {}
skippedscans = []
for scan in sdm.scans():
scannum = int(scan.idx)
scandict[scannum] = {}
intentstr = ' '.join(scan.intents)
src = scan.source
scandict[scannum]['source'] = src
scandict[scannum]['intent'] = intentstr
# bdf specific properties
try:
startmjd = scan.bdf.startTime
nints = scan.bdf.numIntegration
interval = scan.bdf.get_integration(0).interval
endmjd = startmjd + (nints*interval)/(24*3600)
bdfstr = scan.bdf.fname
        except (AttributeError, IOError):
skippedscans.append(scannum)
else:
scandict[scannum]['startmjd'] = startmjd
scandict[scannum]['endmjd'] = endmjd
scandict[scannum]['duration'] = endmjd-startmjd
scandict[scannum]['nints'] = nints
scandict[scannum]['bdfstr'] = bdfstr
# clear reference to nonexistent BDFs (either bad or not in standard locations)
if (not os.path.exists(scandict[scannum]['bdfstr'])) or ('X1' in bdfstr):
scandict[scannum]['bdfstr'] = None
logger.debug('Invalid bdf for %d of %s' % (scannum, sdmfile) )
if skippedscans:
logger.warn('No BDF found for scans {0}'.format(skippedscans))
return scandict |
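A usage sketch for the read_scans record above; the SDM file name is hypothetical and sdmpy must be importable for getsdm() to succeed:
scandict = read_scans('mydata.sdm')
for scannum, info in sorted(scandict.items()):
    # scans whose BDF could not be read carry only 'source' and 'intent'
    print(scannum, info['source'], info['intent'], info.get('nints'))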
def figure(
key=None,
width=400,
height=500,
lighting=True,
controls=True,
controls_vr=False,
controls_light=False,
debug=False,
**kwargs
):
"""Create a new figure if no key is given, or return the figure associated with key.
:param key: Python object that identifies this figure
:param int width: pixel width of WebGL canvas
:param int height: .. height ..
:param bool lighting: use lighting or not
:param bool controls: show controls or not
:param bool controls_vr: show controls for VR or not
:param bool debug: show debug buttons or not
:return: :any:`Figure`
"""
if key is not None and key in current.figures:
current.figure = current.figures[key]
current.container = current.containers[key]
elif isinstance(key, ipv.Figure) and key in current.figures.values():
key_index = list(current.figures.values()).index(key)
key = list(current.figures.keys())[key_index]
current.figure = current.figures[key]
current.container = current.containers[key]
else:
current.figure = ipv.Figure(width=width, height=height, **kwargs)
current.container = ipywidgets.VBox()
current.container.children = [current.figure]
if key is None:
key = uuid.uuid4().hex
current.figures[key] = current.figure
current.containers[key] = current.container
if controls:
# stereo = ipywidgets.ToggleButton(value=current.figure.stereo, description='stereo', icon='eye')
# l1 = ipywidgets.jslink((current.figure, 'stereo'), (stereo, 'value'))
# current.container.children += (ipywidgets.HBox([stereo, ]),)
pass # stereo and fullscreen are now include in the js code (per view)
if controls_vr:
eye_separation = ipywidgets.FloatSlider(value=current.figure.eye_separation, min=-10, max=10, icon='eye')
ipywidgets.jslink((eye_separation, 'value'), (current.figure, 'eye_separation'))
current.container.children += (eye_separation,)
if controls_light:
globals()['controls_light']()
if debug:
show = ipywidgets.ToggleButtons(options=["Volume", "Back", "Front", "Coordinate"])
current.container.children += (show,)
# ipywidgets.jslink((current.figure, 'show'), (show, 'value'))
traitlets.link((current.figure, 'show'), (show, 'value'))
return current.figure | Create a new figure if no key is given, or return the figure associated with key.
:param key: Python object that identifies this figure
:param int width: pixel width of WebGL canvas
:param int height: .. height ..
:param bool lighting: use lighting or not
:param bool controls: show controls or not
:param bool controls_vr: show controls for VR or not
:param bool debug: show debug buttons or not
    :return: :any:`Figure` | Below is the instruction that describes the task:
### Input:
Create a new figure if no key is given, or return the figure associated with key.
:param key: Python object that identifies this figure
:param int width: pixel width of WebGL canvas
:param int height: .. height ..
:param bool lighting: use lighting or not
:param bool controls: show controls or not
:param bool controls_vr: show controls for VR or not
:param bool debug: show debug buttons or not
:return: :any:`Figure`
### Response:
def figure(
key=None,
width=400,
height=500,
lighting=True,
controls=True,
controls_vr=False,
controls_light=False,
debug=False,
**kwargs
):
"""Create a new figure if no key is given, or return the figure associated with key.
:param key: Python object that identifies this figure
:param int width: pixel width of WebGL canvas
:param int height: .. height ..
:param bool lighting: use lighting or not
:param bool controls: show controls or not
:param bool controls_vr: show controls for VR or not
:param bool debug: show debug buttons or not
:return: :any:`Figure`
"""
if key is not None and key in current.figures:
current.figure = current.figures[key]
current.container = current.containers[key]
elif isinstance(key, ipv.Figure) and key in current.figures.values():
key_index = list(current.figures.values()).index(key)
key = list(current.figures.keys())[key_index]
current.figure = current.figures[key]
current.container = current.containers[key]
else:
current.figure = ipv.Figure(width=width, height=height, **kwargs)
current.container = ipywidgets.VBox()
current.container.children = [current.figure]
if key is None:
key = uuid.uuid4().hex
current.figures[key] = current.figure
current.containers[key] = current.container
if controls:
# stereo = ipywidgets.ToggleButton(value=current.figure.stereo, description='stereo', icon='eye')
# l1 = ipywidgets.jslink((current.figure, 'stereo'), (stereo, 'value'))
# current.container.children += (ipywidgets.HBox([stereo, ]),)
pass # stereo and fullscreen are now include in the js code (per view)
if controls_vr:
eye_separation = ipywidgets.FloatSlider(value=current.figure.eye_separation, min=-10, max=10, icon='eye')
ipywidgets.jslink((eye_separation, 'value'), (current.figure, 'eye_separation'))
current.container.children += (eye_separation,)
if controls_light:
globals()['controls_light']()
if debug:
show = ipywidgets.ToggleButtons(options=["Volume", "Back", "Front", "Coordinate"])
current.container.children += (show,)
# ipywidgets.jslink((current.figure, 'show'), (show, 'value'))
traitlets.link((current.figure, 'show'), (show, 'value'))
return current.figure |
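A usage sketch for the figure record above, assuming ipyvolume is imported as ipv in a Jupyter notebook:
import ipyvolume as ipv
fig = ipv.figure(key='demo', width=600, height=400)  # creates and registers a new Figure
same = ipv.figure(key='demo')                        # the same key returns the existing Figure
assert fig is same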
def execute(self, offset=0, **query):
"""
Executes a query. Additional query parameters can be passed
as keyword arguments.
Returns: The request parameters and the raw query response.
"""
_params = self._build_query(**query)
self._page_offset = offset
_params.update(
offset=self._page_offset,
limit=min(self._page_size, self._limit)
)
logger.debug('executing query. from/limit: %6d/%d' %
(_params['offset'], _params['limit']))
# If the request results in a SolveError (ie bad filter) set the error.
try:
self._response = self._client.post(self._data_url, _params)
except SolveError as e:
self._error = e
raise
logger.debug('query response took: %(took)d ms, total: %(total)d'
% self._response)
return _params, self._response | Executes a query. Additional query parameters can be passed
as keyword arguments.
        Returns: The request parameters and the raw query response. | Below is the instruction that describes the task:
### Input:
Executes a query. Additional query parameters can be passed
as keyword arguments.
Returns: The request parameters and the raw query response.
### Response:
def execute(self, offset=0, **query):
"""
Executes a query. Additional query parameters can be passed
as keyword arguments.
Returns: The request parameters and the raw query response.
"""
_params = self._build_query(**query)
self._page_offset = offset
_params.update(
offset=self._page_offset,
limit=min(self._page_size, self._limit)
)
logger.debug('executing query. from/limit: %6d/%d' %
(_params['offset'], _params['limit']))
# If the request results in a SolveError (ie bad filter) set the error.
try:
self._response = self._client.post(self._data_url, _params)
except SolveError as e:
self._error = e
raise
logger.debug('query response took: %(took)d ms, total: %(total)d'
% self._response)
return _params, self._response |
def pokeStorable(self, storable, objname, obj, container, visited=None, _stack=None, **kwargs):
"""
Arguments:
storable (StorableHandler): storable instance.
objname (any): record reference.
obj (any): object to be serialized.
container (any): container.
visited (dict): map of the previously serialized objects that are
passed by references; keys are the objects' IDs.
_stack (CallStack): stack of parent object names.
Trailing keyword arguments are passed to the :class:`Storable` instance's
:attr:`~Storable.poke`.
"""
#print((objname, storable.storable_type)) # debug
storable.poke(self, objname, obj, container, visited=visited, _stack=_stack, **kwargs)
try:
record = self.getRecord(objname, container)
except KeyError:
# fake storable; silently skip
if self.verbose:
print("skipping `{}` (type: {})".format(objname, storable.storable_type))
if 1 < self.verbose:
print(traceback.format_exc())
else:
self.setRecordAttr('type', storable.storable_type, record)
if storable.version is not None:
self.setRecordAttr('version', from_version(storable.version), record) | Arguments:
storable (StorableHandler): storable instance.
objname (any): record reference.
obj (any): object to be serialized.
container (any): container.
visited (dict): map of the previously serialized objects that are
passed by references; keys are the objects' IDs.
_stack (CallStack): stack of parent object names.
Trailing keyword arguments are passed to the :class:`Storable` instance's
        :attr:`~Storable.poke`. | Below is the instruction that describes the task:
### Input:
Arguments:
storable (StorableHandler): storable instance.
objname (any): record reference.
obj (any): object to be serialized.
container (any): container.
visited (dict): map of the previously serialized objects that are
passed by references; keys are the objects' IDs.
_stack (CallStack): stack of parent object names.
Trailing keyword arguments are passed to the :class:`Storable` instance's
:attr:`~Storable.poke`.
### Response:
def pokeStorable(self, storable, objname, obj, container, visited=None, _stack=None, **kwargs):
"""
Arguments:
storable (StorableHandler): storable instance.
objname (any): record reference.
obj (any): object to be serialized.
container (any): container.
visited (dict): map of the previously serialized objects that are
passed by references; keys are the objects' IDs.
_stack (CallStack): stack of parent object names.
Trailing keyword arguments are passed to the :class:`Storable` instance's
:attr:`~Storable.poke`.
"""
#print((objname, storable.storable_type)) # debug
storable.poke(self, objname, obj, container, visited=visited, _stack=_stack, **kwargs)
try:
record = self.getRecord(objname, container)
except KeyError:
# fake storable; silently skip
if self.verbose:
print("skipping `{}` (type: {})".format(objname, storable.storable_type))
if 1 < self.verbose:
print(traceback.format_exc())
else:
self.setRecordAttr('type', storable.storable_type, record)
if storable.version is not None:
self.setRecordAttr('version', from_version(storable.version), record) |
def SLIT_DIFFRACTION(x,g):
"""
Instrumental (slit) function.
"""
y = zeros(len(x))
index_zero = x==0
index_nonzero = ~index_zero
dk_ = pi/g
x_ = dk_*x[index_nonzero]
w_ = sin(x_)
r_ = w_**2/x_**2
y[index_zero] = 1
y[index_nonzero] = r_/g
return y | Instrumental (slit) function. | Below is the the instruction that describes the task:
### Input:
Instrumental (slit) function.
### Response:
def SLIT_DIFFRACTION(x,g):
"""
Instrumental (slit) function.
"""
y = zeros(len(x))
index_zero = x==0
index_nonzero = ~index_zero
dk_ = pi/g
x_ = dk_*x[index_nonzero]
w_ = sin(x_)
r_ = w_**2/x_**2
y[index_zero] = 1
y[index_nonzero] = r_/g
return y |
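A small sketch for the SLIT_DIFFRACTION record above; it assumes the bare numpy names used in the body (zeros, pi, sin) plus linspace are in scope, e.g. via a star import as in the surrounding module:
from numpy import *   # zeros, pi, sin, linspace
x = linspace(-1.0, 1.0, 5)      # wavenumber offsets; the middle sample is exactly 0
y = SLIT_DIFFRACTION(x, 0.5)    # slit parameter g = 0.5
# y[2] == 1 from the x == 0 branch; the other samples equal sin(pi*x/g)**2 / (pi*x/g)**2 / g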
def mounts():
'''
Return a list of current MooseFS mounts
CLI Example:
.. code-block:: bash
salt '*' moosefs.mounts
'''
cmd = 'mount'
ret = {}
out = __salt__['cmd.run_all'](cmd)
output = out['stdout'].splitlines()
for line in output:
if not line:
continue
if 'fuse.mfs' in line:
comps = line.split(' ')
info1 = comps[0].split(':')
info2 = info1[1].split('/')
ret[comps[2]] = {
'remote': {
'master': info1[0],
'port': info2[0],
'subfolder': '/' + info2[1],
},
'local': comps[2],
'options': (comps[5].replace('(', '').replace(')', '')
.split(',')),
}
return ret | Return a list of current MooseFS mounts
CLI Example:
.. code-block:: bash
        salt '*' moosefs.mounts | Below is the instruction that describes the task:
### Input:
Return a list of current MooseFS mounts
CLI Example:
.. code-block:: bash
salt '*' moosefs.mounts
### Response:
def mounts():
'''
Return a list of current MooseFS mounts
CLI Example:
.. code-block:: bash
salt '*' moosefs.mounts
'''
cmd = 'mount'
ret = {}
out = __salt__['cmd.run_all'](cmd)
output = out['stdout'].splitlines()
for line in output:
if not line:
continue
if 'fuse.mfs' in line:
comps = line.split(' ')
info1 = comps[0].split(':')
info2 = info1[1].split('/')
ret[comps[2]] = {
'remote': {
'master': info1[0],
'port': info2[0],
'subfolder': '/' + info2[1],
},
'local': comps[2],
'options': (comps[5].replace('(', '').replace(')', '')
.split(',')),
}
return ret |
def between(self, time_):
"""
Compare if the parameter HH:MM is in the time range.
"""
hour = int(time_[0:2])
minute = int(time_[3:5])
return not (
hour < self.h1 or hour > self.h2 or
(hour == self.h1 and minute < self.m1) or
(hour == self.h2 and minute > self.m2)
        ) | Compare if the parameter HH:MM is in the time range. | Below is the instruction that describes the task:
### Input:
Compare if the parameter HH:MM is in the time range.
### Response:
def between(self, time_):
"""
Compare if the parameter HH:MM is in the time range.
"""
hour = int(time_[0:2])
minute = int(time_[3:5])
return not (
hour < self.h1 or hour > self.h2 or
(hour == self.h1 and minute < self.m1) or
(hour == self.h2 and minute > self.m2)
) |
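The class that owns the between record above is not shown; a minimal sketch with assumed attributes h1/m1/h2/m2 holding the range bounds:
class TimeRange(object):
    between = between  # reuse the function from the record above as a method
    def __init__(self, h1, m1, h2, m2):
        self.h1, self.m1, self.h2, self.m2 = h1, m1, h2, m2
r = TimeRange(9, 30, 17, 0)
print(r.between('09:45'))   # True  -- inside 09:30-17:00
print(r.between('09:15'))   # False -- minute below m1 in the starting hour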
def export_agg_losses_ebr(ekey, dstore):
"""
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
"""
if 'ruptures' not in dstore:
logging.warning('There are no ruptures in the datastore')
return []
name, ext = export.keyfunc(ekey)
agg_losses = dstore['losses_by_event']
has_rup_data = 'ruptures' in dstore
extra_list = [('magnitude', F32),
('centroid_lon', F32),
('centroid_lat', F32),
('centroid_depth', F32)] if has_rup_data else []
oq = dstore['oqparam']
lti = oq.lti
dtlist = ([('event_id', U64), ('rup_id', U32), ('year', U32)]
+ extra_list + oq.loss_dt_list())
elt_dt = numpy.dtype(dtlist)
elt = numpy.zeros(len(agg_losses), elt_dt)
writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
events = dstore['events'].value
events_by_rupid = collections.defaultdict(list)
for event in events:
rupid = event['eid'] // TWO32
events_by_rupid[rupid].append(event)
year_of = year_dict(events['eid'], oq.investigation_time, oq.ses_seed)
rup_data = {}
event_by_eid = {} # eid -> event
# populate rup_data and event_by_eid
# TODO: avoid reading the events twice
for rgetter in getters.gen_rupture_getters(dstore):
ruptures = rgetter.get_ruptures()
for ebr in ruptures:
for event in events_by_rupid[ebr.serial]:
event_by_eid[event['eid']] = event
if has_rup_data:
rup_data.update(get_rup_data(ruptures))
for r, row in enumerate(agg_losses):
rec = elt[r]
event = event_by_eid[row['eid']]
rec['event_id'] = eid = event['eid']
rec['year'] = year_of[eid]
if rup_data:
rec['rup_id'] = rup_id = event['eid'] // TWO32
(rec['magnitude'], rec['centroid_lon'], rec['centroid_lat'],
rec['centroid_depth']) = rup_data[rup_id]
for lt, i in lti.items():
rec[lt] = row['loss'][i]
elt.sort(order=['year', 'event_id'])
dest = dstore.build_fname('elt', '', 'csv')
writer.save(elt, dest)
return writer.getsaved() | :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object | Below is the instruction that describes the task:
### Input:
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
### Response:
def export_agg_losses_ebr(ekey, dstore):
"""
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
"""
if 'ruptures' not in dstore:
logging.warning('There are no ruptures in the datastore')
return []
name, ext = export.keyfunc(ekey)
agg_losses = dstore['losses_by_event']
has_rup_data = 'ruptures' in dstore
extra_list = [('magnitude', F32),
('centroid_lon', F32),
('centroid_lat', F32),
('centroid_depth', F32)] if has_rup_data else []
oq = dstore['oqparam']
lti = oq.lti
dtlist = ([('event_id', U64), ('rup_id', U32), ('year', U32)]
+ extra_list + oq.loss_dt_list())
elt_dt = numpy.dtype(dtlist)
elt = numpy.zeros(len(agg_losses), elt_dt)
writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
events = dstore['events'].value
events_by_rupid = collections.defaultdict(list)
for event in events:
rupid = event['eid'] // TWO32
events_by_rupid[rupid].append(event)
year_of = year_dict(events['eid'], oq.investigation_time, oq.ses_seed)
rup_data = {}
event_by_eid = {} # eid -> event
# populate rup_data and event_by_eid
# TODO: avoid reading the events twice
for rgetter in getters.gen_rupture_getters(dstore):
ruptures = rgetter.get_ruptures()
for ebr in ruptures:
for event in events_by_rupid[ebr.serial]:
event_by_eid[event['eid']] = event
if has_rup_data:
rup_data.update(get_rup_data(ruptures))
for r, row in enumerate(agg_losses):
rec = elt[r]
event = event_by_eid[row['eid']]
rec['event_id'] = eid = event['eid']
rec['year'] = year_of[eid]
if rup_data:
rec['rup_id'] = rup_id = event['eid'] // TWO32
(rec['magnitude'], rec['centroid_lon'], rec['centroid_lat'],
rec['centroid_depth']) = rup_data[rup_id]
for lt, i in lti.items():
rec[lt] = row['loss'][i]
elt.sort(order=['year', 'event_id'])
dest = dstore.build_fname('elt', '', 'csv')
writer.save(elt, dest)
return writer.getsaved() |
def close(self):
"""Stop monitoring and close connection."""
_LOGGER.debug("Closing...")
self.closed = True
if self.connected:
            self._writer.close() | Stop monitoring and close connection. | Below is the instruction that describes the task:
### Input:
Stop monitoring and close connection.
### Response:
def close(self):
"""Stop monitoring and close connection."""
_LOGGER.debug("Closing...")
self.closed = True
if self.connected:
self._writer.close() |
def delete(self, url_path, data=None):
"""Delete an object from the JSS.
In general, it is better to use a higher level interface for
deleting objects, namely, using a JSSObject's delete method.
Args:
url_path: String API endpoint path to DEL, with ID (e.g.
"/packages/id/<object ID>")
Raises:
JSSDeleteError if provided url_path has a >= 400 response.
"""
request_url = "%s%s" % (self._url, url_path)
if data:
response = self.session.delete(request_url, data=data)
else:
response = self.session.delete(request_url)
if response.status_code == 200 and self.verbose:
print "DEL %s: Success." % request_url
elif response.status_code >= 400:
error_handler(JSSDeleteError, response) | Delete an object from the JSS.
In general, it is better to use a higher level interface for
deleting objects, namely, using a JSSObject's delete method.
Args:
url_path: String API endpoint path to DEL, with ID (e.g.
"/packages/id/<object ID>")
Raises:
            JSSDeleteError if provided url_path has a >= 400 response. | Below is the instruction that describes the task:
### Input:
Delete an object from the JSS.
In general, it is better to use a higher level interface for
deleting objects, namely, using a JSSObject's delete method.
Args:
url_path: String API endpoint path to DEL, with ID (e.g.
"/packages/id/<object ID>")
Raises:
JSSDeleteError if provided url_path has a >= 400 response.
### Response:
def delete(self, url_path, data=None):
"""Delete an object from the JSS.
In general, it is better to use a higher level interface for
deleting objects, namely, using a JSSObject's delete method.
Args:
url_path: String API endpoint path to DEL, with ID (e.g.
"/packages/id/<object ID>")
Raises:
JSSDeleteError if provided url_path has a >= 400 response.
"""
request_url = "%s%s" % (self._url, url_path)
if data:
response = self.session.delete(request_url, data=data)
else:
response = self.session.delete(request_url)
if response.status_code == 200 and self.verbose:
print "DEL %s: Success." % request_url
elif response.status_code >= 400:
error_handler(JSSDeleteError, response) |
def _normalize_json_search_response(self, json):
"""
Normalizes a JSON search response so that PB and HTTP have the
same return value
"""
result = {}
if 'facet_counts' in json:
result['facet_counts'] = json[u'facet_counts']
if 'grouped' in json:
result['grouped'] = json[u'grouped']
if 'stats' in json:
result['stats'] = json[u'stats']
if u'response' in json:
result['num_found'] = json[u'response'][u'numFound']
result['max_score'] = float(json[u'response'][u'maxScore'])
docs = []
for doc in json[u'response'][u'docs']:
resdoc = {}
if u'_yz_rk' in doc:
# Is this a Riak 2.0 result?
resdoc = doc
else:
# Riak Search 1.0 Legacy assumptions about format
resdoc[u'id'] = doc[u'id']
if u'fields' in doc:
for k, v in six.iteritems(doc[u'fields']):
resdoc[k] = v
docs.append(resdoc)
result['docs'] = docs
return result | Normalizes a JSON search response so that PB and HTTP have the
        same return value | Below is the instruction that describes the task:
### Input:
Normalizes a JSON search response so that PB and HTTP have the
same return value
### Response:
def _normalize_json_search_response(self, json):
"""
Normalizes a JSON search response so that PB and HTTP have the
same return value
"""
result = {}
if 'facet_counts' in json:
result['facet_counts'] = json[u'facet_counts']
if 'grouped' in json:
result['grouped'] = json[u'grouped']
if 'stats' in json:
result['stats'] = json[u'stats']
if u'response' in json:
result['num_found'] = json[u'response'][u'numFound']
result['max_score'] = float(json[u'response'][u'maxScore'])
docs = []
for doc in json[u'response'][u'docs']:
resdoc = {}
if u'_yz_rk' in doc:
# Is this a Riak 2.0 result?
resdoc = doc
else:
# Riak Search 1.0 Legacy assumptions about format
resdoc[u'id'] = doc[u'id']
if u'fields' in doc:
for k, v in six.iteritems(doc[u'fields']):
resdoc[k] = v
docs.append(resdoc)
result['docs'] = docs
return result |
def convert_to_group_ids(groups, vpc_id=None, vpc_name=None, region=None, key=None,
keyid=None, profile=None):
'''
Given a list of security groups and a vpc_id, convert_to_group_ids will
convert all list items in the given list to security group ids.
CLI example::
salt myminion boto_secgroup.convert_to_group_ids mysecgroup vpc-89yhh7h
'''
log.debug('security group contents %s pre-conversion', groups)
group_ids = []
for group in groups:
group_id = get_group_id(name=group, vpc_id=vpc_id,
vpc_name=vpc_name, region=region,
key=key, keyid=keyid, profile=profile)
if not group_id:
# Security groups are a big deal - need to fail if any can't be resolved...
raise CommandExecutionError('Could not resolve Security Group name '
'{0} to a Group ID'.format(group))
else:
group_ids.append(six.text_type(group_id))
log.debug('security group contents %s post-conversion', group_ids)
return group_ids | Given a list of security groups and a vpc_id, convert_to_group_ids will
convert all list items in the given list to security group ids.
CLI example::
        salt myminion boto_secgroup.convert_to_group_ids mysecgroup vpc-89yhh7h | Below is the instruction that describes the task:
### Input:
Given a list of security groups and a vpc_id, convert_to_group_ids will
convert all list items in the given list to security group ids.
CLI example::
salt myminion boto_secgroup.convert_to_group_ids mysecgroup vpc-89yhh7h
### Response:
def convert_to_group_ids(groups, vpc_id=None, vpc_name=None, region=None, key=None,
keyid=None, profile=None):
'''
Given a list of security groups and a vpc_id, convert_to_group_ids will
convert all list items in the given list to security group ids.
CLI example::
salt myminion boto_secgroup.convert_to_group_ids mysecgroup vpc-89yhh7h
'''
log.debug('security group contents %s pre-conversion', groups)
group_ids = []
for group in groups:
group_id = get_group_id(name=group, vpc_id=vpc_id,
vpc_name=vpc_name, region=region,
key=key, keyid=keyid, profile=profile)
if not group_id:
# Security groups are a big deal - need to fail if any can't be resolved...
raise CommandExecutionError('Could not resolve Security Group name '
'{0} to a Group ID'.format(group))
else:
group_ids.append(six.text_type(group_id))
log.debug('security group contents %s post-conversion', group_ids)
return group_ids |
def updateState(self, slicedArray, rtiInfo, separateFields):
""" Sets the slicedArray and rtiInfo and other members. This will reset the model.
Will be called from the tableInspector._drawContents.
"""
self.beginResetModel()
try:
# The sliced array can be a masked array or a (regular) numpy array.
# The table works fine with masked arrays, no need to replace the masked values.
self._slicedArray = slicedArray
if slicedArray is None:
self._nRows = 0
self._nCols = 0
self._fieldNames = []
else:
self._nRows, self._nCols = self._slicedArray.shape
if self._slicedArray.data.dtype.names:
self._fieldNames = self._slicedArray.data.dtype.names
else:
self._fieldNames = []
self._rtiInfo = rtiInfo
self._separateFields = separateFields
# Don't put numbers in the header if the record is of structured type, fields are
# placed in separate cells and the fake dimension is selected (combo index 0)
if self._separateFields and self._fieldNames:
if self._rtiInfo['x-dim'] == FAKE_DIM_NAME:
self._separateFieldOrientation = Qt.Horizontal
self._numbersInHeader = False
elif self._rtiInfo['y-dim'] == FAKE_DIM_NAME:
self._separateFieldOrientation = Qt.Vertical
self._numbersInHeader = False
else:
self._separateFieldOrientation = Qt.Horizontal
self._numbersInHeader = True
else:
self._separateFieldOrientation = None
self._numbersInHeader = True
finally:
self.endResetModel() | Sets the slicedArray and rtiInfo and other members. This will reset the model.
            Will be called from the tableInspector._drawContents. | Below is the instruction that describes the task:
### Input:
Sets the slicedArray and rtiInfo and other members. This will reset the model.
Will be called from the tableInspector._drawContents.
### Response:
def updateState(self, slicedArray, rtiInfo, separateFields):
""" Sets the slicedArray and rtiInfo and other members. This will reset the model.
Will be called from the tableInspector._drawContents.
"""
self.beginResetModel()
try:
# The sliced array can be a masked array or a (regular) numpy array.
# The table works fine with masked arrays, no need to replace the masked values.
self._slicedArray = slicedArray
if slicedArray is None:
self._nRows = 0
self._nCols = 0
self._fieldNames = []
else:
self._nRows, self._nCols = self._slicedArray.shape
if self._slicedArray.data.dtype.names:
self._fieldNames = self._slicedArray.data.dtype.names
else:
self._fieldNames = []
self._rtiInfo = rtiInfo
self._separateFields = separateFields
# Don't put numbers in the header if the record is of structured type, fields are
# placed in separate cells and the fake dimension is selected (combo index 0)
if self._separateFields and self._fieldNames:
if self._rtiInfo['x-dim'] == FAKE_DIM_NAME:
self._separateFieldOrientation = Qt.Horizontal
self._numbersInHeader = False
elif self._rtiInfo['y-dim'] == FAKE_DIM_NAME:
self._separateFieldOrientation = Qt.Vertical
self._numbersInHeader = False
else:
self._separateFieldOrientation = Qt.Horizontal
self._numbersInHeader = True
else:
self._separateFieldOrientation = None
self._numbersInHeader = True
finally:
self.endResetModel() |
def turbulent_Sieder_Tate(Re, Pr, mu=None, mu_w=None):
r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [1]_ and supposedly [2]_.
.. math::
Nu = 0.027Re^{4/5}Pr^{1/3}\left(\frac{\mu}{\mu_s}\right)^{0.14}
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
mu : float
Viscosity of fluid, [Pa*s]
mu_w : float
Viscosity of fluid at wall temperature, [Pa*s]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
A linear coefficient of 0.023 is often listed with this equation. The
source of the discrepancy is not known. The equation is not present in the
original paper, but is nevertheless the source usually cited for it.
Examples
--------
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2)
286.9178136793052
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2, mu=0.01, mu_w=0.067)
219.84016455766044
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Sieder, E. N., and G. E. Tate. "Heat Transfer and Pressure Drop of
Liquids in Tubes." Industrial & Engineering Chemistry 28, no. 12
(December 1, 1936): 1429-35. doi:10.1021/ie50324a027.
'''
Nu = 0.027*Re**0.8*Pr**(1/3.)
if mu_w and mu:
Nu *= (mu/mu_w)**0.14
return Nu | r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [1]_ and supposedly [2]_.
.. math::
Nu = 0.027Re^{4/5}Pr^{1/3}\left(\frac{\mu}{\mu_s}\right)^{0.14}
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
mu : float
Viscosity of fluid, [Pa*s]
mu_w : float
Viscosity of fluid at wall temperature, [Pa*s]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
A linear coefficient of 0.023 is often listed with this equation. The
source of the discrepancy is not known. The equation is not present in the
original paper, but is nevertheless the source usually cited for it.
Examples
--------
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2)
286.9178136793052
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2, mu=0.01, mu_w=0.067)
219.84016455766044
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Sieder, E. N., and G. E. Tate. "Heat Transfer and Pressure Drop of
Liquids in Tubes." Industrial & Engineering Chemistry 28, no. 12
       (December 1, 1936): 1429-35. doi:10.1021/ie50324a027. | Below is the instruction that describes the task:
### Input:
r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [1]_ and supposedly [2]_.
.. math::
Nu = 0.027Re^{4/5}Pr^{1/3}\left(\frac{\mu}{\mu_s}\right)^{0.14}
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
mu : float
Viscosity of fluid, [Pa*s]
mu_w : float
Viscosity of fluid at wall temperature, [Pa*s]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
A linear coefficient of 0.023 is often listed with this equation. The
source of the discrepancy is not known. The equation is not present in the
original paper, but is nevertheless the source usually cited for it.
Examples
--------
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2)
286.9178136793052
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2, mu=0.01, mu_w=0.067)
219.84016455766044
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Sieder, E. N., and G. E. Tate. "Heat Transfer and Pressure Drop of
Liquids in Tubes." Industrial & Engineering Chemistry 28, no. 12
(December 1, 1936): 1429-35. doi:10.1021/ie50324a027.
### Response:
def turbulent_Sieder_Tate(Re, Pr, mu=None, mu_w=None):
r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [1]_ and supposedly [2]_.
.. math::
Nu = 0.027Re^{4/5}Pr^{1/3}\left(\frac{\mu}{\mu_s}\right)^{0.14}
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
mu : float
Viscosity of fluid, [Pa*s]
mu_w : float
Viscosity of fluid at wall temperature, [Pa*s]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
A linear coefficient of 0.023 is often listed with this equation. The
source of the discrepancy is not known. The equation is not present in the
original paper, but is nevertheless the source usually cited for it.
Examples
--------
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2)
286.9178136793052
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2, mu=0.01, mu_w=0.067)
219.84016455766044
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Sieder, E. N., and G. E. Tate. "Heat Transfer and Pressure Drop of
Liquids in Tubes." Industrial & Engineering Chemistry 28, no. 12
(December 1, 1936): 1429-35. doi:10.1021/ie50324a027.
'''
Nu = 0.027*Re**0.8*Pr**(1/3.)
if mu_w and mu:
Nu *= (mu/mu_w)**0.14
return Nu |
def infect(cls, graph, key, default_scope=None):
"""
Forcibly convert an entry-point based factory to a ScopedFactory.
Must be invoked before resolving the entry point.
:raises AlreadyBoundError: for non entry-points; these should be declared with @scoped_binding
"""
func = graph.factory_for(key)
if isinstance(func, cls):
func = func.func
factory = cls(key, func, default_scope)
graph._registry.factories[key] = factory
return factory | Forcibly convert an entry-point based factory to a ScopedFactory.
Must be invoked before resolving the entry point.
        :raises AlreadyBoundError: for non entry-points; these should be declared with @scoped_binding | Below is the instruction that describes the task:
### Input:
Forcibly convert an entry-point based factory to a ScopedFactory.
Must be invoked before resolving the entry point.
:raises AlreadyBoundError: for non entry-points; these should be declared with @scoped_binding
### Response:
def infect(cls, graph, key, default_scope=None):
"""
Forcibly convert an entry-point based factory to a ScopedFactory.
Must be invoked before resolving the entry point.
:raises AlreadyBoundError: for non entry-points; these should be declared with @scoped_binding
"""
func = graph.factory_for(key)
if isinstance(func, cls):
func = func.func
factory = cls(key, func, default_scope)
graph._registry.factories[key] = factory
return factory |
def pair_tree_creator(meta_id):
"""Splits string into a pairtree path."""
chunks = []
for x in range(0, len(meta_id)):
if x % 2:
continue
if (len(meta_id) - 1) == x:
chunk = meta_id[x]
else:
chunk = meta_id[x: x + 2]
chunks.append(chunk)
    return os.sep + os.sep.join(chunks) + os.sep | Splits string into a pairtree path. | Below is the instruction that describes the task:
### Input:
Splits string into a pairtree path.
### Response:
def pair_tree_creator(meta_id):
"""Splits string into a pairtree path."""
chunks = []
for x in range(0, len(meta_id)):
if x % 2:
continue
if (len(meta_id) - 1) == x:
chunk = meta_id[x]
else:
chunk = meta_id[x: x + 2]
chunks.append(chunk)
return os.sep + os.sep.join(chunks) + os.sep |
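A quick check of the pair_tree_creator record above (output shown for a POSIX os.sep of '/'):
import os
print(pair_tree_creator('abcde'))    # /ab/cd/e/
print(pair_tree_creator('ab1234'))   # /ab/12/34/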
def _create_job_details(self, key, job_config, logfile, status):
"""Create a `JobDetails` for a single job
Parameters
----------
key : str
Key used to identify this particular job
job_config : dict
Dictionary with arguements passed to this particular job
logfile : str
Name of the associated log file
status : int
Current status of the job
Returns
-------
job_details : `fermipy.jobs.JobDetails`
Object with the details about a particular job.
"""
self.update_args(job_config)
job_details = JobDetails(jobname=self.full_linkname,
jobkey=key,
appname=self.appname,
logfile=logfile,
job_config=job_config,
timestamp=get_timestamp(),
file_dict=copy.deepcopy(self.files),
sub_file_dict=copy.deepcopy(self.sub_files),
status=status)
return job_details | Create a `JobDetails` for a single job
Parameters
----------
key : str
Key used to identify this particular job
job_config : dict
Dictionary with arguements passed to this particular job
logfile : str
Name of the associated log file
status : int
Current status of the job
Returns
-------
job_details : `fermipy.jobs.JobDetails`
            Object with the details about a particular job. | Below is the instruction that describes the task:
### Input:
Create a `JobDetails` for a single job
Parameters
----------
key : str
Key used to identify this particular job
job_config : dict
Dictionary with arguements passed to this particular job
logfile : str
Name of the associated log file
status : int
Current status of the job
Returns
-------
job_details : `fermipy.jobs.JobDetails`
Object with the details about a particular job.
### Response:
def _create_job_details(self, key, job_config, logfile, status):
"""Create a `JobDetails` for a single job
Parameters
----------
key : str
Key used to identify this particular job
job_config : dict
Dictionary with arguements passed to this particular job
logfile : str
Name of the associated log file
status : int
Current status of the job
Returns
-------
job_details : `fermipy.jobs.JobDetails`
Object with the details about a particular job.
"""
self.update_args(job_config)
job_details = JobDetails(jobname=self.full_linkname,
jobkey=key,
appname=self.appname,
logfile=logfile,
job_config=job_config,
timestamp=get_timestamp(),
file_dict=copy.deepcopy(self.files),
sub_file_dict=copy.deepcopy(self.sub_files),
status=status)
return job_details |
def _psd_mask(x):
"""Computes whether each square matrix in the input is positive semi-definite.
Args:
x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.
Returns:
mask: A floating-point `Tensor` of shape `[B1, ... Bn]`. Each
scalar is 1 if the corresponding matrix was PSD, otherwise 0.
"""
# Allegedly
# https://scicomp.stackexchange.com/questions/12979/testing-if-a-matrix-is-positive-semi-definite
# it is more efficient to test for positive semi-definiteness by
# trying to compute the Cholesky decomposition -- the matrix is PSD
# if you succeed and not PSD if you fail. However, TensorFlow's
# Cholesky raises an exception if _any_ of the input matrices are
# not PSD, from which I don't know how to extract _which ones_, so I
# proceed by explicitly computing all the eigenvalues and checking
# whether they are all positive or not.
#
# Also, as was discussed in the answer, it is somewhat dangerous to
# treat SPD-ness as binary in floating-point arithmetic. Cholesky
# factorization can complete and 'look' like everything is fine
# (e.g., O(1) entries and a diagonal of all ones) but the matrix can
# have an exponential condition number.
eigenvalues, _ = tf.linalg.eigh(x)
return tf.cast(
tf.reduce_min(input_tensor=eigenvalues, axis=-1) >= 0, dtype=x.dtype) | Computes whether each square matrix in the input is positive semi-definite.
Args:
x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.
Returns:
mask: A floating-point `Tensor` of shape `[B1, ... Bn]`. Each
      scalar is 1 if the corresponding matrix was PSD, otherwise 0. | Below is the instruction that describes the task:
### Input:
Computes whether each square matrix in the input is positive semi-definite.
Args:
x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.
Returns:
mask: A floating-point `Tensor` of shape `[B1, ... Bn]`. Each
scalar is 1 if the corresponding matrix was PSD, otherwise 0.
### Response:
def _psd_mask(x):
"""Computes whether each square matrix in the input is positive semi-definite.
Args:
x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.
Returns:
mask: A floating-point `Tensor` of shape `[B1, ... Bn]`. Each
scalar is 1 if the corresponding matrix was PSD, otherwise 0.
"""
# Allegedly
# https://scicomp.stackexchange.com/questions/12979/testing-if-a-matrix-is-positive-semi-definite
# it is more efficient to test for positive semi-definiteness by
# trying to compute the Cholesky decomposition -- the matrix is PSD
# if you succeed and not PSD if you fail. However, TensorFlow's
# Cholesky raises an exception if _any_ of the input matrices are
# not PSD, from which I don't know how to extract _which ones_, so I
# proceed by explicitly computing all the eigenvalues and checking
# whether they are all positive or not.
#
# Also, as was discussed in the answer, it is somewhat dangerous to
# treat SPD-ness as binary in floating-point arithmetic. Cholesky
# factorization can complete and 'look' like everything is fine
# (e.g., O(1) entries and a diagonal of all ones) but the matrix can
# have an exponential condition number.
eigenvalues, _ = tf.linalg.eigh(x)
return tf.cast(
tf.reduce_min(input_tensor=eigenvalues, axis=-1) >= 0, dtype=x.dtype) |
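A small sketch for the _psd_mask record above, assuming TensorFlow 2.x with eager execution:
import tensorflow as tf
x = tf.constant([[[1., 0.], [0., 1.]],    # identity matrix: PSD
                 [[1., 2.], [2., 1.]]])   # eigenvalues -1 and 3: not PSD
print(_psd_mask(x))   # tf.Tensor([1. 0.], shape=(2,), dtype=float32)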
def _update_handlers(self):
"""Update `_handler_map` after `handlers` have been
modified."""
handler_map = defaultdict(list)
for i, obj in enumerate(self.handlers):
for dummy, handler in inspect.getmembers(obj, callable):
if not hasattr(handler, "_pyxmpp_event_handled"):
continue
# pylint: disable-msg=W0212
event_class = handler._pyxmpp_event_handled
handler_map[event_class].append( (i, handler) )
self._handler_map = handler_map | Update `_handler_map` after `handlers` have been
        modified. | Below is the instruction that describes the task:
### Input:
Update `_handler_map` after `handlers` have been
modified.
### Response:
def _update_handlers(self):
"""Update `_handler_map` after `handlers` have been
modified."""
handler_map = defaultdict(list)
for i, obj in enumerate(self.handlers):
for dummy, handler in inspect.getmembers(obj, callable):
if not hasattr(handler, "_pyxmpp_event_handled"):
continue
# pylint: disable-msg=W0212
event_class = handler._pyxmpp_event_handled
handler_map[event_class].append( (i, handler) )
self._handler_map = handler_map |
def ssh_sa_ssh_client_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
client = ET.SubElement(ssh, "client")
mac = ET.SubElement(client, "mac")
mac.text = kwargs.pop('mac')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def ssh_sa_ssh_client_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
client = ET.SubElement(ssh, "client")
mac = ET.SubElement(client, "mac")
mac.text = kwargs.pop('mac')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def set_canvas_properties(self, canvas, x_title=None, y_title=None, x_lim=None, y_lim=None, x_labels=True, y_labels=True):
"""!
@brief Set properties for specified canvas.
@param[in] canvas (uint): Index of canvas whose properties should changed.
@param[in] x_title (string): Title for X axis, if 'None', then nothing is displayed.
@param[in] y_title (string): Title for Y axis, if 'None', then nothing is displayed.
@param[in] x_lim (list): Defines borders of X axis like [from, to], for example [0, 3.14], if 'None' then
borders are calculated automatically.
@param[in] y_lim (list): Defines borders of Y axis like [from, to], if 'None' then borders are calculated
automatically.
@param[in] x_labels (bool): If True then labels of X axis are displayed.
@param[in] y_labels (bool): If True then labels of Y axis are displayed.
"""
self.__canvases[canvas] = canvas_descr(x_title, y_title, x_lim, y_lim, x_labels, y_labels); | !
@brief Set properties for specified canvas.
@param[in] canvas (uint): Index of canvas whose properties should changed.
@param[in] x_title (string): Title for X axis, if 'None', then nothing is displayed.
@param[in] y_title (string): Title for Y axis, if 'None', then nothing is displayed.
@param[in] x_lim (list): Defines borders of X axis like [from, to], for example [0, 3.14], if 'None' then
borders are calculated automatically.
@param[in] y_lim (list): Defines borders of Y axis like [from, to], if 'None' then borders are calculated
automatically.
@param[in] x_labels (bool): If True then labels of X axis are displayed.
        @param[in] y_labels (bool): If True then labels of Y axis are displayed. | Below is the instruction that describes the task:
### Input:
!
@brief Set properties for specified canvas.
@param[in] canvas (uint): Index of canvas whose properties should changed.
@param[in] x_title (string): Title for X axis, if 'None', then nothing is displayed.
@param[in] y_title (string): Title for Y axis, if 'None', then nothing is displayed.
@param[in] x_lim (list): Defines borders of X axis like [from, to], for example [0, 3.14], if 'None' then
borders are calculated automatically.
@param[in] y_lim (list): Defines borders of Y axis like [from, to], if 'None' then borders are calculated
automatically.
@param[in] x_labels (bool): If True then labels of X axis are displayed.
@param[in] y_labels (bool): If True then labels of Y axis are displayed.
### Response:
def set_canvas_properties(self, canvas, x_title=None, y_title=None, x_lim=None, y_lim=None, x_labels=True, y_labels=True):
"""!
@brief Set properties for specified canvas.
@param[in] canvas (uint): Index of canvas whose properties should changed.
@param[in] x_title (string): Title for X axis, if 'None', then nothing is displayed.
@param[in] y_title (string): Title for Y axis, if 'None', then nothing is displayed.
@param[in] x_lim (list): Defines borders of X axis like [from, to], for example [0, 3.14], if 'None' then
borders are calculated automatically.
@param[in] y_lim (list): Defines borders of Y axis like [from, to], if 'None' then borders are calculated
automatically.
@param[in] x_labels (bool): If True then labels of X axis are displayed.
@param[in] y_labels (bool): If True then labels of Y axis are displayed.
"""
self.__canvases[canvas] = canvas_descr(x_title, y_title, x_lim, y_lim, x_labels, y_labels); |
def iso_name_converter(iso):
'''
Converts the name of the given isotope (input), e.g., 'N-14' to
14N as used later to compare w/ grain database.
'''
sp = iso.split('-')
output = sp[1] + sp[0]
return output.lower() | Converts the name of the given isotope (input), e.g., 'N-14' to
    14N as used later to compare w/ grain database. | Below is the instruction that describes the task:
### Input:
Converts the name of the given isotope (input), e.g., 'N-14' to
14N as used later to compare w/ grain database.
### Response:
def iso_name_converter(iso):
'''
Converts the name of the given isotope (input), e.g., 'N-14' to
14N as used later to compare w/ grain database.
'''
sp = iso.split('-')
output = sp[1] + sp[0]
return output.lower() |
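A quick check of the iso_name_converter record above:
print(iso_name_converter('N-14'))    # 14n
print(iso_name_converter('Fe-56'))   # 56fe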