code (string, lengths 75 – 104k) | docstring (string, lengths 1 – 46.9k) | text (string, lengths 164 – 112k)
---|---|---
def image(self,path_img):
""" Open image file """
im_open = Image.open(path_img)
im = im_open.convert("RGB")
# Convert the RGB image in printable image
pix_line, img_size = self._convert_image(im)
self._print_image(pix_line, img_size) | Open image file | Below is the instruction that describes the task:
### Input:
Open image file
### Response:
def image(self,path_img):
""" Open image file """
im_open = Image.open(path_img)
im = im_open.convert("RGB")
# Convert the RGB image in printable image
pix_line, img_size = self._convert_image(im)
self._print_image(pix_line, img_size) |
def result(retn):
'''
Return a value or raise an exception from a retn tuple.
'''
ok, valu = retn
if ok:
return valu
name, info = valu
ctor = getattr(s_exc, name, None)
if ctor is not None:
raise ctor(**info)
info['errx'] = name
raise s_exc.SynErr(**info) | Return a value or raise an exception from a retn tuple. | Below is the instruction that describes the task:
### Input:
Return a value or raise an exception from a retn tuple.
### Response:
def result(retn):
'''
Return a value or raise an exception from a retn tuple.
'''
ok, valu = retn
if ok:
return valu
name, info = valu
ctor = getattr(s_exc, name, None)
if ctor is not None:
raise ctor(**info)
info['errx'] = name
raise s_exc.SynErr(**info) |
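The `result` helper above consumes an `(ok, valu)` "retn" tuple: on success it returns the value, on failure it rebuilds and raises an exception. A minimal stand-alone sketch of the same convention, using a plain exception in place of the codebase-specific `s_exc` module (which is assumed and not reproduced here):

```python
# Stand-in for the (ok, valu) "retn" convention; s_exc from the original
# codebase is replaced by a plain RuntimeError here.
def retnify(func, *args, **kwargs):
    """Run func and package its outcome as a retn tuple."""
    try:
        return (True, func(*args, **kwargs))
    except Exception as e:
        return (False, (type(e).__name__, {'mesg': str(e)}))

def result(retn):
    """Return the value or raise from a retn tuple (simplified version)."""
    ok, valu = retn
    if ok:
        return valu
    name, info = valu
    raise RuntimeError('{}: {}'.format(name, info['mesg']))

print(result(retnify(int, '42')))    # -> 42
# result(retnify(int, 'oops'))       # would raise RuntimeError('ValueError: ...')
```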
def getRowByIndex(self, index):
"""
Get row by numeric index.
Args:
index: Zero-based index of the row to get.
Returns:
The corresponding row.
"""
assert isinstance(index, int)
return Row(self._impl.getRowByIndex(index)) | Get row by numeric index.
Args:
index: Zero-based index of the row to get.
Returns:
The corresponding row. | Below is the instruction that describes the task:
### Input:
Get row by numeric index.
Args:
index: Zero-based index of the row to get.
Returns:
The corresponding row.
### Response:
def getRowByIndex(self, index):
"""
Get row by numeric index.
Args:
index: Zero-based index of the row to get.
Returns:
The corresponding row.
"""
assert isinstance(index, int)
return Row(self._impl.getRowByIndex(index)) |
def main(argString=None):
"""The main function of the module.
:param argString: the options.
:type argString: list
These are the steps:
1. Prints the options.
2. Reads the ``tfam`` and ``tped`` files and find all heterozygous and all
failed markers (:py:func:`processTPEDandTFAM`).
"""
# Getting and checking the options
args = parseArgs(argString)
checkArgs(args)
logger.info("Options used:")
for key, value in vars(args).iteritems():
logger.info(" --{} {}".format(key.replace("_", "-"), value))
# Process the TPED and TFAM file
logger.info("Processing the TPED and TFAM file")
processTPEDandTFAM(args.tfile + ".tped", args.tfile + ".tfam", args.out) | The main function of the module.
:param argString: the options.
:type argString: list
These are the steps:
1. Prints the options.
2. Reads the ``tfam`` and ``tped`` files and find all heterozygous and all
failed markers (:py:func:`processTPEDandTFAM`). | Below is the instruction that describes the task:
### Input:
The main function of the module.
:param argString: the options.
:type argString: list
These are the steps:
1. Prints the options.
2. Reads the ``tfam`` and ``tped`` files and find all heterozygous and all
failed markers (:py:func:`processTPEDandTFAM`).
### Response:
def main(argString=None):
"""The main function of the module.
:param argString: the options.
:type argString: list
These are the steps:
1. Prints the options.
2. Reads the ``tfam`` and ``tped`` files and find all heterozygous and all
failed markers (:py:func:`processTPEDandTFAM`).
"""
# Getting and checking the options
args = parseArgs(argString)
checkArgs(args)
logger.info("Options used:")
for key, value in vars(args).iteritems():
logger.info(" --{} {}".format(key.replace("_", "-"), value))
# Process the TPED and TFAM file
logger.info("Processing the TPED and TFAM file")
processTPEDandTFAM(args.tfile + ".tped", args.tfile + ".tfam", args.out) |
def workspace_create(ctx, clobber_mets, directory):
"""
Create a workspace with an empty METS file in DIRECTORY.
Use '.' for $PWD"
"""
workspace = ctx.resolver.workspace_from_nothing(
directory=os.path.abspath(directory),
mets_basename=ctx.mets_basename,
clobber_mets=clobber_mets
)
workspace.save_mets()
print(workspace.directory) | Create a workspace with an empty METS file in DIRECTORY.
Use '.' for $PWD" | Below is the instruction that describes the task:
### Input:
Create a workspace with an empty METS file in DIRECTORY.
Use '.' for $PWD"
### Response:
def workspace_create(ctx, clobber_mets, directory):
"""
Create a workspace with an empty METS file in DIRECTORY.
Use '.' for $PWD"
"""
workspace = ctx.resolver.workspace_from_nothing(
directory=os.path.abspath(directory),
mets_basename=ctx.mets_basename,
clobber_mets=clobber_mets
)
workspace.save_mets()
print(workspace.directory) |
def ctc_beam_search_decoder(probs_seq,
alphabet,
beam_size,
cutoff_prob=1.0,
cutoff_top_n=40,
scorer=None):
"""Wrapper for the CTC Beam Search Decoder.
:param probs_seq: 2-D list of probability distributions over each time
step, with each element being a list of normalized
probabilities over alphabet and blank.
:type probs_seq: 2-D list
:param alphabet: alphabet list.
:alphabet: Alphabet
:param beam_size: Width for beam search.
:type beam_size: int
:param cutoff_prob: Cutoff probability in pruning,
default 1.0, no pruning.
:type cutoff_prob: float
:param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n
characters with highest probs in alphabet will be
used in beam search, default 40.
:type cutoff_top_n: int
:param scorer: External scorer for partially decoded sentence, e.g. word
count or language model.
:type scorer: Scorer
:return: List of tuples of log probability and sentence as decoding
results, in descending order of the probability.
:rtype: list
"""
beam_results = swigwrapper.ctc_beam_search_decoder(
probs_seq, alphabet.config_file(), beam_size, cutoff_prob, cutoff_top_n,
scorer)
beam_results = [(res.probability, alphabet.decode(res.tokens)) for res in beam_results]
return beam_results | Wrapper for the CTC Beam Search Decoder.
:param probs_seq: 2-D list of probability distributions over each time
step, with each element being a list of normalized
probabilities over alphabet and blank.
:type probs_seq: 2-D list
:param alphabet: alphabet list.
:alphabet: Alphabet
:param beam_size: Width for beam search.
:type beam_size: int
:param cutoff_prob: Cutoff probability in pruning,
default 1.0, no pruning.
:type cutoff_prob: float
:param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n
characters with highest probs in alphabet will be
used in beam search, default 40.
:type cutoff_top_n: int
:param scorer: External scorer for partially decoded sentence, e.g. word
count or language model.
:type scorer: Scorer
:return: List of tuples of log probability and sentence as decoding
results, in descending order of the probability.
:rtype: list | Below is the instruction that describes the task:
### Input:
Wrapper for the CTC Beam Search Decoder.
:param probs_seq: 2-D list of probability distributions over each time
step, with each element being a list of normalized
probabilities over alphabet and blank.
:type probs_seq: 2-D list
:param alphabet: alphabet list.
:alphabet: Alphabet
:param beam_size: Width for beam search.
:type beam_size: int
:param cutoff_prob: Cutoff probability in pruning,
default 1.0, no pruning.
:type cutoff_prob: float
:param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n
characters with highest probs in alphabet will be
used in beam search, default 40.
:type cutoff_top_n: int
:param scorer: External scorer for partially decoded sentence, e.g. word
count or language model.
:type scorer: Scorer
:return: List of tuples of log probability and sentence as decoding
results, in descending order of the probability.
:rtype: list
### Response:
def ctc_beam_search_decoder(probs_seq,
alphabet,
beam_size,
cutoff_prob=1.0,
cutoff_top_n=40,
scorer=None):
"""Wrapper for the CTC Beam Search Decoder.
:param probs_seq: 2-D list of probability distributions over each time
step, with each element being a list of normalized
probabilities over alphabet and blank.
:type probs_seq: 2-D list
:param alphabet: alphabet list.
:alphabet: Alphabet
:param beam_size: Width for beam search.
:type beam_size: int
:param cutoff_prob: Cutoff probability in pruning,
default 1.0, no pruning.
:type cutoff_prob: float
:param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n
characters with highest probs in alphabet will be
used in beam search, default 40.
:type cutoff_top_n: int
:param scorer: External scorer for partially decoded sentence, e.g. word
count or language model.
:type scorer: Scorer
:return: List of tuples of log probability and sentence as decoding
results, in descending order of the probability.
:rtype: list
"""
beam_results = swigwrapper.ctc_beam_search_decoder(
probs_seq, alphabet.config_file(), beam_size, cutoff_prob, cutoff_top_n,
scorer)
beam_results = [(res.probability, alphabet.decode(res.tokens)) for res in beam_results]
return beam_results |
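In the decoder sample above, `probs_seq` is time-major: one row per frame, one normalized probability per alphabet symbol plus the CTC blank, and the returned list holds `(log probability, sentence)` tuples sorted best-first. The shape illustration below is only that; the `Alphabet` instance and the actual decoder call are assumed from the original package and left commented out.

```python
# Three time steps over a two-symbol alphabet ('a', 'b') plus the CTC blank.
probs_seq = [
    [0.60, 0.30, 0.10],   # t=0: P('a'), P('b'), P(blank)
    [0.10, 0.10, 0.80],   # t=1
    [0.20, 0.65, 0.15],   # t=2
]
# alphabet = Alphabet('alphabet.config')                                # assumed API
# results = ctc_beam_search_decoder(probs_seq, alphabet, beam_size=16)
# best_log_prob, best_sentence = results[0]                             # sorted best-first
```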
def shuffle(self, times=1):
"""
Shuffles the Stack.
.. note::
Shuffling large numbers of cards (100,000+) may take a while.
:arg int times:
The number of times to shuffle.
"""
for _ in xrange(times):
random.shuffle(self.cards) | Shuffles the Stack.
.. note::
Shuffling large numbers of cards (100,000+) may take a while.
:arg int times:
The number of times to shuffle. | Below is the instruction that describes the task:
### Input:
Shuffles the Stack.
.. note::
Shuffling large numbers of cards (100,000+) may take a while.
:arg int times:
The number of times to shuffle.
### Response:
def shuffle(self, times=1):
"""
Shuffles the Stack.
.. note::
Shuffling large numbers of cards (100,000+) may take a while.
:arg int times:
The number of times to shuffle.
"""
for _ in xrange(times):
random.shuffle(self.cards) |
def get_service_policy(host, username, password, service_name, protocol=None, port=None, host_names=None):
'''
Get the service name's policy for a given host or list of hosts.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
service_name
The name of the service for which to retrieve the policy. Supported service names are:
- DCUI
- TSM
- SSH
- lbtd
- lsassd
- lwiod
- netlogond
- ntpd
- sfcbd-watchdog
- snmpd
- vprobed
- vpxa
- xorg
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to tell
vCenter the hosts for which to get service policy information.
If host_names is not provided, the service policy information will be retrieved
for the ``host`` location instead. This is useful for when service instance
connection information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.get_service_policy my.esxi.host root bad-password 'ssh'
# Used for connecting to a vCenter Server
salt '*' vsphere.get_service_policy my.vcenter.location root bad-password 'ntpd' \
host_names='[esxi-1.host.com, esxi-2.host.com]'
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
password=password,
protocol=protocol,
port=port)
valid_services = ['DCUI', 'TSM', 'SSH', 'ssh', 'lbtd', 'lsassd', 'lwiod', 'netlogond',
'ntpd', 'sfcbd-watchdog', 'snmpd', 'vprobed', 'vpxa', 'xorg']
host_names = _check_hosts(service_instance, host, host_names)
ret = {}
for host_name in host_names:
# Check if the service_name provided is a valid one.
# If we don't have a valid service, return. The service will be invalid for all hosts.
if service_name not in valid_services:
ret.update({host_name: {'Error': '{0} is not a valid service name.'.format(service_name)}})
return ret
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
services = host_ref.configManager.serviceSystem.serviceInfo.service
# Don't require users to know that VMware lists the ssh service as TSM-SSH
if service_name == 'SSH' or service_name == 'ssh':
temp_service_name = 'TSM-SSH'
else:
temp_service_name = service_name
# Loop through services until we find a matching name
for service in services:
if service.key == temp_service_name:
ret.update({host_name:
{service_name: service.policy}})
# We've found a match - break out of the loop so we don't overwrite the
# Updated host_name value with an error message.
break
else:
msg = 'Could not find service \'{0}\' for host \'{1}\'.'.format(service_name,
host_name)
ret.update({host_name: {'Error': msg}})
# If we made it this far, something else has gone wrong.
if ret.get(host_name) is None:
msg = '\'vsphere.get_service_policy\' failed for host {0}.'.format(host_name)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
return ret | Get the service name's policy for a given host or list of hosts.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
service_name
The name of the service for which to retrieve the policy. Supported service names are:
- DCUI
- TSM
- SSH
- lbtd
- lsassd
- lwiod
- netlogond
- ntpd
- sfcbd-watchdog
- snmpd
- vprobed
- vpxa
- xorg
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to tell
vCenter the hosts for which to get service policy information.
If host_names is not provided, the service policy information will be retrieved
for the ``host`` location instead. This is useful for when service instance
connection information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.get_service_policy my.esxi.host root bad-password 'ssh'
# Used for connecting to a vCenter Server
salt '*' vsphere.get_service_policy my.vcenter.location root bad-password 'ntpd' \
host_names='[esxi-1.host.com, esxi-2.host.com]' | Below is the instruction that describes the task:
### Input:
Get the service name's policy for a given host or list of hosts.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
service_name
The name of the service for which to retrieve the policy. Supported service names are:
- DCUI
- TSM
- SSH
- lbtd
- lsassd
- lwiod
- netlogond
- ntpd
- sfcbd-watchdog
- snmpd
- vprobed
- vpxa
- xorg
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to tell
vCenter the hosts for which to get service policy information.
If host_names is not provided, the service policy information will be retrieved
for the ``host`` location instead. This is useful for when service instance
connection information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.get_service_policy my.esxi.host root bad-password 'ssh'
# Used for connecting to a vCenter Server
salt '*' vsphere.get_service_policy my.vcenter.location root bad-password 'ntpd' \
host_names='[esxi-1.host.com, esxi-2.host.com]'
### Response:
def get_service_policy(host, username, password, service_name, protocol=None, port=None, host_names=None):
'''
Get the service name's policy for a given host or list of hosts.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
service_name
The name of the service for which to retrieve the policy. Supported service names are:
- DCUI
- TSM
- SSH
- lbtd
- lsassd
- lwiod
- netlogond
- ntpd
- sfcbd-watchdog
- snmpd
- vprobed
- vpxa
- xorg
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to tell
vCenter the hosts for which to get service policy information.
If host_names is not provided, the service policy information will be retrieved
for the ``host`` location instead. This is useful for when service instance
connection information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.get_service_policy my.esxi.host root bad-password 'ssh'
# Used for connecting to a vCenter Server
salt '*' vsphere.get_service_policy my.vcenter.location root bad-password 'ntpd' \
host_names='[esxi-1.host.com, esxi-2.host.com]'
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
password=password,
protocol=protocol,
port=port)
valid_services = ['DCUI', 'TSM', 'SSH', 'ssh', 'lbtd', 'lsassd', 'lwiod', 'netlogond',
'ntpd', 'sfcbd-watchdog', 'snmpd', 'vprobed', 'vpxa', 'xorg']
host_names = _check_hosts(service_instance, host, host_names)
ret = {}
for host_name in host_names:
# Check if the service_name provided is a valid one.
# If we don't have a valid service, return. The service will be invalid for all hosts.
if service_name not in valid_services:
ret.update({host_name: {'Error': '{0} is not a valid service name.'.format(service_name)}})
return ret
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
services = host_ref.configManager.serviceSystem.serviceInfo.service
# Don't require users to know that VMware lists the ssh service as TSM-SSH
if service_name == 'SSH' or service_name == 'ssh':
temp_service_name = 'TSM-SSH'
else:
temp_service_name = service_name
# Loop through services until we find a matching name
for service in services:
if service.key == temp_service_name:
ret.update({host_name:
{service_name: service.policy}})
# We've found a match - break out of the loop so we don't overwrite the
# Updated host_name value with an error message.
break
else:
msg = 'Could not find service \'{0}\' for host \'{1}\'.'.format(service_name,
host_name)
ret.update({host_name: {'Error': msg}})
# If we made it this far, something else has gone wrong.
if ret.get(host_name) is None:
msg = '\'vsphere.get_service_policy\' failed for host {0}.'.format(host_name)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
return ret |
def _collapse_variants_by_function(graph: BELGraph, func: str) -> None:
"""Collapse all of the given functions' variants' edges to their parents, in-place."""
for parent_node, variant_node, data in graph.edges(data=True):
if data[RELATION] == HAS_VARIANT and parent_node.function == func:
collapse_pair(graph, from_node=variant_node, to_node=parent_node) | Collapse all of the given functions' variants' edges to their parents, in-place. | Below is the instruction that describes the task:
### Input:
Collapse all of the given functions' variants' edges to their parents, in-place.
### Response:
def _collapse_variants_by_function(graph: BELGraph, func: str) -> None:
"""Collapse all of the given functions' variants' edges to their parents, in-place."""
for parent_node, variant_node, data in graph.edges(data=True):
if data[RELATION] == HAS_VARIANT and parent_node.function == func:
collapse_pair(graph, from_node=variant_node, to_node=parent_node) |
def associate(self, id_option_vip, id_environment_vip):
"""Create a relationship of OptionVip with EnvironmentVip.
:param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
:param id_environment_vip: Identifier of the Environment VIP. Integer value and greater than zero.
:return: Following dictionary
::
{'opcoesvip_ambiente_xref': {'id': < id_opcoesvip_ambiente_xref >} }
:raise InvalidParameterError: Option VIP/Environment VIP identifier is null and/or invalid.
:raise OptionVipNotFoundError: Option VIP not registered.
:raise EnvironmentVipNotFoundError: Environment VIP not registered.
:raise OptionVipError: Option vip is already associated with the environment vip.
:raise UserNotAuthorizedError: User does not have authorization to make this association.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_option_vip):
raise InvalidParameterError(
u'The identifier of Option VIP is invalid or was not informed.')
if not is_valid_int_param(id_environment_vip):
raise InvalidParameterError(
u'The identifier of Environment VIP is invalid or was not informed.')
url = 'optionvip/' + \
str(id_option_vip) + '/environmentvip/' + str(id_environment_vip) + '/'
code, xml = self.submit(None, 'PUT', url)
return self.response(code, xml) | Create a relationship of OptionVip with EnvironmentVip.
:param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
:param id_environment_vip: Identifier of the Environment VIP. Integer value and greater than zero.
:return: Following dictionary
::
{'opcoesvip_ambiente_xref': {'id': < id_opcoesvip_ambiente_xref >} }
:raise InvalidParameterError: Option VIP/Environment VIP identifier is null and/or invalid.
:raise OptionVipNotFoundError: Option VIP not registered.
:raise EnvironmentVipNotFoundError: Environment VIP not registered.
:raise OptionVipError: Option vip is already associated with the environment vip.
:raise UserNotAuthorizedError: User does not have authorization to make this association.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. | Below is the instruction that describes the task:
### Input:
Create a relationship of OptionVip with EnvironmentVip.
:param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
:param id_environment_vip: Identifier of the Environment VIP. Integer value and greater than zero.
:return: Following dictionary
::
{'opcoesvip_ambiente_xref': {'id': < id_opcoesvip_ambiente_xref >} }
:raise InvalidParameterError: Option VIP/Environment VIP identifier is null and/or invalid.
:raise OptionVipNotFoundError: Option VIP not registered.
:raise EnvironmentVipNotFoundError: Environment VIP not registered.
:raise OptionVipError: Option vip is already associated with the environment vip.
:raise UserNotAuthorizedError: User does not have authorization to make this association.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
### Response:
def associate(self, id_option_vip, id_environment_vip):
"""Create a relationship of OptionVip with EnvironmentVip.
:param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
:param id_environment_vip: Identifier of the Environment VIP. Integer value and greater than zero.
:return: Following dictionary
::
{'opcoesvip_ambiente_xref': {'id': < id_opcoesvip_ambiente_xref >} }
:raise InvalidParameterError: Option VIP/Environment VIP identifier is null and/or invalid.
:raise OptionVipNotFoundError: Option VIP not registered.
:raise EnvironmentVipNotFoundError: Environment VIP not registered.
:raise OptionVipError: Option vip is already associated with the environment vip.
:raise UserNotAuthorizedError: User does not have authorization to make this association.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_option_vip):
raise InvalidParameterError(
u'The identifier of Option VIP is invalid or was not informed.')
if not is_valid_int_param(id_environment_vip):
raise InvalidParameterError(
u'The identifier of Environment VIP is invalid or was not informed.')
url = 'optionvip/' + \
str(id_option_vip) + '/environmentvip/' + str(id_environment_vip) + '/'
code, xml = self.submit(None, 'PUT', url)
return self.response(code, xml) |
def keyPressEvent(self, event):
"""
Listens for the escape key to cancel out from this snapshot.
:param event | <QKeyPressEvent>
"""
# reject on a cancel
if event.key() == Qt.Key_Escape:
self.reject()
super(XSnapshotWidget, self).keyPressEvent(event) | Listens for the escape key to cancel out from this snapshot.
:param event | <QKeyPressEvent> | Below is the instruction that describes the task:
### Input:
Listens for the escape key to cancel out from this snapshot.
:param event | <QKeyPressEvent>
### Response:
def keyPressEvent(self, event):
"""
Listens for the escape key to cancel out from this snapshot.
:param event | <QKeyPressEvent>
"""
# reject on a cancel
if event.key() == Qt.Key_Escape:
self.reject()
super(XSnapshotWidget, self).keyPressEvent(event) |
def add_triple(
self,
subj: Union[URIRef, str],
pred: Union[URIRef, str],
obj: Union[URIRef, Literal, str]
) -> None:
""" Adds triple to rdflib Graph
Triple can be of any subject, predicate, and object of the entity without a need for order.
Args:
subj: Entity subject
pred: Entity predicate
obj: Entity object
Example:
In [1]: add_triple(
...: 'http://uri.interlex.org/base/ilx_0101431',
...: RDF.type,
...: 'http://www.w3.org/2002/07/owl#Class')
...: )
"""
if obj in [None, "", " "]: return # Empty objects are bad practice
_subj = self.process_subj_or_pred(subj)
_pred = self.process_subj_or_pred(pred)
_obj = self.process_obj(obj)
self.g.add( (_subj, _pred, _obj) ) | Adds triple to rdflib Graph
Triple can be of any subject, predicate, and object of the entity without a need for order.
Args:
subj: Entity subject
pred: Entity predicate
obj: Entity object
Example:
In [1]: add_triple(
...: 'http://uri.interlex.org/base/ilx_0101431',
...: RDF.type,
...: 'http://www.w3.org/2002/07/owl#Class')
...: ) | Below is the instruction that describes the task:
### Input:
Adds triple to rdflib Graph
Triple can be of any subject, predicate, and object of the entity without a need for order.
Args:
subj: Entity subject
pred: Entity predicate
obj: Entity object
Example:
In [1]: add_triple(
...: 'http://uri.interlex.org/base/ilx_0101431',
...: RDF.type,
...: 'http://www.w3.org/2002/07/owl#Class')
...: )
### Response:
def add_triple(
self,
subj: Union[URIRef, str],
pred: Union[URIRef, str],
obj: Union[URIRef, Literal, str]
) -> None:
""" Adds triple to rdflib Graph
Triple can be of any subject, predicate, and object of the entity without a need for order.
Args:
subj: Entity subject
pred: Entity predicate
obj: Entity object
Example:
In [1]: add_triple(
...: 'http://uri.interlex.org/base/ilx_0101431',
...: RDF.type,
...: 'http://www.w3.org/2002/07/owl#Class')
...: )
"""
if obj in [None, "", " "]: return # Empty objects are bad practice
_subj = self.process_subj_or_pred(subj)
_pred = self.process_subj_or_pred(pred)
_obj = self.process_obj(obj)
self.g.add( (_subj, _pred, _obj) ) |
def send_immediately(self, message, fail_silently=False):
"""Send a message immediately, outside the transaction manager.
If there is a connection error to the mail server this will have to
be handled manually. However if you pass ``fail_silently`` the error
will be swallowed.
:versionadded: 0.3
:param message: a 'Message' instance.
:param fail_silently: silently handle connection errors.
"""
try:
return self.smtp_mailer.send(*self._message_args(message))
except smtplib.socket.error:
if not fail_silently:
raise | Send a message immediately, outside the transaction manager.
If there is a connection error to the mail server this will have to
be handled manually. However if you pass ``fail_silently`` the error
will be swallowed.
:versionadded: 0.3
:param message: a 'Message' instance.
:param fail_silently: silently handle connection errors. | Below is the instruction that describes the task:
### Input:
Send a message immediately, outside the transaction manager.
If there is a connection error to the mail server this will have to
be handled manually. However if you pass ``fail_silently`` the error
will be swallowed.
:versionadded: 0.3
:param message: a 'Message' instance.
:param fail_silently: silently handle connection errors.
### Response:
def send_immediately(self, message, fail_silently=False):
"""Send a message immediately, outside the transaction manager.
If there is a connection error to the mail server this will have to
be handled manually. However if you pass ``fail_silently`` the error
will be swallowed.
:versionadded: 0.3
:param message: a 'Message' instance.
:param fail_silently: silently handle connection errors.
"""
try:
return self.smtp_mailer.send(*self._message_args(message))
except smtplib.socket.error:
if not fail_silently:
raise |
def adjust_for_registry_api_versions(self):
"""
Enable/disable plugins depending on supported registry API versions
"""
versions = self.spec.registry_api_versions.value
if 'v2' not in versions:
raise OsbsValidationException('v1-only docker registry API is not supported')
try:
push_conf = self.dj.dock_json_get_plugin_conf('postbuild_plugins',
'tag_and_push')
tag_and_push_registries = push_conf['args']['registries']
except (KeyError, IndexError):
tag_and_push_registries = {}
if 'v1' not in versions:
# Remove v1-only plugins
for phase, name in [('postbuild_plugins', 'pulp_push')]:
logger.info("removing v1-only plugin: %s", name)
self.dj.remove_plugin(phase, name)
# remove extra tag_and_push config
self.remove_tag_and_push_registries(tag_and_push_registries, 'v1')
# Remove 'version' from tag_and_push plugin config as it's no
# longer needed
for regdict in tag_and_push_registries.values():
if 'version' in regdict:
del regdict['version'] | Enable/disable plugins depending on supported registry API versions | Below is the instruction that describes the task:
### Input:
Enable/disable plugins depending on supported registry API versions
### Response:
def adjust_for_registry_api_versions(self):
"""
Enable/disable plugins depending on supported registry API versions
"""
versions = self.spec.registry_api_versions.value
if 'v2' not in versions:
raise OsbsValidationException('v1-only docker registry API is not supported')
try:
push_conf = self.dj.dock_json_get_plugin_conf('postbuild_plugins',
'tag_and_push')
tag_and_push_registries = push_conf['args']['registries']
except (KeyError, IndexError):
tag_and_push_registries = {}
if 'v1' not in versions:
# Remove v1-only plugins
for phase, name in [('postbuild_plugins', 'pulp_push')]:
logger.info("removing v1-only plugin: %s", name)
self.dj.remove_plugin(phase, name)
# remove extra tag_and_push config
self.remove_tag_and_push_registries(tag_and_push_registries, 'v1')
# Remove 'version' from tag_and_push plugin config as it's no
# longer needed
for regdict in tag_and_push_registries.values():
if 'version' in regdict:
del regdict['version'] |
def load(self, **kwargs):
"""Method to list the UCS on the system
Since this is only fixed in 12.1.0 and up
we implemented version check here
"""
# Check if we are using 12.1.0 version or above when using this method
self._is_version_supported_method('12.1.0')
newinst = self._stamp_out_core()
newinst._refresh(**kwargs)
return newinst | Method to list the UCS on the system
Since this is only fixed in 12.1.0 and up
we implemented version check here | Below is the instruction that describes the task:
### Input:
Method to list the UCS on the system
Since this is only fixed in 12.1.0 and up
we implemented version check here
### Response:
def load(self, **kwargs):
"""Method to list the UCS on the system
Since this is only fixed in 12.1.0 and up
we implemented version check here
"""
# Check if we are using 12.1.0 version or above when using this method
self._is_version_supported_method('12.1.0')
newinst = self._stamp_out_core()
newinst._refresh(**kwargs)
return newinst |
def setHorCrossPlotAutoRangeOn(self, axisNumber):
""" Sets the horizontal cross-hair plot's auto-range on for the axis with number axisNumber.
:param axisNumber: 0 (X-axis), 1 (Y-axis), 2, (Both X and Y axes).
"""
setXYAxesAutoRangeOn(self, self.xAxisRangeCti, self.horCrossPlotRangeCti, axisNumber) | Sets the horizontal cross-hair plot's auto-range on for the axis with number axisNumber.
:param axisNumber: 0 (X-axis), 1 (Y-axis), 2, (Both X and Y axes). | Below is the the instruction that describes the task:
### Input:
Sets the horizontal cross-hair plot's auto-range on for the axis with number axisNumber.
:param axisNumber: 0 (X-axis), 1 (Y-axis), 2, (Both X and Y axes).
### Response:
def setHorCrossPlotAutoRangeOn(self, axisNumber):
""" Sets the horizontal cross-hair plot's auto-range on for the axis with number axisNumber.
:param axisNumber: 0 (X-axis), 1 (Y-axis), 2, (Both X and Y axes).
"""
setXYAxesAutoRangeOn(self, self.xAxisRangeCti, self.horCrossPlotRangeCti, axisNumber) |
def percentile(data, n):
"""Return the n-th percentile of the given data
Assume that the data are already sorted
"""
size = len(data)
idx = (n / 100.0) * size - 0.5
if idx < 0 or idx > size:
raise StatisticsError("Too few data points ({}) for {}th percentile".format(size, n))
return data[int(idx)] | Return the n-th percentile of the given data
Assume that the data are already sorted | Below is the instruction that describes the task:
### Input:
Return the n-th percentile of the given data
Assume that the data are already sorted
### Response:
def percentile(data, n):
"""Return the n-th percentile of the given data
Assume that the data are already sorted
"""
size = len(data)
idx = (n / 100.0) * size - 0.5
if idx < 0 or idx > size:
raise StatisticsError("Too few data points ({}) for {}th percentile".format(size, n))
return data[int(idx)] |
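A quick worked check of the index arithmetic in the `percentile` sample above: with 10 sorted values and n = 50, `idx = 0.5 * 10 - 0.5 = 4.5`, so `int(idx)` is 4 and the fifth-smallest value is returned.

```python
# Worked check of idx = (n / 100.0) * size - 0.5 from the sample above.
data = sorted([12, 7, 3, 9, 15, 1, 8, 11, 5, 14])   # [1, 3, 5, 7, 8, 9, 11, 12, 14, 15]
size = len(data)                                     # 10
idx = (50 / 100.0) * size - 0.5                      # 4.5
print(data[int(idx)])                                # data[4] -> 8
```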
def files(self):
"""files that will be add to tar file later
should be tuple, list or generator that returns strings
"""
ios_names = [info.name for info in self._ios_to_add.keys()]
return set(self.files_to_add + ios_names) | files that will be add to tar file later
should be tuple, list or generator that returns strings | Below is the instruction that describes the task:
### Input:
files that will be add to tar file later
should be tuple, list or generator that returns strings
### Response:
def files(self):
"""files that will be add to tar file later
should be tuple, list or generator that returns strings
"""
ios_names = [info.name for info in self._ios_to_add.keys()]
return set(self.files_to_add + ios_names) |
def propose(self):
"""
Proposals for positive definite matrix using random walk deviations on the Cholesky
factor of the current value.
"""
# Locally store size of matrix
dims = self.stochastic.value.shape
# Add normal deviate to value and symmetrize
dev = rnormal(
0,
self.adaptive_scale_factor *
self.proposal_sd,
size=dims)
symmetrize(dev)
# Replace
self.stochastic.value = dev + self.stochastic.value | Proposals for positive definite matrix using random walk deviations on the Cholesky
factor of the current value. | Below is the instruction that describes the task:
### Input:
Proposals for positive definite matrix using random walk deviations on the Cholesky
factor of the current value.
### Response:
def propose(self):
"""
Proposals for positive definite matrix using random walk deviations on the Cholesky
factor of the current value.
"""
# Locally store size of matrix
dims = self.stochastic.value.shape
# Add normal deviate to value and symmetrize
dev = rnormal(
0,
self.adaptive_scale_factor *
self.proposal_sd,
size=dims)
symmetrize(dev)
# Replace
self.stochastic.value = dev + self.stochastic.value |
def safe_extract_proto_from_ipfs(ipfs_client, ipfs_hash, protodir):
"""
Tar files might be dangerous (see https://bugs.python.org/issue21109,
and https://docs.python.org/3/library/tarfile.html, TarFile.extractall warning)
we extract only simple files
"""
spec_tar = get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash)
with tarfile.open(fileobj=io.BytesIO(spec_tar)) as f:
for m in f.getmembers():
if (os.path.dirname(m.name) != ""):
raise Exception("tarball has directories. We do not support it.")
if (not m.isfile()):
raise Exception("tarball contains %s which is not a files"%m.name)
fullname = os.path.join(protodir, m.name)
if (os.path.exists(fullname)):
raise Exception("%s already exists."%fullname)
# now it is safe to call extractall
f.extractall(protodir) | Tar files might be dangerous (see https://bugs.python.org/issue21109,
and https://docs.python.org/3/library/tarfile.html, TarFile.extractall warning)
we extract only simple files | Below is the instruction that describes the task:
### Input:
Tar files might be dangerous (see https://bugs.python.org/issue21109,
and https://docs.python.org/3/library/tarfile.html, TarFile.extractall warning)
we extract only simple files
### Response:
def safe_extract_proto_from_ipfs(ipfs_client, ipfs_hash, protodir):
"""
Tar files might be dangerous (see https://bugs.python.org/issue21109,
and https://docs.python.org/3/library/tarfile.html, TarFile.extractall warning)
we extract only simple files
"""
spec_tar = get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash)
with tarfile.open(fileobj=io.BytesIO(spec_tar)) as f:
for m in f.getmembers():
if (os.path.dirname(m.name) != ""):
raise Exception("tarball has directories. We do not support it.")
if (not m.isfile()):
raise Exception("tarball contains %s which is not a files"%m.name)
fullname = os.path.join(protodir, m.name)
if (os.path.exists(fullname)):
raise Exception("%s already exists."%fullname)
# now it is safe to call extractall
f.extractall(protodir) |
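The safety constraints enforced in `safe_extract_proto_from_ipfs` above (flat tarball, regular files only, no pre-existing targets) can be exercised with the standard library alone; the sketch below builds a one-file tarball in memory in place of the bytes fetched from IPFS and applies the same member checks before any `extractall` call.

```python
import io
import os
import tarfile

# Build a tiny flat tarball in memory (stands in for the IPFS payload).
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w') as tf:
    payload = b'syntax = "proto3";\n'
    info = tarfile.TarInfo(name='service.proto')
    info.size = len(payload)
    tf.addfile(info, io.BytesIO(payload))
buf.seek(0)

# Same member checks as the sample above, before extractall would run.
with tarfile.open(fileobj=buf) as f:
    for m in f.getmembers():
        if os.path.dirname(m.name) != "":
            raise Exception("tarball has directories. We do not support it.")
        if not m.isfile():
            raise Exception("tarball contains %s which is not a file" % m.name)
    print("safe to extract:", [m.name for m in f.getmembers()])
```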
def get_input_kwargs(self, key=None, default=None):
"""
Deprecated. Use `get_catalog_info` instead.
Get information from the catalog config file.
If *key* is `None`, return the full dict.
"""
warnings.warn("`get_input_kwargs` is deprecated; use `get_catalog_info` instead.", DeprecationWarning)
return self.get_catalog_info(key, default) | Deprecated. Use `get_catalog_info` instead.
Get information from the catalog config file.
If *key* is `None`, return the full dict. | Below is the instruction that describes the task:
### Input:
Deprecated. Use `get_catalog_info` instead.
Get information from the catalog config file.
If *key* is `None`, return the full dict.
### Response:
def get_input_kwargs(self, key=None, default=None):
"""
Deprecated. Use `get_catalog_info` instead.
Get information from the catalog config file.
If *key* is `None`, return the full dict.
"""
warnings.warn("`get_input_kwargs` is deprecated; use `get_catalog_info` instead.", DeprecationWarning)
return self.get_catalog_info(key, default) |
def get_tweet(self, id):
"""
Get an existing tweet.
:param id: ID of the tweet in question
:return: Tweet object. None if not found
"""
try:
return Tweet(self._client.get_status(id=id)._json)
except TweepError as e:
if e.api_code == TWITTER_TWEET_NOT_FOUND_ERROR:
return None
raise | Get an existing tweet.
:param id: ID of the tweet in question
:return: Tweet object. None if not found | Below is the instruction that describes the task:
### Input:
Get an existing tweet.
:param id: ID of the tweet in question
:return: Tweet object. None if not found
### Response:
def get_tweet(self, id):
"""
Get an existing tweet.
:param id: ID of the tweet in question
:return: Tweet object. None if not found
"""
try:
return Tweet(self._client.get_status(id=id)._json)
except TweepError as e:
if e.api_code == TWITTER_TWEET_NOT_FOUND_ERROR:
return None
raise |
def read_config(config_path_or_dict=None):
"""
Read config from given path string or dict object.
:param config_path_or_dict:
:type config_path_or_dict: str or dict
:return: Returns config object or None if not found.
:rtype: :class:`revision.config.Config`
"""
config = None
if isinstance(config_path_or_dict, dict):
config = Config(config_path_or_dict)
if isinstance(config_path_or_dict, string_types):
if os.path.isabs(config_path_or_dict):
config_path = config_path_or_dict
else:
config_path = os.path.join(
os.getcwd(),
os.path.normpath(config_path_or_dict)
)
else:
config_path = os.path.join(
os.getcwd(),
DEFAULT_CONFIG_PATH
)
if os.path.exists(config_path):
with open(config_path, 'r') as f:
data = json.load(f)
config = Config(data)
if config is None:
raise ConfigNotFound()
else:
config.validate()
return config | Read config from given path string or dict object.
:param config_path_or_dict:
:type config_path_or_dict: str or dict
:return: Returns config object or None if not found.
:rtype: :class:`revision.config.Config` | Below is the instruction that describes the task:
### Input:
Read config from given path string or dict object.
:param config_path_or_dict:
:type config_path_or_dict: str or dict
:return: Returns config object or None if not found.
:rtype: :class:`revision.config.Config`
### Response:
def read_config(config_path_or_dict=None):
"""
Read config from given path string or dict object.
:param config_path_or_dict:
:type config_path_or_dict: str or dict
:return: Returns config object or None if not found.
:rtype: :class:`revision.config.Config`
"""
config = None
if isinstance(config_path_or_dict, dict):
config = Config(config_path_or_dict)
if isinstance(config_path_or_dict, string_types):
if os.path.isabs(config_path_or_dict):
config_path = config_path_or_dict
else:
config_path = os.path.join(
os.getcwd(),
os.path.normpath(config_path_or_dict)
)
else:
config_path = os.path.join(
os.getcwd(),
DEFAULT_CONFIG_PATH
)
if os.path.exists(config_path):
with open(config_path, 'r') as f:
data = json.load(f)
config = Config(data)
if config is None:
raise ConfigNotFound()
else:
config.validate()
return config |
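The branching in `read_config` above boils down to a path-resolution rule: absolute paths are used as-is, relative paths are joined onto the current working directory, and with no argument a package default is used. A stand-alone sketch of just that rule (`DEFAULT_CONFIG_PATH` below is a placeholder; the real constant and the `Config` wrapper live in the original package):

```python
import os

DEFAULT_CONFIG_PATH = 'revision.json'   # placeholder; the real default lives in the package

def resolve_config_path(config_path=None):
    """Mirror the path-resolution branch of read_config, without the Config wrapper."""
    if isinstance(config_path, str):
        if os.path.isabs(config_path):
            return config_path
        return os.path.join(os.getcwd(), os.path.normpath(config_path))
    return os.path.join(os.getcwd(), DEFAULT_CONFIG_PATH)

print(resolve_config_path('conf/revision.json'))   # <cwd>/conf/revision.json
print(resolve_config_path())                       # <cwd>/revision.json
```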
def delay(self, wait, *args):
"""
Delays a function for the given number of milliseconds, and then calls
it with the arguments supplied.
"""
def call_it():
self.obj(*args)
t = Timer((float(wait) / float(1000)), call_it)
t.start()
return self._wrap(self.obj) | Delays a function for the given number of milliseconds, and then calls
it with the arguments supplied. | Below is the instruction that describes the task:
### Input:
Delays a function for the given number of milliseconds, and then calls
it with the arguments supplied.
### Response:
def delay(self, wait, *args):
"""
Delays a function for the given number of milliseconds, and then calls
it with the arguments supplied.
"""
def call_it():
self.obj(*args)
t = Timer((float(wait) / float(1000)), call_it)
t.start()
return self._wrap(self.obj) |
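The `delay` sample above is an underscore.js-style helper built on `threading.Timer`; stripped of the `_wrap` chaining it reduces to the stand-alone form below.

```python
from threading import Timer

def delay(func, wait_ms, *args):
    """Call func(*args) after roughly wait_ms milliseconds."""
    t = Timer(float(wait_ms) / 1000.0, func, args=args)
    t.start()
    return t

timer = delay(print, 200, 'fired after ~200 ms')
timer.join()   # only so the example waits for the call before exiting
```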
def update(self, td):
"""Update state of ball"""
self.sprite.last_position = self.sprite.position
self.sprite.last_velocity = self.sprite.velocity
if self.particle_group != None:
self.update_particle_group(td) | Update state of ball | Below is the instruction that describes the task:
### Input:
Update state of ball
### Response:
def update(self, td):
"""Update state of ball"""
self.sprite.last_position = self.sprite.position
self.sprite.last_velocity = self.sprite.velocity
if self.particle_group != None:
self.update_particle_group(td) |
async def DestroyMachines(self, force, machine_names):
'''
force : bool
machine_names : typing.Sequence[str]
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Client',
request='DestroyMachines',
version=1,
params=_params)
_params['force'] = force
_params['machine-names'] = machine_names
reply = await self.rpc(msg)
return reply | force : bool
machine_names : typing.Sequence[str]
Returns -> None | Below is the instruction that describes the task:
### Input:
force : bool
machine_names : typing.Sequence[str]
Returns -> None
### Response:
async def DestroyMachines(self, force, machine_names):
'''
force : bool
machine_names : typing.Sequence[str]
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Client',
request='DestroyMachines',
version=1,
params=_params)
_params['force'] = force
_params['machine-names'] = machine_names
reply = await self.rpc(msg)
return reply |
def where_entry_date(query, datespec):
""" Where clause for entries which match a textual date spec
datespec -- The date spec to check for, in YYYY[[-]MM[[-]DD]] format
"""
date, interval, _ = utils.parse_date(datespec)
start_date, end_date = date.span(interval)
return orm.select(
e for e in query if
e.local_date >= start_date.naive and
e.local_date <= end_date.naive
) | Where clause for entries which match a textual date spec
datespec -- The date spec to check for, in YYYY[[-]MM[[-]DD]] format | Below is the instruction that describes the task:
### Input:
Where clause for entries which match a textual date spec
datespec -- The date spec to check for, in YYYY[[-]MM[[-]DD]] format
### Response:
def where_entry_date(query, datespec):
""" Where clause for entries which match a textual date spec
datespec -- The date spec to check for, in YYYY[[-]MM[[-]DD]] format
"""
date, interval, _ = utils.parse_date(datespec)
start_date, end_date = date.span(interval)
return orm.select(
e for e in query if
e.local_date >= start_date.naive and
e.local_date <= end_date.naive
) |
def run(self, data, rewrap=False, prefetch=0):
"""
Wires the pipeline and returns a lazy object of
the transformed data.
:param data: must be an iterable, where a full document
must be returned for each loop
:param rewrap: (optional) is a bool that indicates the need to rewrap
data in cases where iterating over it produces undesired results,
for instance ``dict`` instances.
:param prefetch: (optional) is an int defining the number of items to
be prefetched once the pipeline starts yielding data. The
default prefetching mechanism is based on threads, so be
careful with CPU-bound processing pipelines.
"""
if rewrap:
data = [data]
for _filter in self._filters:
_filter.feed(data)
data = _filter
else:
iterable = self._prefetch_callable(data, prefetch) if prefetch else data
for out_data in iterable:
yield out_data | Wires the pipeline and returns a lazy object of
the transformed data.
:param data: must be an iterable, where a full document
must be returned for each loop
:param rewrap: (optional) is a bool that indicates the need to rewrap
data in cases where iterating over it produces undesired results,
for instance ``dict`` instances.
:param prefetch: (optional) is an int defining the number of items to
be prefetched once the pipeline starts yielding data. The
default prefetching mechanism is based on threads, so be
careful with CPU-bound processing pipelines. | Below is the instruction that describes the task:
### Input:
Wires the pipeline and returns a lazy object of
the transformed data.
:param data: must be an iterable, where a full document
must be returned for each loop
:param rewrap: (optional) is a bool that indicates the need to rewrap
data in cases where iterating over it produces undesired results,
for instance ``dict`` instances.
:param prefetch: (optional) is an int defining the number of items to
be prefetched once the pipeline starts yielding data. The
default prefetching mechanism is based on threads, so be
careful with CPU-bound processing pipelines.
### Response:
def run(self, data, rewrap=False, prefetch=0):
"""
Wires the pipeline and returns a lazy object of
the transformed data.
:param data: must be an iterable, where a full document
must be returned for each loop
:param rewrap: (optional) is a bool that indicates the need to rewrap
data in cases where iterating over it produces undesired results,
for instance ``dict`` instances.
:param prefetch: (optional) is an int defining the number of items to
be prefetched once the pipeline starts yielding data. The
default prefetching mechanism is based on threads, so be
careful with CPU-bound processing pipelines.
"""
if rewrap:
data = [data]
for _filter in self._filters:
_filter.feed(data)
data = _filter
else:
iterable = self._prefetch_callable(data, prefetch) if prefetch else data
for out_data in iterable:
yield out_data |
def _get_answer(self, part):
"""
Note: Answers are only revealed after a correct submission. If you've
have not already solved the puzzle, AocdError will be raised.
"""
answer_fname = getattr(self, "answer_{}_fname".format(part))
if os.path.isfile(answer_fname):
with open(answer_fname) as f:
return f.read().strip()
# scrape puzzle page for any previously solved answers
response = requests.get(self.url, cookies=self._cookies, headers=self._headers)
response.raise_for_status()
soup = bs4.BeautifulSoup(response.text, "html.parser")
if not self._title:
# may as well save this while we're here
self._save_title(soup=soup)
hit = "Your puzzle answer was"
paras = [p for p in soup.find_all("p") if p.text.startswith(hit)]
if paras:
parta_correct_answer = paras[0].code.text
self._save_correct_answer(value=parta_correct_answer, part="a")
if len(paras) > 1:
_p1, p2 = paras
partb_correct_answer = p2.code.text
self._save_correct_answer(value=partb_correct_answer, part="b")
if os.path.isfile(answer_fname):
with open(answer_fname) as f:
return f.read().strip()
msg = "Answer {}-{}{} is not available".format(self.year, self.day, part)
raise PuzzleUnsolvedError(msg) | Note: Answers are only revealed after a correct submission. If you've
have not already solved the puzzle, AocdError will be raised. | Below is the instruction that describes the task:
### Input:
Note: Answers are only revealed after a correct submission. If you've
have not already solved the puzzle, AocdError will be raised.
### Response:
def _get_answer(self, part):
"""
Note: Answers are only revealed after a correct submission. If you've
have not already solved the puzzle, AocdError will be raised.
"""
answer_fname = getattr(self, "answer_{}_fname".format(part))
if os.path.isfile(answer_fname):
with open(answer_fname) as f:
return f.read().strip()
# scrape puzzle page for any previously solved answers
response = requests.get(self.url, cookies=self._cookies, headers=self._headers)
response.raise_for_status()
soup = bs4.BeautifulSoup(response.text, "html.parser")
if not self._title:
# may as well save this while we're here
self._save_title(soup=soup)
hit = "Your puzzle answer was"
paras = [p for p in soup.find_all("p") if p.text.startswith(hit)]
if paras:
parta_correct_answer = paras[0].code.text
self._save_correct_answer(value=parta_correct_answer, part="a")
if len(paras) > 1:
_p1, p2 = paras
partb_correct_answer = p2.code.text
self._save_correct_answer(value=partb_correct_answer, part="b")
if os.path.isfile(answer_fname):
with open(answer_fname) as f:
return f.read().strip()
msg = "Answer {}-{}{} is not available".format(self.year, self.day, part)
raise PuzzleUnsolvedError(msg) |
def find(self, *strings, **kwargs):
"""
Search the entire editor for lines that match the string.
.. code-block:: Python
string = '''word one
word two
three'''
ed = Editor(string)
ed.find('word') # [(0, "word one"), (1, "word two")]
ed.find('word', 'three') # {'word': [...], 'three': [(2, "three")]}
Args:
strings (str): Any number of strings to search for
keys_only (bool): Only return keys
start (int): Optional line to start searching on
stop (int): Optional line to stop searching on
Returns:
results: If multiple strings searched a dictionary of string key, (line number, line) values (else just values)
"""
start = kwargs.pop("start", 0)
stop = kwargs.pop("stop", None)
keys_only = kwargs.pop("keys_only", False)
results = {string: [] for string in strings}
stop = len(self) if stop is None else stop
for i, line in enumerate(self[start:stop]):
for string in strings:
if string in line:
if keys_only:
results[string].append(i)
else:
results[string].append((i, line))
if len(strings) == 1:
return results[strings[0]]
return results | Search the entire editor for lines that match the string.
.. code-block:: Python
string = '''word one
word two
three'''
ed = Editor(string)
ed.find('word') # [(0, "word one"), (1, "word two")]
ed.find('word', 'three') # {'word': [...], 'three': [(2, "three")]}
Args:
strings (str): Any number of strings to search for
keys_only (bool): Only return keys
start (int): Optional line to start searching on
stop (int): Optional line to stop searching on
Returns:
results: If multiple strings searched a dictionary of string key, (line number, line) values (else just values) | Below is the instruction that describes the task:
### Input:
Search the entire editor for lines that match the string.
.. code-block:: Python
string = '''word one
word two
three'''
ed = Editor(string)
ed.find('word') # [(0, "word one"), (1, "word two")]
ed.find('word', 'three') # {'word': [...], 'three': [(2, "three")]}
Args:
strings (str): Any number of strings to search for
keys_only (bool): Only return keys
start (int): Optional line to start searching on
stop (int): Optional line to stop searching on
Returns:
results: If multiple strings searched a dictionary of string key, (line number, line) values (else just values)
### Response:
def find(self, *strings, **kwargs):
"""
Search the entire editor for lines that match the string.
.. code-block:: Python
string = '''word one
word two
three'''
ed = Editor(string)
ed.find('word') # [(0, "word one"), (1, "word two")]
ed.find('word', 'three') # {'word': [...], 'three': [(2, "three")]}
Args:
strings (str): Any number of strings to search for
keys_only (bool): Only return keys
start (int): Optional line to start searching on
stop (int): Optional line to stop searching on
Returns:
results: If multiple strings searched a dictionary of string key, (line number, line) values (else just values)
"""
start = kwargs.pop("start", 0)
stop = kwargs.pop("stop", None)
keys_only = kwargs.pop("keys_only", False)
results = {string: [] for string in strings}
stop = len(self) if stop is None else stop
for i, line in enumerate(self[start:stop]):
for string in strings:
if string in line:
if keys_only:
results[string].append(i)
else:
results[string].append((i, line))
if len(strings) == 1:
return results[strings[0]]
return results |
def download_from_url(source, destination, progress=False, uncompress=False):
"""Download a file from an URL and place it somewhere. Like wget.
Uses requests and tqdm to display progress if you want.
By default it will uncompress files.
#TODO: handle case where destination is a directory"""
# Modules #
from tqdm import tqdm
import requests
from autopaths.file_path import FilePath
# Check destination exists #
destination = FilePath(destination)
destination.directory.create_if_not_exists()
# Over HTTP #
response = requests.get(source, stream=True)
total_size = int(response.headers.get('content-length'))
block_size = int(total_size/1024)
# Do it #
with open(destination, "wb") as handle:
if progress:
for data in tqdm(response.iter_content(chunk_size=block_size), total=1024): handle.write(data)
else:
for data in response.iter_content(chunk_size=block_size): handle.write(data)
# Uncompress #
if uncompress:
with open(destination) as f: header = f.read(4)
if header == "PK\x03\x04": unzip(destination, inplace=True)
# Add other compression formats here
# Return #
return destination | Download a file from an URL and place it somewhere. Like wget.
Uses requests and tqdm to display progress if you want.
By default it will uncompress files.
#TODO: handle case where destination is a directory | Below is the instruction that describes the task:
### Input:
Download a file from an URL and place it somewhere. Like wget.
Uses requests and tqdm to display progress if you want.
By default it will uncompress files.
#TODO: handle case where destination is a directory
### Response:
def download_from_url(source, destination, progress=False, uncompress=False):
"""Download a file from an URL and place it somewhere. Like wget.
Uses requests and tqdm to display progress if you want.
By default it will uncompress files.
#TODO: handle case where destination is a directory"""
# Modules #
from tqdm import tqdm
import requests
from autopaths.file_path import FilePath
# Check destination exists #
destination = FilePath(destination)
destination.directory.create_if_not_exists()
# Over HTTP #
response = requests.get(source, stream=True)
total_size = int(response.headers.get('content-length'))
block_size = int(total_size/1024)
# Do it #
with open(destination, "wb") as handle:
if progress:
for data in tqdm(response.iter_content(chunk_size=block_size), total=1024): handle.write(data)
else:
for data in response.iter_content(chunk_size=block_size): handle.write(data)
# Uncompress #
if uncompress:
with open(destination) as f: header = f.read(4)
if header == "PK\x03\x04": unzip(destination, inplace=True)
# Add other compression formats here
# Return #
return destination |
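A hedged usage sketch; the URL and destination below are placeholders, and the server is assumed to send a Content-Length header since the function splits it into 1024 blocks.
# Hypothetical call -- the URL and path are made up for illustration.
dest = download_from_url(
    "https://example.com/files/archive.zip",  # any HTTP(S) source reporting Content-Length
    "/tmp/archive.zip",                       # parent directory is created if missing
    progress=True,                            # stream with a tqdm progress bar
    uncompress=True,                          # unzip afterwards if the ZIP magic bytes are found
)
print(dest)  # the FilePath-wrapped destination path is returned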
def fill(self, passage=None, xpath=None):
""" Fill the xpath with given informations
:param passage: CapitainsCtsPassage reference
:type passage: CtsReference or list or None. Can be list of None and not None
:param xpath: If set to True, will return the replaced self.xpath value and not the whole self.refsDecl
:type xpath: Boolean
:rtype: basestring
:returns: Xpath to find the passage
.. code-block:: python
citation = XmlCtsCitation(name="line", scope="/TEI/text/body/div/div[@n=\"?\"]",xpath="//l[@n=\"?\"]")
print(citation.fill(["1", None]))
# /TEI/text/body/div/div[@n='1']//l[@n]
print(citation.fill(None))
# /TEI/text/body/div/div[@n]//l[@n]
            print(citation.fill(CtsReference("1.1")))
            # /TEI/text/body/div/div[@n='1']//l[@n='1']
            print(citation.fill("1", xpath=True))
# //l[@n='1']
"""
if xpath is True: # Then passage is a string or None
xpath = self.xpath
replacement = r"\1"
if isinstance(passage, str):
replacement = r"\1\2'" + passage + "'"
return REFERENCE_REPLACER.sub(replacement, xpath)
else:
if isinstance(passage, CtsReference):
passage = passage.start.list
elif passage is None:
return REFERENCE_REPLACER.sub(
r"\1",
self.refsDecl
)
passage = iter(passage)
return REFERENCE_REPLACER.sub(
lambda m: _ref_replacer(m, passage),
self.refsDecl
            ) | Fill the xpath with the given information
:param passage: CapitainsCtsPassage reference
:type passage: CtsReference or list or None. Can be list of None and not None
:param xpath: If set to True, will return the replaced self.xpath value and not the whole self.refsDecl
:type xpath: Boolean
:rtype: basestring
:returns: Xpath to find the passage
.. code-block:: python
citation = XmlCtsCitation(name="line", scope="/TEI/text/body/div/div[@n=\"?\"]",xpath="//l[@n=\"?\"]")
print(citation.fill(["1", None]))
# /TEI/text/body/div/div[@n='1']//l[@n]
print(citation.fill(None))
# /TEI/text/body/div/div[@n]//l[@n]
    print(citation.fill(CtsReference("1.1")))
    # /TEI/text/body/div/div[@n='1']//l[@n='1']
    print(citation.fill("1", xpath=True))
# //l[@n='1'] | Below is the the instruction that describes the task:
### Input:
Fill the xpath with the given information
:param passage: CapitainsCtsPassage reference
:type passage: CtsReference or list or None. Can be list of None and not None
:param xpath: If set to True, will return the replaced self.xpath value and not the whole self.refsDecl
:type xpath: Boolean
:rtype: basestring
:returns: Xpath to find the passage
.. code-block:: python
citation = XmlCtsCitation(name="line", scope="/TEI/text/body/div/div[@n=\"?\"]",xpath="//l[@n=\"?\"]")
print(citation.fill(["1", None]))
# /TEI/text/body/div/div[@n='1']//l[@n]
print(citation.fill(None))
# /TEI/text/body/div/div[@n]//l[@n]
    print(citation.fill(CtsReference("1.1")))
    # /TEI/text/body/div/div[@n='1']//l[@n='1']
    print(citation.fill("1", xpath=True))
# //l[@n='1']
### Response:
def fill(self, passage=None, xpath=None):
""" Fill the xpath with given informations
:param passage: CapitainsCtsPassage reference
:type passage: CtsReference or list or None. Can be list of None and not None
:param xpath: If set to True, will return the replaced self.xpath value and not the whole self.refsDecl
:type xpath: Boolean
:rtype: basestring
:returns: Xpath to find the passage
.. code-block:: python
citation = XmlCtsCitation(name="line", scope="/TEI/text/body/div/div[@n=\"?\"]",xpath="//l[@n=\"?\"]")
print(citation.fill(["1", None]))
# /TEI/text/body/div/div[@n='1']//l[@n]
print(citation.fill(None))
# /TEI/text/body/div/div[@n]//l[@n]
            print(citation.fill(CtsReference("1.1")))
            # /TEI/text/body/div/div[@n='1']//l[@n='1']
            print(citation.fill("1", xpath=True))
# //l[@n='1']
"""
if xpath is True: # Then passage is a string or None
xpath = self.xpath
replacement = r"\1"
if isinstance(passage, str):
replacement = r"\1\2'" + passage + "'"
return REFERENCE_REPLACER.sub(replacement, xpath)
else:
if isinstance(passage, CtsReference):
passage = passage.start.list
elif passage is None:
return REFERENCE_REPLACER.sub(
r"\1",
self.refsDecl
)
passage = iter(passage)
return REFERENCE_REPLACER.sub(
lambda m: _ref_replacer(m, passage),
self.refsDecl
) |
def find_idx_by_threshold(self, threshold, train=False, valid=False, xval=False):
"""
Retrieve the index in this metric's threshold list at which the given threshold is located.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param float threshold: Threshold value to search for in the threshold list.
:param bool train: If True, return the find idx by threshold value for the training data.
:param bool valid: If True, return the find idx by threshold value for the validation data.
:param bool xval: If True, return the find idx by threshold value for each of the cross-validated splits.
:returns: The find idx by threshold values for the specified key(s).
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm):
m[k] = None if v is None else v.find_idx_by_threshold(threshold)
return list(m.values())[0] if len(m) == 1 else m | Retrieve the index in this metric's threshold list at which the given threshold is located.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param float threshold: Threshold value to search for in the threshold list.
:param bool train: If True, return the find idx by threshold value for the training data.
:param bool valid: If True, return the find idx by threshold value for the validation data.
:param bool xval: If True, return the find idx by threshold value for each of the cross-validated splits.
:returns: The find idx by threshold values for the specified key(s). | Below is the the instruction that describes the task:
### Input:
Retrieve the index in this metric's threshold list at which the given threshold is located.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param float threshold: Threshold value to search for in the threshold list.
:param bool train: If True, return the find idx by threshold value for the training data.
:param bool valid: If True, return the find idx by threshold value for the validation data.
:param bool xval: If True, return the find idx by threshold value for each of the cross-validated splits.
:returns: The find idx by threshold values for the specified key(s).
### Response:
def find_idx_by_threshold(self, threshold, train=False, valid=False, xval=False):
"""
Retrieve the index in this metric's threshold list at which the given threshold is located.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param float threshold: Threshold value to search for in the threshold list.
:param bool train: If True, return the find idx by threshold value for the training data.
:param bool valid: If True, return the find idx by threshold value for the validation data.
:param bool xval: If True, return the find idx by threshold value for each of the cross-validated splits.
:returns: The find idx by threshold values for the specified key(s).
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm):
m[k] = None if v is None else v.find_idx_by_threshold(threshold)
return list(m.values())[0] if len(m) == 1 else m |
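An illustrative call, assuming `model` is an already trained H2O classification model that inherits this ModelBase method; the threshold value is arbitrary.
# Hypothetical usage -- `model` is assumed to be a trained H2O binomial model.
idx = model.find_idx_by_threshold(0.5, train=True)                    # one key -> plain value
per_split = model.find_idx_by_threshold(0.5, train=True, valid=True)  # several keys -> dict
# per_split looks like {"train": <index or None>, "valid": <index or None>}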
def cost_min2(self, alpha):
"""Residual formulation, Hessian is a low-rank update of the identity.
"""
n = self.V.dim()
ax = alpha[:n]
ay = alpha[n:]
# ml = pyamg.ruge_stuben_solver(self.L)
# # ml = pyamg.smoothed_aggregation_solver(self.L)
# print(ml)
# print()
# print(self.L)
# print()
# x = ml.solve(ax, tol=1e-10)
# print('residual: {}'.format(numpy.linalg.norm(ax - self.L*x)))
# print()
# print(ax)
# print()
# print(x)
# exit(1)
# x = sparse.linalg.spsolve(self.L, ax)
# print('residual: {}'.format(numpy.linalg.norm(ax - self.L*x)))
# exit(1)
q2, r2 = self.get_q2_r2(ax, ay)
Lax = self.L * ax
Lay = self.L * ay
out = [
0.5 * numpy.dot(Lax, Lax),
0.5 * numpy.dot(Lay, Lay),
0.5 * numpy.dot(q2 - 1, q2 - 1),
0.5 * numpy.dot(r2, r2),
]
if self.num_f_eval % 10000 == 0:
print("{:7d} {:e} {:e} {:e} {:e}".format(self.num_f_eval, *out))
self.num_f_eval += 1
return numpy.sum(out) | Residual formulation, Hessian is a low-rank update of the identity. | Below is the the instruction that describes the task:
### Input:
Residual formulation, Hessian is a low-rank update of the identity.
### Response:
def cost_min2(self, alpha):
"""Residual formulation, Hessian is a low-rank update of the identity.
"""
n = self.V.dim()
ax = alpha[:n]
ay = alpha[n:]
# ml = pyamg.ruge_stuben_solver(self.L)
# # ml = pyamg.smoothed_aggregation_solver(self.L)
# print(ml)
# print()
# print(self.L)
# print()
# x = ml.solve(ax, tol=1e-10)
# print('residual: {}'.format(numpy.linalg.norm(ax - self.L*x)))
# print()
# print(ax)
# print()
# print(x)
# exit(1)
# x = sparse.linalg.spsolve(self.L, ax)
# print('residual: {}'.format(numpy.linalg.norm(ax - self.L*x)))
# exit(1)
q2, r2 = self.get_q2_r2(ax, ay)
Lax = self.L * ax
Lay = self.L * ay
out = [
0.5 * numpy.dot(Lax, Lax),
0.5 * numpy.dot(Lay, Lay),
0.5 * numpy.dot(q2 - 1, q2 - 1),
0.5 * numpy.dot(r2, r2),
]
if self.num_f_eval % 10000 == 0:
print("{:7d} {:e} {:e} {:e} {:e}".format(self.num_f_eval, *out))
self.num_f_eval += 1
return numpy.sum(out) |
def set_embeddings(self, embeddings):
'''
Specifies fixed set of embeddings
:param embeddings: array-like, sparse or dense, shape should be (embedding size, # terms)
:return: EmbeddingsResolver
'''
if self.embeddings_ is not None:
raise Exception("You have already set embeddings by running set_embeddings or set_embeddings_model.")
assert embeddings.shape[1] == self.corpus_.get_num_terms()
self.embeddings_ = embeddings.T
self.vocab_ = self.corpus_.get_terms()
return self | Specifies fixed set of embeddings
:param embeddings: array-like, sparse or dense, shape should be (embedding size, # terms)
:return: EmbeddingsResolver | Below is the the instruction that describes the task:
### Input:
Specifies fixed set of embeddings
:param embeddings: array-like, sparse or dense, shape should be (embedding size, # terms)
:return: EmbeddingsResolver
### Response:
def set_embeddings(self, embeddings):
'''
Specifies fixed set of embeddings
:param embeddings: array-like, sparse or dense, shape should be (embedding size, # terms)
:return: EmbeddingsResolver
'''
if self.embeddings_ is not None:
raise Exception("You have already set embeddings by running set_embeddings or set_embeddings_model.")
assert embeddings.shape[1] == self.corpus_.get_num_terms()
self.embeddings_ = embeddings.T
self.vocab_ = self.corpus_.get_terms()
return self |
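An illustrative sketch, assuming `resolver` is an EmbeddingsResolver built on the same corpus; the random matrix only shows the expected orientation of (embedding size, number of terms).
import numpy as np
# Hypothetical objects -- `resolver` and `corpus` are assumed to exist already.
embedding_size = 300
fake_embeddings = np.random.rand(embedding_size, corpus.get_num_terms())
resolver.set_embeddings(fake_embeddings)  # stored transposed, so each row maps to one term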
def on_quit(self, connection, event):
"""
Someone left the channel - send the nicknames list to the
WebSocket.
"""
nickname = self.get_nickname(event)
nickname_color = self.nicknames[nickname]
del self.nicknames[nickname]
self.namespace.emit("message", nickname, "leaves", nickname_color)
self.emit_nicknames() | Someone left the channel - send the nicknames list to the
WebSocket. | Below is the the instruction that describes the task:
### Input:
Someone left the channel - send the nicknames list to the
WebSocket.
### Response:
def on_quit(self, connection, event):
"""
Someone left the channel - send the nicknames list to the
WebSocket.
"""
nickname = self.get_nickname(event)
nickname_color = self.nicknames[nickname]
del self.nicknames[nickname]
self.namespace.emit("message", nickname, "leaves", nickname_color)
self.emit_nicknames() |
def _open_ok(self, args):
"""
signal that the connection is ready
This method signals to the client that the connection is ready
for use.
PARAMETERS:
known_hosts: shortstr
"""
self.known_hosts = args.read_shortstr()
AMQP_LOGGER.debug('Open OK! known_hosts [%s]' % self.known_hosts)
return None | signal that the connection is ready
This method signals to the client that the connection is ready
for use.
PARAMETERS:
known_hosts: shortstr | Below is the the instruction that describes the task:
### Input:
signal that the connection is ready
This method signals to the client that the connection is ready
for use.
PARAMETERS:
known_hosts: shortstr
### Response:
def _open_ok(self, args):
"""
signal that the connection is ready
This method signals to the client that the connection is ready
for use.
PARAMETERS:
known_hosts: shortstr
"""
self.known_hosts = args.read_shortstr()
AMQP_LOGGER.debug('Open OK! known_hosts [%s]' % self.known_hosts)
return None |
def on_configparser_dumps(self, configparser, config, dictionary, **kwargs):
""" The :mod:`configparser` dumps method.
:param module configparser: The ``configparser`` module
:param class config: The instance's config class
:param dict dictionary: The dictionary instance to serialize
:param str root: The top-level section of the ini file,
defaults to ``config.__name__``, optional
:param str delimiter: The delimiter character used for representing nested
dictionaries, defaults to ":", optional
:return: The ini serialization of the given ``dictionary``
:rtype: str
"""
root_section = kwargs.pop("root")
if not isinstance(root_section, str):
root_section = config.__name__
delimiter = kwargs.pop("delimiter", ":")
if delimiter in root_section:
warnings.warn(
f"root section {root_section!r} contains delimiter character "
f"{delimiter!r}, loading from the resulting content will likely fail"
)
try:
return INIParser.from_dict(
dictionary,
root_section=root_section,
delimiter=kwargs.pop("delimiter", ":"),
empty_sections=kwargs.pop("empty_sections", False),
).to_ini()
except ValueError:
raise ValueError("INI cannot handle this config, try using toml instead") | The :mod:`configparser` dumps method.
:param module configparser: The ``configparser`` module
:param class config: The instance's config class
:param dict dictionary: The dictionary instance to serialize
:param str root: The top-level section of the ini file,
defaults to ``config.__name__``, optional
:param str delimiter: The delimiter character used for representing nested
dictionaries, defaults to ":", optional
:return: The ini serialization of the given ``dictionary``
:rtype: str | Below is the the instruction that describes the task:
### Input:
The :mod:`configparser` dumps method.
:param module configparser: The ``configparser`` module
:param class config: The instance's config class
:param dict dictionary: The dictionary instance to serialize
:param str root: The top-level section of the ini file,
defaults to ``config.__name__``, optional
:param str delimiter: The delimiter character used for representing nested
dictionaries, defaults to ":", optional
:return: The ini serialization of the given ``dictionary``
:rtype: str
### Response:
def on_configparser_dumps(self, configparser, config, dictionary, **kwargs):
""" The :mod:`configparser` dumps method.
:param module configparser: The ``configparser`` module
:param class config: The instance's config class
:param dict dictionary: The dictionary instance to serialize
:param str root: The top-level section of the ini file,
defaults to ``config.__name__``, optional
:param str delimiter: The delimiter character used for representing nested
dictionaries, defaults to ":", optional
:return: The ini serialization of the given ``dictionary``
:rtype: str
"""
root_section = kwargs.pop("root")
if not isinstance(root_section, str):
root_section = config.__name__
delimiter = kwargs.pop("delimiter", ":")
if delimiter in root_section:
warnings.warn(
f"root section {root_section!r} contains delimiter character "
f"{delimiter!r}, loading from the resulting content will likely fail"
)
try:
return INIParser.from_dict(
dictionary,
root_section=root_section,
delimiter=kwargs.pop("delimiter", ":"),
empty_sections=kwargs.pop("empty_sections", False),
).to_ini()
except ValueError:
raise ValueError("INI cannot handle this config, try using toml instead") |
def get_author_and_version(package):
"""
    Return package author and version as listed in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
author = re.search("__author__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
version = re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
    return author, version | Return package author and version as listed in `__init__.py`. | Below is the the instruction that describes the task:
### Input:
Return package author and version as listed in `__init__.py`.
### Response:
def get_author_and_version(package):
"""
    Return package author and version as listed in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
author = re.search("__author__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
version = re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
return author, version |
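A self-contained check of the two regular expressions on a fabricated `__init__.py` body (no package on disk is needed to exercise the regex part).
import re
sample_init = '__author__ = "Jane Doe"\n__version__ = "1.2.3"\n'   # fabricated file contents
author = re.search("__author__ = ['\"]([^'\"]+)['\"]", sample_init).group(1)
version = re.search("__version__ = ['\"]([^'\"]+)['\"]", sample_init).group(1)
print(author, version)  # Jane Doe 1.2.3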
def run_instance_jmap(cluster, environ, topology, instance, role=None):
'''
:param cluster:
:param environ:
:param topology:
:param instance:
:param role:
:return:
'''
params = dict(
cluster=cluster,
environ=environ,
topology=topology,
instance=instance)
if role is not None:
params['role'] = role
request_url = tornado.httputil.url_concat(
create_url(JMAP_URL_FMT), params)
if role is not None:
request_url = tornado.httputil.url_concat(request_url, dict(role=role))
raise tornado.gen.Return((yield fetch_url_as_json(request_url))) | :param cluster:
:param environ:
:param topology:
:param instance:
:param role:
:return: | Below is the the instruction that describes the task:
### Input:
:param cluster:
:param environ:
:param topology:
:param instance:
:param role:
:return:
### Response:
def run_instance_jmap(cluster, environ, topology, instance, role=None):
'''
:param cluster:
:param environ:
:param topology:
:param instance:
:param role:
:return:
'''
params = dict(
cluster=cluster,
environ=environ,
topology=topology,
instance=instance)
if role is not None:
params['role'] = role
request_url = tornado.httputil.url_concat(
create_url(JMAP_URL_FMT), params)
if role is not None:
request_url = tornado.httputil.url_concat(request_url, dict(role=role))
raise tornado.gen.Return((yield fetch_url_as_json(request_url))) |
def pgettext(self, context, string, domain=None, **variables):
"""Like :meth:`gettext` but with a context."""
t = self.get_translations(domain)
return t.upgettext(context, string) % variables | Like :meth:`gettext` but with a context. | Below is the the instruction that describes the task:
### Input:
Like :meth:`gettext` but with a context.
### Response:
def pgettext(self, context, string, domain=None, **variables):
"""Like :meth:`gettext` but with a context."""
t = self.get_translations(domain)
return t.upgettext(context, string) % variables |
def cd(cls, directory):
"""Change directory. It behaves like "cd directory"."""
Log.debug('CMD: cd {0}'.format(directory))
os.chdir(directory) | Change directory. It behaves like "cd directory". | Below is the the instruction that describes the task:
### Input:
Change directory. It behaves like "cd directory".
### Response:
def cd(cls, directory):
"""Change directory. It behaves like "cd directory"."""
Log.debug('CMD: cd {0}'.format(directory))
os.chdir(directory) |
def write(self, obj, **kwargs):
""" write it as a collection of individual sparse series """
super().write(obj, **kwargs)
for name, ss in obj.items():
key = 'sparse_series_{name}'.format(name=name)
if key not in self.group._v_children:
node = self._handle.create_group(self.group, key)
else:
node = getattr(self.group, key)
s = SparseSeriesFixed(self.parent, node)
s.write(ss)
self.attrs.default_fill_value = obj.default_fill_value
self.attrs.default_kind = obj.default_kind
self.write_index('columns', obj.columns) | write it as a collection of individual sparse series | Below is the the instruction that describes the task:
### Input:
write it as a collection of individual sparse series
### Response:
def write(self, obj, **kwargs):
""" write it as a collection of individual sparse series """
super().write(obj, **kwargs)
for name, ss in obj.items():
key = 'sparse_series_{name}'.format(name=name)
if key not in self.group._v_children:
node = self._handle.create_group(self.group, key)
else:
node = getattr(self.group, key)
s = SparseSeriesFixed(self.parent, node)
s.write(ss)
self.attrs.default_fill_value = obj.default_fill_value
self.attrs.default_kind = obj.default_kind
self.write_index('columns', obj.columns) |
def _ggplot(df, out_file):
"""Plot faceted items with ggplot wrapper on top of matplotlib.
XXX Not yet functional
"""
import ggplot as gg
df["variant.type"] = [vtype_labels[x] for x in df["variant.type"]]
df["category"] = [cat_labels[x] for x in df["category"]]
df["caller"] = [caller_labels.get(x, None) for x in df["caller"]]
p = (gg.ggplot(df, gg.aes(x="caller", y="value.floor")) + gg.geom_bar()
+ gg.facet_wrap("variant.type", "category")
+ gg.theme_seaborn())
gg.ggsave(p, out_file) | Plot faceted items with ggplot wrapper on top of matplotlib.
XXX Not yet functional | Below is the the instruction that describes the task:
### Input:
Plot faceted items with ggplot wrapper on top of matplotlib.
XXX Not yet functional
### Response:
def _ggplot(df, out_file):
"""Plot faceted items with ggplot wrapper on top of matplotlib.
XXX Not yet functional
"""
import ggplot as gg
df["variant.type"] = [vtype_labels[x] for x in df["variant.type"]]
df["category"] = [cat_labels[x] for x in df["category"]]
df["caller"] = [caller_labels.get(x, None) for x in df["caller"]]
p = (gg.ggplot(df, gg.aes(x="caller", y="value.floor")) + gg.geom_bar()
+ gg.facet_wrap("variant.type", "category")
+ gg.theme_seaborn())
gg.ggsave(p, out_file) |
def init_class(self, class_, step_func=None):
"""
This method simulates the loading of a class by the JVM, during which
parts of the class (e.g. static fields) are initialized. For this, we
run the class initializer method <clinit> (if available) and update
the state accordingly.
Note: Initialization is skipped, if the class has already been
initialized (or if it's not loaded in CLE).
"""
if self.is_class_initialized(class_):
l.debug("Class %r already initialized.", class_)
return
l.debug("Initialize class %r.", class_)
self.initialized_classes.add(class_)
if not class_.is_loaded:
l.warning("Class %r is not loaded in CLE. Skip initializiation.", class_)
return
clinit_method = resolve_method(self.state, '<clinit>', class_.name,
include_superclasses=False, init_class=False)
if clinit_method.is_loaded:
javavm_simos = self.state.project.simos
clinit_state = javavm_simos.state_call(addr=SootAddressDescriptor(clinit_method, 0, 0),
base_state=self.state,
ret_addr=SootAddressTerminator())
simgr = self.state.project.factory.simgr(clinit_state)
l.info(">"*15 + " Run class initializer %r ... " + ">"*15, clinit_method)
simgr.run(step_func=step_func)
l.debug("<"*15 + " Run class initializer %r ... done " + "<"*15, clinit_method)
# The only thing that can be updated during initialization are
# static or rather global information, which are either stored on
# the heap or in the vm_static_table
self.state.memory.vm_static_table = simgr.deadended[-1].memory.vm_static_table.copy()
self.state.memory.heap = simgr.deadended[-1].memory.heap.copy()
else:
l.debug("Class initializer <clinit> is not loaded in CLE. Skip initializiation.") | This method simulates the loading of a class by the JVM, during which
parts of the class (e.g. static fields) are initialized. For this, we
run the class initializer method <clinit> (if available) and update
the state accordingly.
Note: Initialization is skipped, if the class has already been
initialized (or if it's not loaded in CLE). | Below is the the instruction that describes the task:
### Input:
This method simulates the loading of a class by the JVM, during which
parts of the class (e.g. static fields) are initialized. For this, we
run the class initializer method <clinit> (if available) and update
the state accordingly.
Note: Initialization is skipped, if the class has already been
initialized (or if it's not loaded in CLE).
### Response:
def init_class(self, class_, step_func=None):
"""
This method simulates the loading of a class by the JVM, during which
parts of the class (e.g. static fields) are initialized. For this, we
run the class initializer method <clinit> (if available) and update
the state accordingly.
Note: Initialization is skipped, if the class has already been
initialized (or if it's not loaded in CLE).
"""
if self.is_class_initialized(class_):
l.debug("Class %r already initialized.", class_)
return
l.debug("Initialize class %r.", class_)
self.initialized_classes.add(class_)
if not class_.is_loaded:
l.warning("Class %r is not loaded in CLE. Skip initializiation.", class_)
return
clinit_method = resolve_method(self.state, '<clinit>', class_.name,
include_superclasses=False, init_class=False)
if clinit_method.is_loaded:
javavm_simos = self.state.project.simos
clinit_state = javavm_simos.state_call(addr=SootAddressDescriptor(clinit_method, 0, 0),
base_state=self.state,
ret_addr=SootAddressTerminator())
simgr = self.state.project.factory.simgr(clinit_state)
l.info(">"*15 + " Run class initializer %r ... " + ">"*15, clinit_method)
simgr.run(step_func=step_func)
l.debug("<"*15 + " Run class initializer %r ... done " + "<"*15, clinit_method)
# The only thing that can be updated during initialization are
# static or rather global information, which are either stored on
# the heap or in the vm_static_table
self.state.memory.vm_static_table = simgr.deadended[-1].memory.vm_static_table.copy()
self.state.memory.heap = simgr.deadended[-1].memory.heap.copy()
else:
l.debug("Class initializer <clinit> is not loaded in CLE. Skip initializiation.") |
def seed_zoom(seeds, zoom):
"""
Smart zoom for sparse matrix. If there is resize to bigger resolution
thin line of label could be lost. This function prefers labels larger
    than zero. If there is only one small voxel in larger volume with zeros
it is selected.
"""
# import scipy
# loseeds=seeds
labels = np.unique(seeds)
# remove first label - 0
labels = np.delete(labels, 0)
# @TODO smart interpolation for seeds in one block
# loseeds = scipy.ndimage.interpolation.zoom(
# seeds, zoom, order=0)
loshape = np.ceil(np.array(seeds.shape) * 1.0 / zoom).astype(np.int)
loseeds = np.zeros(loshape, dtype=np.int8)
loseeds = loseeds.astype(np.int8)
for label in labels:
a, b, c = np.where(seeds == label)
loa = np.round(a // zoom)
lob = np.round(b // zoom)
loc = np.round(c // zoom)
# loseeds = np.zeros(loshape)
loseeds[loa, lob, loc] += label
# this is to detect conflict seeds
loseeds[loseeds > label] = 100
# remove conflict seeds
loseeds[loseeds > 99] = 0
# import py3DSeedEditor
# ped = py3DSeedEditor.py3DSeedEditor(loseeds)
# ped.show()
return loseeds | Smart zoom for sparse matrix. If there is resize to bigger resolution
thin line of label could be lost. This function prefers labels larger
than zero. If there is only one small voxel in larger volume with zeros
it is selected. | Below is the the instruction that describes the task:
### Input:
Smart zoom for sparse matrix. If there is resize to bigger resolution
thin line of label could be lost. This function prefers labels larger
than zero. If there is only one small voxel in larger volume with zeros
it is selected.
### Response:
def seed_zoom(seeds, zoom):
"""
Smart zoom for sparse matrix. If there is resize to bigger resolution
thin line of label could be lost. This function prefers labels larger
    than zero. If there is only one small voxel in larger volume with zeros
it is selected.
"""
# import scipy
# loseeds=seeds
labels = np.unique(seeds)
# remove first label - 0
labels = np.delete(labels, 0)
# @TODO smart interpolation for seeds in one block
# loseeds = scipy.ndimage.interpolation.zoom(
# seeds, zoom, order=0)
loshape = np.ceil(np.array(seeds.shape) * 1.0 / zoom).astype(np.int)
loseeds = np.zeros(loshape, dtype=np.int8)
loseeds = loseeds.astype(np.int8)
for label in labels:
a, b, c = np.where(seeds == label)
loa = np.round(a // zoom)
lob = np.round(b // zoom)
loc = np.round(c // zoom)
# loseeds = np.zeros(loshape)
loseeds[loa, lob, loc] += label
# this is to detect conflict seeds
loseeds[loseeds > label] = 100
# remove conflict seeds
loseeds[loseeds > 99] = 0
# import py3DSeedEditor
# ped = py3DSeedEditor.py3DSeedEditor(loseeds)
# ped.show()
return loseeds |
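A small runnable sketch, assuming numpy and the function above (the np.int alias it uses requires an older NumPy release); the toy volume holds a single labelled voxel that survives 2x downsampling.
import numpy as np
seeds = np.zeros((4, 4, 4), dtype=np.int8)
seeds[1, 2, 3] = 2                 # one seed voxel carrying label 2
low = seed_zoom(seeds, zoom=2)     # downsampled to shape (2, 2, 2)
print(low.shape, low[0, 1, 1])     # (2, 2, 2) 2 -- the label is kept, not averaged away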
def _match_nodes(self, validators, obj):
"""Apply each validator in validators to each node in obj.
Return each node in obj which matches all validators.
"""
results = []
for node in object_iter(obj):
if all([validate(node) for validate in validators]):
results.append(node)
return results | Apply each validator in validators to each node in obj.
Return each node in obj which matches all validators. | Below is the the instruction that describes the task:
### Input:
Apply each validator in validators to each node in obj.
Return each node in obj which matches all validators.
### Response:
def _match_nodes(self, validators, obj):
"""Apply each validator in validators to each node in obj.
Return each node in obj which matches all validators.
"""
results = []
for node in object_iter(obj):
if all([validate(node) for validate in validators]):
results.append(node)
return results |
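The helper relies on an object_iter walker defined elsewhere; a standalone sketch of the same idea (keep only the nodes that satisfy every validator), using a toy walker in its place.
def toy_object_iter(obj):
    # Yield obj itself, then recurse into nested dicts and lists.
    yield obj
    if isinstance(obj, dict):
        for value in obj.values():
            yield from toy_object_iter(value)
    elif isinstance(obj, list):
        for item in obj:
            yield from toy_object_iter(item)

validators = [
    lambda node: isinstance(node, dict),
    lambda node: isinstance(node, dict) and node.get("type") == "book",
]
data = {"items": [{"type": "book", "title": "A"}, {"type": "cd", "title": "B"}]}
matches = [node for node in toy_object_iter(data) if all(v(node) for v in validators)]
print(matches)  # [{'type': 'book', 'title': 'A'}]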
def match_string(self, stype):
"""Match string type."""
return not (stype - self.string_types) or bool(stype & self.wild_string_types) | Match string type. | Below is the the instruction that describes the task:
### Input:
Match string type.
### Response:
def match_string(self, stype):
"""Match string type."""
return not (stype - self.string_types) or bool(stype & self.wild_string_types) |
def page(self, enabled=values.unset, date_created_after=values.unset,
date_created_before=values.unset, friendly_name=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of CompositionHookInstance records from the API.
Request is executed immediately
:param bool enabled: Only show Composition Hooks enabled or disabled.
:param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
:param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
:param unicode friendly_name: Only show Composition Hooks with friendly name that match this name.
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of CompositionHookInstance
:rtype: twilio.rest.video.v1.composition_hook.CompositionHookPage
"""
params = values.of({
'Enabled': enabled,
'DateCreatedAfter': serialize.iso8601_datetime(date_created_after),
'DateCreatedBefore': serialize.iso8601_datetime(date_created_before),
'FriendlyName': friendly_name,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return CompositionHookPage(self._version, response, self._solution) | Retrieve a single page of CompositionHookInstance records from the API.
Request is executed immediately
:param bool enabled: Only show Composition Hooks enabled or disabled.
:param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
:param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
:param unicode friendly_name: Only show Composition Hooks with friendly name that match this name.
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of CompositionHookInstance
:rtype: twilio.rest.video.v1.composition_hook.CompositionHookPage | Below is the the instruction that describes the task:
### Input:
Retrieve a single page of CompositionHookInstance records from the API.
Request is executed immediately
:param bool enabled: Only show Composition Hooks enabled or disabled.
:param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
:param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
:param unicode friendly_name: Only show Composition Hooks with friendly name that match this name.
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of CompositionHookInstance
:rtype: twilio.rest.video.v1.composition_hook.CompositionHookPage
### Response:
def page(self, enabled=values.unset, date_created_after=values.unset,
date_created_before=values.unset, friendly_name=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of CompositionHookInstance records from the API.
Request is executed immediately
:param bool enabled: Only show Composition Hooks enabled or disabled.
:param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
:param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
:param unicode friendly_name: Only show Composition Hooks with friendly name that match this name.
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of CompositionHookInstance
:rtype: twilio.rest.video.v1.composition_hook.CompositionHookPage
"""
params = values.of({
'Enabled': enabled,
'DateCreatedAfter': serialize.iso8601_datetime(date_created_after),
'DateCreatedBefore': serialize.iso8601_datetime(date_created_before),
'FriendlyName': friendly_name,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return CompositionHookPage(self._version, response, self._solution) |
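An illustrative call, assuming a configured twilio Client whose Video service exposes this list resource; the accessor path and credentials below are assumptions, not taken from this file.
# Hypothetical usage -- account SID, token and accessor path are placeholders.
from twilio.rest import Client
client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
page = client.video.composition_hooks.page(enabled=True, page_size=20)
for hook in page:
    print(hook.friendly_name)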
def publish(self, user=None, when=None):
"""
        Publishes an item and any sub items.
A new transaction will be started if
we aren't already in a transaction.
Should only be run on draft items
"""
assert self.state == self.DRAFT
user_published = 'code'
if user:
user_published = user.username
now = timezone.now()
with xact():
            # If this item hasn't gone live yet and no new date was specified
# delete the old scheduled items and schedule this one on that date
published = False
if getattr(self._meta, '_is_view', False):
published = self.is_published
else:
published = self.object.is_published
if not when and not published and self.last_scheduled:
klass = self.get_version_class()
for obj in klass.normal.filter(object_id=self.object_id,
last_scheduled=self.last_scheduled,
state=self.SCHEDULED):
when = self.date_published
obj.delete()
when = when or now
# Drafts get preserved so save the
# time we last cloned this
if self.state == self.DRAFT:
self.last_scheduled = now
self.date_published = when
self.save(last_save=now)
self._clone()
self.user_published = user_published
self.state = self.SCHEDULED
self.save()
            self.schedule(when=when) | Publishes an item and any sub items.
A new transaction will be started if
we aren't already in a transaction.
Should only be run on draft items | Below is the the instruction that describes the task:
### Input:
Publishes an item and any sub items.
A new transaction will be started if
we aren't already in a transaction.
Should only be run on draft items
### Response:
def publish(self, user=None, when=None):
"""
        Publishes an item and any sub items.
A new transaction will be started if
we aren't already in a transaction.
Should only be run on draft items
"""
assert self.state == self.DRAFT
user_published = 'code'
if user:
user_published = user.username
now = timezone.now()
with xact():
            # If this item hasn't gone live yet and no new date was specified
# delete the old scheduled items and schedule this one on that date
published = False
if getattr(self._meta, '_is_view', False):
published = self.is_published
else:
published = self.object.is_published
if not when and not published and self.last_scheduled:
klass = self.get_version_class()
for obj in klass.normal.filter(object_id=self.object_id,
last_scheduled=self.last_scheduled,
state=self.SCHEDULED):
when = self.date_published
obj.delete()
when = when or now
# Drafts get preserved so save the
# time we last cloned this
if self.state == self.DRAFT:
self.last_scheduled = now
self.date_published = when
self.save(last_save=now)
self._clone()
self.user_published = user_published
self.state = self.SCHEDULED
self.save()
self.schedule(when=when) |
def eintr_retry_zmq(f, *args, **kwargs):
"""The specialization of :func:`eintr_retry` by :exc:`zmq.ZMQError`."""
return eintr_retry(zmq.ZMQError, f, *args, **kwargs) | The specialization of :func:`eintr_retry` by :exc:`zmq.ZMQError`. | Below is the the instruction that describes the task:
### Input:
The specialization of :func:`eintr_retry` by :exc:`zmq.ZMQError`.
### Response:
def eintr_retry_zmq(f, *args, **kwargs):
"""The specialization of :func:`eintr_retry` by :exc:`zmq.ZMQError`."""
return eintr_retry(zmq.ZMQError, f, *args, **kwargs) |
def _resolve_dotted_name(dotted_name):
"""Returns objects from strings
Deals e.g. with 'torch.nn.Softmax(dim=-1)'.
Modified from palladium:
https://github.com/ottogroup/palladium/blob/8a066a9a7690557d9b1b6ed54b7d1a1502ba59e3/palladium/util.py
with added support for instantiated objects.
"""
if not isinstance(dotted_name, str):
return dotted_name
if '.' not in dotted_name:
return dotted_name
args = None
params = None
match = P_PARAMS.match(dotted_name)
if match:
dotted_name = match.group('name')
params = match.group('params')
module, name = dotted_name.rsplit('.', 1)
attr = import_module(module)
attr = getattr(attr, name)
if params:
args, kwargs = _parse_args_kwargs(params[1:-1])
attr = attr(*args, **kwargs)
return attr | Returns objects from strings
Deals e.g. with 'torch.nn.Softmax(dim=-1)'.
Modified from palladium:
https://github.com/ottogroup/palladium/blob/8a066a9a7690557d9b1b6ed54b7d1a1502ba59e3/palladium/util.py
with added support for instantiated objects. | Below is the the instruction that describes the task:
### Input:
Returns objects from strings
Deals e.g. with 'torch.nn.Softmax(dim=-1)'.
Modified from palladium:
https://github.com/ottogroup/palladium/blob/8a066a9a7690557d9b1b6ed54b7d1a1502ba59e3/palladium/util.py
with added support for instantiated objects.
### Response:
def _resolve_dotted_name(dotted_name):
"""Returns objects from strings
Deals e.g. with 'torch.nn.Softmax(dim=-1)'.
Modified from palladium:
https://github.com/ottogroup/palladium/blob/8a066a9a7690557d9b1b6ed54b7d1a1502ba59e3/palladium/util.py
with added support for instantiated objects.
"""
if not isinstance(dotted_name, str):
return dotted_name
if '.' not in dotted_name:
return dotted_name
args = None
params = None
match = P_PARAMS.match(dotted_name)
if match:
dotted_name = match.group('name')
params = match.group('params')
module, name = dotted_name.rsplit('.', 1)
attr = import_module(module)
attr = getattr(attr, name)
if params:
args, kwargs = _parse_args_kwargs(params[1:-1])
attr = attr(*args, **kwargs)
return attr |
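A stripped-down standalone sketch of the core resolution step (module import plus attribute lookup), leaving out the P_PARAMS call syntax handled above.
from importlib import import_module

def resolve_simple(dotted_name):
    # "math.sqrt" -> the sqrt function; names without a dot are returned unchanged.
    if "." not in dotted_name:
        return dotted_name
    module, name = dotted_name.rsplit(".", 1)
    return getattr(import_module(module), name)

print(resolve_simple("math.sqrt")(2))            # 1.4142135623730951
print(resolve_simple("os.path.join")("a", "b"))  # a/b (a\b on Windows)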
def display_message(pymux, variables):
" Display a message. "
message = variables['<message>']
client_state = pymux.get_client_state()
client_state.message = message | Display a message. | Below is the the instruction that describes the task:
### Input:
Display a message.
### Response:
def display_message(pymux, variables):
" Display a message. "
message = variables['<message>']
client_state = pymux.get_client_state()
client_state.message = message |
def clear_tc(self, owner, data, clear_type):
"""Delete threat intel from ThreatConnect platform.
Args:
owner (str): The ThreatConnect owner.
data (dict): The data for the threat intel to clear.
clear_type (str): The type of clear action.
"""
batch = self.tcex.batch(owner, action='Delete')
tc_type = data.get('type')
path = data.get('path')
if tc_type in self.tcex.group_types:
name = self.tcex.playbook.read(data.get('name'))
name = self.path_data(name, path)
if name is not None:
print(
'Deleting ThreatConnect Group: {}{}{}'.format(
c.Style.BRIGHT, c.Fore.MAGENTA, name
)
)
self.log.info(
'[{}] Deleting ThreatConnect {} with name: {}.'.format(
clear_type, tc_type, name
)
)
batch.group(tc_type, name)
elif tc_type in self.tcex.indicator_types:
if data.get('summary') is not None:
summary = self.tcex.playbook.read(data.get('summary'))
else:
resource = self.tcex.resource(tc_type)
summary = resource.summary(data)
summary = self.path_data(summary, path)
if summary is not None:
print(
'Deleting ThreatConnect Indicator: {}{}{}'.format(
c.Style.BRIGHT, c.Fore.MAGENTA, summary
)
)
self.log.info(
'[{}] Deleting ThreatConnect {} with value: {}.'.format(
clear_type, tc_type, summary
)
)
batch.indicator(tc_type, summary)
batch_results = batch.submit()
self.log.debug('[{}] Batch Results: {}'.format(clear_type, batch_results))
for error in batch_results.get('errors') or []:
self.log.error('[{}] Batch Error: {}'.format(clear_type, error)) | Delete threat intel from ThreatConnect platform.
Args:
owner (str): The ThreatConnect owner.
data (dict): The data for the threat intel to clear.
clear_type (str): The type of clear action. | Below is the the instruction that describes the task:
### Input:
Delete threat intel from ThreatConnect platform.
Args:
owner (str): The ThreatConnect owner.
data (dict): The data for the threat intel to clear.
clear_type (str): The type of clear action.
### Response:
def clear_tc(self, owner, data, clear_type):
"""Delete threat intel from ThreatConnect platform.
Args:
owner (str): The ThreatConnect owner.
data (dict): The data for the threat intel to clear.
clear_type (str): The type of clear action.
"""
batch = self.tcex.batch(owner, action='Delete')
tc_type = data.get('type')
path = data.get('path')
if tc_type in self.tcex.group_types:
name = self.tcex.playbook.read(data.get('name'))
name = self.path_data(name, path)
if name is not None:
print(
'Deleting ThreatConnect Group: {}{}{}'.format(
c.Style.BRIGHT, c.Fore.MAGENTA, name
)
)
self.log.info(
'[{}] Deleting ThreatConnect {} with name: {}.'.format(
clear_type, tc_type, name
)
)
batch.group(tc_type, name)
elif tc_type in self.tcex.indicator_types:
if data.get('summary') is not None:
summary = self.tcex.playbook.read(data.get('summary'))
else:
resource = self.tcex.resource(tc_type)
summary = resource.summary(data)
summary = self.path_data(summary, path)
if summary is not None:
print(
'Deleting ThreatConnect Indicator: {}{}{}'.format(
c.Style.BRIGHT, c.Fore.MAGENTA, summary
)
)
self.log.info(
'[{}] Deleting ThreatConnect {} with value: {}.'.format(
clear_type, tc_type, summary
)
)
batch.indicator(tc_type, summary)
batch_results = batch.submit()
self.log.debug('[{}] Batch Results: {}'.format(clear_type, batch_results))
for error in batch_results.get('errors') or []:
self.log.error('[{}] Batch Error: {}'.format(clear_type, error)) |
def sample_stats_to_xarray(self):
"""Extract sample_stats from tfp trace."""
if self.model_fn is None or self.observed is None:
return None
log_likelihood = []
sample_size = self.posterior[0].shape[0]
for i in range(sample_size):
variables = {}
for var_i, var_name in enumerate(self.var_names):
variables[var_name] = self.posterior[var_i][i]
with self.ed.interception(self._value_setter(variables)):
log_likelihood.append((self.model_fn().distribution.log_prob(self.observed)))
data = {}
if self.dims is not None:
coord_name = self.dims.get("obs")
else:
coord_name = None
dims = {"log_likelihood": coord_name}
with self.tf.Session() as sess:
data["log_likelihood"] = np.expand_dims(
sess.run(log_likelihood, feed_dict=self.feed_dict), axis=0
)
return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=dims) | Extract sample_stats from tfp trace. | Below is the the instruction that describes the task:
### Input:
Extract sample_stats from tfp trace.
### Response:
def sample_stats_to_xarray(self):
"""Extract sample_stats from tfp trace."""
if self.model_fn is None or self.observed is None:
return None
log_likelihood = []
sample_size = self.posterior[0].shape[0]
for i in range(sample_size):
variables = {}
for var_i, var_name in enumerate(self.var_names):
variables[var_name] = self.posterior[var_i][i]
with self.ed.interception(self._value_setter(variables)):
log_likelihood.append((self.model_fn().distribution.log_prob(self.observed)))
data = {}
if self.dims is not None:
coord_name = self.dims.get("obs")
else:
coord_name = None
dims = {"log_likelihood": coord_name}
with self.tf.Session() as sess:
data["log_likelihood"] = np.expand_dims(
sess.run(log_likelihood, feed_dict=self.feed_dict), axis=0
)
return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=dims) |
def available_modes(self):
"""Return list of available mode names."""
if not self._available_modes:
modes = self.available_modes_with_ids
if not modes:
return None
self._available_modes = list(modes.keys())
return self._available_modes | Return list of available mode names. | Below is the the instruction that describes the task:
### Input:
Return list of available mode names.
### Response:
def available_modes(self):
"""Return list of available mode names."""
if not self._available_modes:
modes = self.available_modes_with_ids
if not modes:
return None
self._available_modes = list(modes.keys())
return self._available_modes |
def get_byte_array(integer):
"""Return the variable length bytes corresponding to the given int"""
# Operate in big endian (unlike most of Telegram API) since:
# > "...pq is a representation of a natural number
# (in binary *big endian* format)..."
# > "...current value of dh_prime equals
# (in *big-endian* byte order)..."
# Reference: https://core.telegram.org/mtproto/auth_key
return int.to_bytes(
integer,
(integer.bit_length() + 8 - 1) // 8, # 8 bits per byte,
byteorder='big',
signed=False
) | Return the variable length bytes corresponding to the given int | Below is the the instruction that describes the task:
### Input:
Return the variable length bytes corresponding to the given int
### Response:
def get_byte_array(integer):
"""Return the variable length bytes corresponding to the given int"""
# Operate in big endian (unlike most of Telegram API) since:
# > "...pq is a representation of a natural number
# (in binary *big endian* format)..."
# > "...current value of dh_prime equals
# (in *big-endian* byte order)..."
# Reference: https://core.telegram.org/mtproto/auth_key
return int.to_bytes(
integer,
(integer.bit_length() + 8 - 1) // 8, # 8 bits per byte,
byteorder='big',
signed=False
) |
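A quick self-contained check of the conversion and its round trip, reusing the function above.
value = 0x1234
raw = get_byte_array(value)                  # b'\x124' -- two bytes, big endian
assert int.from_bytes(raw, 'big') == value   # converts back to the original integer
print(len(get_byte_array(2 ** 2048 - 1)))    # 256 -- the length tracks the bit length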
def write_var(self, var_spec, var_attrs=None, var_data=None):
'''
Writes a variable, along with variable attributes and data.
Parameters
----------
var_spec : dict
The specifications of the variable.
The required/optional keys for creating a variable:
Required keys:
- ['Variable']: The name of the variable
- ['Data_Type']: the CDF data type
- ['Num_Elements']: The number of elements. Always 1 the
for numeric type. The char length for string type.
- ['Rec_Vary']: Record variance
For zVariables:
            - ['Dim_Sizes']: The dimensional sizes for zVariables only.
Use [] for 0-dimension. Each and
every dimension is varying for zVariables.
For rVariables:
- ['Dim_Vary']: The dimensional variances for rVariables only.
Optional keys:
- ['Var_Type']: Whether the variable is a zVariable or
rVariable. Valid values: "zVariable" and
"rVariable". The default is "zVariable".
- ['Sparse']: Whether the variable has sparse records.
Valid values are "no_sparse", "pad_sparse",
and "prev_sparse". The default is 'no_sparse'.
- ['Compress']: Set the gzip compression level (0 to 9), 0 for
no compression. The default is to compress
with level 6 (done only if the compressed
data is less than the uncompressed data).
- ['Block_Factor']: The blocking factor, the number of
records in a chunk when the variable is compressed.
- ['Pad']: The padded value (in bytes, numpy.ndarray or string)
var_attrs : dict
{attribute:value} pairs.
The attribute is the name of a variable attribute.
The value can have its data type specified for the
numeric data. If not, based on Python's type, a
corresponding CDF type is assumed: CDF_INT4 for int,
CDF_DOUBLE for float, CDF_EPOCH16 for complex and
and CDF_INT8 for long.
For example, the following defined attributes will
have the same types in the CDF::
var_attrs= { 'attr1': 'value1',
'attr2': 12.45,
'attr3': [3,4,5],
.....
}
With data type (in the list form)::
var_attrs= { 'attr1': 'value1',
'attr2': [12.45, 'CDF_DOUBLE'],
'attr3': [[3,4,5], 'CDF_INT4'],
.....
}
var_data :
The data for the variable. If the variable is
a regular variable without sparse records, it must
be in a single structure of bytes, or numpy.ndarray
for numeric variable, or str or list of strs for
string variable.
If the variable has sparse records, var_data should
be presented in a list/tuple with two elements,
the first being a list/tuple that contains the
physical record number(s), the second being the variable
data in bytes, numpy.ndarray, or a list of strings. Variable
data can have just physical records' data (with the same
number of records as the first element) or have data from both
physical records and virtual records (which with filled data).
The var_data has the form::
[[rec_#1,rec_#2,rec_#3,...],
[data_#1,data_#2,data_#3,...]]
See the sample for its setup.
'''
if not isinstance(var_spec, dict):
raise TypeError('Variable should be in dictionary form.')
# Get variable info from var_spec
try:
dataType = int(var_spec['Data_Type'])
numElems = int(var_spec['Num_Elements'])
name = var_spec['Variable']
recVary = var_spec['Rec_Vary']
except Exception:
raise ValueError('Missing/invalid required spec for creating variable.')
# Get whether or not it is a z variable
var_type = var_spec.setdefault('Var_Type', 'zvariable')
if (var_type.lower() == 'zvariable'):
zVar = True
else:
var_spec['Var_Type'] = 'rVariable'
zVar = False
if (dataType == CDF.CDF_CHAR or dataType == CDF.CDF_UCHAR):
if numElems < 1:
raise ValueError('Invalid Num_Elements for string data type variable')
else:
if numElems != 1:
raise ValueError('Invalid Num_Elements for numeric data type variable')
# If its a z variable, get the dimension info
# Otherwise, use r variable info
if zVar:
try:
dimSizes = var_spec['Dim_Sizes']
numDims = len(dimSizes)
dimVary = []
for _ in range(0, numDims):
dimVary.append(True)
except Exception:
raise ValueError('Missing/invalid required spec for creating variable.')
else:
dimSizes = self.rdim_sizes
numDims = self.num_rdim
try:
dimVary = var_spec['Dim_Vary']
if (len(dimVary) != numDims):
raise ValueError('Invalid Dim_Vary size for the rVariable.')
except Exception:
raise ValueError('Missing/invalid required spec for Dim_Vary for rVariable')
# Get Sparseness info
sparse = CDF._sparse_token(var_spec.get('Sparse', 'no_sparse'))
# Get compression info
compression = var_spec.get('Compress', 6)
if (isinstance(compression, int)):
if not 0 <= compression <= 9:
compression = 0
else:
compression = 6 if compression else 0
# Get blocking factor
blockingfactor = int(var_spec.get('Block_Factor', 1))
# Get pad value
pad = var_spec.get('Pad', None)
if (isinstance(pad, list) or isinstance(pad, tuple)):
pad = pad[0]
if (name in self.zvars or name in self.rvars):
raise ValueError('{} already exists'.format(name))
with self.path.open('rb+') as f:
f.seek(0, 2) # EOF (appending)
varNum, offset = self._write_vdr(f, dataType, numElems, numDims,
dimSizes, name, dimVary, recVary,
sparse, blockingfactor, compression,
pad, zVar)
# Update the GDR pointers if needed
if zVar:
if len(self.zvars) == 1:
# GDR's zVDRhead
self._update_offset_value(f, self.gdr_head+20, 8, offset)
else:
if len(self.rvars) == 1:
# GDR's rVDRhead
self._update_offset_value(f, self.gdr_head+12, 8, offset)
# Write the variable attributes
if var_attrs is not None:
self._write_var_attrs(f, varNum, var_attrs, zVar)
# Write the actual data to the file
if not (var_data is None):
if (sparse == 0):
varMaxRec = self._write_var_data_nonsparse(f, zVar, varNum,
dataType, numElems,
recVary, compression,
blockingfactor,
var_data)
else:
notsupport = False
if not isinstance(var_data, (list, tuple)):
notsupport = True
if notsupport or len(var_data) != 2:
print('Sparse record #s and data are not of list/tuple form:')
print(' [ [rec_#1, rec_#2, rec_#3, ],')
print(' [data_#1, data_#2, data_#3, ....] ]')
return
# Format data into: [[recstart1, recend1, data1],
# [recstart2,recend2,data2], ...]
var_data = self._make_sparse_blocks(var_spec, var_data[0],
var_data[1])
for block in var_data:
varMaxRec = self._write_var_data_sparse(f, zVar, varNum,
dataType, numElems,
recVary, block)
# Update GDR MaxRec if writing an r variable
if not zVar:
# GDR's rMaxRec
f.seek(self.gdr_head+52)
maxRec = int.from_bytes(f.read(4), 'big', signed=True)
if (maxRec < varMaxRec):
self._update_offset_value(f, self.gdr_head+52, 4, varMaxRec) | Writes a variable, along with variable attributes and data.
Parameters
----------
var_spec : dict
The specifications of the variable.
The required/optional keys for creating a variable:
Required keys:
- ['Variable']: The name of the variable
- ['Data_Type']: the CDF data type
- ['Num_Elements']: The number of elements. Always 1 the
for numeric type. The char length for string type.
- ['Rec_Vary']: Record variance
For zVariables:
    - ['Dim_Sizes']: The dimensional sizes for zVariables only.
Use [] for 0-dimension. Each and
every dimension is varying for zVariables.
For rVariables:
- ['Dim_Vary']: The dimensional variances for rVariables only.
Optional keys:
- ['Var_Type']: Whether the variable is a zVariable or
rVariable. Valid values: "zVariable" and
"rVariable". The default is "zVariable".
- ['Sparse']: Whether the variable has sparse records.
Valid values are "no_sparse", "pad_sparse",
and "prev_sparse". The default is 'no_sparse'.
- ['Compress']: Set the gzip compression level (0 to 9), 0 for
no compression. The default is to compress
with level 6 (done only if the compressed
data is less than the uncompressed data).
- ['Block_Factor']: The blocking factor, the number of
records in a chunk when the variable is compressed.
- ['Pad']: The padded value (in bytes, numpy.ndarray or string)
var_attrs : dict
{attribute:value} pairs.
The attribute is the name of a variable attribute.
The value can have its data type specified for the
numeric data. If not, based on Python's type, a
corresponding CDF type is assumed: CDF_INT4 for int,
CDF_DOUBLE for float, CDF_EPOCH16 for complex and
and CDF_INT8 for long.
For example, the following defined attributes will
have the same types in the CDF::
var_attrs= { 'attr1': 'value1',
'attr2': 12.45,
'attr3': [3,4,5],
.....
}
With data type (in the list form)::
var_attrs= { 'attr1': 'value1',
'attr2': [12.45, 'CDF_DOUBLE'],
'attr3': [[3,4,5], 'CDF_INT4'],
.....
}
var_data :
The data for the variable. If the variable is
a regular variable without sparse records, it must
be in a single structure of bytes, or numpy.ndarray
for numeric variable, or str or list of strs for
string variable.
If the variable has sparse records, var_data should
be presented in a list/tuple with two elements,
the first being a list/tuple that contains the
physical record number(s), the second being the variable
data in bytes, numpy.ndarray, or a list of strings. Variable
data can have just physical records' data (with the same
number of records as the first element) or have data from both
physical records and virtual records (which with filled data).
The var_data has the form::
[[rec_#1,rec_#2,rec_#3,...],
[data_#1,data_#2,data_#3,...]]
See the sample for its setup. | Below is the the instruction that describes the task:
### Input:
Writes a variable, along with variable attributes and data.
Parameters
----------
var_spec : dict
The specifications of the variable.
The required/optional keys for creating a variable:
Required keys:
- ['Variable']: The name of the variable
- ['Data_Type']: the CDF data type
- ['Num_Elements']: The number of elements. Always 1 the
for numeric type. The char length for string type.
- ['Rec_Vary']: Record variance
For zVariables:
- ['Dim_Sizes']: The dimensional sizes for zVariables only.
Use [] for 0-dimension. Each and
every dimension is varying for zVariables.
For rVariables:
- ['Dim_Vary']: The dimensional variances for rVariables only.
Optional keys:
- ['Var_Type']: Whether the variable is a zVariable or
rVariable. Valid values: "zVariable" and
"rVariable". The default is "zVariable".
- ['Sparse']: Whether the variable has sparse records.
Valid values are "no_sparse", "pad_sparse",
and "prev_sparse". The default is 'no_sparse'.
- ['Compress']: Set the gzip compression level (0 to 9), 0 for
no compression. The default is to compress
with level 6 (done only if the compressed
data is less than the uncompressed data).
- ['Block_Factor']: The blocking factor, the number of
records in a chunk when the variable is compressed.
- ['Pad']: The padded value (in bytes, numpy.ndarray or string)
var_attrs : dict
{attribute:value} pairs.
The attribute is the name of a variable attribute.
The value can have its data type specified for the
numeric data. If not, based on Python's type, a
corresponding CDF type is assumed: CDF_INT4 for int,
CDF_DOUBLE for float, CDF_EPOCH16 for complex
and CDF_INT8 for long.
For example, the following defined attributes will
have the same types in the CDF::
var_attrs= { 'attr1': 'value1',
'attr2': 12.45,
'attr3': [3,4,5],
.....
}
With data type (in the list form)::
var_attrs= { 'attr1': 'value1',
'attr2': [12.45, 'CDF_DOUBLE'],
'attr3': [[3,4,5], 'CDF_INT4'],
.....
}
var_data :
The data for the variable. If the variable is
a regular variable without sparse records, it must
be in a single structure of bytes, or numpy.ndarray
for numeric variable, or str or list of strs for
string variable.
If the variable has sparse records, var_data should
be presented in a list/tuple with two elements,
the first being a list/tuple that contains the
physical record number(s), the second being the variable
data in bytes, numpy.ndarray, or a list of strings. Variable
data can have just physical records' data (with the same
number of records as the first element) or have data from both
physical records and virtual records (which are filled with the pad value).
The var_data has the form::
[[rec_#1,rec_#2,rec_#3,...],
[data_#1,data_#2,data_#3,...]]
See the sample for its setup.
### Response:
def write_var(self, var_spec, var_attrs=None, var_data=None):
'''
Writes a variable, along with variable attributes and data.
Parameters
----------
var_spec : dict
The specifications of the variable.
The required/optional keys for creating a variable:
Required keys:
- ['Variable']: The name of the variable
- ['Data_Type']: the CDF data type
- ['Num_Elements']: The number of elements. Always 1 for the
numeric type. The char length for string type.
- ['Rec_Vary']: Record variance
For zVariables:
- ['Dim_Sizes']: The dimensional sizes for zVariables only.
Use [] for 0-dimension. Each and
every dimension is varying for zVariables.
For rVariables:
- ['Dim_Vary']: The dimensional variances for rVariables only.
Optional keys:
- ['Var_Type']: Whether the variable is a zVariable or
rVariable. Valid values: "zVariable" and
"rVariable". The default is "zVariable".
- ['Sparse']: Whether the variable has sparse records.
Valid values are "no_sparse", "pad_sparse",
and "prev_sparse". The default is 'no_sparse'.
- ['Compress']: Set the gzip compression level (0 to 9), 0 for
no compression. The default is to compress
with level 6 (done only if the compressed
data is less than the uncompressed data).
- ['Block_Factor']: The blocking factor, the number of
records in a chunk when the variable is compressed.
- ['Pad']: The padded value (in bytes, numpy.ndarray or string)
var_attrs : dict
{attribute:value} pairs.
The attribute is the name of a variable attribute.
The value can have its data type specified for the
numeric data. If not, based on Python's type, a
corresponding CDF type is assumed: CDF_INT4 for int,
CDF_DOUBLE for float, CDF_EPOCH16 for complex
and CDF_INT8 for long.
For example, the following defined attributes will
have the same types in the CDF::
var_attrs= { 'attr1': 'value1',
'attr2': 12.45,
'attr3': [3,4,5],
.....
}
With data type (in the list form)::
var_attrs= { 'attr1': 'value1',
'attr2': [12.45, 'CDF_DOUBLE'],
'attr3': [[3,4,5], 'CDF_INT4'],
.....
}
var_data :
The data for the variable. If the variable is
a regular variable without sparse records, it must
be in a single structure of bytes, or numpy.ndarray
for numeric variable, or str or list of strs for
string variable.
If the variable has sparse records, var_data should
be presented in a list/tuple with two elements,
the first being a list/tuple that contains the
physical record number(s), the second being the variable
data in bytes, numpy.ndarray, or a list of strings. Variable
data can have just physical records' data (with the same
number of records as the first element) or have data from both
physical records and virtual records (which are filled with the pad value).
The var_data has the form::
[[rec_#1,rec_#2,rec_#3,...],
[data_#1,data_#2,data_#3,...]]
See the sample for its setup.
'''
if not isinstance(var_spec, dict):
raise TypeError('Variable should be in dictionary form.')
# Get variable info from var_spec
try:
dataType = int(var_spec['Data_Type'])
numElems = int(var_spec['Num_Elements'])
name = var_spec['Variable']
recVary = var_spec['Rec_Vary']
except Exception:
raise ValueError('Missing/invalid required spec for creating variable.')
# Get whether or not it is a z variable
var_type = var_spec.setdefault('Var_Type', 'zvariable')
if (var_type.lower() == 'zvariable'):
zVar = True
else:
var_spec['Var_Type'] = 'rVariable'
zVar = False
if (dataType == CDF.CDF_CHAR or dataType == CDF.CDF_UCHAR):
if numElems < 1:
raise ValueError('Invalid Num_Elements for string data type variable')
else:
if numElems != 1:
raise ValueError('Invalid Num_Elements for numeric data type variable')
# If its a z variable, get the dimension info
# Otherwise, use r variable info
if zVar:
try:
dimSizes = var_spec['Dim_Sizes']
numDims = len(dimSizes)
dimVary = []
for _ in range(0, numDims):
dimVary.append(True)
except Exception:
raise ValueError('Missing/invalid required spec for creating variable.')
else:
dimSizes = self.rdim_sizes
numDims = self.num_rdim
try:
dimVary = var_spec['Dim_Vary']
if (len(dimVary) != numDims):
raise ValueError('Invalid Dim_Vary size for the rVariable.')
except Exception:
raise ValueError('Missing/invalid required spec for Dim_Vary for rVariable')
# Get Sparseness info
sparse = CDF._sparse_token(var_spec.get('Sparse', 'no_sparse'))
# Get compression info
compression = var_spec.get('Compress', 6)
if (isinstance(compression, int)):
if not 0 <= compression <= 9:
compression = 0
else:
compression = 6 if compression else 0
# Get blocking factor
blockingfactor = int(var_spec.get('Block_Factor', 1))
# Get pad value
pad = var_spec.get('Pad', None)
if (isinstance(pad, list) or isinstance(pad, tuple)):
pad = pad[0]
if (name in self.zvars or name in self.rvars):
raise ValueError('{} already exists'.format(name))
with self.path.open('rb+') as f:
f.seek(0, 2) # EOF (appending)
varNum, offset = self._write_vdr(f, dataType, numElems, numDims,
dimSizes, name, dimVary, recVary,
sparse, blockingfactor, compression,
pad, zVar)
# Update the GDR pointers if needed
if zVar:
if len(self.zvars) == 1:
# GDR's zVDRhead
self._update_offset_value(f, self.gdr_head+20, 8, offset)
else:
if len(self.rvars) == 1:
# GDR's rVDRhead
self._update_offset_value(f, self.gdr_head+12, 8, offset)
# Write the variable attributes
if var_attrs is not None:
self._write_var_attrs(f, varNum, var_attrs, zVar)
# Write the actual data to the file
if not (var_data is None):
if (sparse == 0):
varMaxRec = self._write_var_data_nonsparse(f, zVar, varNum,
dataType, numElems,
recVary, compression,
blockingfactor,
var_data)
else:
notsupport = False
if not isinstance(var_data, (list, tuple)):
notsupport = True
if notsupport or len(var_data) != 2:
print('Sparse record #s and data are not of list/tuple form:')
print(' [ [rec_#1, rec_#2, rec_#3, ],')
print(' [data_#1, data_#2, data_#3, ....] ]')
return
# Format data into: [[recstart1, recend1, data1],
# [recstart2,recend2,data2], ...]
var_data = self._make_sparse_blocks(var_spec, var_data[0],
var_data[1])
for block in var_data:
varMaxRec = self._write_var_data_sparse(f, zVar, varNum,
dataType, numElems,
recVary, block)
# Update GDR MaxRec if writing an r variable
if not zVar:
# GDR's rMaxRec
f.seek(self.gdr_head+52)
maxRec = int.from_bytes(f.read(4), 'big', signed=True)
if (maxRec < varMaxRec):
self._update_offset_value(f, self.gdr_head+52, 4, varMaxRec) |
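A minimal usage sketch for write_var(), based only on the signature and docstring above. The cdfwrite.CDF constructor/close() calls and the numeric Data_Type code are assumptions made for illustration, not taken from the snippet.

import numpy as np
from cdflib import cdfwrite

cdf = cdfwrite.CDF('example.cdf')          # assumed writer constructor

var_spec = {
    'Variable': 'Temperature',
    'Data_Type': 45,                       # assumed numeric code for CDF_DOUBLE
    'Num_Elements': 1,                     # always 1 for numeric types
    'Rec_Vary': True,
    'Var_Type': 'zVariable',
    'Dim_Sizes': [],                       # 0-dimensional zVariable
    'Sparse': 'no_sparse',
    'Compress': 6,
}

var_attrs = {
    'FIELDNAM': 'Temperature',
    'UNITS': 'K',
    'VALIDMIN': [0.0, 'CDF_DOUBLE'],       # value with an explicit CDF type
}

var_data = np.array([271.4, 272.1, 273.0])  # one value per record

cdf.write_var(var_spec, var_attrs=var_attrs, var_data=var_data)
cdf.close()                                 # assumed close() on the writer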
def DiamAns(cmd, **fields):
"""Craft Diameter answer commands"""
upfields, name = getCmdParams(cmd, False, **fields)
p = DiamG(**upfields)
p.name = name
return p | Craft Diameter answer commands | Below is the instruction that describes the task:
### Input:
Craft Diameter answer commands
### Response:
def DiamAns(cmd, **fields):
"""Craft Diameter answer commands"""
upfields, name = getCmdParams(cmd, False, **fields)
p = DiamG(**upfields)
p.name = name
return p |
def attr_delete(args):
''' Delete key=value attributes: if entity name & type are specified then
attributes will be deleted from that entity, otherwise the attribute will
be removed from the workspace'''
if args.entity_type and args.entities:
# Since there is no attribute deletion endpoint, we must perform 2 steps
# here: first we retrieve the entity_ids, and any foreign keys (e.g.
# participant_id for sample_id); and then construct a loadfile which
# specifies which entities are to have what attributes removed. Note
# that FireCloud uses the magic keyword __DELETE__ to indicate that
# an attribute should be deleted from an entity.
# Step 1: see what entities are present, and filter to those requested
entities = _entity_paginator(args.project, args.workspace,
args.entity_type,
page_size=1000, filter_terms=None,
sort_direction="asc")
if args.entities:
entities = [e for e in entities if e['name'] in args.entities]
# Step 2: construct a loadfile to delete these attributes
attrs = sorted(args.attributes)
etype = args.entity_type
entity_data = []
for entity_dict in entities:
name = entity_dict['name']
line = name
# TODO: Fix other types?
if etype == "sample":
line += "\t" + entity_dict['attributes']['participant']['entityName']
for attr in attrs:
line += "\t__DELETE__"
# Improve performance by only updating records that have changed
entity_data.append(line)
entity_header = ["entity:" + etype + "_id"]
if etype == "sample":
entity_header.append("participant_id")
entity_header = '\t'.join(entity_header + list(attrs))
# Remove attributes from an entity
message = "WARNING: this will delete these attributes:\n\n" + \
','.join(args.attributes) + "\n\n"
if args.entities:
message += 'on these {0}s:\n\n'.format(args.entity_type) + \
', '.join(args.entities)
else:
message += 'on all {0}s'.format(args.entity_type)
message += "\n\nin workspace {0}/{1}\n".format(args.project, args.workspace)
if not args.yes and not _confirm_prompt(message):
return 0
# TODO: reconcile with other batch updates
# Chunk the entities into batches of 500, and upload to FC
if args.verbose:
print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
chunk_len = 500
total = int(len(entity_data) / chunk_len) + 1
batch = 0
for i in range(0, len(entity_data), chunk_len):
batch += 1
if args.verbose:
print("Updating samples {0}-{1}, batch {2}/{3}".format(
i+1, min(i+chunk_len, len(entity_data)), batch, total
))
this_data = entity_header + '\n' + '\n'.join(entity_data[i:i+chunk_len])
# Now push the entity data back to firecloud
r = fapi.upload_entities(args.project, args.workspace, this_data)
fapi._check_response_code(r, 200)
else:
message = "WARNING: this will delete the following attributes in " + \
"{0}/{1}\n\t".format(args.project, args.workspace) + \
"\n\t".join(args.attributes)
if not (args.yes or _confirm_prompt(message)):
return 0
updates = [fapi._attr_rem(a) for a in args.attributes]
r = fapi.update_workspace_attributes(args.project, args.workspace,
updates)
fapi._check_response_code(r, 200)
return 0 | Delete key=value attributes: if entity name & type are specified then
attributes will be deleted from that entity, otherwise the attribute will
be removed from the workspace | Below is the instruction that describes the task:
### Input:
Delete key=value attributes: if entity name & type are specified then
attributes will be deleted from that entity, otherwise the attribute will
be removed from the workspace
### Response:
def attr_delete(args):
''' Delete key=value attributes: if entity name & type are specified then
attributes will be deleted from that entity, otherwise the attribute will
be removed from the workspace'''
if args.entity_type and args.entities:
# Since there is no attribute deletion endpoint, we must perform 2 steps
# here: first we retrieve the entity_ids, and any foreign keys (e.g.
# participant_id for sample_id); and then construct a loadfile which
# specifies which entities are to have what attributes removed. Note
# that FireCloud uses the magic keyword __DELETE__ to indicate that
# an attribute should be deleted from an entity.
# Step 1: see what entities are present, and filter to those requested
entities = _entity_paginator(args.project, args.workspace,
args.entity_type,
page_size=1000, filter_terms=None,
sort_direction="asc")
if args.entities:
entities = [e for e in entities if e['name'] in args.entities]
# Step 2: construct a loadfile to delete these attributes
attrs = sorted(args.attributes)
etype = args.entity_type
entity_data = []
for entity_dict in entities:
name = entity_dict['name']
line = name
# TODO: Fix other types?
if etype == "sample":
line += "\t" + entity_dict['attributes']['participant']['entityName']
for attr in attrs:
line += "\t__DELETE__"
# Improve performance by only updating records that have changed
entity_data.append(line)
entity_header = ["entity:" + etype + "_id"]
if etype == "sample":
entity_header.append("participant_id")
entity_header = '\t'.join(entity_header + list(attrs))
# Remove attributes from an entity
message = "WARNING: this will delete these attributes:\n\n" + \
','.join(args.attributes) + "\n\n"
if args.entities:
message += 'on these {0}s:\n\n'.format(args.entity_type) + \
', '.join(args.entities)
else:
message += 'on all {0}s'.format(args.entity_type)
message += "\n\nin workspace {0}/{1}\n".format(args.project, args.workspace)
if not args.yes and not _confirm_prompt(message):
return 0
# TODO: reconcile with other batch updates
# Chunk the entities into batches of 500, and upload to FC
if args.verbose:
print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
chunk_len = 500
total = int(len(entity_data) / chunk_len) + 1
batch = 0
for i in range(0, len(entity_data), chunk_len):
batch += 1
if args.verbose:
print("Updating samples {0}-{1}, batch {2}/{3}".format(
i+1, min(i+chunk_len, len(entity_data)), batch, total
))
this_data = entity_header + '\n' + '\n'.join(entity_data[i:i+chunk_len])
# Now push the entity data back to firecloud
r = fapi.upload_entities(args.project, args.workspace, this_data)
fapi._check_response_code(r, 200)
else:
message = "WARNING: this will delete the following attributes in " + \
"{0}/{1}\n\t".format(args.project, args.workspace) + \
"\n\t".join(args.attributes)
if not (args.yes or _confirm_prompt(message)):
return 0
updates = [fapi._attr_rem(a) for a in args.attributes]
r = fapi.update_workspace_attributes(args.project, args.workspace,
updates)
fapi._check_response_code(r, 200)
return 0 |
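For reference, a sketch of the tab-separated loadfile that attr_delete() assembles for sample entities before pushing it through fapi.upload_entities(); the entity names and attribute columns are invented for the example.

# entity_header plus one row per sample, every targeted attribute set to __DELETE__
loadfile = (
    "entity:sample_id\tparticipant_id\tqc_status\ttumor_purity\n"
    "sample_001\tparticipant_001\t__DELETE__\t__DELETE__\n"
    "sample_002\tparticipant_002\t__DELETE__\t__DELETE__\n"
)
# This string is what gets uploaded in chunks:
# fapi.upload_entities(project, workspace, loadfile)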
def open_files(self, path):
"""Load file(s) -- image*.fits, image*.fits[ext].
Returns success code (True or False).
"""
paths = []
input_list = _patt.findall(path)
if not input_list:
input_list = [path]
for path in input_list:
# Strips trailing wildcard
if path.endswith('*'):
path = path[:-1]
if os.path.isdir(path):
continue
self.logger.debug('Opening files matched by {0}'.format(path))
info = iohelper.get_fileinfo(path)
ext = iohelper.get_hdu_suffix(info.numhdu)
files = glob.glob(info.filepath) # Expand wildcard
paths.extend(['{0}{1}'.format(f, ext) for f in files])
if len(paths) > 0:
self.load_paths(paths)
return True
return False | Load file(s) -- image*.fits, image*.fits[ext].
Returns success code (True or False). | Below is the instruction that describes the task:
### Input:
Load file(s) -- image*.fits, image*.fits[ext].
Returns success code (True or False).
### Response:
def open_files(self, path):
"""Load file(s) -- image*.fits, image*.fits[ext].
Returns success code (True or False).
"""
paths = []
input_list = _patt.findall(path)
if not input_list:
input_list = [path]
for path in input_list:
# Strips trailing wildcard
if path.endswith('*'):
path = path[:-1]
if os.path.isdir(path):
continue
self.logger.debug('Opening files matched by {0}'.format(path))
info = iohelper.get_fileinfo(path)
ext = iohelper.get_hdu_suffix(info.numhdu)
files = glob.glob(info.filepath) # Expand wildcard
paths.extend(['{0}{1}'.format(f, ext) for f in files])
if len(paths) > 0:
self.load_paths(paths)
return True
return False |
def create_graph_html(js_template, css_template, html_template=None):
""" Create HTML code block given the graph Javascript and CSS. """
if html_template is None:
html_template = read_lib('html', 'graph')
# Create div ID for the graph and give it to the JS and CSS templates so
# they can reference the graph.
graph_id = 'graph-{0}'.format(_get_random_id())
js = populate_template(js_template, graph_id=graph_id)
css = populate_template(css_template, graph_id=graph_id)
return populate_template(
html_template,
graph_id=graph_id,
css=css,
js=js
) | Create HTML code block given the graph Javascript and CSS. | Below is the instruction that describes the task:
### Input:
Create HTML code block given the graph Javascript and CSS.
### Response:
def create_graph_html(js_template, css_template, html_template=None):
""" Create HTML code block given the graph Javascript and CSS. """
if html_template is None:
html_template = read_lib('html', 'graph')
# Create div ID for the graph and give it to the JS and CSS templates so
# they can reference the graph.
graph_id = 'graph-{0}'.format(_get_random_id())
js = populate_template(js_template, graph_id=graph_id)
css = populate_template(css_template, graph_id=graph_id)
return populate_template(
html_template,
graph_id=graph_id,
css=css,
js=js
) |
def configuration_check(config):
"""Perform a sanity check on configuration.
First it performs a sanity check against settings for daemon
and then against settings for each service check.
Arguments:
config (obj): A configparser object which holds our configuration.
Returns:
None if all checks are successfully passed otherwise raises a
ValueError exception.
"""
log_level = config.get('daemon', 'loglevel')
num_level = getattr(logging, log_level.upper(), None)
pidfile = config.get('daemon', 'pidfile')
# Catch the case where the directory, under which we store the pid file, is
# missing.
if not os.path.isdir(os.path.dirname(pidfile)):
raise ValueError("{d} doesn't exit".format(d=os.path.dirname(pidfile)))
if not isinstance(num_level, int):
raise ValueError('Invalid log level: {}'.format(log_level))
for _file in 'log_file', 'stderr_file':
if config.has_option('daemon', _file):
try:
touch(config.get('daemon', _file))
except OSError as exc:
raise ValueError(exc)
for option, getter in DAEMON_OPTIONS_TYPE.items():
try:
getattr(config, getter)('daemon', option)
except configparser.NoOptionError as error:
if option not in DAEMON_OPTIONAL_OPTIONS:
raise ValueError(error)
except configparser.Error as error:
raise ValueError(error)
except ValueError as exc:
msg = ("invalid data for '{opt}' option in daemon section: {err}"
.format(opt=option, err=exc))
raise ValueError(msg)
service_configuration_check(config) | Perform a sanity check on configuration.
First it performs a sanity check against settings for daemon
and then against settings for each service check.
Arguments:
config (obj): A configparser object which holds our configuration.
Returns:
None if all checks are successfully passed otherwise raises a
ValueError exception. | Below is the instruction that describes the task:
### Input:
Perform a sanity check on configuration.
First it performs a sanity check against settings for daemon
and then against settings for each service check.
Arguments:
config (obj): A configparser object which holds our configuration.
Returns:
None if all checks are successfully passed otherwise raises a
ValueError exception.
### Response:
def configuration_check(config):
"""Perform a sanity check on configuration.
First it performs a sanity check against settings for daemon
and then against settings for each service check.
Arguments:
config (obj): A configparser object which holds our configuration.
Returns:
None if all checks are successfully passed otherwise raises a
ValueError exception.
"""
log_level = config.get('daemon', 'loglevel')
num_level = getattr(logging, log_level.upper(), None)
pidfile = config.get('daemon', 'pidfile')
# Catch the case where the directory, under which we store the pid file, is
# missing.
if not os.path.isdir(os.path.dirname(pidfile)):
raise ValueError("{d} doesn't exit".format(d=os.path.dirname(pidfile)))
if not isinstance(num_level, int):
raise ValueError('Invalid log level: {}'.format(log_level))
for _file in 'log_file', 'stderr_file':
if config.has_option('daemon', _file):
try:
touch(config.get('daemon', _file))
except OSError as exc:
raise ValueError(exc)
for option, getter in DAEMON_OPTIONS_TYPE.items():
try:
getattr(config, getter)('daemon', option)
except configparser.NoOptionError as error:
if option not in DAEMON_OPTIONAL_OPTIONS:
raise ValueError(error)
except configparser.Error as error:
raise ValueError(error)
except ValueError as exc:
msg = ("invalid data for '{opt}' option in daemon section: {err}"
.format(opt=option, err=exc))
raise ValueError(msg)
service_configuration_check(config) |
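A minimal sketch of a configuration object that the daemon-section checks above would inspect; the paths are placeholders, and any further options required by DAEMON_OPTIONS_TYPE or the per-service checks (not shown in this snippet) would still need to be present.

import configparser

config = configparser.ConfigParser()
config.read_string("""
[daemon]
loglevel = info
pidfile = /var/run/healthchecker/healthchecker.pid
log_file = /var/log/healthchecker/healthchecker.log
stderr_file = /var/log/healthchecker/stderr.log
""")

configuration_check(config)   # raises ValueError if anything is missing or invalid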
def load(self, filename, format_file='cloudupdrs'):
"""
This is a general load data method where the format of data to load can be passed as a parameter,
:param str filename: The path to load data from
:param str format_file: format of the file. Default is CloudUPDRS. Set to mpower for mpower data.
:return dataframe: data_frame.x, data_frame.y, data_frame.z: x, y, z components of the acceleration \
data_frame.index is the datetime-like index
"""
try:
ts = load_data(filename, format_file)
validator = CloudUPDRSDataFrameValidator()
if validator.is_valid(ts):
return ts
else:
logging.error('Error loading data, wrong format.')
return None
except IOError as e:
ierr = "({}): {}".format(e.errno, e.strerror)
logging.error("load data, file not found, I/O error %s", ierr)
except ValueError as verr:
logging.error("load data ValueError ->%s", verr.message)
except:
logging.error("Unexpected error on load data method: %s", sys.exc_info()[0]) | This is a general load data method where the format of data to load can be passed as a parameter,
:param str filename: The path to load data from
:param str format_file: format of the file. Default is CloudUPDRS. Set to mpower for mpower data.
:return dataframe: data_frame.x, data_frame.y, data_frame.z: x, y, z components of the acceleration \
data_frame.index is the datetime-like index | Below is the instruction that describes the task:
### Input:
This is a general load data method where the format of data to load can be passed as a parameter,
:param str filename: The path to load data from
:param str format_file: format of the file. Default is CloudUPDRS. Set to mpower for mpower data.
:return dataframe: data_frame.x, data_frame.y, data_frame.z: x, y, z components of the acceleration \
data_frame.index is the datetime-like index
### Response:
def load(self, filename, format_file='cloudupdrs'):
"""
This is a general load data method where the format of data to load can be passed as a parameter,
:param str filename: The path to load data from
:param str format_file: format of the file. Default is CloudUPDRS. Set to mpower for mpower data.
:return dataframe: data_frame.x, data_frame.y, data_frame.z: x, y, z components of the acceleration \
data_frame.index is the datetime-like index
"""
try:
ts = load_data(filename, format_file)
validator = CloudUPDRSDataFrameValidator()
if validator.is_valid(ts):
return ts
else:
logging.error('Error loading data, wrong format.')
return None
except IOError as e:
ierr = "({}): {}".format(e.errno, e.strerror)
logging.error("load data, file not found, I/O error %s", ierr)
except ValueError as verr:
logging.error("load data ValueError ->%s", verr.message)
except:
logging.error("Unexpected error on load data method: %s", sys.exc_info()[0]) |
def visitPrefixDecl(self, ctx: ShExDocParser.PrefixDeclContext):
""" prefixDecl: KW_PREFIX PNAME_NS IRIREF """
iri = self.context.iriref_to_shexj_iriref(ctx.IRIREF())
prefix = ctx.PNAME_NS().getText()
if iri not in self.context.ld_prefixes:
self.context.prefixes.setdefault(prefix, iri.val) | prefixDecl: KW_PREFIX PNAME_NS IRIREF | Below is the instruction that describes the task:
### Input:
prefixDecl: KW_PREFIX PNAME_NS IRIREF
### Response:
def visitPrefixDecl(self, ctx: ShExDocParser.PrefixDeclContext):
""" prefixDecl: KW_PREFIX PNAME_NS IRIREF """
iri = self.context.iriref_to_shexj_iriref(ctx.IRIREF())
prefix = ctx.PNAME_NS().getText()
if iri not in self.context.ld_prefixes:
self.context.prefixes.setdefault(prefix, iri.val) |
def add(self, num):
"""
Adds num to the current value
"""
self.index = max(0, min(len(self.allowed)-1, self.index+num))
self.set(self.allowed[self.index]) | Adds num to the current value | Below is the instruction that describes the task:
### Input:
Adds num to the current value
### Response:
def add(self, num):
"""
Adds num to the current value
"""
self.index = max(0, min(len(self.allowed)-1, self.index+num))
self.set(self.allowed[self.index]) |
def _query(function,
consul_url,
token=None,
method='GET',
api_version='v1',
data=None,
query_params=None):
'''
Consul object method function to construct and execute on the API URL.
:param api_url: The Consul api url.
:param api_version The Consul api version
:param function: The Consul api function to perform.
:param method: The HTTP method, e.g. GET or POST.
:param data: The data to be sent for POST method. This param is ignored for GET requests.
:return: The json response from the API call or False.
'''
if not query_params:
query_params = {}
ret = {'data': '',
'res': True}
if not token:
token = _get_token()
headers = {"X-Consul-Token": token, "Content-Type": "application/json"}
base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version))
url = urllib.parse.urljoin(base_url, function, False)
if method == 'GET':
data = None
else:
if data is None:
data = {}
data = salt.utils.json.dumps(data)
result = salt.utils.http.query(
url,
method=method,
params=query_params,
data=data,
decode=True,
status=True,
header_dict=headers,
opts=__opts__,
)
if result.get('status', None) == http_client.OK:
ret['data'] = result.get('dict', result)
ret['res'] = True
elif result.get('status', None) == http_client.NO_CONTENT:
ret['res'] = False
elif result.get('status', None) == http_client.NOT_FOUND:
ret['data'] = 'Key not found.'
ret['res'] = False
else:
if result:
ret['data'] = result
ret['res'] = True
else:
ret['res'] = False
return ret | Consul object method function to construct and execute on the API URL.
:param api_url: The Consul api url.
:param api_version The Consul api version
:param function: The Consul api function to perform.
:param method: The HTTP method, e.g. GET or POST.
:param data: The data to be sent for POST method. This param is ignored for GET requests.
:return: The json response from the API call or False. | Below is the instruction that describes the task:
### Input:
Consul object method function to construct and execute on the API URL.
:param api_url: The Consul api url.
:param api_version The Consul api version
:param function: The Consul api function to perform.
:param method: The HTTP method, e.g. GET or POST.
:param data: The data to be sent for POST method. This param is ignored for GET requests.
:return: The json response from the API call or False.
### Response:
def _query(function,
consul_url,
token=None,
method='GET',
api_version='v1',
data=None,
query_params=None):
'''
Consul object method function to construct and execute on the API URL.
:param api_url: The Consul api url.
:param api_version The Consul api version
:param function: The Consul api function to perform.
:param method: The HTTP method, e.g. GET or POST.
:param data: The data to be sent for POST method. This param is ignored for GET requests.
:return: The json response from the API call or False.
'''
if not query_params:
query_params = {}
ret = {'data': '',
'res': True}
if not token:
token = _get_token()
headers = {"X-Consul-Token": token, "Content-Type": "application/json"}
base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version))
url = urllib.parse.urljoin(base_url, function, False)
if method == 'GET':
data = None
else:
if data is None:
data = {}
data = salt.utils.json.dumps(data)
result = salt.utils.http.query(
url,
method=method,
params=query_params,
data=data,
decode=True,
status=True,
header_dict=headers,
opts=__opts__,
)
if result.get('status', None) == http_client.OK:
ret['data'] = result.get('dict', result)
ret['res'] = True
elif result.get('status', None) == http_client.NO_CONTENT:
ret['res'] = False
elif result.get('status', None) == http_client.NOT_FOUND:
ret['data'] = 'Key not found.'
ret['res'] = False
else:
if result:
ret['data'] = result
ret['res'] = True
else:
ret['res'] = False
return ret |
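A hedged usage sketch of the private _query() helper, assuming it is called from within the Salt consul execution module (it relies on the module-level __opts__ and _get_token()); the Consul address, token, and key path are placeholders.

# GET a key from Consul's KV store through the helper above
ret = _query('kv/web/config', 'http://127.0.0.1:8500', token='anonymous')
if ret['res']:
    print(ret['data'])            # decoded JSON response from Consul
else:
    print('lookup failed:', ret['data'])

# A write goes through the same helper with a method and payload
ret = _query('kv/web/config', 'http://127.0.0.1:8500', token='anonymous',
             method='PUT', data='some value')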
def plot_hpd(
x,
y,
credible_interval=0.94,
color="C1",
circular=False,
smooth=True,
smooth_kwargs=None,
fill_kwargs=None,
plot_kwargs=None,
ax=None,
):
"""
Plot hpd intervals for regression data.
Parameters
----------
x : array-like
Values to plot
y : array-like
values from which to compute the hpd
credible_interval : float, optional
Credible interval to plot. Defaults to 0.94.
color : str
Color used for the limits of the HPD interval and fill. Should be a valid matplotlib color
circular : bool, optional
Whether to compute the hpd taking into account `x` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
smooth : boolean
If True the result will be smoothed by first computing a linear interpolation of the data
over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.
Defaults to True.
smooth_kwargs : dict, optional
Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for
details
fill_kwargs : dict
Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
plot_kwargs : dict
Keywords passed to HPD limits
ax : matplotlib axes
Returns
-------
ax : matplotlib axes
"""
if plot_kwargs is None:
plot_kwargs = {}
plot_kwargs.setdefault("color", color)
plot_kwargs.setdefault("alpha", 0)
if fill_kwargs is None:
fill_kwargs = {}
fill_kwargs.setdefault("color", color)
fill_kwargs.setdefault("alpha", 0.5)
if ax is None:
ax = gca()
hpd_ = hpd(y, credible_interval=credible_interval, circular=circular)
if smooth:
if smooth_kwargs is None:
smooth_kwargs = {}
smooth_kwargs.setdefault("window_length", 55)
smooth_kwargs.setdefault("polyorder", 2)
x_data = np.linspace(x.min(), x.max(), 200)
hpd_interp = griddata(x, hpd_, x_data)
y_data = savgol_filter(hpd_interp, axis=0, **smooth_kwargs)
else:
idx = np.argsort(x)
x_data = x[idx]
y_data = hpd_[idx]
ax.plot(x_data, y_data, **plot_kwargs)
ax.fill_between(x_data, y_data[:, 0], y_data[:, 1], **fill_kwargs)
return ax | Plot hpd intervals for regression data.
Parameters
----------
x : array-like
Values to plot
y : array-like
values from which to compute the hpd
credible_interval : float, optional
Credible interval to plot. Defaults to 0.94.
color : str
Color used for the limits of the HPD interval and fill. Should be a valid matplotlib color
circular : bool, optional
Whether to compute the hpd taking into account `x` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
smooth : boolean
If True the result will be smoothed by first computing a linear interpolation of the data
over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.
Defaults to True.
smooth_kwargs : dict, optional
Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for
details
fill_kwargs : dict
Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
plot_kwargs : dict
Keywords passed to HPD limits
ax : matplotlib axes
Returns
-------
ax : matplotlib axes | Below is the instruction that describes the task:
### Input:
Plot hpd intervals for regression data.
Parameters
----------
x : array-like
Values to plot
y : array-like
values from which to compute the hpd
credible_interval : float, optional
Credible interval to plot. Defaults to 0.94.
color : str
Color used for the limits of the HPD interval and fill. Should be a valid matplotlib color
circular : bool, optional
Whether to compute the hpd taking into account `x` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
smooth : boolean
If True the result will be smoothed by first computing a linear interpolation of the data
over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.
Defaults to True.
smooth_kwargs : dict, optional
Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for
details
fill_kwargs : dict
Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
plot_kwargs : dict
Keywords passed to HPD limits
ax : matplotlib axes
Returns
-------
ax : matplotlib axes
### Response:
def plot_hpd(
x,
y,
credible_interval=0.94,
color="C1",
circular=False,
smooth=True,
smooth_kwargs=None,
fill_kwargs=None,
plot_kwargs=None,
ax=None,
):
"""
Plot hpd intervals for regression data.
Parameters
----------
x : array-like
Values to plot
y : array-like
values from which to compute the hpd
credible_interval : float, optional
Credible interval to plot. Defaults to 0.94.
color : str
Color used for the limits of the HPD interval and fill. Should be a valid matplotlib color
circular : bool, optional
Whether to compute the hpd taking into account `x` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
smooth : boolean
If True the result will be smoothed by first computing a linear interpolation of the data
over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.
Defaults to True.
smooth_kwargs : dict, optional
Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for
details
fill_kwargs : dict
Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
plot_kwargs : dict
Keywords passed to HPD limits
ax : matplotlib axes
Returns
-------
ax : matplotlib axes
"""
if plot_kwargs is None:
plot_kwargs = {}
plot_kwargs.setdefault("color", color)
plot_kwargs.setdefault("alpha", 0)
if fill_kwargs is None:
fill_kwargs = {}
fill_kwargs.setdefault("color", color)
fill_kwargs.setdefault("alpha", 0.5)
if ax is None:
ax = gca()
hpd_ = hpd(y, credible_interval=credible_interval, circular=circular)
if smooth:
if smooth_kwargs is None:
smooth_kwargs = {}
smooth_kwargs.setdefault("window_length", 55)
smooth_kwargs.setdefault("polyorder", 2)
x_data = np.linspace(x.min(), x.max(), 200)
hpd_interp = griddata(x, hpd_, x_data)
y_data = savgol_filter(hpd_interp, axis=0, **smooth_kwargs)
else:
idx = np.argsort(x)
x_data = x[idx]
y_data = hpd_[idx]
ax.plot(x_data, y_data, **plot_kwargs)
ax.fill_between(x_data, y_data[:, 0], y_data[:, 1], **fill_kwargs)
return ax |
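A small usage sketch for plot_hpd() with synthetic draws: x is the predictor and y stacks posterior draws of the fitted curve, one row per draw, so the HPD band is computed across rows.

import numpy as np

x = np.linspace(0, 10, 100)
# 500 fake posterior draws of a noisy linear relationship, shape (500, 100)
y = 2.0 * x + np.random.normal(0.0, 1.0, size=(500, x.size))

ax = plot_hpd(x, y, credible_interval=0.9, color="C1", smooth=True)
ax.plot(x, y.mean(axis=0), color="k")   # overlay the posterior mean curve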
def add_interrupt_callback(gpio_id, callback, edge='both', \
pull_up_down=PUD_OFF, threaded_callback=False, \
debounce_timeout_ms=None):
"""
Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
If debounce_timeout_ms is set, new interrupts will not be forwarded
until after the specified amount of milliseconds.
"""
_rpio.add_interrupt_callback(gpio_id, callback, edge, pull_up_down, \
threaded_callback, debounce_timeout_ms) | Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
If debounce_timeout_ms is set, new interrupts will not be forwarded
until after the specified amount of milliseconds. | Below is the instruction that describes the task:
### Input:
Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
If debounce_timeout_ms is set, new interrupts will not be forwarded
until after the specified amount of milliseconds.
### Response:
def add_interrupt_callback(gpio_id, callback, edge='both', \
pull_up_down=PUD_OFF, threaded_callback=False, \
debounce_timeout_ms=None):
"""
Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
If debounce_timeout_ms is set, new interrupts will not be forwarded
until after the specified amount of milliseconds.
"""
_rpio.add_interrupt_callback(gpio_id, callback, edge, pull_up_down, \
threaded_callback, debounce_timeout_ms) |
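A minimal usage sketch assuming the RPIO package: register a callback on a GPIO pin and then hand control to RPIO's interrupt loop. The pin number is arbitrary, and wait_for_interrupts() is assumed to be the companion call since it does not appear in the snippet above.

import RPIO

def gpio_callback(gpio_id, value):
    # called whenever the pin sees the requested edge
    print("GPIO %s changed to %s" % (gpio_id, value))

RPIO.add_interrupt_callback(17, gpio_callback, edge='rising',
                            pull_up_down=RPIO.PUD_DOWN,
                            threaded_callback=True,
                            debounce_timeout_ms=100)
RPIO.wait_for_interrupts()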
def p_created_1(self, p):
"""created : CREATED DATE"""
try:
if six.PY2:
value = p[2].decode(encoding='utf-8')
else:
value = p[2]
self.builder.set_created_date(self.document, value)
except CardinalityError:
self.more_than_one_error('Created', p.lineno(1)) | created : CREATED DATE | Below is the instruction that describes the task:
### Input:
created : CREATED DATE
### Response:
def p_created_1(self, p):
"""created : CREATED DATE"""
try:
if six.PY2:
value = p[2].decode(encoding='utf-8')
else:
value = p[2]
self.builder.set_created_date(self.document, value)
except CardinalityError:
self.more_than_one_error('Created', p.lineno(1)) |
def string_value(node):
"""Compute the string-value of a node."""
if (node.nodeType == node.DOCUMENT_NODE or
node.nodeType == node.ELEMENT_NODE):
s = u''
for n in axes['descendant'](node):
if n.nodeType == n.TEXT_NODE:
s += n.data
return s
elif node.nodeType == node.ATTRIBUTE_NODE:
return node.value
elif (node.nodeType == node.PROCESSING_INSTRUCTION_NODE or
node.nodeType == node.COMMENT_NODE or
node.nodeType == node.TEXT_NODE):
return node.data | Compute the string-value of a node. | Below is the instruction that describes the task:
### Input:
Compute the string-value of a node.
### Response:
def string_value(node):
"""Compute the string-value of a node."""
if (node.nodeType == node.DOCUMENT_NODE or
node.nodeType == node.ELEMENT_NODE):
s = u''
for n in axes['descendant'](node):
if n.nodeType == n.TEXT_NODE:
s += n.data
return s
elif node.nodeType == node.ATTRIBUTE_NODE:
return node.value
elif (node.nodeType == node.PROCESSING_INSTRUCTION_NODE or
node.nodeType == node.COMMENT_NODE or
node.nodeType == node.TEXT_NODE):
return node.data |
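A small usage sketch for string_value(), assuming the surrounding module's axes table is in scope alongside it: the string-value of an element is the concatenation of all descendant text nodes.

from xml.dom.minidom import parseString

doc = parseString("<p>Hello, <b>world</b>!</p>")
print(string_value(doc.documentElement))   # -> "Hello, world!"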
def list_replica_set_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_replica_set_for_all_namespaces # noqa: E501
list or watch objects of kind ReplicaSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_replica_set_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1ReplicaSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_replica_set_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_replica_set_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
return data | list_replica_set_for_all_namespaces # noqa: E501
list or watch objects of kind ReplicaSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_replica_set_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1ReplicaSetList
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
list_replica_set_for_all_namespaces # noqa: E501
list or watch objects of kind ReplicaSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_replica_set_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1ReplicaSetList
If the method is called asynchronously,
returns the request thread.
### Response:
def list_replica_set_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_replica_set_for_all_namespaces # noqa: E501
list or watch objects of kind ReplicaSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_replica_set_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1ReplicaSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_replica_set_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_replica_set_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
return data |
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error) | Shifts a random value for a supplied bee in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position) | Below is the instruction that describes the task:
### Input:
Shifts a random value for a supplied bee in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
### Response:
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error) |
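The onlooker's calculate_positions helper is not shown in this row, so the sketch below uses the textbook artificial-bee-colony update as a stand-in; the helper name, the clamping, and the example range are assumptions rather than ECabc's actual implementation.

import random

# Hypothetical stand-in for calculate_positions: the usual ABC update
# v = x_i + phi * (x_i - x_k) with phi drawn from [-1, 1], clamped to the
# dimension's allowed range.
def candidate_position(x_i, x_k, value_range):
    low, high = value_range
    phi = random.uniform(-1.0, 1.0)
    v = x_i + phi * (x_i - x_k)
    return min(max(v, low), high)

print(candidate_position(0.4, 0.9, (0.0, 1.0)))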
def map_sid2sub(self, sid, sub):
"""
Store the connection between a Session ID and a subject ID.
:param sid: Session ID
:param sub: subject ID
"""
self.set('sid2sub', sid, sub)
self.set('sub2sid', sub, sid) | Store the connection between a Session ID and a subject ID.
:param sid: Session ID
:param sub: subject ID | Below is the instruction that describes the task:
### Input:
Store the connection between a Session ID and a subject ID.
:param sid: Session ID
:param sub: subject ID
### Response:
def map_sid2sub(self, sid, sub):
"""
Store the connection between a Session ID and a subject ID.
:param sid: Session ID
:param sub: subject ID
"""
self.set('sid2sub', sid, sub)
self.set('sub2sid', sub, sid) |
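A minimal sketch of the two-way lookup this method maintains, assuming a dict-backed store; the real class presumably delegates set()/get() to a pluggable backend and may keep several session IDs per subject, which this stand-in ignores.

class SessionMap:
    # Dict-backed stand-in for the storage interface used above.
    def __init__(self):
        self._db = {'sid2sub': {}, 'sub2sid': {}}

    def set(self, table, key, value):
        self._db[table][key] = value

    def get(self, table, key):
        return self._db[table].get(key)

    def map_sid2sub(self, sid, sub):
        self.set('sid2sub', sid, sub)
        self.set('sub2sid', sub, sid)

m = SessionMap()
m.map_sid2sub('sess-1', 'alice')
print(m.get('sub2sid', 'alice'))  # -> 'sess-1'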
def draw(self):
"""Draws cell content to context"""
# Content is only rendered within rect
self.context.save()
self.context.rectangle(*self.rect)
self.context.clip()
content = self.get_cell_content()
pos_x, pos_y = self.rect[:2]
self.context.translate(pos_x + 2, pos_y + 2)
cell_attributes = self.code_array.cell_attributes
# Do not draw cell content if cell is too small
# This allows blending out small cells by reducing height to 0
if self.rect[2] < cell_attributes[self.key]["borderwidth_right"] or \
self.rect[3] < cell_attributes[self.key]["borderwidth_bottom"]:
self.context.restore()
return
if self.code_array.cell_attributes[self.key]["button_cell"]:
# Render a button instead of the cell
label = self.code_array.cell_attributes[self.key]["button_cell"]
self.draw_button(1, 1, self.rect[2]-5, self.rect[3]-5, label)
elif isinstance(content, wx._gdi.Bitmap):
# A bitmap is returned --> Draw it!
self.draw_bitmap(content)
elif pyplot is not None and isinstance(content, pyplot.Figure):
# A matplotlib figure is returned --> Draw it!
self.draw_matplotlib_figure(content)
elif isinstance(content, basestring) and is_svg(content):
# The content is a valid SVG XML string
self.draw_svg(content)
elif content is not None:
self.draw_text(content)
self.context.translate(-pos_x - 2, -pos_y - 2)
# Remove clipping to rect
self.context.restore() | Draws cell content to context | Below is the instruction that describes the task:
### Input:
Draws cell content to context
### Response:
def draw(self):
"""Draws cell content to context"""
# Content is only rendered within rect
self.context.save()
self.context.rectangle(*self.rect)
self.context.clip()
content = self.get_cell_content()
pos_x, pos_y = self.rect[:2]
self.context.translate(pos_x + 2, pos_y + 2)
cell_attributes = self.code_array.cell_attributes
# Do not draw cell content if cell is too small
# This allows blending out small cells by reducing height to 0
if self.rect[2] < cell_attributes[self.key]["borderwidth_right"] or \
self.rect[3] < cell_attributes[self.key]["borderwidth_bottom"]:
self.context.restore()
return
if self.code_array.cell_attributes[self.key]["button_cell"]:
# Render a button instead of the cell
label = self.code_array.cell_attributes[self.key]["button_cell"]
self.draw_button(1, 1, self.rect[2]-5, self.rect[3]-5, label)
elif isinstance(content, wx._gdi.Bitmap):
# A bitmap is returned --> Draw it!
self.draw_bitmap(content)
elif pyplot is not None and isinstance(content, pyplot.Figure):
# A matplotlib figure is returned --> Draw it!
self.draw_matplotlib_figure(content)
elif isinstance(content, basestring) and is_svg(content):
# The content is a valid SVG XML string
self.draw_svg(content)
elif content is not None:
self.draw_text(content)
self.context.translate(-pos_x - 2, -pos_y - 2)
# Remove clipping to rect
self.context.restore() |
def write(self):
'''write status to status.txt'''
f = open('status.txt', mode='w')
self.show(f)
f.close() | write status to status.txt | Below is the instruction that describes the task:
### Input:
write status to status.txt
### Response:
def write(self):
'''write status to status.txt'''
f = open('status.txt', mode='w')
self.show(f)
f.close() |
def _format_templates(name, command, templates):
''' Creates a list-table directive
for a set of defined environment variables
Parameters:
name (str):
The name of the config section
command (object):
The sdss_access path instance
templates (dict):
A dictionary of the path templates
Yields:
A string rst-formated list-table directive
'''
yield '.. list-table:: {0}'.format(name)
yield _indent(':widths: 20 50 70')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Template')
yield _indent(' - Kwargs')
for key, var in templates.items():
kwargs = command.lookup_keys(key)
yield _indent('* - {0}'.format(key))
yield _indent(' - {0}'.format(var))
yield _indent(' - {0}'.format(', '.join(kwargs)))
yield '' | Creates a list-table directive
for a set of defined environment variables
Parameters:
name (str):
The name of the config section
command (object):
The sdss_access path instance
templates (dict):
A dictionary of the path templates
Yields:
A string rst-formatted list-table directive | Below is the instruction that describes the task:
### Input:
Creates a list-table directive
for a set of defined environment variables
Parameters:
name (str):
The name of the config section
command (object):
The sdss_access path instance
templates (dict):
A dictionary of the path templates
Yields:
A string rst-formatted list-table directive
### Response:
def _format_templates(name, command, templates):
''' Creates a list-table directive
for a set of defined environment variables
Parameters:
name (str):
The name of the config section
command (object):
The sdss_access path instance
templates (dict):
A dictionary of the path templates
Yields:
A string rst-formatted list-table directive
'''
yield '.. list-table:: {0}'.format(name)
yield _indent(':widths: 20 50 70')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Template')
yield _indent(' - Kwargs')
for key, var in templates.items():
kwargs = command.lookup_keys(key)
yield _indent('* - {0}'.format(key))
yield _indent(' - {0}'.format(var))
yield _indent(' - {0}'.format(', '.join(kwargs)))
yield '' |
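A hedged driver for the generator above, assuming _format_templates and its module-level _indent helper are importable; the stub path object and the example template are invented for illustration and only provide the lookup_keys call the generator needs.

class StubPath:
    # Hypothetical stand-in for the sdss_access path instance.
    def lookup_keys(self, key):
        return ['run', 'camcol', 'field']

templates = {'frame': '{run}/{camcol}/frame-{field}.fits'}
rst = '\n'.join(_format_templates('example_section', StubPath(), templates))
print(rst)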
def get_exchange_rate(self, base, target, raise_errors=True):
"""Return the ::base:: to ::target:: exchange rate."""
assert base and target
base, target = base.lower(), target.lower()
r = self.session.get(API_SIMPLE_TICKER.format(base, target))
if r.status_code != requests.codes.ok:
if not raise_errors:
return None
raise CryptonatorException(
("An error occurred while getting requested exchange rate "
"({} from Cryptonator).").format(r.status_code)
)
j = r.json()
if not j['success'] or j['error']:
if not raise_errors:
return None
raise CryptonatorException(
("An error occurred while getting requested exchange rate ({}, {})"
"('{}').").format(base, target, j['error'])
)
return float(j['ticker']['price']) | Return the ::base:: to ::target:: exchange rate. | Below is the instruction that describes the task:
### Input:
Return the ::base:: to ::target:: exchange rate.
### Response:
def get_exchange_rate(self, base, target, raise_errors=True):
"""Return the ::base:: to ::target:: exchange rate."""
assert base and target
base, target = base.lower(), target.lower()
r = self.session.get(API_SIMPLE_TICKER.format(base, target))
if r.status_code != requests.codes.ok:
if not raise_errors:
return None
raise CryptonatorException(
("An error occurred while getting requested exchange rate "
"({} from Cryptonator).").format(r.status_code)
)
j = r.json()
if not j['success'] or j['error']:
if not raise_errors:
return None
raise CryptonatorException(
("An error occurred while getting requested exchange rate ({}, {})"
"('{}').").format(base, target, j['error'])
)
return float(j['ticker']['price']) |
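The JSON handling above can be exercised without a live HTTP call; the sketch below replays the same success/error/price checks against a hard-coded payload whose shape mirrors what the method expects (the numbers are invented).

payload = {'success': True, 'error': '', 'ticker': {'price': '64123.5'}}

# Same acceptance logic as above: bail out unless success is set and the
# error field is empty, then coerce the quoted price to float.
if payload['success'] and not payload['error']:
    rate = float(payload['ticker']['price'])
    print(rate)  # -> 64123.5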
def killJobs(self, jobsToKill):
"""
Kills the given set of jobs and then sends them for processing
"""
if len(jobsToKill) > 0:
self.batchSystem.killBatchJobs(jobsToKill)
for jobBatchSystemID in jobsToKill:
self.processFinishedJob(jobBatchSystemID, 1) | Kills the given set of jobs and then sends them for processing | Below is the instruction that describes the task:
### Input:
Kills the given set of jobs and then sends them for processing
### Response:
def killJobs(self, jobsToKill):
"""
Kills the given set of jobs and then sends them for processing
"""
if len(jobsToKill) > 0:
self.batchSystem.killBatchJobs(jobsToKill)
for jobBatchSystemID in jobsToKill:
self.processFinishedJob(jobBatchSystemID, 1) |
def _iter_text_wave(
self, text, numbers, step=1,
fore=None, back=None, style=None, rgb_mode=False):
""" Yield colorized characters from `text`, using a wave of `numbers`.
Arguments:
text : String to be colorized.
numbers : A list/tuple of numbers (256 colors).
step : Number of characters to colorize per color.
fore : Fore color to use (name or number).
(Back will be gradient)
back : Background color to use (name or number).
(Fore will be gradient)
style : Style name to use.
rgb_mode : Use number for rgb value.
This should never be used when the numbers
are rgb values themselves.
"""
if fore and back:
raise ValueError('Both fore and back colors cannot be specified.')
pos = 0
end = len(text)
numbergen = self._iter_wave(numbers)
def make_color(n):
try:
r, g, b = n
except TypeError:
if rgb_mode:
return n, n, n
return n
return r, g, b
for value in numbergen:
lastchar = pos + step
yield self.color(
text[pos:lastchar],
fore=make_color(value) if fore is None else fore,
back=make_color(value) if fore is not None else back,
style=style
)
if lastchar >= end:
numbergen.send(True)
pos = lastchar | Yield colorized characters from `text`, using a wave of `numbers`.
Arguments:
text : String to be colorized.
numbers : A list/tuple of numbers (256 colors).
step : Number of characters to colorize per color.
fore : Fore color to use (name or number).
(Back will be gradient)
back : Background color to use (name or number).
(Fore will be gradient)
style : Style name to use.
rgb_mode : Use number for rgb value.
This should never be used when the numbers
are rgb values themselves. | Below is the instruction that describes the task:
### Input:
Yield colorized characters from `text`, using a wave of `numbers`.
Arguments:
text : String to be colorized.
numbers : A list/tuple of numbers (256 colors).
step : Number of characters to colorize per color.
fore : Fore color to use (name or number).
(Back will be gradient)
back : Background color to use (name or number).
(Fore will be gradient)
style : Style name to use.
rgb_mode : Use number for rgb value.
This should never be used when the numbers
are rgb values themselves.
### Response:
def _iter_text_wave(
self, text, numbers, step=1,
fore=None, back=None, style=None, rgb_mode=False):
""" Yield colorized characters from `text`, using a wave of `numbers`.
Arguments:
text : String to be colorized.
numbers : A list/tuple of numbers (256 colors).
step : Number of characters to colorize per color.
fore : Fore color to use (name or number).
(Back will be gradient)
back : Background color to use (name or number).
(Fore will be gradient)
style : Style name to use.
rgb_mode : Use number for rgb value.
This should never be used when the numbers
are rgb values themselves.
"""
if fore and back:
raise ValueError('Both fore and back colors cannot be specified.')
pos = 0
end = len(text)
numbergen = self._iter_wave(numbers)
def make_color(n):
try:
r, g, b = n
except TypeError:
if rgb_mode:
return n, n, n
return n
return r, g, b
for value in numbergen:
lastchar = pos + step
yield self.color(
text[pos:lastchar],
fore=make_color(value) if fore is None else fore,
back=make_color(value) if fore is not None else back,
style=style
)
if lastchar >= end:
numbergen.send(True)
pos = lastchar |
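As a rough standalone illustration of the chunk-per-color idea (not this class's API, and it simply cycles through the palette rather than reproducing _iter_wave's bounce), one could pair fixed-size slices of the text with successive color numbers:

from itertools import cycle

def text_wave(text, numbers, step=1):
    # Hypothetical helper: yield (chunk, color_number) pairs, cycling the
    # palette; the real method bounces back and forth instead.
    palette = cycle(numbers)
    for pos in range(0, len(text), step):
        yield text[pos:pos + step], next(palette)

print(list(text_wave('gradient', [196, 202, 208, 214], step=2)))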
def page(self, end_date=values.unset, event_type=values.unset,
minutes=values.unset, reservation_sid=values.unset,
start_date=values.unset, task_queue_sid=values.unset,
task_sid=values.unset, worker_sid=values.unset,
workflow_sid=values.unset, task_channel=values.unset, sid=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of EventInstance records from the API.
Request is executed immediately
:param datetime end_date: Filter events by an end date.
:param unicode event_type: Filter events by those of a certain event type
:param unicode minutes: Filter events by up to 'x' minutes in the past.
:param unicode reservation_sid: Filter events by those pertaining to a particular reservation
:param datetime start_date: Filter events by a start date.
:param unicode task_queue_sid: Filter events by those pertaining to a particular queue
:param unicode task_sid: Filter events by those pertaining to a particular task
:param unicode worker_sid: Filter events by those pertaining to a particular worker
:param unicode workflow_sid: Filter events by those pertaining to a particular workflow
:param unicode task_channel: Filter events by those pertaining to a particular task channel
:param unicode sid: Filter events by those pertaining to a particular event
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of EventInstance
:rtype: twilio.rest.taskrouter.v1.workspace.event.EventPage
"""
params = values.of({
'EndDate': serialize.iso8601_datetime(end_date),
'EventType': event_type,
'Minutes': minutes,
'ReservationSid': reservation_sid,
'StartDate': serialize.iso8601_datetime(start_date),
'TaskQueueSid': task_queue_sid,
'TaskSid': task_sid,
'WorkerSid': worker_sid,
'WorkflowSid': workflow_sid,
'TaskChannel': task_channel,
'Sid': sid,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return EventPage(self._version, response, self._solution) | Retrieve a single page of EventInstance records from the API.
Request is executed immediately
:param datetime end_date: Filter events by an end date.
:param unicode event_type: Filter events by those of a certain event type
:param unicode minutes: Filter events by up to 'x' minutes in the past.
:param unicode reservation_sid: Filter events by those pertaining to a particular reservation
:param datetime start_date: Filter events by a start date.
:param unicode task_queue_sid: Filter events by those pertaining to a particular queue
:param unicode task_sid: Filter events by those pertaining to a particular task
:param unicode worker_sid: Filter events by those pertaining to a particular worker
:param unicode workflow_sid: Filter events by those pertaining to a particular workflow
:param unicode task_channel: Filter events by those pertaining to a particular task channel
:param unicode sid: Filter events by those pertaining to a particular event
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of EventInstance
:rtype: twilio.rest.taskrouter.v1.workspace.event.EventPage | Below is the instruction that describes the task:
### Input:
Retrieve a single page of EventInstance records from the API.
Request is executed immediately
:param datetime end_date: Filter events by an end date.
:param unicode event_type: Filter events by those of a certain event type
:param unicode minutes: Filter events by up to 'x' minutes in the past.
:param unicode reservation_sid: Filter events by those pertaining to a particular reservation
:param datetime start_date: Filter events by a start date.
:param unicode task_queue_sid: Filter events by those pertaining to a particular queue
:param unicode task_sid: Filter events by those pertaining to a particular task
:param unicode worker_sid: Filter events by those pertaining to a particular worker
:param unicode workflow_sid: Filter events by those pertaining to a particular workflow
:param unicode task_channel: Filter events by those pertaining to a particular task channel
:param unicode sid: Filter events by those pertaining to a particular event
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of EventInstance
:rtype: twilio.rest.taskrouter.v1.workspace.event.EventPage
### Response:
def page(self, end_date=values.unset, event_type=values.unset,
minutes=values.unset, reservation_sid=values.unset,
start_date=values.unset, task_queue_sid=values.unset,
task_sid=values.unset, worker_sid=values.unset,
workflow_sid=values.unset, task_channel=values.unset, sid=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of EventInstance records from the API.
Request is executed immediately
:param datetime end_date: Filter events by an end date.
:param unicode event_type: Filter events by those of a certain event type
:param unicode minutes: Filter events by up to 'x' minutes in the past.
:param unicode reservation_sid: Filter events by those pertaining to a particular reservation
:param datetime start_date: Filter events by a start date.
:param unicode task_queue_sid: Filter events by those pertaining to a particular queue
:param unicode task_sid: Filter events by those pertaining to a particular task
:param unicode worker_sid: Filter events by those pertaining to a particular worker
:param unicode workflow_sid: Filter events by those pertaining to a particular workflow
:param unicode task_channel: Filter events by those pertaining to a particular task channel
:param unicode sid: Filter events by those pertaining to a particular event
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of EventInstance
:rtype: twilio.rest.taskrouter.v1.workspace.event.EventPage
"""
params = values.of({
'EndDate': serialize.iso8601_datetime(end_date),
'EventType': event_type,
'Minutes': minutes,
'ReservationSid': reservation_sid,
'StartDate': serialize.iso8601_datetime(start_date),
'TaskQueueSid': task_queue_sid,
'TaskSid': task_sid,
'WorkerSid': worker_sid,
'WorkflowSid': workflow_sid,
'TaskChannel': task_channel,
'Sid': sid,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return EventPage(self._version, response, self._solution) |
def parse_reference_line(ref_line, kbs, bad_titles_count={}, linker_callback=None):
"""Parse one reference line
@input a string representing a single reference bullet
@output parsed references (a list of elements objects)
"""
# Strip the 'marker' (e.g. [1]) from this reference line:
line_marker, ref_line = remove_reference_line_marker(ref_line)
# Find DOI sections in citation
ref_line, identified_dois = identify_and_tag_DOI(ref_line)
# Identify and replace URLs in the line:
ref_line, identified_urls = identify_and_tag_URLs(ref_line)
# Tag <cds.JOURNAL>, etc.
tagged_line, bad_titles_count = tag_reference_line(ref_line,
kbs,
bad_titles_count)
# Debug print tagging (authors, titles, volumes, etc.)
LOGGER.debug("tags %r", tagged_line)
# Using the recorded information, create a MARC XML representation
# of the rebuilt line:
# At the same time, get stats of citations found in the reference line
# (titles, urls, etc):
citation_elements, line_marker, counts = \
parse_tagged_reference_line(line_marker,
tagged_line,
identified_dois,
identified_urls)
# Transformations on elements
split_volume_from_journal(citation_elements)
format_volume(citation_elements)
handle_special_journals(citation_elements, kbs)
format_report_number(citation_elements)
format_author_ed(citation_elements)
look_for_books(citation_elements, kbs)
format_hep(citation_elements)
remove_b_for_nucl_phys(citation_elements)
mangle_volume(citation_elements)
arxiv_urls_to_report_numbers(citation_elements)
look_for_hdl(citation_elements)
look_for_hdl_urls(citation_elements)
# Link references if desired
if linker_callback:
associate_recids(citation_elements, linker_callback)
# Split the reference in multiple ones if needed
splitted_citations = split_citations(citation_elements)
# Look for implied ibids
look_for_implied_ibids(splitted_citations)
# Find year
add_year_elements(splitted_citations)
# Look for books in misc field
look_for_undetected_books(splitted_citations, kbs)
if linker_callback:
# Link references with the newly added ibids/books information
for citations in splitted_citations:
associate_recids(citations, linker_callback)
# FIXME: Needed?
# Remove references with only misc text
# splitted_citations = remove_invalid_references(splitted_citations)
# Merge references with only misc text
# splitted_citations = merge_invalid_references(splitted_citations)
remove_duplicated_authors(splitted_citations)
remove_duplicated_dois(splitted_citations)
remove_duplicated_collaborations(splitted_citations)
add_recid_elements(splitted_citations)
# For debugging purposes
print_citations(splitted_citations, line_marker)
return splitted_citations, line_marker, counts, bad_titles_count | Parse one reference line
@input a string representing a single reference bullet
@output parsed references (a list of elements objects) | Below is the instruction that describes the task:
### Input:
Parse one reference line
@input a string representing a single reference bullet
@output parsed references (a list of elements objects)
### Response:
def parse_reference_line(ref_line, kbs, bad_titles_count={}, linker_callback=None):
"""Parse one reference line
@input a string representing a single reference bullet
@output parsed references (a list of elements objects)
"""
# Strip the 'marker' (e.g. [1]) from this reference line:
line_marker, ref_line = remove_reference_line_marker(ref_line)
# Find DOI sections in citation
ref_line, identified_dois = identify_and_tag_DOI(ref_line)
# Identify and replace URLs in the line:
ref_line, identified_urls = identify_and_tag_URLs(ref_line)
# Tag <cds.JOURNAL>, etc.
tagged_line, bad_titles_count = tag_reference_line(ref_line,
kbs,
bad_titles_count)
# Debug print tagging (authors, titles, volumes, etc.)
LOGGER.debug("tags %r", tagged_line)
# Using the recorded information, create a MARC XML representation
# of the rebuilt line:
# At the same time, get stats of citations found in the reference line
# (titles, urls, etc):
citation_elements, line_marker, counts = \
parse_tagged_reference_line(line_marker,
tagged_line,
identified_dois,
identified_urls)
# Transformations on elements
split_volume_from_journal(citation_elements)
format_volume(citation_elements)
handle_special_journals(citation_elements, kbs)
format_report_number(citation_elements)
format_author_ed(citation_elements)
look_for_books(citation_elements, kbs)
format_hep(citation_elements)
remove_b_for_nucl_phys(citation_elements)
mangle_volume(citation_elements)
arxiv_urls_to_report_numbers(citation_elements)
look_for_hdl(citation_elements)
look_for_hdl_urls(citation_elements)
# Link references if desired
if linker_callback:
associate_recids(citation_elements, linker_callback)
# Split the reference in multiple ones if needed
splitted_citations = split_citations(citation_elements)
# Look for implied ibids
look_for_implied_ibids(splitted_citations)
# Find year
add_year_elements(splitted_citations)
# Look for books in misc field
look_for_undetected_books(splitted_citations, kbs)
if linker_callback:
# Link references with the newly added ibids/books information
for citations in splitted_citations:
associate_recids(citations, linker_callback)
# FIXME: Needed?
# Remove references with only misc text
# splitted_citations = remove_invalid_references(splitted_citations)
# Merge references with only misc text
# splitted_citations = merge_invalid_references(splitted_citations)
remove_duplicated_authors(splitted_citations)
remove_duplicated_dois(splitted_citations)
remove_duplicated_collaborations(splitted_citations)
add_recid_elements(splitted_citations)
# For debugging purposes
print_citations(splitted_citations, line_marker)
return splitted_citations, line_marker, counts, bad_titles_count |
def security_rule_get(security_rule, security_group, resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Get a security rule within a specified network security group.
:param security_rule: The name of the security rule to query.
:param security_group: The network security group containing the
security rule.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.security_rule_get testrule1 testnsg testgroup
'''
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
secrule = netconn.security_rules.get(
network_security_group_name=security_group,
resource_group_name=resource_group,
security_rule_name=security_rule
)
result = secrule.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result | .. versionadded:: 2019.2.0
Get a security rule within a specified network security group.
:param security_rule: The name of the security rule to query.
:param security_group: The network security group containing the
security rule.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.security_rule_get testrule1 testnsg testgroup | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
Get a security rule within a specified network security group.
:param security_rule: The name of the security rule to query.
:param security_group: The network security group containing the
security rule.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.security_rule_get testrule1 testnsg testgroup
### Response:
def security_rule_get(security_rule, security_group, resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Get a security rule within a specified network security group.
:param security_rule: The name of the security rule to query.
:param security_group: The network security group containing the
security rule.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.security_rule_get testrule1 testnsg testgroup
'''
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
secrule = netconn.security_rules.get(
network_security_group_name=security_group,
resource_group_name=resource_group,
security_rule_name=security_rule
)
result = secrule.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result |
def katex_rendering_delimiters(app):
"""Delimiters for rendering KaTeX math.
If no delimiters are specified in katex_options, add the
katex_inline and katex_display delimiters. See also
https://khan.github.io/KaTeX/docs/autorender.html
"""
# Return if we have user defined rendering delimiters
if 'delimiters' in app.config.katex_options:
return ''
katex_inline = [d.replace('\\', '\\\\') for d in app.config.katex_inline]
katex_display = [d.replace('\\', '\\\\') for d in app.config.katex_display]
katex_delimiters = {'inline': katex_inline, 'display': katex_display}
# Set chosen delimiters for the auto-rendering options of KaTeX
delimiters = r'''delimiters: [
{{ left: "{inline[0]}", right: "{inline[1]}", display: false }},
{{ left: "{display[0]}", right: "{display[1]}", display: true }}
]'''.format(**katex_delimiters)
return delimiters | Delimiters for rendering KaTeX math.
If no delimiters are specified in katex_options, add the
katex_inline and katex_display delimiters. See also
https://khan.github.io/KaTeX/docs/autorender.html | Below is the instruction that describes the task:
### Input:
Delimiters for rendering KaTeX math.
If no delimiters are specified in katex_options, add the
katex_inline and katex_display delimiters. See also
https://khan.github.io/KaTeX/docs/autorender.html
### Response:
def katex_rendering_delimiters(app):
"""Delimiters for rendering KaTeX math.
If no delimiters are specified in katex_options, add the
katex_inline and katex_display delimiters. See also
https://khan.github.io/KaTeX/docs/autorender.html
"""
# Return if we have user defined rendering delimiters
if 'delimiters' in app.config.katex_options:
return ''
katex_inline = [d.replace('\\', '\\\\') for d in app.config.katex_inline]
katex_display = [d.replace('\\', '\\\\') for d in app.config.katex_display]
katex_delimiters = {'inline': katex_inline, 'display': katex_display}
# Set chosen delimiters for the auto-rendering options of KaTeX
delimiters = r'''delimiters: [
{{ left: "{inline[0]}", right: "{inline[1]}", display: false }},
{{ left: "{display[0]}", right: "{display[1]}", display: true }}
]'''.format(**katex_delimiters)
return delimiters |
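To see the string the function produces, the sketch below hard-codes the common KaTeX defaults in place of app.config (the delimiter values are assumptions) and applies the same backslash escaping and template:

katex_inline = [r'\(', r'\)']
katex_display = [r'\[', r'\]']
inline = [d.replace('\\', '\\\\') for d in katex_inline]
display = [d.replace('\\', '\\\\') for d in katex_display]

# Same template as above, rendered with the stand-in delimiters.
print(r'''delimiters: [
    {{ left: "{inline[0]}", right: "{inline[1]}", display: false }},
    {{ left: "{display[0]}", right: "{display[1]}", display: true }}
]'''.format(inline=inline, display=display))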
def ReadPreprocessingInformation(self, knowledge_base):
"""Reads preprocessing information.
The preprocessing information contains the system configuration which
contains information about various system specific configuration data,
for example the user accounts.
Args:
knowledge_base (KnowledgeBase): is used to store the preprocessing
information.
"""
generator = self._GetAttributeContainers(
self._CONTAINER_TYPE_SYSTEM_CONFIGURATION)
for stream_number, system_configuration in enumerate(generator):
# TODO: replace stream_number by session_identifier.
knowledge_base.ReadSystemConfigurationArtifact(
system_configuration, session_identifier=stream_number) | Reads preprocessing information.
The preprocessing information contains the system configuration which
contains information about various system specific configuration data,
for example the user accounts.
Args:
knowledge_base (KnowledgeBase): is used to store the preprocessing
information. | Below is the instruction that describes the task:
### Input:
Reads preprocessing information.
The preprocessing information contains the system configuration which
contains information about various system specific configuration data,
for example the user accounts.
Args:
knowledge_base (KnowledgeBase): is used to store the preprocessing
information.
### Response:
def ReadPreprocessingInformation(self, knowledge_base):
"""Reads preprocessing information.
The preprocessing information contains the system configuration which
contains information about various system specific configuration data,
for example the user accounts.
Args:
knowledge_base (KnowledgeBase): is used to store the preprocessing
information.
"""
generator = self._GetAttributeContainers(
self._CONTAINER_TYPE_SYSTEM_CONFIGURATION)
for stream_number, system_configuration in enumerate(generator):
# TODO: replace stream_number by session_identifier.
knowledge_base.ReadSystemConfigurationArtifact(
system_configuration, session_identifier=stream_number) |
def sample(self, rstate=None, return_q=False):
"""
Sample a point uniformly distributed within the *union* of ellipsoids.
Returns
-------
x : `~numpy.ndarray` with shape (ndim,)
A coordinate within the set of ellipsoids.
idx : int
The index of the ellipsoid `x` was sampled from.
q : int, optional
The number of ellipsoids `x` falls within.
"""
if rstate is None:
rstate = np.random
# If there is only one ellipsoid, sample from it.
if self.nells == 1:
x = self.ells[0].sample(rstate=rstate)
idx = 0
q = 1
if return_q:
return x, idx, q
else:
return x, idx
# Select an ellipsoid at random proportional to its volume.
idx = rstate.choice(self.nells, p=self.vols/self.vol_tot)
# Select a point from the chosen ellipsoid.
x = self.ells[idx].sample(rstate=rstate)
# Check how many ellipsoids the point lies within, passing over
# the `idx`-th ellipsoid `x` was sampled from.
q = self.overlap(x, j=idx) + 1
if return_q:
# If `q` is being returned, assume the user wants to
# explicitly apply the `1. / q` acceptance criterion to
# properly sample from the union of ellipsoids.
return x, idx, q
else:
# If `q` is not being returned, assume the user wants this
# done internally.
while rstate.rand() > (1. / q):
idx = rstate.choice(self.nells, p=self.vols/self.vol_tot)
x = self.ells[idx].sample(rstate=rstate)
q = self.overlap(x, j=idx) + 1
return x, idx | Sample a point uniformly distributed within the *union* of ellipsoids.
Returns
-------
x : `~numpy.ndarray` with shape (ndim,)
A coordinate within the set of ellipsoids.
idx : int
The index of the ellipsoid `x` was sampled from.
q : int, optional
The number of ellipsoids `x` falls within. | Below is the instruction that describes the task:
### Input:
Sample a point uniformly distributed within the *union* of ellipsoids.
Returns
-------
x : `~numpy.ndarray` with shape (ndim,)
A coordinate within the set of ellipsoids.
idx : int
The index of the ellipsoid `x` was sampled from.
q : int, optional
The number of ellipsoids `x` falls within.
### Response:
def sample(self, rstate=None, return_q=False):
"""
Sample a point uniformly distributed within the *union* of ellipsoids.
Returns
-------
x : `~numpy.ndarray` with shape (ndim,)
A coordinate within the set of ellipsoids.
idx : int
The index of the ellipsoid `x` was sampled from.
q : int, optional
The number of ellipsoids `x` falls within.
"""
if rstate is None:
rstate = np.random
# If there is only one ellipsoid, sample from it.
if self.nells == 1:
x = self.ells[0].sample(rstate=rstate)
idx = 0
q = 1
if return_q:
return x, idx, q
else:
return x, idx
# Select an ellipsoid at random proportional to its volume.
idx = rstate.choice(self.nells, p=self.vols/self.vol_tot)
# Select a point from the chosen ellipsoid.
x = self.ells[idx].sample(rstate=rstate)
# Check how many ellipsoids the point lies within, passing over
# the `idx`-th ellipsoid `x` was sampled from.
q = self.overlap(x, j=idx) + 1
if return_q:
# If `q` is being returned, assume the user wants to
# explicitly apply the `1. / q` acceptance criterion to
# properly sample from the union of ellipsoids.
return x, idx, q
else:
# If `q` is not being returned, assume the user wants this
# done internally.
while rstate.rand() > (1. / q):
idx = rstate.choice(self.nells, p=self.vols/self.vol_tot)
x = self.ells[idx].sample(rstate=rstate)
q = self.overlap(x, j=idx) + 1
return x, idx |
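The 1 / q acceptance rule is easiest to see in one dimension; the toy below (not dynesty's API) samples uniformly from the union of two overlapping intervals by picking an interval proportional to its length and keeping the draw with probability one over the number of intervals containing it.

import random

intervals = [(0.0, 2.0), (1.0, 4.0)]  # overlap on [1, 2]
lengths = [b - a for a, b in intervals]

def sample_union():
    while True:
        idx = random.choices(range(len(intervals)), weights=lengths)[0]
        a, b = intervals[idx]
        x = random.uniform(a, b)
        # q = how many intervals contain x; accept with probability 1/q so
        # points in the overlap are not over-represented.
        q = sum(1 for lo, hi in intervals if lo <= x <= hi)
        if random.random() < 1.0 / q:
            return x

print(sample_union())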
def mount(self, app=None):
"""Mounts all registered routes to a bottle.py application instance.
Args:
app (instance): A `bottle.Bottle()` application instance.
Returns:
The Router instance (for chaining purposes).
"""
for endpoint in self._routes:
endpoint.register_app(app)
return self | Mounts all registered routes to a bottle.py application instance.
Args:
app (instance): A `bottle.Bottle()` application instance.
Returns:
The Router instance (for chaining purposes). | Below is the instruction that describes the task:
### Input:
Mounts all registered routes to a bottle.py application instance.
Args:
app (instance): A `bottle.Bottle()` application instance.
Returns:
The Router instance (for chaining purposes).
### Response:
def mount(self, app=None):
"""Mounts all registered routes to a bottle.py application instance.
Args:
app (instance): A `bottle.Bottle()` application instance.
Returns:
The Router instance (for chaining purposes).
"""
for endpoint in self._routes:
endpoint.register_app(app)
return self |
def save(self):
"""
Save the current instance to the DB
"""
with rconnect() as conn:
try:
self.validate()
except ValidationError as e:
log.warn(e.messages)
raise
except ModelValidationError as e:
log.warn(e.messages)
raise
except ModelConversionError as e:
log.warn(e.messages)
raise
except ValueError as e:
log.warn(e)
raise
except FrinkError as e:
log.warn(e.messages)
raise
except Exception as e:
log.warn(e)
raise
else:
# If this is a new unsaved object, it'll likely have an
# id of None, which RethinkDB won't like. So if it's None,
# generate a UUID for it. If the save fails, we should re-set
# it to None.
if self.id is None:
self.id = str(uuid.uuid4())
log.debug(self.id)
try:
query = r.db(self._db).table(self._table).insert(
self.to_primitive(),
conflict="replace"
)
log.debug(query)
rv = query.run(conn)
# Returns something like this:
# {
# u'errors': 0,
# u'deleted': 0,
# u'generated_keys': [u'dd8ad1bc-8609-4484-b6c4-ed96c72c03f2'],
# u'unchanged': 0,
# u'skipped': 0,
# u'replaced': 0,
# u'inserted': 1
# }
log.debug(rv)
except Exception as e:
log.warn(e)
self.id = None
raise
else:
return self | Save the current instance to the DB | Below is the the instruction that describes the task:
### Input:
Save the current instance to the DB
### Response:
def save(self):
"""
Save the current instance to the DB
"""
with rconnect() as conn:
try:
self.validate()
except ValidationError as e:
log.warn(e.messages)
raise
except ModelValidationError as e:
log.warn(e.messages)
raise
except ModelConversionError as e:
log.warn(e.messages)
raise
except ValueError as e:
log.warn(e)
raise
except FrinkError as e:
log.warn(e.messages)
raise
except Exception as e:
log.warn(e)
raise
else:
# If this is a new unsaved object, it'll likely have an
# id of None, which RethinkDB won't like. So if it's None,
# generate a UUID for it. If the save fails, we should re-set
# it to None.
if self.id is None:
self.id = str(uuid.uuid4())
log.debug(self.id)
try:
query = r.db(self._db).table(self._table).insert(
self.to_primitive(),
conflict="replace"
)
log.debug(query)
rv = query.run(conn)
# Returns something like this:
# {
# u'errors': 0,
# u'deleted': 0,
# u'generated_keys': [u'dd8ad1bc-8609-4484-b6c4-ed96c72c03f2'],
# u'unchanged': 0,
# u'skipped': 0,
# u'replaced': 0,
# u'inserted': 1
# }
log.debug(rv)
except Exception as e:
log.warn(e)
self.id = None
raise
else:
return self |
def get_arrive_stop(self, **kwargs):
"""Obtain bus arrival info in target stop.
Args:
stop_number (int): Stop number to query.
lang (str): Language code (*es* or *en*).
Returns:
Status boolean and parsed response (list[Arrival]), or message string
in case of error.
"""
# Endpoint parameters
params = {
'idStop': kwargs.get('stop_number'),
'cultureInfo': util.language_code(kwargs.get('lang'))
}
# Request
result = self.make_request('geo', 'get_arrive_stop', **params)
# Funny endpoint, no status code
if not util.check_result(result, 'arrives'):
return False, 'UNKNOWN ERROR'
# Parse
values = util.response_list(result, 'arrives')
return True, [emtype.Arrival(**a) for a in values] | Obtain bus arrival info in target stop.
Args:
stop_number (int): Stop number to query.
lang (str): Language code (*es* or *en*).
Returns:
Status boolean and parsed response (list[Arrival]), or message string
in case of error. | Below is the instruction that describes the task:
### Input:
Obtain bus arrival info in target stop.
Args:
stop_number (int): Stop number to query.
lang (str): Language code (*es* or *en*).
Returns:
Status boolean and parsed response (list[Arrival]), or message string
in case of error.
### Response:
def get_arrive_stop(self, **kwargs):
"""Obtain bus arrival info in target stop.
Args:
stop_number (int): Stop number to query.
lang (str): Language code (*es* or *en*).
Returns:
Status boolean and parsed response (list[Arrival]), or message string
in case of error.
"""
# Endpoint parameters
params = {
'idStop': kwargs.get('stop_number'),
'cultureInfo': util.language_code(kwargs.get('lang'))
}
# Request
result = self.make_request('geo', 'get_arrive_stop', **params)
# Funny endpoint, no status code
if not util.check_result(result, 'arrives'):
return False, 'UNKNOWN ERROR'
# Parse
values = util.response_list(result, 'arrives')
return True, [emtype.Arrival(**a) for a in values] |
def _at_for(self, calculator, rule, scope, block):
"""
Implements @for
"""
var, _, name = block.argument.partition(' from ')
frm, _, through = name.partition(' through ')
if through:
inclusive = True
else:
inclusive = False
frm, _, through = frm.partition(' to ')
frm = calculator.calculate(frm)
through = calculator.calculate(through)
try:
frm = int(float(frm))
through = int(float(through))
except ValueError:
return
if frm > through:
# DEVIATION: allow reversed '@for .. from .. through' (same as enumerate() and range())
frm, through = through, frm
rev = reversed
else:
rev = lambda x: x
var = var.strip()
var = calculator.do_glob_math(var)
var = normalize_var(var)
inner_rule = rule.copy()
inner_rule.unparsed_contents = block.unparsed_contents
if not self.should_scope_loop_in_rule(inner_rule):
# DEVIATION: Allow not creating a new namespace
inner_rule.namespace = rule.namespace
if inclusive:
through += 1
for i in rev(range(frm, through)):
inner_rule.namespace.set_variable(var, Number(i))
self.manage_children(inner_rule, scope) | Implements @for | Below is the the instruction that describes the task:
### Input:
Implements @for
### Response:
def _at_for(self, calculator, rule, scope, block):
"""
Implements @for
"""
var, _, name = block.argument.partition(' from ')
frm, _, through = name.partition(' through ')
if through:
inclusive = True
else:
inclusive = False
frm, _, through = frm.partition(' to ')
frm = calculator.calculate(frm)
through = calculator.calculate(through)
try:
frm = int(float(frm))
through = int(float(through))
except ValueError:
return
if frm > through:
# DEVIATION: allow reversed '@for .. from .. through' (same as enumerate() and range())
frm, through = through, frm
rev = reversed
else:
rev = lambda x: x
var = var.strip()
var = calculator.do_glob_math(var)
var = normalize_var(var)
inner_rule = rule.copy()
inner_rule.unparsed_contents = block.unparsed_contents
if not self.should_scope_loop_in_rule(inner_rule):
# DEVIATION: Allow not creating a new namespace
inner_rule.namespace = rule.namespace
if inclusive:
through += 1
for i in rev(range(frm, through)):
inner_rule.namespace.set_variable(var, Number(i))
self.manage_children(inner_rule, scope) |
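The bounds handling above (inclusive `through`, exclusive `to`, and the deviation that allows a reversed range) can be distilled into a few lines; the sketch below is a standalone paraphrase with invented inputs, not the Sass evaluator itself.

def for_range(frm, through, inclusive):
    # Mirror of the loop-bound logic above: swap reversed bounds, remember
    # to iterate backwards, and widen by one when 'through' is inclusive.
    rev = lambda x: x
    if frm > through:
        frm, through = through, frm
        rev = reversed
    if inclusive:
        through += 1
    return list(rev(range(frm, through)))

print(for_range(1, 4, inclusive=True))    # @for $i from 1 through 4 -> [1, 2, 3, 4]
print(for_range(4, 1, inclusive=False))   # @for $i from 4 to 1 -> [3, 2, 1]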
def glover_time_derivative(tr, oversampling=50, time_length=32., onset=0.):
"""Implementation of the Glover time derivative hrf (dhrf) model
Parameters
----------
tr: float
scan repeat time, in seconds
oversampling: int,
temporal oversampling factor, optional
time_length: float,
hrf kernel length, in seconds
onset: float,
onset of the response
Returns
-------
dhrf: array of shape(length / tr), dtype=float
dhrf sampling on the provided grid
"""
do = .1
dhrf = 1. / do * (glover_hrf(tr, oversampling, time_length, onset) -
glover_hrf(tr, oversampling, time_length, onset + do))
return dhrf | Implementation of the Glover time derivative hrf (dhrf) model
Parameters
----------
tr: float
scan repeat time, in seconds
oversampling: int,
temporal oversampling factor, optional
time_length: float,
hrf kernel length, in seconds
onset: float,
onset of the response
Returns
-------
dhrf: array of shape(length / tr), dtype=float
dhrf sampling on the provided grid | Below is the instruction that describes the task:
### Input:
Implementation of the Glover time derivative hrf (dhrf) model
Parameters
----------
tr: float
scan repeat time, in seconds
oversampling: int,
temporal oversampling factor, optional
time_length: float,
hrf kernel length, in seconds
onset: float,
onset of the response
Returns
-------
dhrf: array of shape(length / tr), dtype=float
dhrf sampling on the provided grid
### Response:
def glover_time_derivative(tr, oversampling=50, time_length=32., onset=0.):
"""Implementation of the Glover time derivative hrf (dhrf) model
Parameters
----------
tr: float
scan repeat time, in seconds
oversampling: int,
temporal oversampling factor, optional
time_length: float,
hrf kernel length, in seconds
onset: float,
onset of the response
Returns
-------
dhrf: array of shape(length / tr), dtype=float
dhrf sampling on the provided grid
"""
do = .1
dhrf = 1. / do * (glover_hrf(tr, oversampling, time_length, onset) -
glover_hrf(tr, oversampling, time_length, onset + do))
return dhrf |
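The derivative kernel is just a finite difference of two onset-shifted responses; the sketch below applies the same (f(onset) - f(onset + 0.1)) / 0.1 pattern to a stand-in Gaussian instead of the actual Glover HRF, so the shapes and numbers are illustrative only.

import numpy as np

def f(onset):
    # Stand-in response curve (not the Glover HRF), sampled on a coarse grid.
    t = np.arange(0.0, 5.0, 0.5)
    return np.exp(-(t - 2.0 - onset) ** 2)

do = 0.1
dfdt = (f(0.0) - f(do)) / do  # same finite-difference trick as above
print(np.round(dfdt, 3))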
def space(self, newlines=1):
"""Creates a vertical space of newlines
Args:
newlines (int): number of empty lines
Returns:
self for chaining
"""
space = Space()
for line in range(newlines):
space.add_line('\n')
self._container.structure.insert(self._idx, space)
self._idx += 1
return self | Creates a vertical space of newlines
Args:
newlines (int): number of empty lines
Returns:
self for chaining | Below is the instruction that describes the task:
### Input:
Creates a vertical space of newlines
Args:
newlines (int): number of empty lines
Returns:
self for chaining
### Response:
def space(self, newlines=1):
"""Creates a vertical space of newlines
Args:
newlines (int): number of empty lines
Returns:
self for chaining
"""
space = Space()
for line in range(newlines):
space.add_line('\n')
self._container.structure.insert(self._idx, space)
self._idx += 1
return self |
def get_instances(self, object_specs, version=None):
"""Get the cached native representation for one or more objects.
Keyword arguments:
object_specs - A sequence of triples (model name, pk, obj):
- model name - the name of the model
- pk - the primary key of the instance
- obj - the instance, or None to load it
version - The cache version to use, or None for default
To get the 'new object' representation, set pk and obj to None
Return is a dictionary:
key - (model name, pk)
value - (native representation, pk, object or None)
"""
ret = dict()
spec_keys = set()
cache_keys = []
version = version or self.default_version
# Construct all the cache keys to fetch
for model_name, obj_pk, obj in object_specs:
assert model_name
assert obj_pk
# Get cache keys to fetch
obj_key = self.key_for(version, model_name, obj_pk)
spec_keys.add((model_name, obj_pk, obj, obj_key))
cache_keys.append(obj_key)
# Fetch the cache keys
if cache_keys and self.cache:
cache_vals = self.cache.get_many(cache_keys)
else:
cache_vals = {}
# Use cached representations, or recreate
cache_to_set = {}
for model_name, obj_pk, obj, obj_key in spec_keys:
# Load cached objects
obj_val = cache_vals.get(obj_key)
obj_native = json.loads(obj_val) if obj_val else None
# Invalid or not set - load from database
if not obj_native:
if not obj:
loader = self.model_function(model_name, version, 'loader')
obj = loader(obj_pk)
serializer = self.model_function(
model_name, version, 'serializer')
obj_native = serializer(obj) or {}
if obj_native:
cache_to_set[obj_key] = json.dumps(obj_native)
# Get fields to convert
keys = [key for key in obj_native.keys() if ':' in key]
for key in keys:
json_value = obj_native.pop(key)
name, value = self.field_from_json(key, json_value)
assert name not in obj_native
obj_native[name] = value
if obj_native:
ret[(model_name, obj_pk)] = (obj_native, obj_key, obj)
# Save any new cached representations
if cache_to_set and self.cache:
self.cache.set_many(cache_to_set)
return ret | Get the cached native representation for one or more objects.
Keyword arguments:
object_specs - A sequence of triples (model name, pk, obj):
- model name - the name of the model
- pk - the primary key of the instance
- obj - the instance, or None to load it
version - The cache version to use, or None for default
To get the 'new object' representation, set pk and obj to None
Return is a dictionary:
key - (model name, pk)
value - (native representation, pk, object or None) | Below is the instruction that describes the task:
### Input:
Get the cached native representation for one or more objects.
Keyword arguments:
object_specs - A sequence of triples (model name, pk, obj):
- model name - the name of the model
- pk - the primary key of the instance
- obj - the instance, or None to load it
version - The cache version to use, or None for default
To get the 'new object' representation, set pk and obj to None
Return is a dictionary:
key - (model name, pk)
value - (native representation, pk, object or None)
### Response:
def get_instances(self, object_specs, version=None):
"""Get the cached native representation for one or more objects.
Keyword arguments:
object_specs - A sequence of triples (model name, pk, obj):
- model name - the name of the model
- pk - the primary key of the instance
- obj - the instance, or None to load it
version - The cache version to use, or None for default
To get the 'new object' representation, set pk and obj to None
Return is a dictionary:
key - (model name, pk)
value - (native representation, pk, object or None)
"""
ret = dict()
spec_keys = set()
cache_keys = []
version = version or self.default_version
# Construct all the cache keys to fetch
for model_name, obj_pk, obj in object_specs:
assert model_name
assert obj_pk
# Get cache keys to fetch
obj_key = self.key_for(version, model_name, obj_pk)
spec_keys.add((model_name, obj_pk, obj, obj_key))
cache_keys.append(obj_key)
# Fetch the cache keys
if cache_keys and self.cache:
cache_vals = self.cache.get_many(cache_keys)
else:
cache_vals = {}
# Use cached representations, or recreate
cache_to_set = {}
for model_name, obj_pk, obj, obj_key in spec_keys:
# Load cached objects
obj_val = cache_vals.get(obj_key)
obj_native = json.loads(obj_val) if obj_val else None
# Invalid or not set - load from database
if not obj_native:
if not obj:
loader = self.model_function(model_name, version, 'loader')
obj = loader(obj_pk)
serializer = self.model_function(
model_name, version, 'serializer')
obj_native = serializer(obj) or {}
if obj_native:
cache_to_set[obj_key] = json.dumps(obj_native)
# Get fields to convert
keys = [key for key in obj_native.keys() if ':' in key]
for key in keys:
json_value = obj_native.pop(key)
name, value = self.field_from_json(key, json_value)
assert name not in obj_native
obj_native[name] = value
if obj_native:
ret[(model_name, obj_pk)] = (obj_native, obj_key, obj)
# Save any new cached representations
if cache_to_set and self.cache:
self.cache.set_many(cache_to_set)
return ret |
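Stripped of the JSON encoding and field conversion, the method above is a read-through cache: fetch many keys at once, rebuild the misses, and write the rebuilt entries back in one call. The dict-backed sketch below (all names invented) shows only that skeleton.

cache = {}

def get_many(keys):
    return {k: cache[k] for k in keys if k in cache}

def set_many(mapping):
    cache.update(mapping)

def load_from_db(pk):
    # Hypothetical loader standing in for the per-model loader function.
    return {'id': pk, 'name': 'user%d' % pk}

def get_instances(pks):
    keys = ['v1:user:%d' % pk for pk in pks]
    hits = get_many(keys)
    to_set, out = {}, {}
    for pk, key in zip(pks, keys):
        native = hits.get(key)
        if native is None:           # cache miss: rebuild and queue for write-back
            native = load_from_db(pk)
            to_set[key] = native
        out[pk] = native
    if to_set:
        set_many(to_set)
    return out

print(get_instances([1, 2]))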
def make_functions(self):
"""
Revisit the entire control flow graph, create Function instances accordingly, and correctly put blocks into
each function.
Although Function objects are created during the CFG recovery, they are neither sound nor accurate. With a
pre-constructed CFG, this method rebuilds all functions bearing the following rules:
- A block may only belong to one function.
- Small functions lying inside the startpoint and the endpoint of another function will be merged with the
other function
- Tail call optimizations are detected.
- PLT stubs are aligned by 16.
:return: None
"""
# TODO: Is it required that PLT stubs are always aligned by 16? If so, on what architectures and platforms is it
# TODO: enforced?
tmp_functions = self.kb.functions.copy()
for function in tmp_functions.values():
function.mark_nonreturning_calls_endpoints()
# Clear old functions dict
self.kb.functions.clear()
blockaddr_to_function = { }
traversed_cfg_nodes = set()
function_nodes = set()
# Find nodes for beginnings of all functions
for _, dst, data in self.graph.edges(data=True):
jumpkind = data.get('jumpkind', "")
if jumpkind == 'Ijk_Call' or jumpkind.startswith('Ijk_Sys'):
function_nodes.add(dst)
entry_node = self.model.get_any_node(self._binary.entry)
if entry_node is not None:
function_nodes.add(entry_node)
# aggressively remove and merge functions
# For any function, if there is a call to it, it won't be removed
called_function_addrs = { n.addr for n in function_nodes }
removed_functions_a = self._process_irrational_functions(tmp_functions,
called_function_addrs,
blockaddr_to_function
)
removed_functions_b, adjusted_cfgnodes = self._process_irrational_function_starts(tmp_functions,
called_function_addrs,
blockaddr_to_function
)
removed_functions = removed_functions_a | removed_functions_b
# Remove all nodes that are adjusted
function_nodes.difference_update(adjusted_cfgnodes)
for n in self.graph.nodes():
if n.addr in tmp_functions or n.addr in removed_functions:
function_nodes.add(n)
# traverse the graph starting from each node, not following call edges
# it's important that we traverse all functions in order so that we have a greater chance to come across
# rational functions before their irrational counterparts (e.g. due to failed jump table resolution)
min_stage_2_progress = 50.0
max_stage_2_progress = 90.0
nodes_count = len(function_nodes)
for i, fn in enumerate(sorted(function_nodes, key=lambda n: n.addr)):
if self._low_priority:
self._release_gil(i, 20)
if self._show_progressbar or self._progress_callback:
progress = min_stage_2_progress + (max_stage_2_progress - min_stage_2_progress) * (i * 1.0 / nodes_count)
self._update_progress(progress)
self._graph_bfs_custom(self.graph, [ fn ], self._graph_traversal_handler, blockaddr_to_function,
tmp_functions, traversed_cfg_nodes
)
# Don't forget those small function chunks that are not called by anything.
# There might be references to them from data, or simply references that we cannot find via static analysis
secondary_function_nodes = set()
# add all function chunks ("functions" that are not called from anywhere)
for func_addr in tmp_functions:
node = self.model.get_any_node(func_addr)
if node is None:
continue
if node.addr not in blockaddr_to_function:
secondary_function_nodes.add(node)
missing_cfg_nodes = set(self.graph.nodes()) - traversed_cfg_nodes
missing_cfg_nodes = { node for node in missing_cfg_nodes if node.function_address is not None }
if missing_cfg_nodes:
l.debug('%d CFGNodes are missing in the first traversal.', len(missing_cfg_nodes))
secondary_function_nodes |= missing_cfg_nodes
min_stage_3_progress = 90.0
max_stage_3_progress = 99.9
nodes_count = len(secondary_function_nodes)
for i, fn in enumerate(sorted(secondary_function_nodes, key=lambda n: n.addr)):
if self._show_progressbar or self._progress_callback:
progress = min_stage_3_progress + (max_stage_3_progress - min_stage_3_progress) * (i * 1.0 / nodes_count)
self._update_progress(progress)
self._graph_bfs_custom(self.graph, [fn], self._graph_traversal_handler, blockaddr_to_function,
tmp_functions
)
to_remove = set()
# Remove all stubs after PLT entries
if not is_arm_arch(self.project.arch):
for fn in self.kb.functions.values():
addr = fn.addr - (fn.addr % 16)
if addr != fn.addr and addr in self.kb.functions and self.kb.functions[addr].is_plt:
to_remove.add(fn.addr)
# remove empty functions
for func in self.kb.functions.values():
if func.startpoint is None:
to_remove.add(func.addr)
for addr in to_remove:
del self.kb.functions[addr]
# Update CFGNode.function_address
for node in self._nodes.values():
if node.addr in blockaddr_to_function:
node.function_address = blockaddr_to_function[node.addr].addr | Revisit the entire control flow graph, create Function instances accordingly, and correctly put blocks into
each function.
        Although Function objects are created during the CFG recovery, they are neither sound nor accurate. With a
pre-constructed CFG, this method rebuilds all functions bearing the following rules:
- A block may only belong to one function.
- Small functions lying inside the startpoint and the endpoint of another function will be merged with the
other function
- Tail call optimizations are detected.
- PLT stubs are aligned by 16.
        :return: None | Below is the instruction that describes the task:
### Input:
Revisit the entire control flow graph, create Function instances accordingly, and correctly put blocks into
each function.
        Although Function objects are created during the CFG recovery, they are neither sound nor accurate. With a
pre-constructed CFG, this method rebuilds all functions bearing the following rules:
- A block may only belong to one function.
- Small functions lying inside the startpoint and the endpoint of another function will be merged with the
other function
- Tail call optimizations are detected.
- PLT stubs are aligned by 16.
:return: None
### Response:
def make_functions(self):
"""
Revisit the entire control flow graph, create Function instances accordingly, and correctly put blocks into
each function.
        Although Function objects are created during the CFG recovery, they are neither sound nor accurate. With a
pre-constructed CFG, this method rebuilds all functions bearing the following rules:
- A block may only belong to one function.
- Small functions lying inside the startpoint and the endpoint of another function will be merged with the
other function
- Tail call optimizations are detected.
- PLT stubs are aligned by 16.
:return: None
"""
# TODO: Is it required that PLT stubs are always aligned by 16? If so, on what architectures and platforms is it
# TODO: enforced?
tmp_functions = self.kb.functions.copy()
for function in tmp_functions.values():
function.mark_nonreturning_calls_endpoints()
# Clear old functions dict
self.kb.functions.clear()
blockaddr_to_function = { }
traversed_cfg_nodes = set()
function_nodes = set()
# Find nodes for beginnings of all functions
for _, dst, data in self.graph.edges(data=True):
jumpkind = data.get('jumpkind', "")
if jumpkind == 'Ijk_Call' or jumpkind.startswith('Ijk_Sys'):
function_nodes.add(dst)
entry_node = self.model.get_any_node(self._binary.entry)
if entry_node is not None:
function_nodes.add(entry_node)
# aggressively remove and merge functions
# For any function, if there is a call to it, it won't be removed
called_function_addrs = { n.addr for n in function_nodes }
removed_functions_a = self._process_irrational_functions(tmp_functions,
called_function_addrs,
blockaddr_to_function
)
removed_functions_b, adjusted_cfgnodes = self._process_irrational_function_starts(tmp_functions,
called_function_addrs,
blockaddr_to_function
)
removed_functions = removed_functions_a | removed_functions_b
# Remove all nodes that are adjusted
function_nodes.difference_update(adjusted_cfgnodes)
for n in self.graph.nodes():
if n.addr in tmp_functions or n.addr in removed_functions:
function_nodes.add(n)
# traverse the graph starting from each node, not following call edges
# it's important that we traverse all functions in order so that we have a greater chance to come across
        # rational functions before their irrational counterparts (e.g. due to failed jump table resolution)
min_stage_2_progress = 50.0
max_stage_2_progress = 90.0
nodes_count = len(function_nodes)
for i, fn in enumerate(sorted(function_nodes, key=lambda n: n.addr)):
if self._low_priority:
self._release_gil(i, 20)
if self._show_progressbar or self._progress_callback:
progress = min_stage_2_progress + (max_stage_2_progress - min_stage_2_progress) * (i * 1.0 / nodes_count)
self._update_progress(progress)
self._graph_bfs_custom(self.graph, [ fn ], self._graph_traversal_handler, blockaddr_to_function,
tmp_functions, traversed_cfg_nodes
)
# Don't forget those small function chunks that are not called by anything.
# There might be references to them from data, or simply references that we cannot find via static analysis
secondary_function_nodes = set()
# add all function chunks ("functions" that are not called from anywhere)
for func_addr in tmp_functions:
node = self.model.get_any_node(func_addr)
if node is None:
continue
if node.addr not in blockaddr_to_function:
secondary_function_nodes.add(node)
missing_cfg_nodes = set(self.graph.nodes()) - traversed_cfg_nodes
missing_cfg_nodes = { node for node in missing_cfg_nodes if node.function_address is not None }
if missing_cfg_nodes:
l.debug('%d CFGNodes are missing in the first traversal.', len(missing_cfg_nodes))
secondary_function_nodes |= missing_cfg_nodes
min_stage_3_progress = 90.0
max_stage_3_progress = 99.9
nodes_count = len(secondary_function_nodes)
for i, fn in enumerate(sorted(secondary_function_nodes, key=lambda n: n.addr)):
if self._show_progressbar or self._progress_callback:
progress = min_stage_3_progress + (max_stage_3_progress - min_stage_3_progress) * (i * 1.0 / nodes_count)
self._update_progress(progress)
self._graph_bfs_custom(self.graph, [fn], self._graph_traversal_handler, blockaddr_to_function,
tmp_functions
)
to_remove = set()
# Remove all stubs after PLT entries
if not is_arm_arch(self.project.arch):
for fn in self.kb.functions.values():
addr = fn.addr - (fn.addr % 16)
if addr != fn.addr and addr in self.kb.functions and self.kb.functions[addr].is_plt:
to_remove.add(fn.addr)
# remove empty functions
for func in self.kb.functions.values():
if func.startpoint is None:
to_remove.add(func.addr)
for addr in to_remove:
del self.kb.functions[addr]
# Update CFGNode.function_address
for node in self._nodes.values():
if node.addr in blockaddr_to_function:
node.function_address = blockaddr_to_function[node.addr].addr |
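A minimal sketch of how the rebuilt function map is reached from user code, assuming the standard angr entry points; the binary path, the auto_load_libs flag, and the choice of CFGFast are illustrative assumptions rather than details taken from the code above, and make_functions() itself runs as part of CFG recovery, so the sketch only inspects its result.
import angr  # assumes angr is installed

proj = angr.Project("/bin/true", auto_load_libs=False)  # binary path is a placeholder
cfg = proj.analyses.CFGFast()  # function recovery, including make_functions(), happens here
for func in cfg.kb.functions.values():
    # after make_functions(), every block belongs to exactly one Function
    print(hex(func.addr), func.name)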
def load_and_migrate() -> Dict[str, Path]:
""" Ensure the settings directory tree is properly configured.
This function does most of its work on the actual robot. It will move
all settings files from wherever they happen to be to the proper
place. On non-robots, this mostly just loads. In addition, it writes
a default config and makes sure all directories required exist (though
the files in them may not).
"""
if IS_ROBOT:
_migrate_robot()
base = infer_config_base_dir()
base.mkdir(parents=True, exist_ok=True)
index = _load_with_overrides(base)
return _ensure_paths_and_types(index) | Ensure the settings directory tree is properly configured.
This function does most of its work on the actual robot. It will move
all settings files from wherever they happen to be to the proper
place. On non-robots, this mostly just loads. In addition, it writes
a default config and makes sure all directories required exist (though
    the files in them may not). | Below is the instruction that describes the task:
### Input:
Ensure the settings directory tree is properly configured.
This function does most of its work on the actual robot. It will move
all settings files from wherever they happen to be to the proper
place. On non-robots, this mostly just loads. In addition, it writes
a default config and makes sure all directories required exist (though
the files in them may not).
### Response:
def load_and_migrate() -> Dict[str, Path]:
""" Ensure the settings directory tree is properly configured.
This function does most of its work on the actual robot. It will move
all settings files from wherever they happen to be to the proper
place. On non-robots, this mostly just loads. In addition, it writes
a default config and makes sure all directories required exist (though
the files in them may not).
"""
if IS_ROBOT:
_migrate_robot()
base = infer_config_base_dir()
base.mkdir(parents=True, exist_ok=True)
index = _load_with_overrides(base)
return _ensure_paths_and_types(index) |
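A short usage sketch relying only on the documented contract above; it assumes load_and_migrate is importable from the containing config module and does not assume any particular dictionary keys, since the docstring does not name them.
paths = load_and_migrate()  # Dict[str, Path]; required directories are guaranteed to exist
for name, path in sorted(paths.items()):
    print(f"{name}: {path} (file present: {path.exists()})")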
def ra(self,*args,**kwargs):
"""
NAME:
ra
PURPOSE:
return the right ascension
INPUT:
t - (optional) time at which to get ra (can be Quantity)
obs=[X,Y,Z] - (optional) position of observer (in kpc; entries can be Quantity)
(default=[8.0,0.,0.]) OR Orbit object that corresponds to the orbit of the observer
(default=Object-wide default; can be Quantity)
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
OUTPUT:
ra(t) in deg
HISTORY:
2011-02-23 - Written - Bovy (NYU)
"""
out= self._orb.ra(*args,**kwargs)
if len(out) == 1: return out[0]
else: return out | NAME:
ra
PURPOSE:
return the right ascension
INPUT:
t - (optional) time at which to get ra (can be Quantity)
obs=[X,Y,Z] - (optional) position of observer (in kpc; entries can be Quantity)
(default=[8.0,0.,0.]) OR Orbit object that corresponds to the orbit of the observer
(default=Object-wide default; can be Quantity)
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
OUTPUT:
ra(t) in deg
HISTORY:
           2011-02-23 - Written - Bovy (NYU) | Below is the instruction that describes the task:
### Input:
NAME:
ra
PURPOSE:
return the right ascension
INPUT:
t - (optional) time at which to get ra (can be Quantity)
obs=[X,Y,Z] - (optional) position of observer (in kpc; entries can be Quantity)
(default=[8.0,0.,0.]) OR Orbit object that corresponds to the orbit of the observer
(default=Object-wide default; can be Quantity)
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
OUTPUT:
ra(t) in deg
HISTORY:
2011-02-23 - Written - Bovy (NYU)
### Response:
def ra(self,*args,**kwargs):
"""
NAME:
ra
PURPOSE:
return the right ascension
INPUT:
t - (optional) time at which to get ra (can be Quantity)
obs=[X,Y,Z] - (optional) position of observer (in kpc; entries can be Quantity)
(default=[8.0,0.,0.]) OR Orbit object that corresponds to the orbit of the observer
(default=Object-wide default; can be Quantity)
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
OUTPUT:
ra(t) in deg
HISTORY:
2011-02-23 - Written - Bovy (NYU)
"""
out= self._orb.ra(*args,**kwargs)
if len(out) == 1: return out[0]
else: return out |
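An illustrative call, assuming the usual galpy Orbit setup; the initial conditions and the ro value below are placeholders, not values taken from the documentation above.
from galpy.orbit import Orbit

o = Orbit([1., 0.1, 1.1, 0., 0.1, 0.])  # [R, vR, vT, z, vz, phi] in natural units; placeholder values
print(o.ra(ro=8.))                      # RA in deg using the default observer
print(o.ra(obs=[8.0, 0., 0.], ro=8.))   # RA in deg with an explicit observer position in kpc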
def update_function(self, param_vals):
"""Updates the opt_obj, returns new error."""
self.opt_obj.update_function(param_vals)
        return self.opt_obj.get_error() | Updates the opt_obj, returns new error. | Below is the instruction that describes the task:
### Input:
Updates the opt_obj, returns new error.
### Response:
def update_function(self, param_vals):
"""Updates the opt_obj, returns new error."""
self.opt_obj.update_function(param_vals)
return self.opt_obj.get_error() |
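One plausible way such a wrapper is driven, sketched here with SciPy; the optimizer choice, the starting point, and the stub object below are assumptions, since the code above only fixes the update-and-return-error contract.
import numpy as np
from scipy.optimize import minimize

# '_StubWrapper' stands in for an instance of the class above; it follows the same
# contract (update parameters, return the new error) so the sketch stays runnable.
class _StubWrapper:
    def update_function(self, param_vals):
        return float(np.sum(np.asarray(param_vals) ** 2))

wrapper = _StubWrapper()
result = minimize(wrapper.update_function, x0=np.zeros(3), method="Nelder-Mead")
print(result.x, result.fun)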
def _get_one_pending_job(self):
"""
Retrieve a pending job.
:return: A CFGJob instance or None
"""
pending_job_key, pending_job = self._pending_jobs.popitem()
pending_job_state = pending_job.state
pending_job_call_stack = pending_job.call_stack
pending_job_src_block_id = pending_job.src_block_id
pending_job_src_exit_stmt_idx = pending_job.src_exit_stmt_idx
self._deregister_analysis_job(pending_job.caller_func_addr, pending_job)
# Let's check whether this address has been traced before.
if pending_job_key in self._nodes:
node = self._nodes[pending_job_key]
if node in self.graph:
pending_exit_addr = self._block_id_addr(pending_job_key)
# That block has been traced before. Let's forget about it
l.debug("Target 0x%08x has been traced before. Trying the next one...", pending_exit_addr)
# However, we should still create the FakeRet edge
self._graph_add_edge(pending_job_src_block_id, pending_job_key, jumpkind="Ijk_FakeRet",
stmt_idx=pending_job_src_exit_stmt_idx, ins_addr=pending_job.src_exit_ins_addr)
return None
pending_job_state.history.jumpkind = 'Ijk_FakeRet'
job = CFGJob(pending_job_state.addr,
pending_job_state,
self._context_sensitivity_level,
src_block_id=pending_job_src_block_id,
src_exit_stmt_idx=pending_job_src_exit_stmt_idx,
src_ins_addr=pending_job.src_exit_ins_addr,
call_stack=pending_job_call_stack,
)
l.debug("Tracing a missing return exit %s", self._block_id_repr(pending_job_key))
return job | Retrieve a pending job.
        :return: A CFGJob instance or None | Below is the instruction that describes the task:
### Input:
Retrieve a pending job.
:return: A CFGJob instance or None
### Response:
def _get_one_pending_job(self):
"""
Retrieve a pending job.
:return: A CFGJob instance or None
"""
pending_job_key, pending_job = self._pending_jobs.popitem()
pending_job_state = pending_job.state
pending_job_call_stack = pending_job.call_stack
pending_job_src_block_id = pending_job.src_block_id
pending_job_src_exit_stmt_idx = pending_job.src_exit_stmt_idx
self._deregister_analysis_job(pending_job.caller_func_addr, pending_job)
# Let's check whether this address has been traced before.
if pending_job_key in self._nodes:
node = self._nodes[pending_job_key]
if node in self.graph:
pending_exit_addr = self._block_id_addr(pending_job_key)
# That block has been traced before. Let's forget about it
l.debug("Target 0x%08x has been traced before. Trying the next one...", pending_exit_addr)
# However, we should still create the FakeRet edge
self._graph_add_edge(pending_job_src_block_id, pending_job_key, jumpkind="Ijk_FakeRet",
stmt_idx=pending_job_src_exit_stmt_idx, ins_addr=pending_job.src_exit_ins_addr)
return None
pending_job_state.history.jumpkind = 'Ijk_FakeRet'
job = CFGJob(pending_job_state.addr,
pending_job_state,
self._context_sensitivity_level,
src_block_id=pending_job_src_block_id,
src_exit_stmt_idx=pending_job_src_exit_stmt_idx,
src_ins_addr=pending_job.src_exit_ins_addr,
call_stack=pending_job_call_stack,
)
l.debug("Tracing a missing return exit %s", self._block_id_repr(pending_job_key))
return job |
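A conceptual sketch of the drain loop that would surround this method; everything except _get_one_pending_job and _pending_jobs is illustrative and not part of the code above.
# Pop pending fake-return jobs until one is actually worth tracing; targets that
# were already traced return None, but their Ijk_FakeRet edge has still been added.
job = None
while cfg._pending_jobs and job is None:
    job = cfg._get_one_pending_job()
if job is not None:
    schedule(job)  # 'schedule' stands in for re-inserting the job into the worklist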
def _increment_recursion_level(self):
"""Increment current_depth based on either defaults or the enclosing
Async.
"""
        # Update the recursion info. This is done so that if an async is created
        # outside an executing context, or a previously created one is later
        # loaded from storage, the "current" setting is correctly set.
self._initialize_recursion_depth()
recursion_options = self._options.get('_recursion', {})
current_depth = recursion_options.get('current', 0) + 1
max_depth = recursion_options.get('max', MAX_DEPTH)
# Increment and store
self.update_options(_recursion={'current': current_depth,
'max': max_depth}) | Increment current_depth based on either defaults or the enclosing
        Async. | Below is the instruction that describes the task:
### Input:
Increment current_depth based on either defaults or the enclosing
Async.
### Response:
def _increment_recursion_level(self):
"""Increment current_depth based on either defaults or the enclosing
Async.
"""
        # Update the recursion info. This is done so that if an async is created
        # outside an executing context, or a previously created one is later
        # loaded from storage, the "current" setting is correctly set.
self._initialize_recursion_depth()
recursion_options = self._options.get('_recursion', {})
current_depth = recursion_options.get('current', 0) + 1
max_depth = recursion_options.get('max', MAX_DEPTH)
# Increment and store
self.update_options(_recursion={'current': current_depth,
'max': max_depth}) |
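A toy illustration of the bookkeeping this performs on the options dictionary; the literal values and the stand-in for MAX_DEPTH are made up for the example.
options = {'_recursion': {'current': 2, 'max': 100}}  # 100 stands in for MAX_DEPTH
recursion = options.get('_recursion', {})
options['_recursion'] = {'current': recursion.get('current', 0) + 1,
                         'max': recursion.get('max', 100)}
assert options['_recursion'] == {'current': 3, 'max': 100}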
def raise_204(instance):
"""Abort the current request with a 204 (No Content) response code. Clears
out the body of the response.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 204
"""
instance.response.status = 204
instance.response.body = ''
instance.response.body_raw = None
raise ResponseException(instance.response) | Abort the current request with a 204 (No Content) response code. Clears
out the body of the response.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
    :raises: :class:`webob.exceptions.ResponseException` of status 204 | Below is the instruction that describes the task:
### Input:
Abort the current request with a 204 (No Content) response code. Clears
out the body of the response.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 204
### Response:
def raise_204(instance):
"""Abort the current request with a 204 (No Content) response code. Clears
out the body of the response.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 204
"""
instance.response.status = 204
instance.response.body = ''
instance.response.body_raw = None
raise ResponseException(instance.response) |
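A hypothetical handler showing the intended call site; WidgetResource, its base Resource class (as referenced in the docstring above), and the store attribute are all invented or assumed for this sketch.
class WidgetResource(Resource):          # Resource: the base class referenced in the docstring above
    def delete(self, widget_id):
        self.store.remove(widget_id)     # 'store' is an invented backend for this sketch
        raise_204(self)                  # responds with 204 and an empty body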