code | docstring | text
---|---|---
def dumps(self):
"""Saves the Augmenter to string
Returns
-------
str
JSON formatted string that describes the Augmenter.
"""
return json.dumps([self.__class__.__name__.lower(), self._kwargs]) | Saves the Augmenter to string
Returns
-------
str
JSON formatted string that describes the Augmenter. | Below is the instruction that describes the task:
### Input:
Saves the Augmenter to string
Returns
-------
str
JSON formatted string that describes the Augmenter.
### Response:
def dumps(self):
"""Saves the Augmenter to string
Returns
-------
str
JSON formatted string that describes the Augmenter.
"""
return json.dumps([self.__class__.__name__.lower(), self._kwargs]) |
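A minimal, hypothetical sketch of the serialization pattern in this row (the `Flip` class below is an illustrative stand-in, not the library's actual Augmenter): the result is a two-element JSON list of the lowercased class name and the constructor kwargs.

```python
import json

class Flip:  # hypothetical stand-in for an Augmenter subclass
    def __init__(self, **kwargs):
        self._kwargs = kwargs

    def dumps(self):
        # same pattern as above: [lowercased class name, constructor kwargs]
        return json.dumps([self.__class__.__name__.lower(), self._kwargs])

print(Flip(axis="horizontal", p=0.5).dumps())
# ["flip", {"axis": "horizontal", "p": 0.5}]
```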
def user_create(auth=None, **kwargs):
'''
Create a user
CLI Example:
.. code-block:: bash
salt '*' keystoneng.user_create name=user1
salt '*' keystoneng.user_create name=user2 password=1234 enabled=False
salt '*' keystoneng.user_create name=user3 domain_id=b62e76fbeeff4e8fb77073f591cf211e
'''
cloud = get_openstack_cloud(auth)
kwargs = _clean_kwargs(keep_name=True, **kwargs)
return cloud.create_user(**kwargs) | Create a user
CLI Example:
.. code-block:: bash
salt '*' keystoneng.user_create name=user1
salt '*' keystoneng.user_create name=user2 password=1234 enabled=False
salt '*' keystoneng.user_create name=user3 domain_id=b62e76fbeeff4e8fb77073f591cf211e | Below is the instruction that describes the task:
### Input:
Create a user
CLI Example:
.. code-block:: bash
salt '*' keystoneng.user_create name=user1
salt '*' keystoneng.user_create name=user2 password=1234 enabled=False
salt '*' keystoneng.user_create name=user3 domain_id=b62e76fbeeff4e8fb77073f591cf211e
### Response:
def user_create(auth=None, **kwargs):
'''
Create a user
CLI Example:
.. code-block:: bash
salt '*' keystoneng.user_create name=user1
salt '*' keystoneng.user_create name=user2 password=1234 enabled=False
salt '*' keystoneng.user_create name=user3 domain_id=b62e76fbeeff4e8fb77073f591cf211e
'''
cloud = get_openstack_cloud(auth)
kwargs = _clean_kwargs(keep_name=True, **kwargs)
return cloud.create_user(**kwargs) |
def stop(self):
"""Stop the publisher.
"""
self.publish.setsockopt(zmq.LINGER, 1)
self.publish.close()
return self | Stop the publisher. | Below is the instruction that describes the task:
### Input:
Stop the publisher.
### Response:
def stop(self):
"""Stop the publisher.
"""
self.publish.setsockopt(zmq.LINGER, 1)
self.publish.close()
return self |
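For context, a hedged sketch of the same shutdown pattern with plain pyzmq (the endpoint and message are made up): `LINGER` caps, in milliseconds, how long `close()` may block while unsent messages are flushed.

```python
import zmq

ctx = zmq.Context.instance()
publish = ctx.socket(zmq.PUB)
publish.bind("tcp://127.0.0.1:5556")   # hypothetical endpoint
publish.send_string("status ready")

publish.setsockopt(zmq.LINGER, 1)      # wait at most 1 ms for pending messages
publish.close()
ctx.term()
```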
def get_scaled(self, factor):
""" Get a new time unit, scaled by the given factor """
res = TimeUnit(self)
res._factor = self._factor * factor
res._unit = self._unit
return res | Get a new time unit, scaled by the given factor | Below is the instruction that describes the task:
### Input:
Get a new time unit, scaled by the given factor
### Response:
def get_scaled(self, factor):
""" Get a new time unit, scaled by the given factor """
res = TimeUnit(self)
res._factor = self._factor * factor
res._unit = self._unit
return res |
def compute_shader(self, source) -> 'ComputeShader':
'''
A :py:class:`ComputeShader` is a Shader Stage that is used entirely for computing arbitrary information.
While it can do rendering, it is generally used for tasks not directly related to drawing.
Args:
source (str): The source of the compute shader.
Returns:
:py:class:`ComputeShader` object
'''
res = ComputeShader.__new__(ComputeShader)
res.mglo, ls1, ls2, ls3, ls4, res._glo = self.mglo.compute_shader(source)
members = {}
for item in ls1:
obj = Uniform.__new__(Uniform)
obj.mglo, obj._location, obj._array_length, obj._dimension, obj._name = item
members[obj.name] = obj
for item in ls2:
obj = UniformBlock.__new__(UniformBlock)
obj.mglo, obj._index, obj._size, obj._name = item
members[obj.name] = obj
res._members = members
res.ctx = self
res.extra = None
return res | A :py:class:`ComputeShader` is a Shader Stage that is used entirely for computing arbitrary information.
While it can do rendering, it is generally used for tasks not directly related to drawing.
Args:
source (str): The source of the compute shader.
Returns:
:py:class:`ComputeShader` object | Below is the instruction that describes the task:
### Input:
A :py:class:`ComputeShader` is a Shader Stage that is used entirely for computing arbitrary information.
While it can do rendering, it is generally used for tasks not directly related to drawing.
Args:
source (str): The source of the compute shader.
Returns:
:py:class:`ComputeShader` object
### Response:
def compute_shader(self, source) -> 'ComputeShader':
'''
A :py:class:`ComputeShader` is a Shader Stage that is used entirely for computing arbitrary information.
While it can do rendering, it is generally used for tasks not directly related to drawing.
Args:
source (str): The source of the compute shader.
Returns:
:py:class:`ComputeShader` object
'''
res = ComputeShader.__new__(ComputeShader)
res.mglo, ls1, ls2, ls3, ls4, res._glo = self.mglo.compute_shader(source)
members = {}
for item in ls1:
obj = Uniform.__new__(Uniform)
obj.mglo, obj._location, obj._array_length, obj._dimension, obj._name = item
members[obj.name] = obj
for item in ls2:
obj = UniformBlock.__new__(UniformBlock)
obj.mglo, obj._index, obj._size, obj._name = item
members[obj.name] = obj
res._members = members
res.ctx = self
res.extra = None
return res |
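A hedged usage sketch with the moderngl API this method appears to belong to (it needs an OpenGL 4.3-capable driver; the GLSL source and buffer layout are illustrative only, not taken from the row above):

```python
import moderngl

ctx = moderngl.create_standalone_context(require=430)
doubler = ctx.compute_shader("""
    #version 430
    layout(local_size_x = 64) in;
    layout(std430, binding = 0) buffer Data { float values[]; };
    void main() { values[gl_GlobalInvocationID.x] *= 2.0; }
""")
buf = ctx.buffer(reserve=64 * 4)       # room for 64 floats
buf.bind_to_storage_buffer(0)
doubler.run(group_x=1)                 # dispatch one work group
```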
def update_with_default_values(self):
""" Goes through all the configuration fields and predefines empty ones with default values
Top level:
`dir` field is predefined with current working directory value, in case of empty string or `None`
`io_silent_fail` field if predefined with :attr:`Configuration.DEFAULT_IOSF` in case of None or empty string
Logger section:
`name` field is predefined with :attr:`Configuration.DEFAULT_LOGGER_NAME`. Field is set to str() of itself
`level` field is predefined with :attr:`Configuration.DEFAULT_LOGGER_LEVEL`. Field is set to str() of itself
`format` field is predefined with :attr:`Configuration.DEFAULT_LOGGER_LEVEL`. Field is set to str() of itself
`destination` field if predefined with
Input section:
`dir` field is predefined with a relative path constructed with top level `dir` field and :attr:`Configuration.DEFAULT_INPUT_DIR`
`source` field is predefined with an empty list
`io_silent_fail` if predefined with a top level `io_silent_fail`
`logger` subsection if predefined by a top level `logger` section it substitutes all the missing values in `input` `logger` subsection
Algorithm section:
`io_silent_fail` is predefined with a top level `io_silent_fail` value
`logger` is predefined with top level `logger` configuration
`tasks` section:
`paths` is predefined by [:attr:`Configuration.DEFAULT_ALGORITHM_TASKS_PATH`]. If value is supplied,
:attr:`Configuration.DEFAULT_ALGORITHM_TASKS_PATH` is prepended to the supplied list
`stages` section: (is predefined with [])
if any values are predefined, such fields as `io_silent_fail`, `logger` are propagated to stages entries
`self_loop` value is predefined by :attr:`Configuration.DEFAULT_ALGORITHM_STAGES_SELF_LOOP`
`rounds` section: (is predefined with [])
if any values are predefined, such fields as `io_silent_fail`, `logger` are propagated to stages entries
`self_loop` value is predefined by :attr:`Configuration.DEFAULT_ALGORITHM_ROUNDS_SELF_LOOP`
`pipeline` section:
`self_loop` if predefined by :attr:`Configuration.DEFAULT_ALGORITHM_PIPELINE_SELF_LOOP`
`logger` if predefined with an algorithm->logger configuration. All non specified value are propagated respectively
`rounds` is predefined with []
Output section:
`dir` field is predefined with :attr:`Configuration.DEFAULT_OUTPUT_DIR`
`io_silent_fail` field is predefined with top level `io_silent_fail`
`logger` subsection is predefined with a top level logger section, which substitutes all of missing values from top level `logger` section
`stats` section
`io_silent_fail` field is predefined with `output->io_silent_fail` value
`dir` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_STATS_DIR`
`file` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_STATS_FILE`
`logger` is defaulted (all, or just missing parts) by `output->logger` section
`assembly_points` section
`io_silent_fail` field is predefined with `output->io_silent_fail` value
`dir` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_DIR`
`file` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_FILE`
`logger` is defaulted (all, or just missing parts) by `output->logger` section
`genome_specific` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_GENOME_SPECIFIC`
`genome_specific_file_name_pattern` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_GSFNP`
`genomes` section
`io_silent_fail` field is predefined with `output->io_silent_fail` value
`dir` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_GENOMES_DIR`
`logger` is defaulted (all, or just missing parts) by `output->logger` section
`output_non_glued_fragments` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_GENOMES_ONGF`
:return: Nothing, performs inplace changes
:rtype: `None`
"""
if self[self.DIR] in ("", None):
self[self.DIR] = os.getcwd()
if self[self.IOSF] in ("", None):
self[self.IOSF] = self.DEFAULT_IOSF
# logger section
if self[self.LOGGER][self.NAME] in ("", None):
self[self.LOGGER][self.NAME] = self.DEFAULT_LOGGER_NAME
self[self.LOGGER][self.NAME] = str(self[self.LOGGER][self.NAME])
if self[self.LOGGER][self.LEVEL] in ("", None):
self[self.LOGGER][self.LEVEL] = self.DEFAULT_LOGGER_LEVEL
self[self.LOGGER][self.LEVEL] = str(self[self.LOGGER][self.LEVEL])
if self[self.LOGGER][self.FORMAT] in ("", None):
self[self.LOGGER][self.FORMAT] = self.DEFAULT_LOGGER_FORMAT
self[self.LOGGER][self.FORMAT] = str(self[self.LOGGER][self.FORMAT])
if self[self.LOGGER][self.DESTINATION] in ([], "", None):
self[self.LOGGER][self.DESTINATION] = self.DEFAULT_LOGGER_DESTINATION
# input section
if self[self.INPUT][self.SOURCE] in ("", None):
self[self.INPUT][self.SOURCE] = []
if self[self.INPUT][self.DIR] in ("", None):
self[self.INPUT][self.DIR] = self.DEFAULT_INPUT_DIR
if self[self.INPUT][self.IOSF] in ("", None):
self[self.INPUT][self.IOSF] = self[self.IOSF]
self._update_logger_config(logger_to_update=self[self.INPUT][self.LOGGER],
source_logger=self[self.LOGGER])
# algorithm section
if self.LOGGER not in self[self.ALGORITHM] or self[self.ALGORITHM][self.LOGGER] in ("", None):
self[self.ALGORITHM][self.LOGGER] = {}
self._update_logger_config(logger_to_update=self[self.ALGORITHM][self.LOGGER],
source_logger=self[self.LOGGER])
if self.IOSF not in self[self.ALGORITHM] or self[self.ALGORITHM][self.IOSF] in ("", None):
self[self.ALGORITHM][self.IOSF] = self[self.IOSF]
if self.TASKS not in self[self.ALGORITHM]:
self[self.ALGORITHM][self.TASKS] = {}
if self.EXECUTABLE_CONTAINERS not in self[self.ALGORITHM]:
self[self.ALGORITHM][self.EXECUTABLE_CONTAINERS] = []
if self.PATHS not in self[self.ALGORITHM][self.TASKS] or self[self.ALGORITHM][self.TASKS][self.PATHS] in ("", None):
self[self.ALGORITHM][self.TASKS][self.PATHS] = []
self[self.ALGORITHM][self.TASKS][self.PATHS] = [self.DEFAULT_ALGORITHM_TASKS_PATH] + self[self.ALGORITHM][self.TASKS][self.PATHS]
for ecs in self[self.ALGORITHM][self.EXECUTABLE_CONTAINERS]:
if self.REFERENCE not in ecs:
ecs[self.REFERENCE] = ecs[self.NAME] + "s"
if ecs[self.REFERENCE] not in self[self.ALGORITHM]:
self[self.ALGORITHM][ecs[self.REFERENCE]] = []
for executable_container in self[self.ALGORITHM][ecs[self.REFERENCE]]:
if self.SELF_LOOP not in executable_container:
executable_container[self.SELF_LOOP] = self.DEFAULT_ALGORITHM_EC_SELF_LOOP
if self.ENTRIES not in executable_container:
executable_container[self.ENTRIES] = []
if self.PIPELINE not in self[self.ALGORITHM]:
self[self.ALGORITHM][self.PIPELINE] = {}
if self.LOGGER not in self[self.ALGORITHM][self.PIPELINE] or self[self.ALGORITHM][self.PIPELINE][self.LOGGER] in ("", None):
self[self.ALGORITHM][self.PIPELINE][self.LOGGER] = {}
self._update_logger_config(logger_to_update=self[self.ALGORITHM][self.PIPELINE][self.LOGGER],
source_logger=self[self.ALGORITHM][self.LOGGER])
if self.IOSF not in self[self.ALGORITHM][self.PIPELINE] or self[self.ALGORITHM][self.PIPELINE][self.IOSF] in ("", None):
self[self.ALGORITHM][self.PIPELINE][self.IOSF] = self[self.ALGORITHM][self.IOSF]
if self.ENTRIES not in self[self.ALGORITHM][self.PIPELINE] or self[self.ALGORITHM][self.PIPELINE][self.ENTRIES] in ("", None):
self[self.ALGORITHM][self.PIPELINE][self.ENTRIES] = []
if self.SELF_LOOP not in self[self.ALGORITHM][self.PIPELINE] or self[self.ALGORITHM][self.PIPELINE][self.SELF_LOOP] in ("", None):
self[self.ALGORITHM][self.PIPELINE][self.SELF_LOOP] = self.DEFAULT_ALGORITHM_PIPELINE_SELF_LOOP
# output section
if self[self.OUTPUT][self.DIR] in ("", None):
self[self.OUTPUT][self.DIR] = os.path.join(self[self.DIR], self.DEFAULT_OUTPUT_DIR)
if self[self.OUTPUT][self.IOSF] in ("", None):
self[self.OUTPUT][self.IOSF] = self[self.IOSF]
self._update_logger_config(logger_to_update=self[self.OUTPUT][self.LOGGER],
source_logger=self[self.LOGGER])
# output -> stats section
if self.DIR not in self[self.OUTPUT][self.STATS] or self[self.OUTPUT][self.STATS][self.DIR] in ("", None):
self[self.OUTPUT][self.STATS][self.DIR] = self.DEFAULT_OUTPUT_STATS_DIR
if self.IOSF not in self[self.OUTPUT][self.STATS] or self[self.OUTPUT][self.STATS][self.IOSF] in ("", None):
self[self.OUTPUT][self.STATS][self.IOSF] = self[self.OUTPUT][self.IOSF]
if self.FILE not in self[self.OUTPUT][self.STATS] or self[self.OUTPUT][self.STATS][self.FILE] in ("", None):
self[self.OUTPUT][self.STATS][self.FILE] = self.DEFAULT_OUTPUT_STATS_FILE
if self.LOGGER not in self[self.OUTPUT][self.STATS]:
self[self.OUTPUT][self.STATS][self.LOGGER] = {}
self._update_logger_config(logger_to_update=self[self.OUTPUT][self.STATS][self.LOGGER],
source_logger=self[self.OUTPUT][self.LOGGER])
# output -> assembly_points section
if self.DIR not in self[self.OUTPUT][self.ASSEMBLY_POINTS] or self[self.OUTPUT][self.ASSEMBLY_POINTS][self.DIR] in ("", None):
self[self.OUTPUT][self.ASSEMBLY_POINTS][self.DIR] = self.DEFAULT_OUTPUT_AP_DIR
if self.IOSF not in self[self.OUTPUT][self.ASSEMBLY_POINTS] or self[self.OUTPUT][self.ASSEMBLY_POINTS][self.IOSF] in ("", None):
self[self.OUTPUT][self.ASSEMBLY_POINTS][self.IOSF] = self[self.OUTPUT][self.IOSF]
if self.FILE not in self[self.OUTPUT][self.ASSEMBLY_POINTS] or self[self.OUTPUT][self.ASSEMBLY_POINTS][self.FILE] in ("", None):
self[self.OUTPUT][self.ASSEMBLY_POINTS][self.FILE] = self.DEFAULT_OUTPUT_AP_FILE
if self.GENOME_SPECIFIC not in self[self.OUTPUT][self.ASSEMBLY_POINTS] or self[self.OUTPUT][self.ASSEMBLY_POINTS][
self.GENOME_SPECIFIC]:
self[self.OUTPUT][self.ASSEMBLY_POINTS][self.GENOME_SPECIFIC] = self.DEFAULT_OUTPUT_AP_GENOME_SPECIFIC
if self.GENOME_SPECIFIC_FNP not in self[self.OUTPUT][self.ASSEMBLY_POINTS] or self[self.OUTPUT][self.ASSEMBLY_POINTS][
self.GENOME_SPECIFIC_FNP]:
self[self.OUTPUT][self.ASSEMBLY_POINTS][self.GENOME_SPECIFIC_FNP] = self.DEFAULT_OUTPUT_AP_GSFNP
if self.LOGGER not in self[self.OUTPUT][self.ASSEMBLY_POINTS]:
self[self.OUTPUT][self.ASSEMBLY_POINTS][self.LOGGER] = {}
self._update_logger_config(logger_to_update=self[self.OUTPUT][self.ASSEMBLY_POINTS][self.LOGGER],
source_logger=self[self.OUTPUT][self.LOGGER])
# output -> genomes section
if self.DIR not in self[self.OUTPUT][self.GENOMES] or self[self.OUTPUT][self.GENOMES][self.DIR] in ("", None):
self[self.OUTPUT][self.GENOMES][self.DIR] = self.DEFAULT_OUTPUT_GENOMES_DIR
if self.IOSF not in self[self.OUTPUT][self.GENOMES] or self[self.OUTPUT][self.GENOMES][self.IOSF] in ("", None):
self[self.OUTPUT][self.GENOMES][self.IOSF] = self[self.OUTPUT][self.IOSF]
if self.OUTPUT_NG_FRAGMENTS not in self[self.OUTPUT][self.GENOMES] or self[self.OUTPUT][self.GENOMES][self.OUTPUT_NG_FRAGMENTS] in (
"", None):
self[self.OUTPUT][self.GENOMES][self.OUTPUT_NG_FRAGMENTS] = self.DEFAULT_OUTPUT_GENOMES_ONGF
if self.LOGGER not in self[self.OUTPUT][self.GENOMES]:
self[self.OUTPUT][self.GENOMES][self.LOGGER] = {}
self._update_logger_config(logger_to_update=self[self.OUTPUT][self.GENOMES][self.LOGGER],
source_logger=self[self.OUTPUT][self.LOGGER]) | Goes through all the configuration fields and predefines empty ones with default values
Top level:
`dir` field is predefined with current working directory value, in case of empty string or `None`
`io_silent_fail` field if predefined with :attr:`Configuration.DEFAULT_IOSF` in case of None or empty string
Logger section:
`name` field is predefined with :attr:`Configuration.DEFAULT_LOGGER_NAME`. Field is set to str() of itself
`level` field is predefined with :attr:`Configuration.DEFAULT_LOGGER_LEVEL`. Field is set to str() of itself
`format` field is predefined with :attr:`Configuration.DEFAULT_LOGGER_LEVEL`. Field is set to str() of itself
`destination` field if predefined with
Input section:
`dir` field is predefined with a relative path constructed with top level `dir` field and :attr:`Configuration.DEFAULT_INPUT_DIR`
`source` field is predefined with an empty list
`io_silent_fail` if predefined with a top level `io_silent_fail`
`logger` subsection if predefined by a top level `logger` section it substitutes all the missing values in `input` `logger` subsection
Algorithm section:
`io_silent_fail` is predefined with a top level `io_silent_fail` value
`logger` is predefined with top level `logger` configuration
`tasks` section:
`paths` is predefined by [:attr:`Configuration.DEFAULT_ALGORITHM_TASKS_PATH`]. If value is supplied,
:attr:`Configuration.DEFAULT_ALGORITHM_TASKS_PATH` is prepended to the supplied list
`stages` section: (is predefined with [])
if any values are predefined, such fields as `io_silent_fail`, `logger` are propagated to stages entries
`self_loop` value is predefined by :attr:`Configuration.DEFAULT_ALGORITHM_STAGES_SELF_LOOP`
`rounds` section: (is predefined with [])
if any values are predefined, such fields as `io_silent_fail`, `logger` are propagated to stages entries
`self_loop` value is predefined by :attr:`Configuration.DEFAULT_ALGORITHM_ROUNDS_SELF_LOOP`
`pipeline` section:
`self_loop` if predefined by :attr:`Configuration.DEFAULT_ALGORITHM_PIPELINE_SELF_LOOP`
`logger` if predefined with an algorithm->logger configuration. All non specified value are propagated respectively
`rounds` is predefined with []
Output section:
`dir` field is predefined with :attr:`Configuration.DEFAULT_OUTPUT_DIR`
`io_silent_fail` field is predefined with top level `io_silent_fail`
`logger` subsection is predefined with a top level logger section, which substitutes all of missing values from top level `logger` section
`stats` section
`io_silent_fail` field is predefined with `output->io_silent_fail` value
`dir` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_STATS_DIR`
`file` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_STATS_FILE`
`logger` is defaulted (all, or just missing parts) by `output->logger` section
`assembly_points` section
`io_silent_fail` field is predefined with `output->io_silent_fail` value
`dir` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_DIR`
`file` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_FILE`
`logger` is defaulted (all, or just missing parts) by `output->logger` section
`genome_specific` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_GENOME_SPECIFIC`
`genome_specific_file_name_pattern` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_GSFNP`
`genomes` section
`io_silent_fail` field is predefined with `output->io_silent_fail` value
`dir` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_GENOMES_DIR`
`logger` is defaulted (all, or just missing parts) by `output->logger` section
`output_non_glued_fragments` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_GENOMES_ONGF`
:return: Nothing, performs inplace changes
:rtype: `None` | Below is the instruction that describes the task:
### Input:
Goes through all the configuration fields and predefines empty ones with default values
Top level:
`dir` field is predefined with current working directory value, in case of empty string or `None`
`io_silent_fail` field if predefined with :attr:`Configuration.DEFAULT_IOSF` in case of None or empty string
Logger section:
`name` field is predefined with :attr:`Configuration.DEFAULT_LOGGER_NAME`. Field is set to str() of itself
`level` field is predefined with :attr:`Configuration.DEFAULT_LOGGER_LEVEL`. Field is set to str() of itself
`format` field is predefined with :attr:`Configuration.DEFAULT_LOGGER_LEVEL`. Field is set to str() of itself
`destination` field if predefined with
Input section:
`dir` field is predefined with a relative path constructed with top level `dir` field and :attr:`Configuration.DEFAULT_INPUT_DIR`
`source` field is predefined with an empty list
`io_silent_fail` if predefined with a top level `io_silent_fail`
`logger` subsection if predefined by a top level `logger` section it substitutes all the missing values in `input` `logger` subsection
Algorithm section:
`io_silent_fail` is predefined with a top level `io_silent_fail` value
`logger` is predefined with top level `logger` configuration
`tasks` section:
`paths` is predefined by [:attr:`Configuration.DEFAULT_ALGORITHM_TASKS_PATH`]. If value is supplied,
:attr:`Configuration.DEFAULT_ALGORITHM_TASKS_PATH` is prepended to the supplied list
`stages` section: (is predefined with [])
if any values are predefined, such fields as `io_silent_fail`, `logger` are propagated to stages entries
`self_loop` value is predefined by :attr:`Configuration.DEFAULT_ALGORITHM_STAGES_SELF_LOOP`
`rounds` section: (is predefined with [])
if any values are predefined, such fields as `io_silent_fail`, `logger` are propagated to stages entries
`self_loop` value is predefined by :attr:`Configuration.DEFAULT_ALGORITHM_ROUNDS_SELF_LOOP`
`pipeline` section:
`self_loop` if predefined by :attr:`Configuration.DEFAULT_ALGORITHM_PIPELINE_SELF_LOOP`
`logger` if predefined with an algorithm->logger configuration. All non specified value are propagated respectively
`rounds` is predefined with []
Output section:
`dir` field is predefined with :attr:`Configuration.DEFAULT_OUTPUT_DIR`
`io_silent_fail` field is predefined with top level `io_silent_fail`
`logger` subsection is predefined with a top level logger section, which substitutes all of missing values from top level `logger` section
`stats` section
`io_silent_fail` field is predefined with `output->io_silent_fail` value
`dir` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_STATS_DIR`
`file` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_STATS_FILE`
`logger` is defaulted (all, or just missing parts) by `output->logger` section
`assembly_points` section
`io_silent_fail` field is predefined with `output->io_silent_fail` value
`dir` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_DIR`
`file` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_FILE`
`logger` is defaulted (all, or just missing parts) by `output->logger` section
`genome_specific` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_GENOME_SPECIFIC`
`genome_specific_file_name_pattern` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_GSFNP`
`genomes` section
`io_silent_fail` field is predefined with `output->io_silent_fail` value
`dir` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_GENOMES_DIR`
`logger` is defaulted (all, or just missing parts) by `output->logger` section
`output_non_glued_fragments` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_GENOMES_ONGF`
:return: Nothing, performs inplace changes
:rtype: `None`
### Response:
def update_with_default_values(self):
""" Goes through all the configuration fields and predefines empty ones with default values
Top level:
`dir` field is predefined with current working directory value, in case of empty string or `None`
`io_silent_fail` field if predefined with :attr:`Configuration.DEFAULT_IOSF` in case of None or empty string
Logger section:
`name` field is predefined with :attr:`Configuration.DEFAULT_LOGGER_NAME`. Field is set to str() of itself
`level` field is predefined with :attr:`Configuration.DEFAULT_LOGGER_LEVEL`. Field is set to str() of itself
`format` field is predefined with :attr:`Configuration.DEFAULT_LOGGER_LEVEL`. Field is set to str() of itself
`destination` field if predefined with
Input section:
`dir` field is predefined with a relative path constructed with top level `dir` field and :attr:`Configuration.DEFAULT_INPUT_DIR`
`source` field is predefined with an empty list
`io_silent_fail` if predefined with a top level `io_silent_fail`
`logger` subsection if predefined by a top level `logger` section it substitutes all the missing values in `input` `logger` subsection
Algorithm section:
`io_silent_fail` is predefined with a top level `io_silent_fail` value
`logger` is predefined with top level `logger` configuration
`tasks` section:
`paths` is predefined by [:attr:`Configuration.DEFAULT_ALGORITHM_TASKS_PATH`]. If value is supplied,
:attr:`Configuration.DEFAULT_ALGORITHM_TASKS_PATH` is prepended to the supplied list
`stages` section: (is predefined with [])
if any values are predefined, such fields as `io_silent_fail`, `logger` are propagated to stages entries
`self_loop` value is predefined by :attr:`Configuration.DEFAULT_ALGORITHM_STAGES_SELF_LOOP`
`rounds` section: (is predefined with [])
if any values are predefined, such fields as `io_silent_fail`, `logger` are propagated to stages entries
`self_loop` value is predefined by :attr:`Configuration.DEFAULT_ALGORITHM_ROUNDS_SELF_LOOP`
`pipeline` section:
`self_loop` if predefined by :attr:`Configuration.DEFAULT_ALGORITHM_PIPELINE_SELF_LOOP`
`logger` if predefined with an algorithm->logger configuration. All non specified value are propagated respectively
`rounds` is predefined with []
Output section:
`dir` field is predefined with :attr:`Configuration.DEFAULT_OUTPUT_DIR`
`io_silent_fail` field is predefined with top level `io_silent_fail`
`logger` subsection is predefined with a top level logger section, which substitutes all of missing values from top level `logger` section
`stats` section
`io_silent_fail` field is predefined with `output->io_silent_fail` value
`dir` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_STATS_DIR`
`file` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_STATS_FILE`
`logger` is defaulted (all, or just missing parts) by `output->logger` section
`assembly_points` section
`io_silent_fail` field is predefined with `output->io_silent_fail` value
`dir` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_DIR`
`file` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_FILE`
`logger` is defaulted (all, or just missing parts) by `output->logger` section
`genome_specific` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_GENOME_SPECIFIC`
`genome_specific_file_name_pattern` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_AP_GSFNP`
`genomes` section
`io_silent_fail` field is predefined with `output->io_silent_fail` value
`dir` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_GENOMES_DIR`
`logger` is defaulted (all, or just missing parts) by `output->logger` section
`output_non_glued_fragments` is predefined with :attr:`Configuration.DEFAULT_OUTPUT_GENOMES_ONGF`
:return: Nothing, performs inplace changes
:rtype: `None`
"""
if self[self.DIR] in ("", None):
self[self.DIR] = os.getcwd()
if self[self.IOSF] in ("", None):
self[self.IOSF] = self.DEFAULT_IOSF
# logger section
if self[self.LOGGER][self.NAME] in ("", None):
self[self.LOGGER][self.NAME] = self.DEFAULT_LOGGER_NAME
self[self.LOGGER][self.NAME] = str(self[self.LOGGER][self.NAME])
if self[self.LOGGER][self.LEVEL] in ("", None):
self[self.LOGGER][self.LEVEL] = self.DEFAULT_LOGGER_LEVEL
self[self.LOGGER][self.LEVEL] = str(self[self.LOGGER][self.LEVEL])
if self[self.LOGGER][self.FORMAT] in ("", None):
self[self.LOGGER][self.FORMAT] = self.DEFAULT_LOGGER_FORMAT
self[self.LOGGER][self.FORMAT] = str(self[self.LOGGER][self.FORMAT])
if self[self.LOGGER][self.DESTINATION] in ([], "", None):
self[self.LOGGER][self.DESTINATION] = self.DEFAULT_LOGGER_DESTINATION
# input section
if self[self.INPUT][self.SOURCE] in ("", None):
self[self.INPUT][self.SOURCE] = []
if self[self.INPUT][self.DIR] in ("", None):
self[self.INPUT][self.DIR] = self.DEFAULT_INPUT_DIR
if self[self.INPUT][self.IOSF] in ("", None):
self[self.INPUT][self.IOSF] = self[self.IOSF]
self._update_logger_config(logger_to_update=self[self.INPUT][self.LOGGER],
source_logger=self[self.LOGGER])
# algorithm section
if self.LOGGER not in self[self.ALGORITHM] or self[self.ALGORITHM][self.LOGGER] in ("", None):
self[self.ALGORITHM][self.LOGGER] = {}
self._update_logger_config(logger_to_update=self[self.ALGORITHM][self.LOGGER],
source_logger=self[self.LOGGER])
if self.IOSF not in self[self.ALGORITHM] or self[self.ALGORITHM][self.IOSF] in ("", None):
self[self.ALGORITHM][self.IOSF] = self[self.IOSF]
if self.TASKS not in self[self.ALGORITHM]:
self[self.ALGORITHM][self.TASKS] = {}
if self.EXECUTABLE_CONTAINERS not in self[self.ALGORITHM]:
self[self.ALGORITHM][self.EXECUTABLE_CONTAINERS] = []
if self.PATHS not in self[self.ALGORITHM][self.TASKS] or self[self.ALGORITHM][self.TASKS][self.PATHS] in ("", None):
self[self.ALGORITHM][self.TASKS][self.PATHS] = []
self[self.ALGORITHM][self.TASKS][self.PATHS] = [self.DEFAULT_ALGORITHM_TASKS_PATH] + self[self.ALGORITHM][self.TASKS][self.PATHS]
for ecs in self[self.ALGORITHM][self.EXECUTABLE_CONTAINERS]:
if self.REFERENCE not in ecs:
ecs[self.REFERENCE] = ecs[self.NAME] + "s"
if ecs[self.REFERENCE] not in self[self.ALGORITHM]:
self[self.ALGORITHM][ecs[self.REFERENCE]] = []
for executable_container in self[self.ALGORITHM][ecs[self.REFERENCE]]:
if self.SELF_LOOP not in executable_container:
executable_container[self.SELF_LOOP] = self.DEFAULT_ALGORITHM_EC_SELF_LOOP
if self.ENTRIES not in executable_container:
executable_container[self.ENTRIES] = []
if self.PIPELINE not in self[self.ALGORITHM]:
self[self.ALGORITHM][self.PIPELINE] = {}
if self.LOGGER not in self[self.ALGORITHM][self.PIPELINE] or self[self.ALGORITHM][self.PIPELINE][self.LOGGER] in ("", None):
self[self.ALGORITHM][self.PIPELINE][self.LOGGER] = {}
self._update_logger_config(logger_to_update=self[self.ALGORITHM][self.PIPELINE][self.LOGGER],
source_logger=self[self.ALGORITHM][self.LOGGER])
if self.IOSF not in self[self.ALGORITHM][self.PIPELINE] or self[self.ALGORITHM][self.PIPELINE][self.IOSF] in ("", None):
self[self.ALGORITHM][self.PIPELINE][self.IOSF] = self[self.ALGORITHM][self.IOSF]
if self.ENTRIES not in self[self.ALGORITHM][self.PIPELINE] or self[self.ALGORITHM][self.PIPELINE][self.ENTRIES] in ("", None):
self[self.ALGORITHM][self.PIPELINE][self.ENTRIES] = []
if self.SELF_LOOP not in self[self.ALGORITHM][self.PIPELINE] or self[self.ALGORITHM][self.PIPELINE][self.SELF_LOOP] in ("", None):
self[self.ALGORITHM][self.PIPELINE][self.SELF_LOOP] = self.DEFAULT_ALGORITHM_PIPELINE_SELF_LOOP
# output section
if self[self.OUTPUT][self.DIR] in ("", None):
self[self.OUTPUT][self.DIR] = os.path.join(self[self.DIR], self.DEFAULT_OUTPUT_DIR)
if self[self.OUTPUT][self.IOSF] in ("", None):
self[self.OUTPUT][self.IOSF] = self[self.IOSF]
self._update_logger_config(logger_to_update=self[self.OUTPUT][self.LOGGER],
source_logger=self[self.LOGGER])
# output -> stats section
if self.DIR not in self[self.OUTPUT][self.STATS] or self[self.OUTPUT][self.STATS][self.DIR] in ("", None):
self[self.OUTPUT][self.STATS][self.DIR] = self.DEFAULT_OUTPUT_STATS_DIR
if self.IOSF not in self[self.OUTPUT][self.STATS] or self[self.OUTPUT][self.STATS][self.IOSF] in ("", None):
self[self.OUTPUT][self.STATS][self.IOSF] = self[self.OUTPUT][self.IOSF]
if self.FILE not in self[self.OUTPUT][self.STATS] or self[self.OUTPUT][self.STATS][self.FILE] in ("", None):
self[self.OUTPUT][self.STATS][self.FILE] = self.DEFAULT_OUTPUT_STATS_FILE
if self.LOGGER not in self[self.OUTPUT][self.STATS]:
self[self.OUTPUT][self.STATS][self.LOGGER] = {}
self._update_logger_config(logger_to_update=self[self.OUTPUT][self.STATS][self.LOGGER],
source_logger=self[self.OUTPUT][self.LOGGER])
# output -> assembly_points section
if self.DIR not in self[self.OUTPUT][self.ASSEMBLY_POINTS] or self[self.OUTPUT][self.ASSEMBLY_POINTS][self.DIR] in ("", None):
self[self.OUTPUT][self.ASSEMBLY_POINTS][self.DIR] = self.DEFAULT_OUTPUT_AP_DIR
if self.IOSF not in self[self.OUTPUT][self.ASSEMBLY_POINTS] or self[self.OUTPUT][self.ASSEMBLY_POINTS][self.IOSF] in ("", None):
self[self.OUTPUT][self.ASSEMBLY_POINTS][self.IOSF] = self[self.OUTPUT][self.IOSF]
if self.FILE not in self[self.OUTPUT][self.ASSEMBLY_POINTS] or self[self.OUTPUT][self.ASSEMBLY_POINTS][self.FILE] in ("", None):
self[self.OUTPUT][self.ASSEMBLY_POINTS][self.FILE] = self.DEFAULT_OUTPUT_AP_FILE
if self.GENOME_SPECIFIC not in self[self.OUTPUT][self.ASSEMBLY_POINTS] or self[self.OUTPUT][self.ASSEMBLY_POINTS][
self.GENOME_SPECIFIC]:
self[self.OUTPUT][self.ASSEMBLY_POINTS][self.GENOME_SPECIFIC] = self.DEFAULT_OUTPUT_AP_GENOME_SPECIFIC
if self.GENOME_SPECIFIC_FNP not in self[self.OUTPUT][self.ASSEMBLY_POINTS] or self[self.OUTPUT][self.ASSEMBLY_POINTS][
self.GENOME_SPECIFIC_FNP]:
self[self.OUTPUT][self.ASSEMBLY_POINTS][self.GENOME_SPECIFIC_FNP] = self.DEFAULT_OUTPUT_AP_GSFNP
if self.LOGGER not in self[self.OUTPUT][self.ASSEMBLY_POINTS]:
self[self.OUTPUT][self.ASSEMBLY_POINTS][self.LOGGER] = {}
self._update_logger_config(logger_to_update=self[self.OUTPUT][self.ASSEMBLY_POINTS][self.LOGGER],
source_logger=self[self.OUTPUT][self.LOGGER])
# output -> genomes section
if self.DIR not in self[self.OUTPUT][self.GENOMES] or self[self.OUTPUT][self.GENOMES][self.DIR] in ("", None):
self[self.OUTPUT][self.GENOMES][self.DIR] = self.DEFAULT_OUTPUT_GENOMES_DIR
if self.IOSF not in self[self.OUTPUT][self.GENOMES] or self[self.OUTPUT][self.GENOMES][self.IOSF] in ("", None):
self[self.OUTPUT][self.GENOMES][self.IOSF] = self[self.OUTPUT][self.IOSF]
if self.OUTPUT_NG_FRAGMENTS not in self[self.OUTPUT][self.GENOMES] or self[self.OUTPUT][self.GENOMES][self.OUTPUT_NG_FRAGMENTS] in (
"", None):
self[self.OUTPUT][self.GENOMES][self.OUTPUT_NG_FRAGMENTS] = self.DEFAULT_OUTPUT_GENOMES_ONGF
if self.LOGGER not in self[self.OUTPUT][self.GENOMES]:
self[self.OUTPUT][self.GENOMES][self.LOGGER] = {}
self._update_logger_config(logger_to_update=self[self.OUTPUT][self.GENOMES][self.LOGGER],
source_logger=self[self.OUTPUT][self.LOGGER]) |
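The method above repeats one pattern many times: copy a missing or empty value down from a parent section. A standalone sketch of that pattern (the `update_missing` helper is a hypothetical stand-in for `_update_logger_config`, not the class's actual code):

```python
def update_missing(target, source):
    # fill keys that are absent, empty, or None with the parent section's values
    for key, value in source.items():
        if key not in target or target[key] in ("", None):
            target[key] = value

top_logger = {"name": "app", "level": "INFO", "format": "%(message)s"}
output_logger = {"level": "DEBUG"}           # only overrides the level
update_missing(output_logger, top_logger)    # name and format fall back to the top level
print(output_logger)
# {'level': 'DEBUG', 'name': 'app', 'format': '%(message)s'}
```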
def get_host_domainname(name, domains=None, **api_opts):
'''
Get host domain name
If no domains are passed, the hostname is checked for a zone in infoblox,
if no zone split on first dot.
If domains are provided, the best match out of the list is returned.
If none are found the return is None
dots at end of names are ignored.
CLI Example:
.. code-block:: bash
salt-call uwl.get_host_domainname name=localhost.t.domain.com \
domains=['domain.com', 't.domain.com.']
# returns: t.domain.com
'''
name = name.lower().rstrip('.')
if not domains:
data = get_host(name=name, **api_opts)
if data and 'zone' in data:
return data['zone'].lower()
else:
if name.count('.') > 1:
return name[name.find('.')+1:]
return name
match = ''
for d in domains:
d = d.lower().rstrip('.')
if name.endswith(d) and len(d) > len(match):
match = d
return match if match else None | Get host domain name
If no domains are passed, the hostname is checked for a zone in infoblox,
if no zone split on first dot.
If domains are provided, the best match out of the list is returned.
If none are found the return is None
dots at end of names are ignored.
CLI Example:
.. code-block:: bash
salt-call uwl.get_host_domainname name=localhost.t.domain.com \
domains=['domain.com', 't.domain.com.']
# returns: t.domain.com | Below is the instruction that describes the task:
### Input:
Get host domain name
If no domains are passed, the hostname is checked for a zone in infoblox,
if no zone split on first dot.
If domains are provided, the best match out of the list is returned.
If none are found the return is None
dots at end of names are ignored.
CLI Example:
.. code-block:: bash
salt-call uwl.get_host_domainname name=localhost.t.domain.com \
domains=['domain.com', 't.domain.com.']
# returns: t.domain.com
### Response:
def get_host_domainname(name, domains=None, **api_opts):
'''
Get host domain name
If no domains are passed, the hostname is checked for a zone in infoblox,
if no zone split on first dot.
If domains are provided, the best match out of the list is returned.
If none are found the return is None
dots at end of names are ignored.
CLI Example:
.. code-block:: bash
salt-call uwl.get_host_domainname name=localhost.t.domain.com \
domains=['domain.com', 't.domain.com.']
# returns: t.domain.com
'''
name = name.lower().rstrip('.')
if not domains:
data = get_host(name=name, **api_opts)
if data and 'zone' in data:
return data['zone'].lower()
else:
if name.count('.') > 1:
return name[name.find('.')+1:]
return name
match = ''
for d in domains:
d = d.lower().rstrip('.')
if name.endswith(d) and len(d) > len(match):
match = d
return match if match else None |
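The docstring example can be reproduced without any Infoblox call by rerunning just the suffix-matching branch: the longest listed domain that the lowercased, dot-stripped name ends with wins.

```python
name = "localhost.t.domain.com".lower().rstrip(".")
domains = ["domain.com", "t.domain.com."]

match = ""
for d in domains:
    d = d.lower().rstrip(".")
    if name.endswith(d) and len(d) > len(match):
        match = d

print(match or None)   # t.domain.com
```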
def set_filters(query, base_filters):
"""Put together all filters we have and set them as 'and' filter
within filtered query.
:param query: elastic query being constructed
:param base_filters: all filters set outside of query (eg. resource config, sub_resource_lookup)
"""
filters = [f for f in base_filters if f is not None]
query_filter = query['query']['filtered'].get('filter', None)
if query_filter is not None:
if 'and' in query_filter:
filters.extend(query_filter['and'])
else:
filters.append(query_filter)
if filters:
query['query']['filtered']['filter'] = {'and': filters} | Put together all filters we have and set them as 'and' filter
within filtered query.
:param query: elastic query being constructed
:param base_filters: all filters set outside of query (eg. resource config, sub_resource_lookup) | Below is the instruction that describes the task:
### Input:
Put together all filters we have and set them as 'and' filter
within filtered query.
:param query: elastic query being constructed
:param base_filters: all filters set outside of query (eg. resource config, sub_resource_lookup)
### Response:
def set_filters(query, base_filters):
"""Put together all filters we have and set them as 'and' filter
within filtered query.
:param query: elastic query being constructed
:param base_filters: all filters set outside of query (eg. resource config, sub_resource_lookup)
"""
filters = [f for f in base_filters if f is not None]
query_filter = query['query']['filtered'].get('filter', None)
if query_filter is not None:
if 'and' in query_filter:
filters.extend(query_filter['and'])
else:
filters.append(query_filter)
if filters:
query['query']['filtered']['filter'] = {'and': filters} |
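A short usage sketch (assuming `set_filters` from the row above is in scope, along with the legacy Elasticsearch `filtered` query shape it expects): an existing filter on the query is appended after the base filters and everything is wrapped in a single `and`.

```python
query = {"query": {"filtered": {"filter": {"term": {"state": "published"}}}}}
base_filters = [{"term": {"type": "text"}}, None]   # None entries are dropped

set_filters(query, base_filters)
print(query["query"]["filtered"]["filter"])
# {'and': [{'term': {'type': 'text'}}, {'term': {'state': 'published'}}]}
```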
def miscellaneous_menu(self, value):
"""
Setter for **self.__miscellaneous_menu** attribute.
:param value: Attribute value.
:type value: QMenu
"""
if value is not None:
assert type(value) is QMenu, "'{0}' attribute: '{1}' type is not 'QMenu'!".format(
"miscellaneous_menu", value)
self.__miscellaneous_menu = value | Setter for **self.__miscellaneous_menu** attribute.
:param value: Attribute value.
:type value: QMenu | Below is the instruction that describes the task:
### Input:
Setter for **self.__miscellaneous_menu** attribute.
:param value: Attribute value.
:type value: QMenu
### Response:
def miscellaneous_menu(self, value):
"""
Setter for **self.__miscellaneous_menu** attribute.
:param value: Attribute value.
:type value: QMenu
"""
if value is not None:
assert type(value) is QMenu, "'{0}' attribute: '{1}' type is not 'QMenu'!".format(
"miscellaneous_menu", value)
self.__miscellaneous_menu = value |
def resource_headers(self, jobscript):
"""Given a :class:`~clusterjob.JobScript` instance, return a list of
lines that encode the resource requirements, to be added at the top of
the rendered job script
"""
lines = []
for (key, val) in jobscript.resources.items():
if key in self.resource_replacements:
pbs_key = self.resource_replacements[key]
if key == 'mem':
val = str(val) + "m"
else:
pbs_key = key
if key in ['nodes', 'threads', 'ppn']:
raise ResourcesNotSupportedError("The SGE scheduling system "
"uses 'parallel environments' to request resources "
"for parallelization. SgeBackend should be subclassed "
"for a specific cluster configuration in order to "
"encode 'nodes', 'threads', and 'ppn'.")
if key in ['-cwd', 'cwd']:
continue
if val is None:
continue
if type(val) is bool:
if val:
if not pbs_key.startswith('-'):
pbs_key = '-' + pbs_key
lines.append("%s %s" % (self.prefix, pbs_key))
else:
if not pbs_key.startswith('-'):
pbs_key = '-l %s=' % pbs_key
if pbs_key.endswith('='):
lines.append('%s %s%s' % (self.prefix, pbs_key, str(val)))
else:
lines.append('%s %s %s' % (self.prefix, pbs_key, str(val)))
lines.append("%s -cwd" % self.prefix)
return lines | Given a :class:`~clusterjob.JobScript` instance, return a list of
lines that encode the resource requirements, to be added at the top of
the rendered job script | Below is the instruction that describes the task:
### Input:
Given a :class:`~clusterjob.JobScript` instance, return a list of
lines that encode the resource requirements, to be added at the top of
the rendered job script
### Response:
def resource_headers(self, jobscript):
"""Given a :class:`~clusterjob.JobScript` instance, return a list of
lines that encode the resource requirements, to be added at the top of
the rendered job script
"""
lines = []
for (key, val) in jobscript.resources.items():
if key in self.resource_replacements:
pbs_key = self.resource_replacements[key]
if key == 'mem':
val = str(val) + "m"
else:
pbs_key = key
if key in ['nodes', 'threads', 'ppn']:
raise ResourcesNotSupportedError("The SGE scheduling system "
"uses 'parallel environments' to request resources "
"for parallelization. SgeBackend should be subclassed "
"for a specific cluster configuration in order to "
"encode 'nodes', 'threads', and 'ppn'.")
if key in ['-cwd', 'cwd']:
continue
if val is None:
continue
if type(val) is bool:
if val:
if not pbs_key.startswith('-'):
pbs_key = '-' + pbs_key
lines.append("%s %s" % (self.prefix, pbs_key))
else:
if not pbs_key.startswith('-'):
pbs_key = '-l %s=' % pbs_key
if pbs_key.endswith('='):
lines.append('%s %s%s' % (self.prefix, pbs_key, str(val)))
else:
lines.append('%s %s %s' % (self.prefix, pbs_key, str(val)))
lines.append("%s -cwd" % self.prefix)
return lines |
def init_state(self, x):
"""
Initialize t, m, and u
"""
optim_state = {}
optim_state["t"] = 0.
optim_state["m"] = [tf.zeros_like(v) for v in x]
optim_state["u"] = [tf.zeros_like(v) for v in x]
return optim_state | Initialize t, m, and u | Below is the instruction that describes the task:
### Input:
Initialize t, m, and u
### Response:
def init_state(self, x):
"""
Initialize t, m, and u
"""
optim_state = {}
optim_state["t"] = 0.
optim_state["m"] = [tf.zeros_like(v) for v in x]
optim_state["u"] = [tf.zeros_like(v) for v in x]
return optim_state |
def set_sample_weight(pipeline_steps, sample_weight=None):
"""Recursively iterates through all objects in the pipeline and sets sample weight.
Parameters
----------
pipeline_steps: array-like
List of (str, obj) tuples from a scikit-learn pipeline or related object
sample_weight: array-like
List of sample weight
Returns
-------
sample_weight_dict:
A dictionary of sample_weight
"""
sample_weight_dict = {}
if not isinstance(sample_weight, type(None)):
for (pname, obj) in pipeline_steps:
if inspect.getargspec(obj.fit).args.count('sample_weight'):
step_sw = pname + '__sample_weight'
sample_weight_dict[step_sw] = sample_weight
if sample_weight_dict:
return sample_weight_dict
else:
return None | Recursively iterates through all objects in the pipeline and sets sample weight.
Parameters
----------
pipeline_steps: array-like
List of (str, obj) tuples from a scikit-learn pipeline or related object
sample_weight: array-like
List of sample weight
Returns
-------
sample_weight_dict:
A dictionary of sample_weight | Below is the instruction that describes the task:
### Input:
Recursively iterates through all objects in the pipeline and sets sample weight.
Parameters
----------
pipeline_steps: array-like
List of (str, obj) tuples from a scikit-learn pipeline or related object
sample_weight: array-like
List of sample weight
Returns
-------
sample_weight_dict:
A dictionary of sample_weight
### Response:
def set_sample_weight(pipeline_steps, sample_weight=None):
"""Recursively iterates through all objects in the pipeline and sets sample weight.
Parameters
----------
pipeline_steps: array-like
List of (str, obj) tuples from a scikit-learn pipeline or related object
sample_weight: array-like
List of sample weight
Returns
-------
sample_weight_dict:
A dictionary of sample_weight
"""
sample_weight_dict = {}
if not isinstance(sample_weight, type(None)):
for (pname, obj) in pipeline_steps:
if inspect.getargspec(obj.fit).args.count('sample_weight'):
step_sw = pname + '__sample_weight'
sample_weight_dict[step_sw] = sample_weight
if sample_weight_dict:
return sample_weight_dict
else:
return None |
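A standalone mirror of that lookup, using `inspect.getfullargspec` because `getargspec` was removed in Python 3.11 (the toy estimator and step names are hypothetical): only steps whose `fit` accepts `sample_weight` get a `<step>__sample_weight` entry.

```python
import inspect

class ToyClassifier:
    def fit(self, X, y, sample_weight=None):
        return self

pipeline_steps = [("scaler", object()), ("clf", ToyClassifier())]
sample_weight = [1.0, 0.5, 2.0]

sample_weight_dict = {}
for pname, obj in pipeline_steps:
    fit = getattr(obj, "fit", None)
    if fit is not None and "sample_weight" in inspect.getfullargspec(fit).args:
        sample_weight_dict[pname + "__sample_weight"] = sample_weight

print(sample_weight_dict)   # {'clf__sample_weight': [1.0, 0.5, 2.0]}
```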
def get_common_paths_ancestor(*args):
"""
Gets common paths ancestor of given paths.
Usage::
>>> get_common_paths_ancestor("/Users/JohnDoe/Documents", "/Users/JohnDoe/Documents/Test.txt")
u'/Users/JohnDoe/Documents'
:param \*args: Paths to retrieve common ancestor from.
:type \*args: [unicode]
:return: Common path ancestor.
:rtype: unicode
"""
path_ancestor = os.sep.join(get_common_ancestor(*[path.split(os.sep) for path in args]))
LOGGER.debug("> Common Paths Ancestor: '{0}'".format(path_ancestor))
return path_ancestor | Gets common paths ancestor of given paths.
Usage::
>>> get_common_paths_ancestor("/Users/JohnDoe/Documents", "/Users/JohnDoe/Documents/Test.txt")
u'/Users/JohnDoe/Documents'
:param \*args: Paths to retrieve common ancestor from.
:type \*args: [unicode]
:return: Common path ancestor.
:rtype: unicode | Below is the instruction that describes the task:
### Input:
Gets common paths ancestor of given paths.
Usage::
>>> get_common_paths_ancestor("/Users/JohnDoe/Documents", "/Users/JohnDoe/Documents/Test.txt")
u'/Users/JohnDoe/Documents'
:param \*args: Paths to retrieve common ancestor from.
:type \*args: [unicode]
:return: Common path ancestor.
:rtype: unicode
### Response:
def get_common_paths_ancestor(*args):
"""
Gets common paths ancestor of given paths.
Usage::
>>> get_common_paths_ancestor("/Users/JohnDoe/Documents", "/Users/JohnDoe/Documents/Test.txt")
u'/Users/JohnDoe/Documents'
:param \*args: Paths to retrieve common ancestor from.
:type \*args: [unicode]
:return: Common path ancestor.
:rtype: unicode
"""
path_ancestor = os.sep.join(get_common_ancestor(*[path.split(os.sep) for path in args]))
LOGGER.debug("> Common Paths Ancestor: '{0}'".format(path_ancestor))
return path_ancestor |
def init_pipette():
"""
Finds pipettes attached to the robot currently and chooses the correct one
to add to the session.
:return: The pipette type and mount chosen for deck calibration
"""
global session
pipette_info = set_current_mount(session.adapter, session)
pipette = pipette_info['pipette']
res = {}
if pipette:
session.current_model = pipette_info['model']
if not feature_flags.use_protocol_api_v2():
mount = pipette.mount
session.current_mount = mount
else:
mount = pipette.get('mount')
session.current_mount = mount_by_name[mount]
session.pipettes[mount] = pipette
res = {'mount': mount, 'model': pipette_info['model']}
log.info("Pipette info {}".format(session.pipettes))
return res | Finds pipettes attached to the robot currently and chooses the correct one
to add to the session.
:return: The pipette type and mount chosen for deck calibration | Below is the instruction that describes the task:
### Input:
Finds pipettes attached to the robot currently and chooses the correct one
to add to the session.
:return: The pipette type and mount chosen for deck calibration
### Response:
def init_pipette():
"""
Finds pipettes attached to the robot currently and chooses the correct one
to add to the session.
:return: The pipette type and mount chosen for deck calibration
"""
global session
pipette_info = set_current_mount(session.adapter, session)
pipette = pipette_info['pipette']
res = {}
if pipette:
session.current_model = pipette_info['model']
if not feature_flags.use_protocol_api_v2():
mount = pipette.mount
session.current_mount = mount
else:
mount = pipette.get('mount')
session.current_mount = mount_by_name[mount]
session.pipettes[mount] = pipette
res = {'mount': mount, 'model': pipette_info['model']}
log.info("Pipette info {}".format(session.pipettes))
return res |
def _from_dict(cls, _dict):
"""Initialize a QueryRelationsRelationship object from a json dictionary."""
args = {}
if 'type' in _dict:
args['type'] = _dict.get('type')
if 'frequency' in _dict:
args['frequency'] = _dict.get('frequency')
if 'arguments' in _dict:
args['arguments'] = [
QueryRelationsArgument._from_dict(x)
for x in (_dict.get('arguments'))
]
if 'evidence' in _dict:
args['evidence'] = [
QueryEvidence._from_dict(x) for x in (_dict.get('evidence'))
]
return cls(**args) | Initialize a QueryRelationsRelationship object from a json dictionary. | Below is the instruction that describes the task:
### Input:
Initialize a QueryRelationsRelationship object from a json dictionary.
### Response:
def _from_dict(cls, _dict):
"""Initialize a QueryRelationsRelationship object from a json dictionary."""
args = {}
if 'type' in _dict:
args['type'] = _dict.get('type')
if 'frequency' in _dict:
args['frequency'] = _dict.get('frequency')
if 'arguments' in _dict:
args['arguments'] = [
QueryRelationsArgument._from_dict(x)
for x in (_dict.get('arguments'))
]
if 'evidence' in _dict:
args['evidence'] = [
QueryEvidence._from_dict(x) for x in (_dict.get('evidence'))
]
return cls(**args) |
def add_property(self, name, fn, cached=True):
"""Adds a property to the Context.
See `Mapper.add_ctx_property`, which uses this method to install
the properties added on the Mapper level.
"""
if name in self.__properties:
raise KeyError("Trying to add a property '%s' that already exists on this %s object." % (name, self.__class__.__name__))
self.__properties[name] = (fn, cached) | Adds a property to the Context.
See `Mapper.add_ctx_property`, which uses this method to install
the properties added on the Mapper level. | Below is the instruction that describes the task:
### Input:
Adds a property to the Context.
See `Mapper.add_ctx_property`, which uses this method to install
the properties added on the Mapper level.
### Response:
def add_property(self, name, fn, cached=True):
"""Adds a property to the Context.
See `Mapper.add_ctx_property`, which uses this method to install
the properties added on the Mapper level.
"""
if name in self.__properties:
raise KeyError("Trying to add a property '%s' that already exists on this %s object." % (name, self.__class__.__name__))
self.__properties[name] = (fn, cached) |
def _flush_events(self):
"""! @brief Send all pending events to event sink."""
if self._sink is not None:
for event in self._pending_events:
self._sink.receive(event)
self._pending_events = [] | ! @brief Send all pending events to event sink. | Below is the instruction that describes the task:
### Input:
! @brief Send all pending events to event sink.
### Response:
def _flush_events(self):
"""! @brief Send all pending events to event sink."""
if self._sink is not None:
for event in self._pending_events:
self._sink.receive(event)
self._pending_events = [] |
def dotted(self):
" Returns dotted-decimal reperesentation "
obj = libcrypto.OBJ_nid2obj(self.nid)
buf = create_string_buffer(256)
libcrypto.OBJ_obj2txt(buf, 256, obj, 1)
if pyver == 2:
return buf.value
else:
return buf.value.decode('ascii') | Returns dotted-decimal representation | Below is the instruction that describes the task:
### Input:
Returns dotted-decimal representation
### Response:
def dotted(self):
" Returns dotted-decimal reperesentation "
obj = libcrypto.OBJ_nid2obj(self.nid)
buf = create_string_buffer(256)
libcrypto.OBJ_obj2txt(buf, 256, obj, 1)
if pyver == 2:
return buf.value
else:
return buf.value.decode('ascii') |
def get_package_path(name):
"""Get the path to an installed package.
name (unicode): Package name.
RETURNS (Path): Path to installed package.
"""
name = name.lower() # use lowercase version to be safe
# Here we're importing the module just to find it. This is worryingly
# indirect, but it's otherwise very difficult to find the package.
pkg = importlib.import_module(name)
return Path(pkg.__file__).parent | Get the path to an installed package.
name (unicode): Package name.
RETURNS (Path): Path to installed package. | Below is the instruction that describes the task:
### Input:
Get the path to an installed package.
name (unicode): Package name.
RETURNS (Path): Path to installed package.
### Response:
def get_package_path(name):
"""Get the path to an installed package.
name (unicode): Package name.
RETURNS (Path): Path to installed package.
"""
name = name.lower() # use lowercase version to be safe
# Here we're importing the module just to find it. This is worryingly
# indirect, but it's otherwise very difficult to find the package.
pkg = importlib.import_module(name)
return Path(pkg.__file__).parent |
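The same lookup applied to a standard-library package, for illustration:

```python
import importlib
from pathlib import Path

pkg = importlib.import_module("json")
print(Path(pkg.__file__).parent)   # e.g. /usr/lib/python3.11/json
```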
def dumpfn(obj, fn, *args, **kwargs):
"""
Dump to a json/yaml directly by filename instead of a File-like object.
For YAML, ruamel.yaml must be installed. The file type is automatically
detected. YAML is assumed if the filename contains "yaml" (lower or upper
case). Otherwise, json is always assumed.
Args:
obj (object): Object to dump.
fn (str/Path): filename or pathlib.Path.
\*args: Any of the args supported by json/yaml.dump.
\*\*kwargs: Any of the kwargs supported by json/yaml.dump.
Returns:
(object) Result of json.load.
"""
if "mpk" in os.path.basename(fn).lower():
if msgpack is None:
raise RuntimeError(
"Loading of message pack files is not "
"possible as msgpack-python is not installed.")
if "default" not in kwargs:
kwargs["default"] = default
with zopen(fn, "wb") as fp:
msgpack.dump(obj, fp, *args, **kwargs)
else:
with zopen(fn, "wt") as fp:
if "yaml" in os.path.basename(fn).lower():
if yaml is None:
raise RuntimeError("Loading of YAML files is not "
"possible as ruamel.yaml is not installed.")
if "Dumper" not in kwargs:
kwargs["Dumper"] = Dumper
yaml.dump(obj, fp, *args, **kwargs)
else:
if "cls" not in kwargs:
kwargs["cls"] = MontyEncoder
fp.write("%s" % json.dumps(obj, *args, **kwargs)) | Dump to a json/yaml directly by filename instead of a File-like object.
For YAML, ruamel.yaml must be installed. The file type is automatically
detected. YAML is assumed if the filename contains "yaml" (lower or upper
case). Otherwise, json is always assumed.
Args:
obj (object): Object to dump.
fn (str/Path): filename or pathlib.Path.
\*args: Any of the args supported by json/yaml.dump.
\*\*kwargs: Any of the kwargs supported by json/yaml.dump.
Returns:
(object) Result of json.load. | Below is the instruction that describes the task:
### Input:
Dump to a json/yaml directly by filename instead of a File-like object.
For YAML, ruamel.yaml must be installed. The file type is automatically
detected. YAML is assumed if the filename contains "yaml" (lower or upper
case). Otherwise, json is always assumed.
Args:
obj (object): Object to dump.
fn (str/Path): filename or pathlib.Path.
\*args: Any of the args supported by json/yaml.dump.
\*\*kwargs: Any of the kwargs supported by json/yaml.dump.
Returns:
(object) Result of json.load.
### Response:
def dumpfn(obj, fn, *args, **kwargs):
"""
Dump to a json/yaml directly by filename instead of a File-like object.
For YAML, ruamel.yaml must be installed. The file type is automatically
detected. YAML is assumed if the filename contains "yaml" (lower or upper
case). Otherwise, json is always assumed.
Args:
obj (object): Object to dump.
fn (str/Path): filename or pathlib.Path.
\*args: Any of the args supported by json/yaml.dump.
\*\*kwargs: Any of the kwargs supported by json/yaml.dump.
Returns:
(object) Result of json.load.
"""
if "mpk" in os.path.basename(fn).lower():
if msgpack is None:
raise RuntimeError(
"Loading of message pack files is not "
"possible as msgpack-python is not installed.")
if "default" not in kwargs:
kwargs["default"] = default
with zopen(fn, "wb") as fp:
msgpack.dump(obj, fp, *args, **kwargs)
else:
with zopen(fn, "wt") as fp:
if "yaml" in os.path.basename(fn).lower():
if yaml is None:
raise RuntimeError("Loading of YAML files is not "
"possible as ruamel.yaml is not installed.")
if "Dumper" not in kwargs:
kwargs["Dumper"] = Dumper
yaml.dump(obj, fp, *args, **kwargs)
else:
if "cls" not in kwargs:
kwargs["cls"] = MontyEncoder
fp.write("%s" % json.dumps(obj, *args, **kwargs)) |
def clean(self, tol=None):
"""
Clean actor's polydata. Can also be used to decimate a mesh if ``tol`` is large.
If ``tol=None`` only removes coincident points.
:param tol: defines how far should be the points from each other in terms of fraction
of the bounding box length.
.. hint:: |moving_least_squares1D| |moving_least_squares1D.py|_
|recosurface| |recosurface.py|_
"""
poly = self.polydata(False)
cleanPolyData = vtk.vtkCleanPolyData()
cleanPolyData.PointMergingOn()
cleanPolyData.ConvertLinesToPointsOn()
cleanPolyData.ConvertPolysToLinesOn()
cleanPolyData.SetInputData(poly)
if tol:
cleanPolyData.SetTolerance(tol)
cleanPolyData.Update()
return self.updateMesh(cleanPolyData.GetOutput()) | Clean actor's polydata. Can also be used to decimate a mesh if ``tol`` is large.
If ``tol=None`` only removes coincident points.
:param tol: defines how far should be the points from each other in terms of fraction
of the bounding box length.
.. hint:: |moving_least_squares1D| |moving_least_squares1D.py|_
|recosurface| |recosurface.py|_ | Below is the instruction that describes the task:
### Input:
Clean actor's polydata. Can also be used to decimate a mesh if ``tol`` is large.
If ``tol=None`` only removes coincident points.
:param tol: defines how far should be the points from each other in terms of fraction
of the bounding box length.
.. hint:: |moving_least_squares1D| |moving_least_squares1D.py|_
|recosurface| |recosurface.py|_
### Response:
def clean(self, tol=None):
"""
Clean actor's polydata. Can also be used to decimate a mesh if ``tol`` is large.
If ``tol=None`` only removes coincident points.
:param tol: defines how far should be the points from each other in terms of fraction
of the bounding box length.
.. hint:: |moving_least_squares1D| |moving_least_squares1D.py|_
|recosurface| |recosurface.py|_
"""
poly = self.polydata(False)
cleanPolyData = vtk.vtkCleanPolyData()
cleanPolyData.PointMergingOn()
cleanPolyData.ConvertLinesToPointsOn()
cleanPolyData.ConvertPolysToLinesOn()
cleanPolyData.SetInputData(poly)
if tol:
cleanPolyData.SetTolerance(tol)
cleanPolyData.Update()
return self.updateMesh(cleanPolyData.GetOutput()) |
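A hedged sketch of how this actor method might be called, assuming the vtkplotter package it belongs to is installed; Sphere is one of its mesh factories, but treat the exact import as an assumption.
from vtkplotter import Sphere   # assumed top-level re-export
mesh = Sphere(res=48)
merged = mesh.clean()             # only merge coincident points
decimated = mesh.clean(tol=0.02)  # also merge points closer than 2% of the bounding box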
def get_site_by_id(self, id):
"""
Looks up a site by ID and returns a TSquareSite representing that
object, or throws an exception if no such site is found.
@param id - The entityID of the site to look up
@returns A TSquareSite object
"""
response = self._session.get(BASE_URL_TSQUARE + '/site/{}.json'.format(id))
response.raise_for_status()
site_data = response.json()
return TSquareSite(**site_data) | Looks up a site by ID and returns a TSquareSite representing that
object, or throws an exception if no such site is found.
@param id - The entityID of the site to look up
@returns A TSquareSite object | Below is the instruction that describes the task:
### Input:
Looks up a site by ID and returns a TSquareSite representing that
object, or throws an exception if no such site is found.
@param id - The entityID of the site to look up
@returns A TSquareSite object
### Response:
def get_site_by_id(self, id):
"""
Looks up a site by ID and returns a TSquareSite representing that
object, or throws an exception if no such site is found.
@param id - The entityID of the site to look up
@returns A TSquareSite object
"""
response = self._session.get(BASE_URL_TSQUARE + '/site/{}.json'.format(id))
response.raise_for_status()
site_data = response.json()
return TSquareSite(**site_data) |
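The pattern above (GET, raise on HTTP errors, build an object from the JSON body) can be sketched standalone; the base URL and the plain-dict return below are placeholders, not the real TSquare client.
import requests
BASE = "https://example.edu/direct"   # placeholder endpoint
def get_site_by_id(session: requests.Session, site_id: str) -> dict:
    response = session.get("{}/site/{}.json".format(BASE, site_id))
    response.raise_for_status()   # surface 4xx/5xx instead of silently continuing
    return response.json()        # site data as a plain dict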
def cmd_switch_workdir(new_workdir):
"""
Arguments: <new work directory path>
Change current Paperwork's work directory.
Does *not* update the index.
You should run 'paperwork-shell rescan' after this command.
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"old_workdir": "file:///home/jflesch/papers",
"new_workdir": "file:///tmp/papers",
}
"""
new_workdir = FS.safe(new_workdir)
if not FS.exists(new_workdir) or not FS.isdir(new_workdir):
sys.stderr.write("New work directory {} doesn't exists".format(
new_workdir
))
return
pconfig = config.PaperworkConfig()
pconfig.read()
r = {
'old_workdir': pconfig.settings['workdir'].value,
'new_workdir': new_workdir
}
pconfig.settings['workdir'].value = new_workdir
pconfig.write()
reply(r) | Arguments: <new work directory path>
Change current Paperwork's work directory.
Does *not* update the index.
You should run 'paperwork-shell rescan' after this command.
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"old_workdir": "file:///home/jflesch/papers",
"new_workdir": "file:///tmp/papers",
} | Below is the instruction that describes the task:
### Input:
Arguments: <new work directory path>
Change current Paperwork's work directory.
Does *not* update the index.
You should run 'paperwork-shell rescan' after this command.
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"old_workdir": "file:///home/jflesch/papers",
"new_workdir": "file:///tmp/papers",
}
### Response:
def cmd_switch_workdir(new_workdir):
"""
Arguments: <new work directory path>
Change current Paperwork's work directory.
Does *not* update the index.
You should run 'paperwork-shell rescan' after this command.
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"old_workdir": "file:///home/jflesch/papers",
"new_workdir": "file:///tmp/papers",
}
"""
new_workdir = FS.safe(new_workdir)
if not FS.exists(new_workdir) or not FS.isdir(new_workdir):
sys.stderr.write("New work directory {} doesn't exists".format(
new_workdir
))
return
pconfig = config.PaperworkConfig()
pconfig.read()
r = {
'old_workdir': pconfig.settings['workdir'].value,
'new_workdir': new_workdir
}
pconfig.settings['workdir'].value = new_workdir
pconfig.write()
reply(r) |
def line_is_comment(line: str) -> bool:
"""
From FORTRAN Language Reference
(https://docs.oracle.com/cd/E19957-01/805-4939/z40007332024/index.html):
A line with a c, C, *, d, D, or ! in column one is a comment line, except
that if the -xld option is set, then the lines starting with D or d are
compiled as debug lines. The d, D, and ! are nonstandard.
If you put an exclamation mark (!) in any column of the statement field,
except within character literals, then everything after the ! on that
line is a comment.
A totally blank line is a comment line.
Args:
line
Returns:
True iff line is a comment, False otherwise.
"""
if line[0] in "cCdD*!":
return True
llstr = line.strip()
if len(llstr) == 0 or llstr[0] == "!":
return True
return False | From FORTRAN Language Reference
(https://docs.oracle.com/cd/E19957-01/805-4939/z40007332024/index.html):
A line with a c, C, *, d, D, or ! in column one is a comment line, except
that if the -xld option is set, then the lines starting with D or d are
compiled as debug lines. The d, D, and ! are nonstandard.
If you put an exclamation mark (!) in any column of the statement field,
except within character literals, then everything after the ! on that
line is a comment.
A totally blank line is a comment line.
Args:
line
Returns:
True iff line is a comment, False otherwise. | Below is the instruction that describes the task:
### Input:
From FORTRAN Language Reference
(https://docs.oracle.com/cd/E19957-01/805-4939/z40007332024/index.html):
A line with a c, C, *, d, D, or ! in column one is a comment line, except
that if the -xld option is set, then the lines starting with D or d are
compiled as debug lines. The d, D, and ! are nonstandard.
If you put an exclamation mark (!) in any column of the statement field,
except within character literals, then everything after the ! on that
line is a comment.
A totally blank line is a comment line.
Args:
line
Returns:
True iff line is a comment, False otherwise.
### Response:
def line_is_comment(line: str) -> bool:
"""
From FORTRAN Language Reference
(https://docs.oracle.com/cd/E19957-01/805-4939/z40007332024/index.html):
A line with a c, C, *, d, D, or ! in column one is a comment line, except
that if the -xld option is set, then the lines starting with D or d are
compiled as debug lines. The d, D, and ! are nonstandard.
If you put an exclamation mark (!) in any column of the statement field,
except within character literals, then everything after the ! on that
line is a comment.
A totally blank line is a comment line.
Args:
line
Returns:
True iff line is a comment, False otherwise.
"""
if line[0] in "cCdD*!":
return True
llstr = line.strip()
if len(llstr) == 0 or llstr[0] == "!":
return True
return False |
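Because the function is self-contained, the rules in its docstring can be checked directly; the calls below exercise those cases, assuming line_is_comment as defined above is in scope.
assert line_is_comment("C  classic full-line comment")
assert line_is_comment("* also a comment")
assert line_is_comment("    ")                    # blank line counts as a comment
assert not line_is_comment("      x = 1")
assert not line_is_comment("      y = 2  ! trailing text, but not a comment line")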
async def delete_cas(self, key, *, index):
"""Deletes the Key with check-and-set semantics.
Parameters:
key (str): Key to delete
index (ObjectIndex): Index ID
Response:
bool: ``True`` on success
The Key will only be deleted if its current modify index matches the
supplied Index.
"""
index = extract_attr(index, keys=["ModifyIndex", "Index"])
response = await self._discard(key, cas=index)
return response.body is True | Deletes the Key with check-and-set semantics.
Parameters:
key (str): Key to delete
index (ObjectIndex): Index ID
Response:
bool: ``True`` on success
The Key will only be deleted if its current modify index matches the
supplied Index. | Below is the instruction that describes the task:
### Input:
Deletes the Key with check-and-set semantics.
Parameters:
key (str): Key to delete
index (ObjectIndex): Index ID
Response:
bool: ``True`` on success
The Key will only be deleted if its current modify index matches the
supplied Index.
### Response:
async def delete_cas(self, key, *, index):
"""Deletes the Key with check-and-set semantics.
Parameters:
key (str): Key to delete
index (ObjectIndex): Index ID
Response:
bool: ``True`` on success
The Key will only be deleted if its current modify index matches the
supplied Index.
"""
index = extract_attr(index, keys=["ModifyIndex", "Index"])
response = await self._discard(key, cas=index)
return response.body is True |
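Check-and-set deletion is easiest to see with a toy in-memory store; the sketch below is not the Consul client, only the semantics the docstring describes.
def cas_delete(store, key, index):
    entry = store.get(key)
    if entry is None or entry["ModifyIndex"] != index:
        return False              # stale index: nothing is deleted
    del store[key]
    return True
store = {"web/config": {"Value": b"x", "ModifyIndex": 7}}
assert cas_delete(store, "web/config", index=3) is False
assert cas_delete(store, "web/config", index=7) is True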
def set_env(settings=None, setup_dir=''):
"""
Used in management commands or at the module level of a fabfile to
integrate woven project django.conf settings into fabric, and set the local current
working directory to the distribution root (where setup.py lives).
``settings`` is your django settings module to pass in
if you want to call this from a fabric script.
``setup_dir`` is an optional path to the directory containing setup.py
This would be used in instances where setup.py was not above the cwd
This function is used to set the environment for all hosts
"""
#switch the working directory to the distribution root where setup.py is
if hasattr(env, 'setup_path') and env.setup_path:
setup_path = env.setup_path
else:
with fab_settings(fabfile='setup.py'):
if setup_dir:
setup_path = os.path.join(setup_dir,'setup.py')
else:
setup_path = find_fabfile()
if not setup_path:
print 'Error: You must have a setup.py file in the current or a parent folder'
sys.exit(1)
local_working_dir = os.path.split(setup_path)[0]
os.chdir(local_working_dir)
setup = run_setup('setup.py',stop_after="init")
if setup.get_name() == 'UNKNOWN' or setup.get_version()=='0.0.0' or not setup.packages:
print "ERROR: You must define a minimum of name, version and packages in your setup.py"
sys.exit(1)
#project env variables for deployment
env.project_name = setup.get_name() #project_name()
env.project_full_version = setup.get_version()#local('python setup.py --version').rstrip()
env.project_version = _parse_project_version(env.project_full_version)
env.project_fullname = '-'.join([env.project_name,env.project_version])
env.project_package_name = setup.packages[0]
env.patch = False
#django settings are passed in by the command
#We'll assume that if the settings aren't passed in we're running from a fabfile
if not settings:
sys.path.insert(0,local_working_dir)
#import global settings
project_settings = import_module(env.project_name+'.settings')
else:
project_settings = settings
#If sqlite is used we can manage the database on first deployment
env.DEFAULT_DATABASE_ENGINE = project_settings.DATABASES['default']['ENGINE']
env.DEFAULT_DATABASE_NAME = project_settings.DATABASES['default']['NAME']
#overwrite with main sitesettings module
#just for MEDIA_URL, ADMIN_MEDIA_PREFIX, and STATIC_URL
#if this settings file exists
try:
site_settings = import_module('.'.join([env.project_name,'sitesettings.settings']))
project_settings.MEDIA_URL = site_settings.MEDIA_URL
project_settings.ADMIN_MEDIA_PREFIX = site_settings.ADMIN_MEDIA_PREFIX
project_settings.DATABASES = site_settings.DATABASES
if hasattr(site_settings,'STATIC_URL'):
project_settings.STATIC_URL = site_settings.STATIC_URL
else:
project_settings.STATIC_URL = project_settings.ADMIN_MEDIA_PREFIX
except ImportError:
pass
#update woven_env from project_settings
local_settings = dir(project_settings)
#only get settings that woven uses
for setting in local_settings:
if setting.isupper() and hasattr(woven_env,setting):
s = getattr(project_settings,setting,'')
woven_env[setting] = s
#update the fabric env with all the woven settings
env.update(woven_env)
#set any user/password defaults if they are not supplied
#Fabric would get the user from the options by default as the system user
#We will overwrite that
if woven_env.HOST_USER:
env.user = woven_env.HOST_USER
env.password = woven_env.HOST_PASSWORD
#set the hosts if they aren't already
if not env.hosts: env.hosts = woven_env.HOSTS
if not env.roledefs: env.roledefs = woven_env.ROLEDEFS
#reverse_lookup hosts to roles
role_lookup = {}
for role in env.roles:
r_hosts = env.roledefs[role]
for host in r_hosts:
#since port is not handled by fabric.main.normalize we'll do it ourselves
role_lookup['%s:%s'% (host,str(woven_env.HOST_SSH_PORT))]=role
#now add any hosts that aren't already defined in roles
for host in env.hosts:
host_string = '%s:%s'% (host,str(woven_env.HOST_SSH_PORT))
if host_string not in role_lookup.keys():
role_lookup[host_string] = ''
env.role_lookup = role_lookup
env.hosts = role_lookup.keys()
#remove any unneeded db adaptors - except sqlite
remove_backends = ['postgresql_psycopg2', 'mysql']
for db in project_settings.DATABASES:
engine = project_settings.DATABASES[db]['ENGINE'].split('.')[-1]
if engine in remove_backends: remove_backends.remove(engine)
for backend in remove_backends:
if backend == 'postgresql_psycopg2': rm = 'python-psycopg2'
elif backend == 'mysql': rm = 'python-mysqldb'
env.HOST_BASE_PACKAGES.remove(rm)
#packages can be just the base + extra packages
#or role dependent we need to just map out the packages to hosts and roles here
packages = {}
all_packages = set([])
for role in env.roles:
packages[role]=env.ROLE_PACKAGES.get(role,[])
if not packages[role]:
packages[role] = env.HOST_BASE_PACKAGES + env.HOST_EXTRA_PACKAGES
all_packages = set(packages[role]) | all_packages
#no role
packages[''] = env.HOST_BASE_PACKAGES + env.HOST_EXTRA_PACKAGES
all_packages = set(packages['']) | all_packages
#conveniently add gunicorn ppa
if 'gunicorn' in all_packages:
if 'ppa:bchesneau/gunicorn' not in env.LINUX_PACKAGE_REPOSITORIES:
env.LINUX_PACKAGE_REPOSITORIES.append('ppa:bchesneau/gunicorn')
env.packages = packages
#sanity check for unwanted combinations in the empty role
u = set(packages[''])
wsgi = u & set(['gunicorn','uwsgi'])
if wsgi and 'apache2' in u:
u = u - set(['apache2','libapache2-mod-wsgi'])
#Used to detect certain apps eg South, static_builder
env.INSTALLED_APPS = project_settings.INSTALLED_APPS
env.packages[''] = list(u)
#per host
env.installed_packages = {}
env.uninstalled_packages = {}
#UFW firewall rules
firewall_rules = {}
for role in env.roles:
firewall_rules[role]= env.ROLE_UFW_RULES.get(role,[])
firewall_rules['']=env.UFW_RULES
env.firewall_rules = firewall_rules
#Now update the env with any settings that are not defined by woven but may
#be used by woven or fabric
env.MEDIA_ROOT = project_settings.MEDIA_ROOT
env.MEDIA_URL = project_settings.MEDIA_URL
try:
env.ADMIN_MEDIA_PREFIX = project_settings.ADMIN_MEDIA_PREFIX
except AttributeError:
env.ADMIN_MEDIA_PREFIX = ''
if not env.STATIC_URL:
env.STATIC_URL = project_settings.ADMIN_MEDIA_PREFIX
env.TEMPLATE_DIRS = project_settings.TEMPLATE_DIRS
#Set the server /etc/timezone
env.TIME_ZONE = project_settings.TIME_ZONE
#Used to detect certain apps eg South, static_builder
env.INSTALLED_APPS = project_settings.INSTALLED_APPS
#SSH key
if not hasattr(env,'key_filename') and not env.key_filename and env.SSH_KEY_FILENAME:
env.key_filename = env.SSH_KEY_FILENAME
elif not hasattr(env,'key_filename'):
env.key_filename = None
#noinput
if not hasattr(env,'INTERACTIVE'):
env.INTERACTIVE = True
if not hasattr(env,'verbosity'):
env.verbosity = 1
#overwrite existing settings
if not hasattr(env,'overwrite'):
env.overwrite=False
#South integration defaults
env.nomigration = False
env.manualmigration = False
env.migration = ''
env.root_disabled = False
#Sites
env.sites = {}
env.shell = '/bin/bash --noprofile -l -c' | Used in management commands or at the module level of a fabfile to
integrate woven project django.conf settings into fabric, and set the local current
working directory to the distribution root (where setup.py lives).
``settings`` is your django settings module to pass in
if you want to call this from a fabric script.
``setup_dir`` is an optional path to the directory containing setup.py
This would be used in instances where setup.py was not above the cwd
This function is used to set the environment for all hosts | Below is the instruction that describes the task:
### Input:
Used in management commands or at the module level of a fabfile to
integrate woven project django.conf settings into fabric, and set the local current
working directory to the distribution root (where setup.py lives).
``settings`` is your django settings module to pass in
if you want to call this from a fabric script.
``setup_dir`` is an optional path to the directory containing setup.py
This would be used in instances where setup.py was not above the cwd
This function is used to set the environment for all hosts
### Response:
def set_env(settings=None, setup_dir=''):
"""
Used in management commands or at the module level of a fabfile to
integrate woven project django.conf settings into fabric, and set the local current
working directory to the distribution root (where setup.py lives).
``settings`` is your django settings module to pass in
if you want to call this from a fabric script.
``setup_dir`` is an optional path to the directory containing setup.py
This would be used in instances where setup.py was not above the cwd
This function is used to set the environment for all hosts
"""
#switch the working directory to the distribution root where setup.py is
if hasattr(env, 'setup_path') and env.setup_path:
setup_path = env.setup_path
else:
with fab_settings(fabfile='setup.py'):
if setup_dir:
setup_path = os.path.join(setup_dir,'setup.py')
else:
setup_path = find_fabfile()
if not setup_path:
print 'Error: You must have a setup.py file in the current or a parent folder'
sys.exit(1)
local_working_dir = os.path.split(setup_path)[0]
os.chdir(local_working_dir)
setup = run_setup('setup.py',stop_after="init")
if setup.get_name() == 'UNKNOWN' or setup.get_version()=='0.0.0' or not setup.packages:
print "ERROR: You must define a minimum of name, version and packages in your setup.py"
sys.exit(1)
#project env variables for deployment
env.project_name = setup.get_name() #project_name()
env.project_full_version = setup.get_version()#local('python setup.py --version').rstrip()
env.project_version = _parse_project_version(env.project_full_version)
env.project_fullname = '-'.join([env.project_name,env.project_version])
env.project_package_name = setup.packages[0]
env.patch = False
#django settings are passed in by the command
#We'll assume that if the settings aren't passed in we're running from a fabfile
if not settings:
sys.path.insert(0,local_working_dir)
#import global settings
project_settings = import_module(env.project_name+'.settings')
else:
project_settings = settings
#If sqlite is used we can manage the database on first deployment
env.DEFAULT_DATABASE_ENGINE = project_settings.DATABASES['default']['ENGINE']
env.DEFAULT_DATABASE_NAME = project_settings.DATABASES['default']['NAME']
#overwrite with main sitesettings module
#just for MEDIA_URL, ADMIN_MEDIA_PREFIX, and STATIC_URL
#if this settings file exists
try:
site_settings = import_module('.'.join([env.project_name,'sitesettings.settings']))
project_settings.MEDIA_URL = site_settings.MEDIA_URL
project_settings.ADMIN_MEDIA_PREFIX = site_settings.ADMIN_MEDIA_PREFIX
project_settings.DATABASES = site_settings.DATABASES
if hasattr(site_settings,'STATIC_URL'):
project_settings.STATIC_URL = site_settings.STATIC_URL
else:
project_settings.STATIC_URL = project_settings.ADMIN_MEDIA_PREFIX
except ImportError:
pass
#update woven_env from project_settings
local_settings = dir(project_settings)
#only get settings that woven uses
for setting in local_settings:
if setting.isupper() and hasattr(woven_env,setting):
s = getattr(project_settings,setting,'')
woven_env[setting] = s
#update the fabric env with all the woven settings
env.update(woven_env)
#set any user/password defaults if they are not supplied
#Fabric would get the user from the options by default as the system user
#We will overwrite that
if woven_env.HOST_USER:
env.user = woven_env.HOST_USER
env.password = woven_env.HOST_PASSWORD
#set the hosts if they aren't already
if not env.hosts: env.hosts = woven_env.HOSTS
if not env.roledefs: env.roledefs = woven_env.ROLEDEFS
#reverse_lookup hosts to roles
role_lookup = {}
for role in env.roles:
r_hosts = env.roledefs[role]
for host in r_hosts:
#since port is not handled by fabric.main.normalize we'll do it ourselves
role_lookup['%s:%s'% (host,str(woven_env.HOST_SSH_PORT))]=role
#now add any hosts that aren't already defined in roles
for host in env.hosts:
host_string = '%s:%s'% (host,str(woven_env.HOST_SSH_PORT))
if host_string not in role_lookup.keys():
role_lookup[host_string] = ''
env.role_lookup = role_lookup
env.hosts = role_lookup.keys()
#remove any unneeded db adaptors - except sqlite
remove_backends = ['postgresql_psycopg2', 'mysql']
for db in project_settings.DATABASES:
engine = project_settings.DATABASES[db]['ENGINE'].split('.')[-1]
if engine in remove_backends: remove_backends.remove(engine)
for backend in remove_backends:
if backend == 'postgresql_psycopg2': rm = 'python-psycopg2'
elif backend == 'mysql': rm = 'python-mysqldb'
env.HOST_BASE_PACKAGES.remove(rm)
#packages can be just the base + extra packages
#or role dependent we need to just map out the packages to hosts and roles here
packages = {}
all_packages = set([])
for role in env.roles:
packages[role]=env.ROLE_PACKAGES.get(role,[])
if not packages[role]:
packages[role] = env.HOST_BASE_PACKAGES + env.HOST_EXTRA_PACKAGES
all_packages = set(packages[role]) | all_packages
#no role
packages[''] = env.HOST_BASE_PACKAGES + env.HOST_EXTRA_PACKAGES
all_packages = set(packages['']) | all_packages
#conveniently add gunicorn ppa
if 'gunicorn' in all_packages:
if 'ppa:bchesneau/gunicorn' not in env.LINUX_PACKAGE_REPOSITORIES:
env.LINUX_PACKAGE_REPOSITORIES.append('ppa:bchesneau/gunicorn')
env.packages = packages
#sanity check for unwanted combinations in the empty role
u = set(packages[''])
wsgi = u & set(['gunicorn','uwsgi'])
if wsgi and 'apache2' in u:
u = u - set(['apache2','libapache2-mod-wsgi'])
#Used to detect certain apps eg South, static_builder
env.INSTALLED_APPS = project_settings.INSTALLED_APPS
env.packages[''] = list(u)
#per host
env.installed_packages = {}
env.uninstalled_packages = {}
#UFW firewall rules
firewall_rules = {}
for role in env.roles:
firewall_rules[role]= env.ROLE_UFW_RULES.get(role,[])
firewall_rules['']=env.UFW_RULES
env.firewall_rules = firewall_rules
#Now update the env with any settings that are not defined by woven but may
#be used by woven or fabric
env.MEDIA_ROOT = project_settings.MEDIA_ROOT
env.MEDIA_URL = project_settings.MEDIA_URL
try:
env.ADMIN_MEDIA_PREFIX = project_settings.ADMIN_MEDIA_PREFIX
except AttributeError:
env.ADMIN_MEDIA_PREFIX = ''
if not env.STATIC_URL:
env.STATIC_URL = project_settings.ADMIN_MEDIA_PREFIX
env.TEMPLATE_DIRS = project_settings.TEMPLATE_DIRS
#Set the server /etc/timezone
env.TIME_ZONE = project_settings.TIME_ZONE
#Used to detect certain apps eg South, static_builder
env.INSTALLED_APPS = project_settings.INSTALLED_APPS
#SSH key
if not hasattr(env,'key_filename') and not env.key_filename and env.SSH_KEY_FILENAME:
env.key_filename = env.SSH_KEY_FILENAME
elif not hasattr(env,'key_filename'):
env.key_filename = None
#noinput
if not hasattr(env,'INTERACTIVE'):
env.INTERACTIVE = True
if not hasattr(env,'verbosity'):
env.verbosity = 1
#overwrite existing settings
if not hasattr(env,'overwrite'):
env.overwrite=False
#South integration defaults
env.nomigration = False
env.manualmigration = False
env.migration = ''
env.root_disabled = False
#Sites
env.sites = {}
env.shell = '/bin/bash --noprofile -l -c' |
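The first step above (reading name, version and packages from setup.py without running a build) can be reproduced on its own with distutils' run_setup, the same call the function uses; this only works on Python versions that still ship distutils.
from distutils.core import run_setup
dist = run_setup("setup.py", stop_after="init")   # parse metadata only, no build
print(dist.get_name(), dist.get_version())
print(dist.packages)   # None, or a list such as ['myproject', ...]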
def as_dict(self):
"""
Returns dict representations of Xmu object
"""
d = MSONable.as_dict(self)
d["data"] = self.data.tolist()
return d | Returns dict representations of Xmu object | Below is the the instruction that describes the task:
### Input:
Returns dict representations of Xmu object
### Response:
def as_dict(self):
"""
Returns dict representations of Xmu object
"""
d = MSONable.as_dict(self)
d["data"] = self.data.tolist()
return d |
def class_balance(y_train, y_test=None, ax=None, labels=None, **kwargs):
"""Quick method:
One of the biggest challenges for classification models is an imbalance of
classes in the training data. This function visualizes the relationship of
the support for each class in both the training and test data by
displaying how frequently each class occurs as a bar graph.
The figure can be displayed in two modes:
1. Balance mode: show the frequency of each class in the dataset.
2. Compare mode: show the relationship of support in train and test data.
Balance mode is the default if only y_train is specified. Compare mode
happens when both y_train and y_test are specified.
Parameters
----------
y_train : array-like
Array or list of shape (n,) that contains discrete data.
y_test : array-like, optional
Array or list of shape (m,) that contains discrete data. If
specified, the bar chart will be drawn in compare mode.
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
labels: list, optional
A list of class names for the x-axis if the target is already encoded.
Ensure that the labels are ordered lexicographically with respect to
the values in the target. A common use case is to pass
LabelEncoder.classes\_ as this parameter. If not specified, the labels
in the data will be used.
kwargs: dict, optional
Keyword arguments passed to the super class. Here, used
to colorize the bars in the histogram.
Returns
-------
ax : matplotlib axes
Returns the axes that the class balance plot was drawn on.
"""
# Instantiate the visualizer
visualizer = ClassBalance(ax=ax, labels=labels, **kwargs)
# Fit and transform the visualizer (calls draw)
visualizer.fit(y_train, y_test)
visualizer.finalize()
# Return the axes object on the visualizer
return visualizer.ax | Quick method:
One of the biggest challenges for classification models is an imbalance of
classes in the training data. This function visualizes the relationship of
the support for each class in both the training and test data by
displaying how frequently each class occurs as a bar graph.
The figure can be displayed in two modes:
1. Balance mode: show the frequency of each class in the dataset.
2. Compare mode: show the relationship of support in train and test data.
Balance mode is the default if only y_train is specified. Compare mode
happens when both y_train and y_test are specified.
Parameters
----------
y_train : array-like
Array or list of shape (n,) that contains discrete data.
y_test : array-like, optional
Array or list of shape (m,) that contains discrete data. If
specified, the bar chart will be drawn in compare mode.
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
labels: list, optional
A list of class names for the x-axis if the target is already encoded.
Ensure that the labels are ordered lexicographically with respect to
the values in the target. A common use case is to pass
LabelEncoder.classes\_ as this parameter. If not specified, the labels
in the data will be used.
kwargs: dict, optional
Keyword arguments passed to the super class. Here, used
to colorize the bars in the histogram.
Returns
-------
ax : matplotlib axes
Returns the axes that the class balance plot was drawn on. | Below is the instruction that describes the task:
### Input:
Quick method:
One of the biggest challenges for classification models is an imbalance of
classes in the training data. This function visualizes the relationship of
the support for each class in both the training and test data by
displaying how frequently each class occurs as a bar graph.
The figure can be displayed in two modes:
1. Balance mode: show the frequency of each class in the dataset.
2. Compare mode: show the relationship of support in train and test data.
Balance mode is the default if only y_train is specified. Compare mode
happens when both y_train and y_test are specified.
Parameters
----------
y_train : array-like
Array or list of shape (n,) that contains discrete data.
y_test : array-like, optional
Array or list of shape (m,) that contains discrete data. If
specified, the bar chart will be drawn in compare mode.
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
labels: list, optional
A list of class names for the x-axis if the target is already encoded.
Ensure that the labels are ordered lexicographically with respect to
the values in the target. A common use case is to pass
LabelEncoder.classes\_ as this parameter. If not specified, the labels
in the data will be used.
kwargs: dict, optional
Keyword arguments passed to the super class. Here, used
to colorize the bars in the histogram.
Returns
-------
ax : matplotlib axes
Returns the axes that the class balance plot was drawn on.
### Response:
def class_balance(y_train, y_test=None, ax=None, labels=None, **kwargs):
"""Quick method:
One of the biggest challenges for classification models is an imbalance of
classes in the training data. This function visualizes the relationship of
the support for each class in both the training and test data by
displaying how frequently each class occurs as a bar graph.
The figure can be displayed in two modes:
1. Balance mode: show the frequency of each class in the dataset.
2. Compare mode: show the relationship of support in train and test data.
Balance mode is the default if only y_train is specified. Compare mode
happens when both y_train and y_test are specified.
Parameters
----------
y_train : array-like
Array or list of shape (n,) that contains discrete data.
y_test : array-like, optional
Array or list of shape (m,) that contains discrete data. If
specified, the bar chart will be drawn in compare mode.
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
labels: list, optional
A list of class names for the x-axis if the target is already encoded.
Ensure that the labels are ordered lexicographically with respect to
the values in the target. A common use case is to pass
LabelEncoder.classes\_ as this parameter. If not specified, the labels
in the data will be used.
kwargs: dict, optional
Keyword arguments passed to the super class. Here, used
to colorize the bars in the histogram.
Returns
-------
ax : matplotlib axes
Returns the axes that the class balance plot was drawn on.
"""
# Instantiate the visualizer
visualizer = ClassBalance(ax=ax, labels=labels, **kwargs)
# Fit and transform the visualizer (calls draw)
visualizer.fit(y_train, y_test)
visualizer.finalize()
# Return the axes object on the visualizer
return visualizer.ax |
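A hedged usage sketch, assuming yellowbrick and scikit-learn are installed; the import path of the quick method is an assumption, while the arguments follow the signature above.
from sklearn.model_selection import train_test_split
from yellowbrick.target import class_balance   # assumed module for this quick method
y = ["spam"] * 80 + ["ham"] * 20
y_train, y_test = train_test_split(y, test_size=0.25, random_state=0)
ax = class_balance(y_train, y_test)            # compare mode: train vs. test support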
def linkify_templates(self):
"""
Link all templates, and create the template graph too
:return: None
"""
# First we create a list of all templates
for i in itertools.chain(iter(list(self.items.values())),
iter(list(self.templates.values()))):
self.linkify_item_templates(i)
for i in self:
i.tags = self.get_all_tags(i) | Link all templates, and create the template graph too
:return: None | Below is the instruction that describes the task:
### Input:
Link all templates, and create the template graph too
:return: None
### Response:
def linkify_templates(self):
"""
Link all templates, and create the template graph too
:return: None
"""
# First we create a list of all templates
for i in itertools.chain(iter(list(self.items.values())),
iter(list(self.templates.values()))):
self.linkify_item_templates(i)
for i in self:
i.tags = self.get_all_tags(i) |
def p_integerdecl_signed(self, p):
'integerdecl : INTEGER SIGNED integernamelist SEMICOLON'
intlist = [Integer(r,
Width(msb=IntConst('31', lineno=p.lineno(3)),
lsb=IntConst('0', lineno=p.lineno(3)),
lineno=p.lineno(3)),
signed=True, lineno=p.lineno(3)) for r in p[2]]
p[0] = Decl(tuple(intlist), lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | integerdecl : INTEGER SIGNED integernamelist SEMICOLON | Below is the the instruction that describes the task:
### Input:
integerdecl : INTEGER SIGNED integernamelist SEMICOLON
### Response:
def p_integerdecl_signed(self, p):
'integerdecl : INTEGER SIGNED integernamelist SEMICOLON'
intlist = [Integer(r,
Width(msb=IntConst('31', lineno=p.lineno(3)),
lsb=IntConst('0', lineno=p.lineno(3)),
lineno=p.lineno(3)),
signed=True, lineno=p.lineno(3)) for r in p[2]]
p[0] = Decl(tuple(intlist), lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def get_post(self, rel_url, include_draft=False):
"""
Get post for given relative url from filesystem.
Possible input:
- 2017/01/01/my-post/
- 2017/01/01/my-post/index.html
:param rel_url: relative url
:param include_draft: return draft post or not
:return: a Post object
"""
raw_rel_url = str(rel_url)
if rel_url.endswith('/index.html'):
rel_url = rel_url.rsplit('/', 1)[
0] + '/' # remove the trailing 'index.html'
post_filename = rel_url[:-1].replace('/', '-')
post_file_path, post_file_ext = FileStorage.search_instance_file(
'posts', post_filename)
if post_file_path is None or post_file_ext is None or \
get_standard_format_name(post_file_ext) is None:
# no such post
return None
# construct the post object
post = Post()
post.rel_url = raw_rel_url
# 'rel_url' contains no trailing 'index.html'
post.unique_key = '/post/' + rel_url
post.format = get_standard_format_name(post_file_ext)
post.meta, post.raw_content = FileStorage.read_file(post_file_path)
return post if include_draft or not post.is_draft else None | Get post for given relative url from filesystem.
Possible input:
- 2017/01/01/my-post/
- 2017/01/01/my-post/index.html
:param rel_url: relative url
:param include_draft: return draft post or not
:return: a Post object | Below is the instruction that describes the task:
### Input:
Get post for given relative url from filesystem.
Possible input:
- 2017/01/01/my-post/
- 2017/01/01/my-post/index.html
:param rel_url: relative url
:param include_draft: return draft post or not
:return: a Post object
### Response:
def get_post(self, rel_url, include_draft=False):
"""
Get post for given relative url from filesystem.
Possible input:
- 2017/01/01/my-post/
- 2017/01/01/my-post/index.html
:param rel_url: relative url
:param include_draft: return draft post or not
:return: a Post object
"""
raw_rel_url = str(rel_url)
if rel_url.endswith('/index.html'):
rel_url = rel_url.rsplit('/', 1)[
0] + '/' # remove the trailing 'index.html'
post_filename = rel_url[:-1].replace('/', '-')
post_file_path, post_file_ext = FileStorage.search_instance_file(
'posts', post_filename)
if post_file_path is None or post_file_ext is None or \
get_standard_format_name(post_file_ext) is None:
# no such post
return None
# construct the post object
post = Post()
post.rel_url = raw_rel_url
# 'rel_url' contains no trailing 'index.html'
post.unique_key = '/post/' + rel_url
post.format = get_standard_format_name(post_file_ext)
post.meta, post.raw_content = FileStorage.read_file(post_file_path)
return post if include_draft or not post.is_draft else None |
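The URL-to-filename mapping done at the top of the method can be checked in isolation, using the docstring's own example input.
rel_url = "2017/01/01/my-post/index.html"
if rel_url.endswith("/index.html"):
    rel_url = rel_url.rsplit("/", 1)[0] + "/"      # -> "2017/01/01/my-post/"
post_filename = rel_url[:-1].replace("/", "-")
assert post_filename == "2017-01-01-my-post"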
def narrow(self, **kwargs):
"""Up-to including"""
from_date = kwargs.pop('from_date', None)
to_date = kwargs.pop('to_date', None)
date = kwargs.pop('date', None)
qs = self
if from_date:
qs = qs.filter(date__gte=from_date)
if to_date:
qs = qs.filter(date__lte=to_date)
if date:
qs = qs.filter(date=date)
return super(ByDateQuerySetMixin, qs).narrow(**kwargs) | Up-to including | Below is the the instruction that describes the task:
### Input:
Up-to including
### Response:
def narrow(self, **kwargs):
"""Up-to including"""
from_date = kwargs.pop('from_date', None)
to_date = kwargs.pop('to_date', None)
date = kwargs.pop('date', None)
qs = self
if from_date:
qs = qs.filter(date__gte=from_date)
if to_date:
qs = qs.filter(date__lte=to_date)
if date:
qs = qs.filter(date=date)
return super(ByDateQuerySetMixin, qs).narrow(**kwargs) |
def get_billing_report_firmware_updates(self, month, **kwargs): # noqa: E501
"""Get raw billing data of the firmware updates for the month. # noqa: E501
Fetch raw billing data of the firmware updates for the currently authenticated commercial non-subtenant account. This is supplementary data for the billing report. The raw billing data of the firmware updates for subtenant accounts are included in their aggregator's raw billing data of the firmware updates. The endpoint returns the URL to download the gzipped CSV file. The first line is the header providing information on the firmware updates. For example, the ID of an firmware update. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v3/billing-report-firmware-updates?month=2018-07 -H 'authorization: Bearer {api-key}' # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_billing_report_firmware_updates(month, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str month: Queried year and month of billing report. (required)
:return: BillingReportRawDataResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_billing_report_firmware_updates_with_http_info(month, **kwargs) # noqa: E501
else:
(data) = self.get_billing_report_firmware_updates_with_http_info(month, **kwargs) # noqa: E501
return data | Get raw billing data of the firmware updates for the month. # noqa: E501
Fetch raw billing data of the firmware updates for the currently authenticated commercial non-subtenant account. This is supplementary data for the billing report. The raw billing data of the firmware updates for subtenant accounts are included in their aggregator's raw billing data of the firmware updates. The endpoint returns the URL to download the gzipped CSV file. The first line is the header providing information on the firmware updates. For example, the ID of an firmware update. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v3/billing-report-firmware-updates?month=2018-07 -H 'authorization: Bearer {api-key}' # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_billing_report_firmware_updates(month, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str month: Queried year and month of billing report. (required)
:return: BillingReportRawDataResponse
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Get raw billing data of the firmware updates for the month. # noqa: E501
Fetch raw billing data of the firmware updates for the currently authenticated commercial non-subtenant account. This is supplementary data for the billing report. The raw billing data of the firmware updates for subtenant accounts are included in their aggregator's raw billing data of the firmware updates. The endpoint returns the URL to download the gzipped CSV file. The first line is the header providing information on the firmware updates. For example, the ID of an firmware update. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v3/billing-report-firmware-updates?month=2018-07 -H 'authorization: Bearer {api-key}' # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_billing_report_firmware_updates(month, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str month: Queried year and month of billing report. (required)
:return: BillingReportRawDataResponse
If the method is called asynchronously,
returns the request thread.
### Response:
def get_billing_report_firmware_updates(self, month, **kwargs): # noqa: E501
"""Get raw billing data of the firmware updates for the month. # noqa: E501
Fetch raw billing data of the firmware updates for the currently authenticated commercial non-subtenant account. This is supplementary data for the billing report. The raw billing data of the firmware updates for subtenant accounts are included in their aggregator's raw billing data of the firmware updates. The endpoint returns the URL to download the gzipped CSV file. The first line is the header providing information on the firmware updates. For example, the ID of an firmware update. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v3/billing-report-firmware-updates?month=2018-07 -H 'authorization: Bearer {api-key}' # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_billing_report_firmware_updates(month, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str month: Queried year and month of billing report. (required)
:return: BillingReportRawDataResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_billing_report_firmware_updates_with_http_info(month, **kwargs) # noqa: E501
else:
(data) = self.get_billing_report_firmware_updates_with_http_info(month, **kwargs) # noqa: E501
return data |
def aligned_array(size, dtype, align=64):
"""Returns an array of a given size that is 64-byte aligned.
The returned array can be efficiently copied into GPU memory by TensorFlow.
"""
n = size * dtype.itemsize
empty = np.empty(n + (align - 1), dtype=np.uint8)
data_align = empty.ctypes.data % align
offset = 0 if data_align == 0 else (align - data_align)
output = empty[offset:offset + n].view(dtype)
assert len(output) == size, len(output)
assert output.ctypes.data % align == 0, output.ctypes.data
return output | Returns an array of a given size that is 64-byte aligned.
The returned array can be efficiently copied into GPU memory by TensorFlow. | Below is the instruction that describes the task:
### Input:
Returns an array of a given size that is 64-byte aligned.
The returned array can be efficiently copied into GPU memory by TensorFlow.
### Response:
def aligned_array(size, dtype, align=64):
"""Returns an array of a given size that is 64-byte aligned.
The returned array can be efficiently copied into GPU memory by TensorFlow.
"""
n = size * dtype.itemsize
empty = np.empty(n + (align - 1), dtype=np.uint8)
data_align = empty.ctypes.data % align
offset = 0 if data_align == 0 else (align - data_align)
output = empty[offset:offset + n].view(dtype)
assert len(output) == size, len(output)
assert output.ctypes.data % align == 0, output.ctypes.data
return output |
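The alignment guarantee can be verified directly; numpy is the only dependency, imported as np just as the function above already requires.
import numpy as np
buf = aligned_array(1024, np.dtype(np.float32), align=64)
assert buf.ctypes.data % 64 == 0                    # start address is 64-byte aligned
assert buf.size == 1024 and buf.dtype == np.float32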
async def getChatMember(self, chat_id, user_id):
""" See: https://core.telegram.org/bots/api#getchatmember """
p = _strip(locals())
return await self._api_request('getChatMember', _rectify(p)) | See: https://core.telegram.org/bots/api#getchatmember | Below is the the instruction that describes the task:
### Input:
See: https://core.telegram.org/bots/api#getchatmember
### Response:
async def getChatMember(self, chat_id, user_id):
""" See: https://core.telegram.org/bots/api#getchatmember """
p = _strip(locals())
return await self._api_request('getChatMember', _rectify(p)) |
def simplifyTempDfa (tempStates):
"""simplifyTempDfa (tempStates)
"""
changes = True
deletedStates = []
while changes:
changes = False
for i in range(1, len(tempStates)):
if i in deletedStates:
continue
for j in range(0, i):
if j in deletedStates:
continue
if sameState(tempStates[i], tempStates[j]):
deletedStates.append(i)
for k in range(0, len(tempStates)):
if k in deletedStates:
continue
for arc in tempStates[k][1]:
if arc[1] == i:
arc[1] = j
changes = True
break
for stateIndex in deletedStates:
tempStates[stateIndex] = None
return tempStates | simplifyTempDfa (tempStates) | Below is the the instruction that describes the task:
### Input:
simplifyTempDfa (tempStates)
### Response:
def simplifyTempDfa (tempStates):
"""simplifyTempDfa (tempStates)
"""
changes = True
deletedStates = []
while changes:
changes = False
for i in range(1, len(tempStates)):
if i in deletedStates:
continue
for j in range(0, i):
if j in deletedStates:
continue
if sameState(tempStates[i], tempStates[j]):
deletedStates.append(i)
for k in range(0, len(tempStates)):
if k in deletedStates:
continue
for arc in tempStates[k][1]:
if arc[1] == i:
arc[1] = j
changes = True
break
for stateIndex in deletedStates:
tempStates[stateIndex] = None
return tempStates |
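The function relies on a sameState predicate that is not shown in the record; the demo below supplies a plausible one (an assumption, and it must live in the same module so simplifyTempDfa can see it) and uses a minimal state format of [marker, arc_list] with arcs as [symbol, target], matching how the code indexes tempStates[k][1] and arc[1].
def sameState(s1, s2):
    # assumed equality test: same marker and same set of arcs
    return s1[0] == s2[0] and sorted(map(tuple, s1[1])) == sorted(map(tuple, s2[1]))
states = [
    [0, [["a", 1], ["b", 2]]],
    [1, [["a", 1]]],
    [1, [["a", 1]]],        # duplicate of state 1
]
print(simplifyTempDfa(states))
# -> [[0, [['a', 1], ['b', 1]]], [1, [['a', 1]]], None]  (arcs to state 2 redirected to 1)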
def kgen(filename='POSCAR', directory=None, make_folders=False, symprec=0.01,
kpts_per_split=None, ibzkpt=None, spg=None, density=60,
mode='bradcrack', cart_coords=False, kpt_list=None, labels=None):
"""Generate KPOINTS files for VASP band structure calculations.
This script provides a wrapper around several frameworks used to generate
k-points along a high-symmetry path. The paths found in Bradley and
Cracknell, SeeK-path, and pymatgen are all supported.
It is important to note that the standard primitive cell symmetry is
different between SeeK-path and pymatgen. If the correct the structure
is not used, the high-symmetry points (and band path) may be invalid.
Args:
filename (:obj:`str`, optional): Path to VASP structure file. Default
is ``POSCAR``.
directory (:obj:`str`, optional): The output file directory.
make_folders (:obj:`bool`, optional): Generate folders and copy in
required files (INCAR, POTCAR, POSCAR, and possibly CHGCAR) from
the current directory.
symprec (:obj:`float`, optional): The precision used for determining
the cell symmetry.
kpts_per_split (:obj:`int`, optional): If set, the k-points are split
into separate k-point files (or folders) each containing the number
of k-points specified. This is useful for hybrid band structure
calculations where it is often intractable to calculate all
k-points in the same calculation.
ibzkpt (:obj:`str`, optional): Path to IBZKPT file. If set, the
generated k-points will be appended to the k-points in this file
and given a weight of 0. This is necessary for hybrid band
structure calculations.
spg (:obj:`str` or :obj:`int`, optional): The space group international
number or symbol to override the symmetry determined by spglib.
This is not recommended and only provided for testing purposes.
This option will only take effect when ``mode = 'bradcrack'``.
line_density (:obj:`int`, optional): Density of k-points along the
path.
mode (:obj:`str`, optional): Method used for calculating the
high-symmetry path. The options are:
bradcrack
Use the paths from Bradley and Cracknell. See [brad]_.
pymatgen
Use the paths from pymatgen. See [curt]_.
seekpath
Use the paths from SeeK-path. See [seek]_.
cart_coords (:obj:`bool`, optional): Whether the k-points are returned
in cartesian or reciprocal coordinates. Defaults to ``False``
(fractional coordinates).
kpt_list (:obj:`list`, optional): List of k-points to use, formatted as
a list of subpaths, each containing a list of fractional k-points.
For example::
[ [[0., 0., 0.], [0., 0., 0.5]],
[[0.5, 0., 0.], [0.5, 0.5, 0.]] ]
Will return points along ``0 0 0 -> 0 0 1/2 | 1/2 0 0
-> 1/2 1/2 0``
path_labels (:obj:`list`, optional): The k-point labels. These should
be provided as a :obj:`list` of :obj:`str` for each subpath of the
overall path. For example::
[ ['Gamma', 'Z'], ['X', 'M'] ]
combined with the above example for ``kpt_list`` would indicate the
path: Gamma -> Z | X -> M. If no labels are provided, letters from
A -> Z will be used instead. If a label begins with '@' it will be
concealed when plotting with sumo-bandplot.
"""
poscar = Poscar.from_file(filename)
kpath, kpoints, labels = get_path_data(poscar.structure, mode=mode,
symprec=symprec, kpt_list=kpt_list,
labels=labels, spg=spg,
line_density=density)
logging.info('\nk-point label indices:')
for i, label in enumerate(labels):
if label:
logging.info('\t{}: {}'.format(label, i+1))
if not kpt_list and not np.allclose(poscar.structure.lattice.matrix,
kpath.prim.lattice.matrix):
prim_filename = '{}_prim'.format(os.path.basename(filename))
kpath.prim.to(filename=prim_filename)
logging.error("\nWARNING: The input structure does not match the "
"expected standard\nprimitive symmetry, the path may be "
"incorrect! Use at your own risk.\n\nThe correct "
"symmetry primitive structure has been saved as {}.".
format(prim_filename))
ibz = _parse_ibzkpt(ibzkpt)
if make_folders and ibz and kpts_per_split is None:
logging.info("\nFound {} total kpoints in path, do you want to "
"split them up? (y/n)".format(len(kpoints)))
if input()[0].lower() == 'y':
logging.info("How many kpoints per file?")
kpts_per_split = int(input())
write_kpoint_files(filename, kpoints, labels, make_folders=make_folders,
ibzkpt=ibz, kpts_per_split=kpts_per_split,
directory=directory, cart_coords=cart_coords) | Generate KPOINTS files for VASP band structure calculations.
This script provides a wrapper around several frameworks used to generate
k-points along a high-symmetry path. The paths found in Bradley and
Cracknell, SeeK-path, and pymatgen are all supported.
It is important to note that the standard primitive cell symmetry is
different between SeeK-path and pymatgen. If the correct structure
is not used, the high-symmetry points (and band path) may be invalid.
Args:
filename (:obj:`str`, optional): Path to VASP structure file. Default
is ``POSCAR``.
directory (:obj:`str`, optional): The output file directory.
make_folders (:obj:`bool`, optional): Generate folders and copy in
required files (INCAR, POTCAR, POSCAR, and possibly CHGCAR) from
the current directory.
symprec (:obj:`float`, optional): The precision used for determining
the cell symmetry.
kpts_per_split (:obj:`int`, optional): If set, the k-points are split
into separate k-point files (or folders) each containing the number
of k-points specified. This is useful for hybrid band structure
calculations where it is often intractable to calculate all
k-points in the same calculation.
ibzkpt (:obj:`str`, optional): Path to IBZKPT file. If set, the
generated k-points will be appended to the k-points in this file
and given a weight of 0. This is necessary for hybrid band
structure calculations.
spg (:obj:`str` or :obj:`int`, optional): The space group international
number or symbol to override the symmetry determined by spglib.
This is not recommended and only provided for testing purposes.
This option will only take effect when ``mode = 'bradcrack'``.
line_density (:obj:`int`, optional): Density of k-points along the
path.
mode (:obj:`str`, optional): Method used for calculating the
high-symmetry path. The options are:
bradcrack
Use the paths from Bradley and Cracknell. See [brad]_.
pymatgen
Use the paths from pymatgen. See [curt]_.
seekpath
Use the paths from SeeK-path. See [seek]_.
cart_coords (:obj:`bool`, optional): Whether the k-points are returned
in cartesian or reciprocal coordinates. Defaults to ``False``
(fractional coordinates).
kpt_list (:obj:`list`, optional): List of k-points to use, formatted as
a list of subpaths, each containing a list of fractional k-points.
For example::
[ [[0., 0., 0.], [0., 0., 0.5]],
[[0.5, 0., 0.], [0.5, 0.5, 0.]] ]
Will return points along ``0 0 0 -> 0 0 1/2 | 1/2 0 0
-> 1/2 1/2 0``
path_labels (:obj:`list`, optional): The k-point labels. These should
be provided as a :obj:`list` of :obj:`str` for each subpath of the
overall path. For example::
[ ['Gamma', 'Z'], ['X', 'M'] ]
combined with the above example for ``kpt_list`` would indicate the
path: Gamma -> Z | X -> M. If no labels are provided, letters from
A -> Z will be used instead. If a label begins with '@' it will be
concealed when plotting with sumo-bandplot. | Below is the instruction that describes the task:
### Input:
Generate KPOINTS files for VASP band structure calculations.
This script provides a wrapper around several frameworks used to generate
k-points along a high-symmetry path. The paths found in Bradley and
Cracknell, SeeK-path, and pymatgen are all supported.
It is important to note that the standard primitive cell symmetry is
different between SeeK-path and pymatgen. If the correct the structure
is not used, the high-symmetry points (and band path) may be invalid.
Args:
filename (:obj:`str`, optional): Path to VASP structure file. Default
is ``POSCAR``.
directory (:obj:`str`, optional): The output file directory.
make_folders (:obj:`bool`, optional): Generate folders and copy in
required files (INCAR, POTCAR, POSCAR, and possibly CHGCAR) from
the current directory.
symprec (:obj:`float`, optional): The precision used for determining
the cell symmetry.
kpts_per_split (:obj:`int`, optional): If set, the k-points are split
into separate k-point files (or folders) each containing the number
of k-points specified. This is useful for hybrid band structure
calculations where it is often intractable to calculate all
k-points in the same calculation.
ibzkpt (:obj:`str`, optional): Path to IBZKPT file. If set, the
generated k-points will be appended to the k-points in this file
and given a weight of 0. This is necessary for hybrid band
structure calculations.
spg (:obj:`str` or :obj:`int`, optional): The space group international
number or symbol to override the symmetry determined by spglib.
This is not recommended and only provided for testing purposes.
This option will only take effect when ``mode = 'bradcrack'``.
line_density (:obj:`int`, optional): Density of k-points along the
path.
mode (:obj:`str`, optional): Method used for calculating the
high-symmetry path. The options are:
bradcrack
Use the paths from Bradley and Cracknell. See [brad]_.
pymatgen
Use the paths from pymatgen. See [curt]_.
seekpath
Use the paths from SeeK-path. See [seek]_.
cart_coords (:obj:`bool`, optional): Whether the k-points are returned
in cartesian or reciprocal coordinates. Defaults to ``False``
(fractional coordinates).
kpt_list (:obj:`list`, optional): List of k-points to use, formatted as
a list of subpaths, each containing a list of fractional k-points.
For example::
[ [[0., 0., 0.], [0., 0., 0.5]],
[[0.5, 0., 0.], [0.5, 0.5, 0.]] ]
Will return points along ``0 0 0 -> 0 0 1/2 | 1/2 0 0
-> 1/2 1/2 0``
path_labels (:obj:`list`, optional): The k-point labels. These should
be provided as a :obj:`list` of :obj:`str` for each subpath of the
overall path. For example::
[ ['Gamma', 'Z'], ['X', 'M'] ]
combined with the above example for ``kpt_list`` would indicate the
path: Gamma -> Z | X -> M. If no labels are provided, letters from
A -> Z will be used instead. If a label begins with '@' it will be
concealed when plotting with sumo-bandplot.
### Response:
def kgen(filename='POSCAR', directory=None, make_folders=False, symprec=0.01,
kpts_per_split=None, ibzkpt=None, spg=None, density=60,
mode='bradcrack', cart_coords=False, kpt_list=None, labels=None):
"""Generate KPOINTS files for VASP band structure calculations.
This script provides a wrapper around several frameworks used to generate
k-points along a high-symmetry path. The paths found in Bradley and
Cracknell, SeeK-path, and pymatgen are all supported.
It is important to note that the standard primitive cell symmetry is
different between SeeK-path and pymatgen. If the correct structure
is not used, the high-symmetry points (and band path) may be invalid.
Args:
filename (:obj:`str`, optional): Path to VASP structure file. Default
is ``POSCAR``.
directory (:obj:`str`, optional): The output file directory.
make_folders (:obj:`bool`, optional): Generate folders and copy in
required files (INCAR, POTCAR, POSCAR, and possibly CHGCAR) from
the current directory.
symprec (:obj:`float`, optional): The precision used for determining
the cell symmetry.
kpts_per_split (:obj:`int`, optional): If set, the k-points are split
into separate k-point files (or folders) each containing the number
of k-points specified. This is useful for hybrid band structure
calculations where it is often intractable to calculate all
k-points in the same calculation.
ibzkpt (:obj:`str`, optional): Path to IBZKPT file. If set, the
generated k-points will be appended to the k-points in this file
and given a weight of 0. This is necessary for hybrid band
structure calculations.
spg (:obj:`str` or :obj:`int`, optional): The space group international
number or symbol to override the symmetry determined by spglib.
This is not recommended and only provided for testing purposes.
This option will only take effect when ``mode = 'bradcrack'``.
line_density (:obj:`int`, optional): Density of k-points along the
path.
mode (:obj:`str`, optional): Method used for calculating the
high-symmetry path. The options are:
bradcrack
Use the paths from Bradley and Cracknell. See [brad]_.
pymatgen
Use the paths from pymatgen. See [curt]_.
seekpath
Use the paths from SeeK-path. See [seek]_.
cart_coords (:obj:`bool`, optional): Whether the k-points are returned
in cartesian or reciprocal coordinates. Defaults to ``False``
(fractional coordinates).
kpt_list (:obj:`list`, optional): List of k-points to use, formatted as
a list of subpaths, each containing a list of fractional k-points.
For example::
[ [[0., 0., 0.], [0., 0., 0.5]],
[[0.5, 0., 0.], [0.5, 0.5, 0.]] ]
Will return points along ``0 0 0 -> 0 0 1/2 | 1/2 0 0
-> 1/2 1/2 0``
path_labels (:obj:`list`, optional): The k-point labels. These should
be provided as a :obj:`list` of :obj:`str` for each subpath of the
overall path. For example::
[ ['Gamma', 'Z'], ['X', 'M'] ]
combined with the above example for ``kpt_list`` would indicate the
path: Gamma -> Z | X -> M. If no labels are provided, letters from
A -> Z will be used instead. If a label begins with '@' it will be
concealed when plotting with sumo-bandplot.
"""
poscar = Poscar.from_file(filename)
kpath, kpoints, labels = get_path_data(poscar.structure, mode=mode,
symprec=symprec, kpt_list=kpt_list,
labels=labels, spg=spg,
line_density=density)
logging.info('\nk-point label indices:')
for i, label in enumerate(labels):
if label:
logging.info('\t{}: {}'.format(label, i+1))
if not kpt_list and not np.allclose(poscar.structure.lattice.matrix,
kpath.prim.lattice.matrix):
prim_filename = '{}_prim'.format(os.path.basename(filename))
kpath.prim.to(filename=prim_filename)
logging.error("\nWARNING: The input structure does not match the "
"expected standard\nprimitive symmetry, the path may be "
"incorrect! Use at your own risk.\n\nThe correct "
"symmetry primitive structure has been saved as {}.".
format(prim_filename))
ibz = _parse_ibzkpt(ibzkpt)
if make_folders and ibz and kpts_per_split is None:
logging.info("\nFound {} total kpoints in path, do you want to "
"split them up? (y/n)".format(len(kpoints)))
if input()[0].lower() == 'y':
logging.info("How many kpoints per file?")
kpts_per_split = int(input())
write_kpoint_files(filename, kpoints, labels, make_folders=make_folders,
ibzkpt=ibz, kpts_per_split=kpts_per_split,
directory=directory, cart_coords=cart_coords) |
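As a quick illustration of the wrapper above, the following hedged sketch shows two typical invocations; the file names, density and split size are example values, not defaults taken from the source.
# Sketch only: arguments are illustrative assumptions.
kgen(filename='POSCAR', mode='seekpath', density=100)
# Hybrid band structure workflow: split the path into files of 50 k-points and
# append them (zero-weighted) to an existing IBZKPT file, one folder per split.
kgen(filename='POSCAR', kpts_per_split=50, ibzkpt='IBZKPT', make_folders=True)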
def yaml_loc_join(l, n):
'''
YAML loader to join paths
The keywords come directly from :func:`util.locations.get_locations`.
See there!
:returns:
A `path separator` (``/``) joined string |yaml_loader_returns|
.. seealso:: |yaml_loader_seealso|
'''
from photon.util.locations import get_locations
locations = get_locations()
s = l.construct_sequence(n)
for num, seq in enumerate(s):
if seq in locations:
s[num] = '%s' % (locations[seq])
return _path.join(*s) | YAML loader to join paths
The keywords come directly from :func:`util.locations.get_locations`.
See there!
:returns:
A `path separator` (``/``) joined string |yaml_loader_returns|
.. seealso:: |yaml_loader_seealso| | Below is the the instruction that describes the task:
### Input:
YAML loader to join paths
The keywords come directly from :func:`util.locations.get_locations`.
See there!
:returns:
A `path separator` (``/``) joined string |yaml_loader_returns|
.. seealso:: |yaml_loader_seealso|
### Response:
def yaml_loc_join(l, n):
'''
YAML loader to join paths
The keywords come directly from :func:`util.locations.get_locations`.
See there!
:returns:
A `path separator` (``/``) joined string |yaml_loader_returns|
.. seealso:: |yaml_loader_seealso|
'''
from photon.util.locations import get_locations
locations = get_locations()
s = l.construct_sequence(n)
for num, seq in enumerate(s):
if seq in locations:
s[num] = '%s' % (locations[seq])
return _path.join(*s) |
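A minimal sketch of how such a PyYAML constructor is typically wired up; the '!loc_join' tag name and the 'conf_dir' location key are assumptions for illustration, not identifiers taken from photon's configuration.
import yaml

yaml.add_constructor('!loc_join', yaml_loc_join)
doc = yaml.load('settings: !loc_join [conf_dir, "settings.yaml"]', Loader=yaml.Loader)
# any sequence item matching a get_locations() key is expanded before os.path.join
print(doc['settings'])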
def comment_delete(self, comment_id):
"""Remove a specific comment (Requires login).
Parameters:
comment_id (int): The id number of the comment to remove.
"""
return self._get('comments/{0}.json'.format(comment_id),
method='DELETE', auth=True) | Remove a specific comment (Requires login).
Parameters:
comment_id (int): The id number of the comment to remove. | Below is the the instruction that describes the task:
### Input:
Remove a specific comment (Requires login).
Parameters:
comment_id (int): The id number of the comment to remove.
### Response:
def comment_delete(self, comment_id):
"""Remove a specific comment (Requires login).
Parameters:
comment_id (int): The id number of the comment to remove.
"""
return self._get('comments/{0}.json'.format(comment_id),
method='DELETE', auth=True) |
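A hedged usage sketch; the client class name and credentials below are placeholders for whatever booru client exposes this method, not identifiers confirmed by the source.
client = PybooruClient(username='user', api_key='key')   # hypothetical constructor
client.comment_delete(123456)                            # issues DELETE /comments/123456.json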
async def connection_exists(ssid: str) -> Optional[str]:
""" If there is already a connection for this ssid, return the name of
the connection; if there is not, return None.
"""
nmcli_conns = await connections()
for wifi in [c['name']
for c in nmcli_conns if c['type'] == 'wireless']:
res, _ = await _call(['-t', '-f', '802-11-wireless.ssid',
'-m', 'tabular',
'connection', 'show', wifi])
if res == ssid:
return wifi
return None | If there is already a connection for this ssid, return the name of
the connection; if there is not, return None. | Below is the the instruction that describes the task:
### Input:
If there is already a connection for this ssid, return the name of
the connection; if there is not, return None.
### Response:
async def connection_exists(ssid: str) -> Optional[str]:
""" If there is already a connection for this ssid, return the name of
the connection; if there is not, return None.
"""
nmcli_conns = await connections()
for wifi in [c['name']
for c in nmcli_conns if c['type'] == 'wireless']:
res, _ = await _call(['-t', '-f', '802-11-wireless.ssid',
'-m', 'tabular',
'connection', 'show', wifi])
if res == ssid:
return wifi
return None |
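Because this helper is a coroutine it has to be awaited; a minimal sketch where the SSID is an example value.
import asyncio

async def main():
    name = await connection_exists('lab-wifi')
    if name:
        print('existing connection:', name)
    else:
        print('no saved connection for that SSID')

asyncio.run(main())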
def __train(self, n_neighbors=3):
"""
Train the classifier implementing the `k-nearest neighbors vote <http://scikit-learn.org/stable/modules/\
generated/sklearn.neighbors.KNeighborsClassifier.html>`_
:param n_clusters: the number of clusters
:type n_clusters: int
"""
# m = self.labels.drop(['id','MDS_UPDRSIII'], axis=1).values
# print(itemfreq(m))
#
# for i, row in enumerate(self.labels.drop(['id','MDS_UPDRSIII'], axis=1).values):
# print(np.bincount(row))
try:
for obs in self.observations:
features, ids = self.__get_features_for_observation(observation=obs, skip_id=3497,
last_column_is_id=True)
normalised_data = whiten(features)
x = pd.DataFrame(normalised_data)
y = self.labels[obs].values
# x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=42)
knn = KNeighborsClassifier(n_neighbors=n_neighbors, weights='distance')
# knn.fit(x_train, y_train)
knn.fit(x, y)
# print('Accuracy of K-NN classifier: {:.2f}'.format(knn.score(x, y)))
# print('Accuracy of K-NN classifier on training set: {:.2f}'.format(knn.score(x_train, y_train)))
# print('Accuracy of K-NN classifier on test set: {:.2f}'.format(knn.score(x_test, y_test)))
# print('------')
if not self.knns:
self.knns = [[obs, knn]]
else:
self.knns.append([obs, knn])
except IOError as e:
ierr = "({}): {}".format(e.errno, e.strerror)
logging.error("Error training Clinical UPDRS, file not found, I/O error %s", ierr)
except ValueError as verr:
logging.error("Error training Clinical UPDRS ValueError ->%s", verr.message)
except:
logging.error("Unexpected error on training Clinical UPDRS init: %s", sys.exc_info()[0]) | Train the classifier implementing the `k-nearest neighbors vote <http://scikit-learn.org/stable/modules/\
generated/sklearn.neighbors.KNeighborsClassifier.html>`_
:param n_clusters: the number of clusters
:type n_clusters: int | Below is the the instruction that describes the task:
### Input:
Train the classifier implementing the `k-nearest neighbors vote <http://scikit-learn.org/stable/modules/\
generated/sklearn.neighbors.KNeighborsClassifier.html>`_
:param n_clusters: the number of clusters
:type n_clusters: int
### Response:
def __train(self, n_neighbors=3):
"""
Train the classifier implementing the `k-nearest neighbors vote <http://scikit-learn.org/stable/modules/\
generated/sklearn.neighbors.KNeighborsClassifier.html>`_
:param n_clusters: the number of clusters
:type n_clusters: int
"""
# m = self.labels.drop(['id','MDS_UPDRSIII'], axis=1).values
# print(itemfreq(m))
#
# for i, row in enumerate(self.labels.drop(['id','MDS_UPDRSIII'], axis=1).values):
# print(np.bincount(row))
try:
for obs in self.observations:
features, ids = self.__get_features_for_observation(observation=obs, skip_id=3497,
last_column_is_id=True)
normalised_data = whiten(features)
x = pd.DataFrame(normalised_data)
y = self.labels[obs].values
# x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=42)
knn = KNeighborsClassifier(n_neighbors=n_neighbors, weights='distance')
# knn.fit(x_train, y_train)
knn.fit(x, y)
# print('Accuracy of K-NN classifier: {:.2f}'.format(knn.score(x, y)))
# print('Accuracy of K-NN classifier on training set: {:.2f}'.format(knn.score(x_train, y_train)))
# print('Accuracy of K-NN classifier on test set: {:.2f}'.format(knn.score(x_test, y_test)))
# print('------')
if not self.knns:
self.knns = [[obs, knn]]
else:
self.knns.append([obs, knn])
except IOError as e:
ierr = "({}): {}".format(e.errno, e.strerror)
logging.error("Error training Clinical UPDRS, file not found, I/O error %s", ierr)
except ValueError as verr:
logging.error("Error training Clinical UPDRS ValueError ->%s", verr.message)
except:
logging.error("Unexpected error on training Clinical UPDRS init: %s", sys.exc_info()[0]) |
def _mutate(self, condition, situation):
"""Create a new condition from the given one by probabilistically
applying point-wise mutations. Bits that were originally wildcarded
in the parent condition acquire their values from the provided
situation, to ensure the child condition continues to match it."""
# Go through each position in the condition, randomly flipping
# whether the position is a value (0 or 1) or a wildcard (#). We do
# this in a new list because the original condition's mask is
# immutable.
mutation_points = bitstrings.BitString.random(
len(condition.mask),
self.mutation_probability
)
mask = condition.mask ^ mutation_points
# The bits that aren't wildcards always have the same value as the
# situation, which ensures that the mutated condition still matches
# the situation.
if isinstance(situation, bitstrings.BitCondition):
mask &= situation.mask
return bitstrings.BitCondition(situation.bits, mask)
return bitstrings.BitCondition(situation, mask) | Create a new condition from the given one by probabilistically
applying point-wise mutations. Bits that were originally wildcarded
in the parent condition acquire their values from the provided
situation, to ensure the child condition continues to match it. | Below is the the instruction that describes the task:
### Input:
Create a new condition from the given one by probabilistically
applying point-wise mutations. Bits that were originally wildcarded
in the parent condition acquire their values from the provided
situation, to ensure the child condition continues to match it.
### Response:
def _mutate(self, condition, situation):
"""Create a new condition from the given one by probabilistically
applying point-wise mutations. Bits that were originally wildcarded
in the parent condition acquire their values from the provided
situation, to ensure the child condition continues to match it."""
# Go through each position in the condition, randomly flipping
# whether the position is a value (0 or 1) or a wildcard (#). We do
# this in a new list because the original condition's mask is
# immutable.
mutation_points = bitstrings.BitString.random(
len(condition.mask),
self.mutation_probability
)
mask = condition.mask ^ mutation_points
# The bits that aren't wildcards always have the same value as the
# situation, which ensures that the mutated condition still matches
# the situation.
if isinstance(situation, bitstrings.BitCondition):
mask &= situation.mask
return bitstrings.BitCondition(situation.bits, mask)
return bitstrings.BitCondition(situation, mask) |
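A minimal sketch of the mutation step on its own, using only the calls visible in the method above; the import path and the 0.03 mutation probability are assumptions.
from xcs import bitstrings   # import path assumed; the source only references 'bitstrings'

situation = bitstrings.BitString.random(8, 0.5)
parent = bitstrings.BitCondition(situation, bitstrings.BitString.random(8, 0.5))
mutation_points = bitstrings.BitString.random(len(parent.mask), 0.03)
child = bitstrings.BitCondition(situation, parent.mask ^ mutation_points)
# every position the child does not wildcard takes its value from 'situation',
# so the mutated condition still matches it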
def import_complex_gateway_to_graph(diagram_graph, process_id, process_attributes, element):
"""
Adds to graph the new element that represents BPMN complex gateway.
In addition to attributes inherited from Gateway type, complex gateway
has additional attribute default flow (default value - none).
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param element: object representing a BPMN XML 'complexGateway' element.
"""
element_id = element.getAttribute(consts.Consts.id)
BpmnDiagramGraphImport.import_gateway_to_graph(diagram_graph, process_id, process_attributes, element)
diagram_graph.node[element_id][consts.Consts.default] = element.getAttribute(consts.Consts.default) \
if element.hasAttribute(consts.Consts.default) else None | Adds to graph the new element that represents BPMN complex gateway.
In addition to attributes inherited from Gateway type, complex gateway
has additional attribute default flow (default value - none).
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param element: object representing a BPMN XML 'complexGateway' element. | Below is the the instruction that describes the task:
### Input:
Adds to graph the new element that represents BPMN complex gateway.
In addition to attributes inherited from Gateway type, complex gateway
has additional attribute default flow (default value - none).
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param element: object representing a BPMN XML 'complexGateway' element.
### Response:
def import_complex_gateway_to_graph(diagram_graph, process_id, process_attributes, element):
"""
Adds to graph the new element that represents BPMN complex gateway.
In addition to attributes inherited from Gateway type, complex gateway
has additional attribute default flow (default value - none).
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param element: object representing a BPMN XML 'complexGateway' element.
"""
element_id = element.getAttribute(consts.Consts.id)
BpmnDiagramGraphImport.import_gateway_to_graph(diagram_graph, process_id, process_attributes, element)
diagram_graph.node[element_id][consts.Consts.default] = element.getAttribute(consts.Consts.default) \
if element.hasAttribute(consts.Consts.default) else None |
def _check_image(self, X):
"""
Checks the image size and its compatibility with classifier's receptive field.
At this moment it is required that image size = K * receptive_field. This will
be relaxed in future with the introduction of padding.
"""
if (len(X.shape) < 3) or (len(X.shape) > 4):
raise ValueError('Input has to have shape [n_samples, n_pixels_y, n_pixels_x] '
'or [n_samples, n_pixels_y, n_pixels_x, n_bands].')
self._samples = X.shape[0]
self._image_size = X.shape[1:3]
if (self._image_size[0] % self.receptive_field[0]) or (self._image_size[1] % self.receptive_field[1]):
raise ValueError('Image (%d,%d) and receptive fields (%d,%d) mismatch.\n'
'Resize your image to be divisible with receptive field.'
% (self._image_size[0], self._image_size[1], self.receptive_field[0],
self.receptive_field[1])) | Checks the image size and its compatibility with classifier's receptive field.
At this moment it is required that image size = K * receptive_field. This will
be relaxed in future with the introduction of padding. | Below is the the instruction that describes the task:
### Input:
Checks the image size and its compatibility with classifier's receptive field.
At this moment it is required that image size = K * receptive_field. This will
be relaxed in future with the introduction of padding.
### Response:
def _check_image(self, X):
"""
Checks the image size and its compatibility with classifier's receptive field.
At this moment it is required that image size = K * receptive_field. This will
be relaxed in future with the introduction of padding.
"""
if (len(X.shape) < 3) or (len(X.shape) > 4):
raise ValueError('Input has to have shape [n_samples, n_pixels_y, n_pixels_x] '
'or [n_samples, n_pixels_y, n_pixels_x, n_bands].')
self._samples = X.shape[0]
self._image_size = X.shape[1:3]
if (self._image_size[0] % self.receptive_field[0]) or (self._image_size[1] % self.receptive_field[1]):
raise ValueError('Image (%d,%d) and receptive fields (%d,%d) mismatch.\n'
'Resize your image to be divisible with receptive field.'
% (self._image_size[0], self._image_size[1], self.receptive_field[0],
self.receptive_field[1])) |
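The same shape constraints can be checked by hand; a toy sketch with assumed sizes (64x64 images, 16x16 receptive field) that satisfies the divisibility requirement.
import numpy as np

X = np.zeros((4, 64, 64, 3))        # [n_samples, n_pixels_y, n_pixels_x, n_bands]
receptive_field = (16, 16)
assert X.ndim in (3, 4)
assert X.shape[1] % receptive_field[0] == 0 and X.shape[2] % receptive_field[1] == 0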
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args | Return the arguments to be passed to the base cache key returned by `get_base_cache_key`. | Below is the the instruction that describes the task:
### Input:
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
### Response:
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args |
def execute(self, command,
istream=None,
with_extended_output=False,
with_exceptions=True,
as_process=False,
output_stream=None,
stdout_as_string=True,
kill_after_timeout=None,
with_stdout=True,
universal_newlines=False,
shell=None,
env=None,
max_chunk_size=io.DEFAULT_BUFFER_SIZE,
**subprocess_kwargs
):
"""Handles executing the command on the shell and consumes and returns
the returned information (stdout)
:param command:
The command argument list to execute.
It should be a string, or a sequence of program arguments. The
program to execute is the first item in the args sequence or string.
:param istream:
Standard input filehandle passed to subprocess.Popen.
:param with_extended_output:
Whether to return a (status, stdout, stderr) tuple.
:param with_exceptions:
Whether to raise an exception when git returns a non-zero status.
:param as_process:
Whether to return the created process instance directly from which
streams can be read on demand. This will render with_extended_output and
with_exceptions ineffective - the caller will have
to deal with the details himself.
It is important to note that the process will be placed into an AutoInterrupt
wrapper that will interrupt the process once it goes out of scope. If you
use the command in iterators, you should pass the whole process instance
instead of a single stream.
:param output_stream:
If set to a file-like object, data produced by the git command will be
output to the given stream directly.
This feature only has any effect if as_process is False. Processes will
always be created with a pipe due to issues with subprocess.
This merely is a workaround as data will be copied from the
output pipe to the given output stream directly.
Judging from the implementation, you shouldn't use this flag !
:param stdout_as_string:
if False, the commands standard output will be bytes. Otherwise, it will be
decoded into a string using the default encoding (usually utf-8).
The latter can fail, if the output contains binary data.
:param env:
A dictionary of environment variables to be passed to `subprocess.Popen`.
:param max_chunk_size:
Maximum number of bytes in one chunk of data passed to the output_stream in
one invocation of write() method. If the given number is not positive then
the default value is used.
:param subprocess_kwargs:
Keyword arguments to be passed to subprocess.Popen. Please note that
some of the valid kwargs are already set by this method, the ones you
specify may not be the same ones.
:param with_stdout: If True, default True, we open stdout on the created process
:param universal_newlines:
if True, pipes will be opened as text, and lines are split at
all known line endings.
:param shell:
Whether to invoke commands through a shell (see `Popen(..., shell=True)`).
It overrides :attr:`USE_SHELL` if it is not `None`.
:param kill_after_timeout:
To specify a timeout in seconds for the git command, after which the process
should be killed. This will have no effect if as_process is set to True. It is
set to None by default and will let the process run until the timeout is
explicitly specified. This feature is not supported on Windows. It's also worth
noting that kill_after_timeout uses SIGKILL, which can have negative side
effects on a repository. For example, stale locks in case of git gc could
render the repository incapable of accepting changes until the lock is manually
removed.
:return:
* str(output) if extended_output = False (Default)
* tuple(int(status), str(stdout), str(stderr)) if extended_output = True
if output_stream is True, the stdout value will be your output stream:
* output_stream if extended_output = False
* tuple(int(status), output_stream, str(stderr)) if extended_output = True
Note git is executed with LC_MESSAGES="C" to ensure consistent
output regardless of system language.
:raise GitCommandError:
:note:
If you add additional keyword arguments to the signature of this method,
you must update the execute_kwargs tuple housed in this module."""
if self.GIT_PYTHON_TRACE and (self.GIT_PYTHON_TRACE != 'full' or as_process):
log.info(' '.join(command))
# Allow the user to have the command executed in their working dir.
cwd = self._working_dir or os.getcwd()
# Start the process
inline_env = env
env = os.environ.copy()
# Attempt to force all output to plain ascii english, which is what some parsing code
# may expect.
# According to stackoverflow (http://goo.gl/l74GC8), we are setting LANGUAGE as well
# just to be sure.
env["LANGUAGE"] = "C"
env["LC_ALL"] = "C"
env.update(self._environment)
if inline_env is not None:
env.update(inline_env)
if is_win:
cmd_not_found_exception = OSError
if kill_after_timeout:
raise GitCommandError(command, '"kill_after_timeout" feature is not supported on Windows.')
else:
if sys.version_info[0] > 2:
cmd_not_found_exception = FileNotFoundError # NOQA # exists, flake8 unknown @UndefinedVariable
else:
cmd_not_found_exception = OSError
# end handle
stdout_sink = (PIPE
if with_stdout
else getattr(subprocess, 'DEVNULL', None) or open(os.devnull, 'wb'))
log.debug("Popen(%s, cwd=%s, universal_newlines=%s, shell=%s)",
command, cwd, universal_newlines, shell)
try:
proc = Popen(command,
env=env,
cwd=cwd,
bufsize=-1,
stdin=istream,
stderr=PIPE,
stdout=stdout_sink,
shell=shell is not None and shell or self.USE_SHELL,
close_fds=is_posix, # unsupported on windows
universal_newlines=universal_newlines,
creationflags=PROC_CREATIONFLAGS,
**subprocess_kwargs
)
except cmd_not_found_exception as err:
raise GitCommandNotFound(command, err)
if as_process:
return self.AutoInterrupt(proc, command)
def _kill_process(pid):
""" Callback method to kill a process. """
p = Popen(['ps', '--ppid', str(pid)], stdout=PIPE,
creationflags=PROC_CREATIONFLAGS)
child_pids = []
for line in p.stdout:
if len(line.split()) > 0:
local_pid = (line.split())[0]
if local_pid.isdigit():
child_pids.append(int(local_pid))
try:
# Windows does not have SIGKILL, so use SIGTERM instead
sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
os.kill(pid, sig)
for child_pid in child_pids:
try:
os.kill(child_pid, sig)
except OSError:
pass
kill_check.set() # tell the main routine that the process was killed
except OSError:
# It is possible that the process gets completed in the duration after timeout
# happens and before we try to kill the process.
pass
return
# end
if kill_after_timeout:
kill_check = threading.Event()
watchdog = threading.Timer(kill_after_timeout, _kill_process, args=(proc.pid,))
# Wait for the process to return
status = 0
stdout_value = b''
stderr_value = b''
try:
if output_stream is None:
if kill_after_timeout:
watchdog.start()
stdout_value, stderr_value = proc.communicate()
if kill_after_timeout:
watchdog.cancel()
if kill_check.isSet():
stderr_value = ('Timeout: the command "%s" did not complete in %d '
'secs.' % (" ".join(command), kill_after_timeout)).encode(defenc)
# strip trailing "\n"
if stdout_value.endswith(b"\n"):
stdout_value = stdout_value[:-1]
if stderr_value.endswith(b"\n"):
stderr_value = stderr_value[:-1]
status = proc.returncode
else:
max_chunk_size = max_chunk_size if max_chunk_size and max_chunk_size > 0 else io.DEFAULT_BUFFER_SIZE
stream_copy(proc.stdout, output_stream, max_chunk_size)
stdout_value = proc.stdout.read()
stderr_value = proc.stderr.read()
# strip trailing "\n"
if stderr_value.endswith(b"\n"):
stderr_value = stderr_value[:-1]
status = proc.wait()
# END stdout handling
finally:
proc.stdout.close()
proc.stderr.close()
if self.GIT_PYTHON_TRACE == 'full':
cmdstr = " ".join(command)
def as_text(stdout_value):
return not output_stream and safe_decode(stdout_value) or '<OUTPUT_STREAM>'
# end
if stderr_value:
log.info("%s -> %d; stdout: '%s'; stderr: '%s'",
cmdstr, status, as_text(stdout_value), safe_decode(stderr_value))
elif stdout_value:
log.info("%s -> %d; stdout: '%s'", cmdstr, status, as_text(stdout_value))
else:
log.info("%s -> %d", cmdstr, status)
# END handle debug printing
if with_exceptions and status != 0:
raise GitCommandError(command, status, stderr_value, stdout_value)
if isinstance(stdout_value, bytes) and stdout_as_string: # could also be output_stream
stdout_value = safe_decode(stdout_value)
# Allow access to the command's status code
if with_extended_output:
return (status, stdout_value, safe_decode(stderr_value))
else:
return stdout_value | Handles executing the command on the shell and consumes and returns
the returned information (stdout)
:param command:
The command argument list to execute.
It should be a string, or a sequence of program arguments. The
program to execute is the first item in the args sequence or string.
:param istream:
Standard input filehandle passed to subprocess.Popen.
:param with_extended_output:
Whether to return a (status, stdout, stderr) tuple.
:param with_exceptions:
Whether to raise an exception when git returns a non-zero status.
:param as_process:
Whether to return the created process instance directly from which
streams can be read on demand. This will render with_extended_output and
with_exceptions ineffective - the caller will have
to deal with the details himself.
It is important to note that the process will be placed into an AutoInterrupt
wrapper that will interrupt the process once it goes out of scope. If you
use the command in iterators, you should pass the whole process instance
instead of a single stream.
:param output_stream:
If set to a file-like object, data produced by the git command will be
output to the given stream directly.
This feature only has any effect if as_process is False. Processes will
always be created with a pipe due to issues with subprocess.
This merely is a workaround as data will be copied from the
output pipe to the given output stream directly.
Judging from the implementation, you shouldn't use this flag !
:param stdout_as_string:
if False, the commands standard output will be bytes. Otherwise, it will be
decoded into a string using the default encoding (usually utf-8).
The latter can fail, if the output contains binary data.
:param env:
A dictionary of environment variables to be passed to `subprocess.Popen`.
:param max_chunk_size:
Maximum number of bytes in one chunk of data passed to the output_stream in
one invocation of write() method. If the given number is not positive then
the default value is used.
:param subprocess_kwargs:
Keyword arguments to be passed to subprocess.Popen. Please note that
some of the valid kwargs are already set by this method, the ones you
specify may not be the same ones.
:param with_stdout: If True, default True, we open stdout on the created process
:param universal_newlines:
if True, pipes will be opened as text, and lines are split at
all known line endings.
:param shell:
Whether to invoke commands through a shell (see `Popen(..., shell=True)`).
It overrides :attr:`USE_SHELL` if it is not `None`.
:param kill_after_timeout:
To specify a timeout in seconds for the git command, after which the process
should be killed. This will have no effect if as_process is set to True. It is
set to None by default and will let the process run until the timeout is
explicitly specified. This feature is not supported on Windows. It's also worth
noting that kill_after_timeout uses SIGKILL, which can have negative side
effects on a repository. For example, stale locks in case of git gc could
render the repository incapable of accepting changes until the lock is manually
removed.
:return:
* str(output) if extended_output = False (Default)
* tuple(int(status), str(stdout), str(stderr)) if extended_output = True
if output_stream is True, the stdout value will be your output stream:
* output_stream if extended_output = False
* tuple(int(status), output_stream, str(stderr)) if extended_output = True
Note git is executed with LC_MESSAGES="C" to ensure consistent
output regardless of system language.
:raise GitCommandError:
:note:
If you add additional keyword arguments to the signature of this method,
you must update the execute_kwargs tuple housed in this module. | Below is the the instruction that describes the task:
### Input:
Handles executing the command on the shell and consumes and returns
the returned information (stdout)
:param command:
The command argument list to execute.
It should be a string, or a sequence of program arguments. The
program to execute is the first item in the args sequence or string.
:param istream:
Standard input filehandle passed to subprocess.Popen.
:param with_extended_output:
Whether to return a (status, stdout, stderr) tuple.
:param with_exceptions:
Whether to raise an exception when git returns a non-zero status.
:param as_process:
Whether to return the created process instance directly from which
streams can be read on demand. This will render with_extended_output and
with_exceptions ineffective - the caller will have
to deal with the details himself.
It is important to note that the process will be placed into an AutoInterrupt
wrapper that will interrupt the process once it goes out of scope. If you
use the command in iterators, you should pass the whole process instance
instead of a single stream.
:param output_stream:
If set to a file-like object, data produced by the git command will be
output to the given stream directly.
This feature only has any effect if as_process is False. Processes will
always be created with a pipe due to issues with subprocess.
This merely is a workaround as data will be copied from the
output pipe to the given output stream directly.
Judging from the implementation, you shouldn't use this flag !
:param stdout_as_string:
if False, the commands standard output will be bytes. Otherwise, it will be
decoded into a string using the default encoding (usually utf-8).
The latter can fail, if the output contains binary data.
:param env:
A dictionary of environment variables to be passed to `subprocess.Popen`.
:param max_chunk_size:
Maximum number of bytes in one chunk of data passed to the output_stream in
one invocation of write() method. If the given number is not positive then
the default value is used.
:param subprocess_kwargs:
Keyword arguments to be passed to subprocess.Popen. Please note that
some of the valid kwargs are already set by this method, the ones you
specify may not be the same ones.
:param with_stdout: If True, default True, we open stdout on the created process
:param universal_newlines:
if True, pipes will be opened as text, and lines are split at
all known line endings.
:param shell:
Whether to invoke commands through a shell (see `Popen(..., shell=True)`).
It overrides :attr:`USE_SHELL` if it is not `None`.
:param kill_after_timeout:
To specify a timeout in seconds for the git command, after which the process
should be killed. This will have no effect if as_process is set to True. It is
set to None by default and will let the process run until the timeout is
explicitly specified. This feature is not supported on Windows. It's also worth
noting that kill_after_timeout uses SIGKILL, which can have negative side
effects on a repository. For example, stale locks in case of git gc could
render the repository incapable of accepting changes until the lock is manually
removed.
:return:
* str(output) if extended_output = False (Default)
* tuple(int(status), str(stdout), str(stderr)) if extended_output = True
if output_stream is True, the stdout value will be your output stream:
* output_stream if extended_output = False
* tuple(int(status), output_stream, str(stderr)) if extended_output = True
Note git is executed with LC_MESSAGES="C" to ensure consistent
output regardless of system language.
:raise GitCommandError:
:note:
If you add additional keyword arguments to the signature of this method,
you must update the execute_kwargs tuple housed in this module.
### Response:
def execute(self, command,
istream=None,
with_extended_output=False,
with_exceptions=True,
as_process=False,
output_stream=None,
stdout_as_string=True,
kill_after_timeout=None,
with_stdout=True,
universal_newlines=False,
shell=None,
env=None,
max_chunk_size=io.DEFAULT_BUFFER_SIZE,
**subprocess_kwargs
):
"""Handles executing the command on the shell and consumes and returns
the returned information (stdout)
:param command:
The command argument list to execute.
It should be a string, or a sequence of program arguments. The
program to execute is the first item in the args sequence or string.
:param istream:
Standard input filehandle passed to subprocess.Popen.
:param with_extended_output:
Whether to return a (status, stdout, stderr) tuple.
:param with_exceptions:
Whether to raise an exception when git returns a non-zero status.
:param as_process:
Whether to return the created process instance directly from which
streams can be read on demand. This will render with_extended_output and
with_exceptions ineffective - the caller will have
to deal with the details himself.
It is important to note that the process will be placed into an AutoInterrupt
wrapper that will interrupt the process once it goes out of scope. If you
use the command in iterators, you should pass the whole process instance
instead of a single stream.
:param output_stream:
If set to a file-like object, data produced by the git command will be
output to the given stream directly.
This feature only has any effect if as_process is False. Processes will
always be created with a pipe due to issues with subprocess.
This merely is a workaround as data will be copied from the
output pipe to the given output stream directly.
Judging from the implementation, you shouldn't use this flag !
:param stdout_as_string:
if False, the commands standard output will be bytes. Otherwise, it will be
decoded into a string using the default encoding (usually utf-8).
The latter can fail, if the output contains binary data.
:param env:
A dictionary of environment variables to be passed to `subprocess.Popen`.
:param max_chunk_size:
Maximum number of bytes in one chunk of data passed to the output_stream in
one invocation of write() method. If the given number is not positive then
the default value is used.
:param subprocess_kwargs:
Keyword arguments to be passed to subprocess.Popen. Please note that
some of the valid kwargs are already set by this method, the ones you
specify may not be the same ones.
:param with_stdout: If True, default True, we open stdout on the created process
:param universal_newlines:
if True, pipes will be opened as text, and lines are split at
all known line endings.
:param shell:
Whether to invoke commands through a shell (see `Popen(..., shell=True)`).
It overrides :attr:`USE_SHELL` if it is not `None`.
:param kill_after_timeout:
To specify a timeout in seconds for the git command, after which the process
should be killed. This will have no effect if as_process is set to True. It is
set to None by default and will let the process run until the timeout is
explicitly specified. This feature is not supported on Windows. It's also worth
noting that kill_after_timeout uses SIGKILL, which can have negative side
effects on a repository. For example, stale locks in case of git gc could
render the repository incapable of accepting changes until the lock is manually
removed.
:return:
* str(output) if extended_output = False (Default)
* tuple(int(status), str(stdout), str(stderr)) if extended_output = True
if output_stream is True, the stdout value will be your output stream:
* output_stream if extended_output = False
* tuple(int(status), output_stream, str(stderr)) if extended_output = True
Note git is executed with LC_MESSAGES="C" to ensure consistent
output regardless of system language.
:raise GitCommandError:
:note:
If you add additional keyword arguments to the signature of this method,
you must update the execute_kwargs tuple housed in this module."""
if self.GIT_PYTHON_TRACE and (self.GIT_PYTHON_TRACE != 'full' or as_process):
log.info(' '.join(command))
# Allow the user to have the command executed in their working dir.
cwd = self._working_dir or os.getcwd()
# Start the process
inline_env = env
env = os.environ.copy()
# Attempt to force all output to plain ascii english, which is what some parsing code
# may expect.
# According to stackoverflow (http://goo.gl/l74GC8), we are setting LANGUAGE as well
# just to be sure.
env["LANGUAGE"] = "C"
env["LC_ALL"] = "C"
env.update(self._environment)
if inline_env is not None:
env.update(inline_env)
if is_win:
cmd_not_found_exception = OSError
if kill_after_timeout:
raise GitCommandError(command, '"kill_after_timeout" feature is not supported on Windows.')
else:
if sys.version_info[0] > 2:
cmd_not_found_exception = FileNotFoundError # NOQA # exists, flake8 unknown @UndefinedVariable
else:
cmd_not_found_exception = OSError
# end handle
stdout_sink = (PIPE
if with_stdout
else getattr(subprocess, 'DEVNULL', None) or open(os.devnull, 'wb'))
log.debug("Popen(%s, cwd=%s, universal_newlines=%s, shell=%s)",
command, cwd, universal_newlines, shell)
try:
proc = Popen(command,
env=env,
cwd=cwd,
bufsize=-1,
stdin=istream,
stderr=PIPE,
stdout=stdout_sink,
shell=shell is not None and shell or self.USE_SHELL,
close_fds=is_posix, # unsupported on windows
universal_newlines=universal_newlines,
creationflags=PROC_CREATIONFLAGS,
**subprocess_kwargs
)
except cmd_not_found_exception as err:
raise GitCommandNotFound(command, err)
if as_process:
return self.AutoInterrupt(proc, command)
def _kill_process(pid):
""" Callback method to kill a process. """
p = Popen(['ps', '--ppid', str(pid)], stdout=PIPE,
creationflags=PROC_CREATIONFLAGS)
child_pids = []
for line in p.stdout:
if len(line.split()) > 0:
local_pid = (line.split())[0]
if local_pid.isdigit():
child_pids.append(int(local_pid))
try:
# Windows does not have SIGKILL, so use SIGTERM instead
sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
os.kill(pid, sig)
for child_pid in child_pids:
try:
os.kill(child_pid, sig)
except OSError:
pass
kill_check.set() # tell the main routine that the process was killed
except OSError:
# It is possible that the process gets completed in the duration after timeout
# happens and before we try to kill the process.
pass
return
# end
if kill_after_timeout:
kill_check = threading.Event()
watchdog = threading.Timer(kill_after_timeout, _kill_process, args=(proc.pid,))
# Wait for the process to return
status = 0
stdout_value = b''
stderr_value = b''
try:
if output_stream is None:
if kill_after_timeout:
watchdog.start()
stdout_value, stderr_value = proc.communicate()
if kill_after_timeout:
watchdog.cancel()
if kill_check.isSet():
stderr_value = ('Timeout: the command "%s" did not complete in %d '
'secs.' % (" ".join(command), kill_after_timeout)).encode(defenc)
# strip trailing "\n"
if stdout_value.endswith(b"\n"):
stdout_value = stdout_value[:-1]
if stderr_value.endswith(b"\n"):
stderr_value = stderr_value[:-1]
status = proc.returncode
else:
max_chunk_size = max_chunk_size if max_chunk_size and max_chunk_size > 0 else io.DEFAULT_BUFFER_SIZE
stream_copy(proc.stdout, output_stream, max_chunk_size)
stdout_value = proc.stdout.read()
stderr_value = proc.stderr.read()
# strip trailing "\n"
if stderr_value.endswith(b"\n"):
stderr_value = stderr_value[:-1]
status = proc.wait()
# END stdout handling
finally:
proc.stdout.close()
proc.stderr.close()
if self.GIT_PYTHON_TRACE == 'full':
cmdstr = " ".join(command)
def as_text(stdout_value):
return not output_stream and safe_decode(stdout_value) or '<OUTPUT_STREAM>'
# end
if stderr_value:
log.info("%s -> %d; stdout: '%s'; stderr: '%s'",
cmdstr, status, as_text(stdout_value), safe_decode(stderr_value))
elif stdout_value:
log.info("%s -> %d; stdout: '%s'", cmdstr, status, as_text(stdout_value))
else:
log.info("%s -> %d", cmdstr, status)
# END handle debug printing
if with_exceptions and status != 0:
raise GitCommandError(command, status, stderr_value, stdout_value)
if isinstance(stdout_value, bytes) and stdout_as_string: # could also be output_stream
stdout_value = safe_decode(stdout_value)
# Allow access to the command's status code
if with_extended_output:
return (status, stdout_value, safe_decode(stderr_value))
else:
return stdout_value |
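A hedged usage sketch through GitPython's command wrapper; the repository path is an example and the exact import location may differ between versions.
from git import Git

g = Git('/path/to/repo')
sha = g.execute(['git', 'rev-parse', 'HEAD'])                 # str by default
rc, out, err = g.execute(['git', 'status', '--short'],
                         with_extended_output=True)           # (status, stdout, stderr)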
def encode_positions(self,
positions: mx.sym.Symbol,
data: mx.sym.Symbol) -> mx.sym.Symbol:
"""
:param positions: (batch_size,)
:param data: (batch_size, num_embed)
:return: (batch_size, num_embed)
"""
# (batch_size, 1)
positions = mx.sym.expand_dims(positions, axis=1)
# (num_embed,)
channels = mx.sym.arange(0, self.num_embed // 2)
# (1, num_embed,)
scaling = mx.sym.expand_dims(1. / mx.sym.pow(10000, (2 * channels) / self.num_embed), axis=0)
# (batch_size, num_embed/2)
scaled_positions = mx.sym.dot(positions, scaling)
sin = mx.sym.sin(scaled_positions)
cos = mx.sym.cos(scaled_positions)
# (batch_size, num_embed)
pos_embedding = mx.sym.concat(sin, cos, dim=1)
if self.scale_up_input:
data = data * (self.num_embed ** 0.5)
if self.scale_down_positions:
pos_embedding = pos_embedding * (self.num_embed ** -0.5)
pos_embedding = mx.sym.BlockGrad(pos_embedding)
return mx.sym.broadcast_add(data, pos_embedding, name="%s_add" % self.prefix) | :param positions: (batch_size,)
:param data: (batch_size, num_embed)
:return: (batch_size, num_embed) | Below is the the instruction that describes the task:
### Input:
:param positions: (batch_size,)
:param data: (batch_size, num_embed)
:return: (batch_size, num_embed)
### Response:
def encode_positions(self,
positions: mx.sym.Symbol,
data: mx.sym.Symbol) -> mx.sym.Symbol:
"""
:param positions: (batch_size,)
:param data: (batch_size, num_embed)
:return: (batch_size, num_embed)
"""
# (batch_size, 1)
positions = mx.sym.expand_dims(positions, axis=1)
# (num_embed,)
channels = mx.sym.arange(0, self.num_embed // 2)
# (1, num_embed,)
scaling = mx.sym.expand_dims(1. / mx.sym.pow(10000, (2 * channels) / self.num_embed), axis=0)
# (batch_size, num_embed/2)
scaled_positions = mx.sym.dot(positions, scaling)
sin = mx.sym.sin(scaled_positions)
cos = mx.sym.cos(scaled_positions)
# (batch_size, num_embed)
pos_embedding = mx.sym.concat(sin, cos, dim=1)
if self.scale_up_input:
data = data * (self.num_embed ** 0.5)
if self.scale_down_positions:
pos_embedding = pos_embedding * (self.num_embed ** -0.5)
pos_embedding = mx.sym.BlockGrad(pos_embedding)
return mx.sym.broadcast_add(data, pos_embedding, name="%s_add" % self.prefix) |
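The same sinusoidal encoding is easy to verify numerically; a NumPy sketch of the arithmetic in the symbolic graph above (num_embed is assumed even).
import numpy as np

def sinusoid(positions, num_embed):
    positions = positions.astype(float)[:, None]                      # (batch, 1)
    channels = np.arange(num_embed // 2)
    scaling = 1.0 / np.power(10000, (2 * channels) / num_embed)       # (num_embed/2,)
    scaled = positions * scaling                                       # (batch, num_embed/2)
    return np.concatenate([np.sin(scaled), np.cos(scaled)], axis=1)    # (batch, num_embed)

print(sinusoid(np.arange(3), 8).shape)   # (3, 8)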
def _rt_members_delete(self, element, statement):
"""Finds all the member declarations in 'statement' and removes the
corresponding instances from element.members."""
removals = self.vparser.parse(statement, None)
for member in removals:
if member in element.members:
del element.members[member] | Finds all the member declarations in 'statement' and removes the
corresponding instances from element.members. | Below is the the instruction that describes the task:
### Input:
Finds all the member declarations in 'statement' and removes the
corresponding instances from element.members.
### Response:
def _rt_members_delete(self, element, statement):
"""Finds all the member declarations in 'statement' and removes the
corresponding instances from element.members."""
removals = self.vparser.parse(statement, None)
for member in removals:
if member in element.members:
del element.members[member] |
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run() | Sets the result for ``key`` and attempts to resume the generator. | Below is the the instruction that describes the task:
### Input:
Sets the result for ``key`` and attempts to resume the generator.
### Response:
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run() |
def EntryTagName(self, entry):
"""Creates the name inside an enumeration for distinguishing data
types."""
name = "%s_%s" % (self._name, entry.Name())
return name.upper() | Creates the name inside an enumeration for distinguishing data
types. | Below is the the instruction that describes the task:
### Input:
Creates the name inside an enumeration for distinguishing data
types.
### Response:
def EntryTagName(self, entry):
"""Creates the name inside an enumeration for distinguishing data
types."""
name = "%s_%s" % (self._name, entry.Name())
return name.upper() |
def _record_call(func):
"""
A decorator that logs a call into the global error context.
This is probably for internal use only.
"""
@wraps(func)
def wrapper(*args, **kwargs):
global global_error_context
# log a call as about to take place
if global_error_context is not None:
key = CallLogKey(name=func.__name__,
args=[serialize_object_for_logging(arg) for arg in args],
kwargs={k: serialize_object_for_logging(v) for k, v in kwargs.items()})
pre_entry = CallLogValue(timestamp_in=datetime.utcnow(),
timestamp_out=None,
return_value=None)
global_error_context.log[key] = pre_entry
val = func(*args, **kwargs)
# poke the return value of that call in
if global_error_context is not None:
post_entry = CallLogValue(timestamp_in=pre_entry.timestamp_in,
timestamp_out=datetime.utcnow(),
return_value=serialize_object_for_logging(val))
global_error_context.log[key] = post_entry
return val
return wrapper | A decorator that logs a call into the global error context.
This is probably for internal use only. | Below is the the instruction that describes the task:
### Input:
A decorator that logs a call into the global error context.
This is probably for internal use only.
### Response:
def _record_call(func):
"""
A decorator that logs a call into the global error context.
This is probably for internal use only.
"""
@wraps(func)
def wrapper(*args, **kwargs):
global global_error_context
# log a call as about to take place
if global_error_context is not None:
key = CallLogKey(name=func.__name__,
args=[serialize_object_for_logging(arg) for arg in args],
kwargs={k: serialize_object_for_logging(v) for k, v in kwargs.items()})
pre_entry = CallLogValue(timestamp_in=datetime.utcnow(),
timestamp_out=None,
return_value=None)
global_error_context.log[key] = pre_entry
val = func(*args, **kwargs)
# poke the return value of that call in
if global_error_context is not None:
post_entry = CallLogValue(timestamp_in=pre_entry.timestamp_in,
timestamp_out=datetime.utcnow(),
return_value=serialize_object_for_logging(val))
global_error_context.log[key] = post_entry
return val
return wrapper |
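Applied as a decorator it is transparent when no error context is active; a small hedged sketch in which the decorated function is hypothetical.
@_record_call
def fetch(url, timeout=5):
    return 'ok'

fetch('https://example.com', timeout=2)
# with an active global_error_context, the call's name, serialized arguments,
# timestamps and return value are stored in global_error_context.log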
def find_clique_embedding(k, m=None, target_graph=None):
"""Find an embedding of a k-sized clique on a Pegasus graph (target_graph).
This clique is found by transforming the Pegasus graph into a K2,2 Chimera graph and then
applying a Chimera clique finding algorithm. The results are then converted back in terms of
Pegasus coordinates.
Note: If target_graph is None, m will be used to generate a m-by-m Pegasus graph. Hence m and
target_graph cannot both be None.
Args:
k (int/iterable/:obj:`networkx.Graph`): Number of members in the requested clique; list of nodes;
a complete graph that you want to embed onto the target_graph
m (int): Number of tiles in a row of a square Pegasus graph
target_graph (:obj:`networkx.Graph`): A Pegasus graph
Returns:
dict: A dictionary representing target_graph's clique embedding. Each dictionary key
represents a node in said clique. Each corresponding dictionary value is a list of pegasus
coordinates that should be chained together to represent said node.
"""
# Organize parameter values
if target_graph is None:
if m is None:
raise TypeError("m and target_graph cannot both be None.")
target_graph = pegasus_graph(m)
m = target_graph.graph['rows'] # We only support square Pegasus graphs
_, nodes = k
# Deal with differences in ints vs coordinate target_graphs
if target_graph.graph['labels'] == 'nice':
fwd_converter = get_nice_to_pegasus_fn(m = m)
back_converter = get_pegasus_to_nice_fn(m = m)
pegasus_coords = [fwd_converter(*p) for p in target_graph.nodes]
back_translate = lambda embedding: {key: [back_converter(*p) for p in chain]
for key, chain in embedding.items()}
elif target_graph.graph['labels'] == 'int':
# Convert nodes in terms of Pegasus coordinates
coord_converter = pegasus_coordinates(m)
pegasus_coords = map(coord_converter.tuple, target_graph.nodes)
# A function to convert our final coordinate embedding to an ints embedding
back_translate = lambda embedding: {key: list(coord_converter.ints(chain))
for key, chain in embedding.items()}
else:
pegasus_coords = target_graph.nodes
back_translate = lambda embedding: embedding
# Break each Pegasus qubits into six Chimera fragments
# Note: By breaking the graph in this way, you end up with a K2,2 Chimera graph
fragment_tuple = get_tuple_fragmentation_fn(target_graph)
fragments = fragment_tuple(pegasus_coords)
# Create a K2,2 Chimera graph
# Note: 6 * m because Pegasus qubits split into six pieces, so the number of rows and columns
# get multiplied by six
chim_m = 6 * m
chim_graph = chimera_graph(chim_m, t=2, coordinates=True)
# Determine valid fragment couplers in a K2,2 Chimera graph
edges = chim_graph.subgraph(fragments).edges()
# Find clique embedding in K2,2 Chimera graph
embedding_processor = processor(edges, M=chim_m, N=chim_m, L=2, linear=False)
chimera_clique_embedding = embedding_processor.tightestNativeClique(len(nodes))
# Convert chimera fragment embedding in terms of Pegasus coordinates
defragment_tuple = get_tuple_defragmentation_fn(target_graph)
pegasus_clique_embedding = map(defragment_tuple, chimera_clique_embedding)
pegasus_clique_embedding = dict(zip(nodes, pegasus_clique_embedding))
pegasus_clique_embedding = back_translate(pegasus_clique_embedding)
if len(pegasus_clique_embedding) != len(nodes):
raise ValueError("No clique embedding found")
return pegasus_clique_embedding | Find an embedding of a k-sized clique on a Pegasus graph (target_graph).
This clique is found by transforming the Pegasus graph into a K2,2 Chimera graph and then
applying a Chimera clique finding algorithm. The results are then converted back in terms of
Pegasus coordinates.
Note: If target_graph is None, m will be used to generate a m-by-m Pegasus graph. Hence m and
target_graph cannot both be None.
Args:
k (int/iterable/:obj:`networkx.Graph`): Number of members in the requested clique; list of nodes;
a complete graph that you want to embed onto the target_graph
m (int): Number of tiles in a row of a square Pegasus graph
target_graph (:obj:`networkx.Graph`): A Pegasus graph
Returns:
dict: A dictionary representing target_graph's clique embedding. Each dictionary key
represents a node in said clique. Each corresponding dictionary value is a list of pegasus
coordinates that should be chained together to represent said node. | Below is the the instruction that describes the task:
### Input:
Find an embedding of a k-sized clique on a Pegasus graph (target_graph).
This clique is found by transforming the Pegasus graph into a K2,2 Chimera graph and then
applying a Chimera clique finding algorithm. The results are then converted back in terms of
Pegasus coordinates.
Note: If target_graph is None, m will be used to generate a m-by-m Pegasus graph. Hence m and
target_graph cannot both be None.
Args:
k (int/iterable/:obj:`networkx.Graph`): Number of members in the requested clique; list of nodes;
a complete graph that you want to embed onto the target_graph
m (int): Number of tiles in a row of a square Pegasus graph
target_graph (:obj:`networkx.Graph`): A Pegasus graph
Returns:
dict: A dictionary representing target_graph's clique embedding. Each dictionary key
represents a node in said clique. Each corresponding dictionary value is a list of pegasus
coordinates that should be chained together to represent said node.
### Response:
def find_clique_embedding(k, m=None, target_graph=None):
"""Find an embedding of a k-sized clique on a Pegasus graph (target_graph).
This clique is found by transforming the Pegasus graph into a K2,2 Chimera graph and then
applying a Chimera clique finding algorithm. The results are then converted back in terms of
Pegasus coordinates.
Note: If target_graph is None, m will be used to generate a m-by-m Pegasus graph. Hence m and
target_graph cannot both be None.
Args:
k (int/iterable/:obj:`networkx.Graph`): Number of members in the requested clique; list of nodes;
a complete graph that you want to embed onto the target_graph
m (int): Number of tiles in a row of a square Pegasus graph
target_graph (:obj:`networkx.Graph`): A Pegasus graph
Returns:
dict: A dictionary representing target_graph's clique embedding. Each dictionary key
represents a node in said clique. Each corresponding dictionary value is a list of pegasus
coordinates that should be chained together to represent said node.
"""
# Organize parameter values
if target_graph is None:
if m is None:
raise TypeError("m and target_graph cannot both be None.")
target_graph = pegasus_graph(m)
m = target_graph.graph['rows'] # We only support square Pegasus graphs
_, nodes = k
# Deal with differences in ints vs coordinate target_graphs
if target_graph.graph['labels'] == 'nice':
fwd_converter = get_nice_to_pegasus_fn(m = m)
back_converter = get_pegasus_to_nice_fn(m = m)
pegasus_coords = [fwd_converter(*p) for p in target_graph.nodes]
back_translate = lambda embedding: {key: [back_converter(*p) for p in chain]
for key, chain in embedding.items()}
elif target_graph.graph['labels'] == 'int':
# Convert nodes in terms of Pegasus coordinates
coord_converter = pegasus_coordinates(m)
pegasus_coords = map(coord_converter.tuple, target_graph.nodes)
# A function to convert our final coordinate embedding to an ints embedding
back_translate = lambda embedding: {key: list(coord_converter.ints(chain))
for key, chain in embedding.items()}
else:
pegasus_coords = target_graph.nodes
back_translate = lambda embedding: embedding
# Break each Pegasus qubits into six Chimera fragments
# Note: By breaking the graph in this way, you end up with a K2,2 Chimera graph
fragment_tuple = get_tuple_fragmentation_fn(target_graph)
fragments = fragment_tuple(pegasus_coords)
# Create a K2,2 Chimera graph
# Note: 6 * m because Pegasus qubits split into six pieces, so the number of rows and columns
# get multiplied by six
chim_m = 6 * m
chim_graph = chimera_graph(chim_m, t=2, coordinates=True)
# Determine valid fragment couplers in a K2,2 Chimera graph
edges = chim_graph.subgraph(fragments).edges()
# Find clique embedding in K2,2 Chimera graph
embedding_processor = processor(edges, M=chim_m, N=chim_m, L=2, linear=False)
chimera_clique_embedding = embedding_processor.tightestNativeClique(len(nodes))
# Convert chimera fragment embedding in terms of Pegasus coordinates
defragment_tuple = get_tuple_defragmentation_fn(target_graph)
pegasus_clique_embedding = map(defragment_tuple, chimera_clique_embedding)
pegasus_clique_embedding = dict(zip(nodes, pegasus_clique_embedding))
pegasus_clique_embedding = back_translate(pegasus_clique_embedding)
if len(pegasus_clique_embedding) != len(nodes):
raise ValueError("No clique embedding found")
    return pegasus_clique_embedding
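Usage sketch (hedged): the bare `_, nodes = k` unpacking above implies that upstream a decorator such as networkx's nodes_or_number normalizes k into a (count, node-list) pair before this body runs; the calls below assume that normalization is in place, and the sizes are illustrative only.

emb = find_clique_embedding(4, m=2)                  # embed K4 on a 2-by-2 Pegasus graph
for node, chain in emb.items():
    print(node, chain)                               # each chain is a list of Pegasus coordinates
target = pegasus_graph(2)
emb = find_clique_embedding(4, target_graph=target)  # or pass an explicit target graph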
def quit(self):
"""
The memcached "quit" command.
This will close the connection with memcached. Calling any other
method on this object will re-open the connection, so this object can
be re-used after quit.
"""
cmd = b"quit\r\n"
self._misc_cmd([cmd], b'quit', True)
self.close() | The memcached "quit" command.
This will close the connection with memcached. Calling any other
method on this object will re-open the connection, so this object can
be re-used after quit. | Below is the the instruction that describes the task:
### Input:
The memcached "quit" command.
This will close the connection with memcached. Calling any other
method on this object will re-open the connection, so this object can
be re-used after quit.
### Response:
def quit(self):
"""
The memcached "quit" command.
This will close the connection with memcached. Calling any other
method on this object will re-open the connection, so this object can
be re-used after quit.
"""
cmd = b"quit\r\n"
self._misc_cmd([cmd], b'quit', True)
self.close() |
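A short usage sketch, assuming a pymemcache-style client class that this method belongs to; host and port are placeholders.

from pymemcache.client.base import Client

client = Client(('localhost', 11211))   # placeholder address
client.set(b'greeting', b'hello')
client.quit()                           # closes the socket
client.get(b'greeting')                 # any later call transparently re-opens the connection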
def reindex_repo_dev_panel(self, project, repository):
"""
Reindex all of the Jira issues related to this repository, including branches and pull requests.
This automatically happens as part of an upgrade, and calling this manually should only be required
if something unforeseen happens and the index becomes out of sync.
The authenticated user must have REPO_ADMIN permission for the specified repository to call this resource.
:param project:
:param repository:
:return:
"""
url = 'rest/jira-dev/1.0/projects/{projectKey}/repos/{repositorySlug}/reindex'.format(projectKey=project,
repositorySlug=repository)
return self.post(url) | Reindex all of the Jira issues related to this repository, including branches and pull requests.
This automatically happens as part of an upgrade, and calling this manually should only be required
if something unforeseen happens and the index becomes out of sync.
The authenticated user must have REPO_ADMIN permission for the specified repository to call this resource.
:param project:
:param repository:
:return: | Below is the the instruction that describes the task:
### Input:
Reindex all of the Jira issues related to this repository, including branches and pull requests.
This automatically happens as part of an upgrade, and calling this manually should only be required
if something unforeseen happens and the index becomes out of sync.
The authenticated user must have REPO_ADMIN permission for the specified repository to call this resource.
:param project:
:param repository:
:return:
### Response:
def reindex_repo_dev_panel(self, project, repository):
"""
Reindex all of the Jira issues related to this repository, including branches and pull requests.
This automatically happens as part of an upgrade, and calling this manually should only be required
if something unforeseen happens and the index becomes out of sync.
The authenticated user must have REPO_ADMIN permission for the specified repository to call this resource.
:param project:
:param repository:
:return:
"""
url = 'rest/jira-dev/1.0/projects/{projectKey}/repos/{repositorySlug}/reindex'.format(projectKey=project,
repositorySlug=repository)
return self.post(url) |
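A hedged usage sketch, assuming the atlassian-python-api style Bitbucket client this method appears to belong to; the URL and credentials are placeholders.

from atlassian import Bitbucket

bitbucket = Bitbucket(url='https://bitbucket.example.com',
                      username='admin', password='secret')      # placeholders
bitbucket.reindex_repo_dev_panel('PROJ', 'my-repo')             # requires REPO_ADMIN on the repository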
def _get_ruuvitag_datas(macs=[], search_duratio_sec=None, run_flag=RunFlag(), bt_device=''):
"""
Get data from BluetoothCommunication and handle data encoding.
Args:
macs (list): MAC addresses. Default empty list
search_duratio_sec (int): Search duration in seconds. Default None
run_flag (object): RunFlag object. Function executes while run_flag.running. Default new RunFlag
bt_device (string): Bluetooth device id
Yields:
tuple: MAC and State of RuuviTag sensor data
"""
mac_blacklist = []
start_time = time.time()
data_iter = ble.get_datas(mac_blacklist, bt_device)
for ble_data in data_iter:
# Check duration
if search_duratio_sec and time.time() - start_time > search_duratio_sec:
data_iter.send(StopIteration)
break
# Check running flag
if not run_flag.running:
data_iter.send(StopIteration)
break
# Check MAC whitelist
if macs and not ble_data[0] in macs:
continue
(data_format, data) = RuuviTagSensor.convert_data(ble_data[1])
# Check that encoded data is valid RuuviTag data and it is sensor data
# If data is not valid RuuviTag data add MAC to blacklist
if data is not None:
state = get_decoder(data_format).decode_data(data)
if state is not None:
yield (ble_data[0], state)
else:
mac_blacklist.append(ble_data[0]) | Get data from BluetoothCommunication and handle data encoding.
Args:
macs (list): MAC addresses. Default empty list
search_duratio_sec (int): Search duration in seconds. Default None
run_flag (object): RunFlag object. Function executes while run_flag.running. Default new RunFlag
bt_device (string): Bluetooth device id
Yields:
tuple: MAC and State of RuuviTag sensor data | Below is the the instruction that describes the task:
### Input:
Get data from BluetoothCommunication and handle data encoding.
Args:
macs (list): MAC addresses. Default empty list
search_duratio_sec (int): Search duration in seconds. Default None
run_flag (object): RunFlag object. Function executes while run_flag.running. Default new RunFlag
bt_device (string): Bluetooth device id
Yields:
tuple: MAC and State of RuuviTag sensor data
### Response:
def _get_ruuvitag_datas(macs=[], search_duratio_sec=None, run_flag=RunFlag(), bt_device=''):
"""
Get data from BluetoothCommunication and handle data encoding.
Args:
macs (list): MAC addresses. Default empty list
search_duratio_sec (int): Search duration in seconds. Default None
run_flag (object): RunFlag object. Function executes while run_flag.running. Default new RunFlag
bt_device (string): Bluetooth device id
Yields:
tuple: MAC and State of RuuviTag sensor data
"""
mac_blacklist = []
start_time = time.time()
data_iter = ble.get_datas(mac_blacklist, bt_device)
for ble_data in data_iter:
# Check duration
if search_duratio_sec and time.time() - start_time > search_duratio_sec:
data_iter.send(StopIteration)
break
# Check running flag
if not run_flag.running:
data_iter.send(StopIteration)
break
# Check MAC whitelist
if macs and not ble_data[0] in macs:
continue
(data_format, data) = RuuviTagSensor.convert_data(ble_data[1])
# Check that encoded data is valid RuuviTag data and it is sensor data
# If data is not valid RuuviTag data add MAC to blacklist
if data is not None:
state = get_decoder(data_format).decode_data(data)
if state is not None:
yield (ble_data[0], state)
else:
mac_blacklist.append(ble_data[0]) |
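A hedged usage sketch, assuming this generator is the static helper on RuuviTagSensor that the public API wraps; the MAC address is a placeholder.

macs = ['AA:BB:CC:DD:EE:FF']                    # placeholder MAC whitelist
for mac, state in RuuviTagSensor._get_ruuvitag_datas(macs, search_duratio_sec=10):
    print(mac, state)                           # state is the decoded sensor-data dict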
def _prettify_dict(key):
"""Return a human readable format of a key (dict).
Example:
Description: My Wonderful Key
Uid: a54d6de1-922a-4998-ad34-cb838646daaa
Created_At: 2016-09-15T12:42:32
Metadata: owner=me;
Modified_At: 2016-09-15T12:42:32
Value: secret_key=my_secret_key;access_key=my_access_key
Name: aws
"""
assert isinstance(key, dict)
pretty_key = ''
for key, value in key.items():
if isinstance(value, dict):
pretty_value = ''
for k, v in value.items():
pretty_value += '{0}={1};'.format(k, v)
value = pretty_value
pretty_key += '{0:15}{1}\n'.format(key.title() + ':', value)
return pretty_key | Return a human readable format of a key (dict).
Example:
Description: My Wonderful Key
Uid: a54d6de1-922a-4998-ad34-cb838646daaa
Created_At: 2016-09-15T12:42:32
Metadata: owner=me;
Modified_At: 2016-09-15T12:42:32
Value: secret_key=my_secret_key;access_key=my_access_key
Name: aws | Below is the the instruction that describes the task:
### Input:
Return a human readable format of a key (dict).
Example:
Description: My Wonderful Key
Uid: a54d6de1-922a-4998-ad34-cb838646daaa
Created_At: 2016-09-15T12:42:32
Metadata: owner=me;
Modified_At: 2016-09-15T12:42:32
Value: secret_key=my_secret_key;access_key=my_access_key
Name: aws
### Response:
def _prettify_dict(key):
"""Return a human readable format of a key (dict).
Example:
Description: My Wonderful Key
Uid: a54d6de1-922a-4998-ad34-cb838646daaa
Created_At: 2016-09-15T12:42:32
Metadata: owner=me;
Modified_At: 2016-09-15T12:42:32
Value: secret_key=my_secret_key;access_key=my_access_key
Name: aws
"""
assert isinstance(key, dict)
pretty_key = ''
for key, value in key.items():
if isinstance(value, dict):
pretty_value = ''
for k, v in value.items():
pretty_value += '{0}={1};'.format(k, v)
value = pretty_value
pretty_key += '{0:15}{1}\n'.format(key.title() + ':', value)
return pretty_key |
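A small usage sketch with an input shaped like the docstring example.

key = {
    'name': 'aws',
    'uid': 'a54d6de1-922a-4998-ad34-cb838646daaa',
    'value': {'secret_key': 'my_secret_key', 'access_key': 'my_access_key'},
}
print(_prettify_dict(key))
# Name:          aws
# Uid:           a54d6de1-922a-4998-ad34-cb838646daaa
# Value:         secret_key=my_secret_key;access_key=my_access_key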
def _get_utxos(self, address, services, **modes):
"""
Using the service fallback engine, get utxos from remote service.
"""
return get_unspent_outputs(
self.crypto, address, services=services,
**modes
) | Using the service fallback engine, get utxos from remote service. | Below is the the instruction that describes the task:
### Input:
Using the service fallback engine, get utxos from remote service.
### Response:
def _get_utxos(self, address, services, **modes):
"""
Using the service fallback engine, get utxos from remote service.
"""
return get_unspent_outputs(
self.crypto, address, services=services,
**modes
) |
def remove_hairs_decorator(fn=None, hairs=HAIRS):
"""
Parametrized decorator wrapping the :func:`remove_hairs` function.
Args:
hairs (str, default HAIRS): List of characters which should be removed.
See :attr:`HAIRS` for details.
"""
def decorator_wrapper(fn):
@wraps(fn)
def decorator(*args, **kwargs):
out = fn(*args, **kwargs)
return remove_hairs(out, hairs)
return decorator
if fn:
return decorator_wrapper(fn)
return decorator_wrapper | Parametrized decorator wrapping the :func:`remove_hairs` function.
Args:
hairs (str, default HAIRS): List of characters which should be removed.
See :attr:`HAIRS` for details. | Below is the the instruction that describes the task:
### Input:
Parametrized decorator wrapping the :func:`remove_hairs` function.
Args:
hairs (str, default HAIRS): List of characters which should be removed.
See :attr:`HAIRS` for details.
### Response:
def remove_hairs_decorator(fn=None, hairs=HAIRS):
"""
Parametrized decorator wrapping the :func:`remove_hairs` function.
Args:
hairs (str, default HAIRS): List of characters which should be removed.
See :attr:`HAIRS` for details.
"""
def decorator_wrapper(fn):
@wraps(fn)
def decorator(*args, **kwargs):
out = fn(*args, **kwargs)
return remove_hairs(out, hairs)
return decorator
if fn:
return decorator_wrapper(fn)
return decorator_wrapper |
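A usage sketch; it assumes remove_hairs() strips the characters listed in HAIRS (quotes, surrounding whitespace and similar) from both ends of the returned string.

@remove_hairs_decorator
def get_title():
    return '"The Fellowship of the Ring" '
get_title()        # -> 'The Fellowship of the Ring', assuming quote/space are in HAIRS

@remove_hairs_decorator(hairs='<>')   # parametrized form with an explicit character set
def get_tag():
    return '<title>'
get_tag()          # -> 'title'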
def update(cls, **kwargs):
'''
If a record matching the instance id already exists in the database,
update it. If a record matching the instance id does not already exist,
create a new record.
'''
q = cls._get_instance(**{'id': kwargs['id']})
if q:
for k, v in kwargs.items():
setattr(q, k, v)
_action_and_commit(q, session.add)
else:
cls.get_or_create(**kwargs) | If a record matching the instance id already exists in the database,
update it. If a record matching the instance id does not already exist,
create a new record. | Below is the the instruction that describes the task:
### Input:
If a record matching the instance id already exists in the database,
update it. If a record matching the instance id does not already exist,
create a new record.
### Response:
def update(cls, **kwargs):
'''
If a record matching the instance id already exists in the database,
update it. If a record matching the instance id does not already exist,
create a new record.
'''
q = cls._get_instance(**{'id': kwargs['id']})
if q:
for k, v in kwargs.items():
setattr(q, k, v)
_action_and_commit(q, session.add)
else:
cls.get_or_create(**kwargs) |
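A hedged usage sketch, assuming a SQLAlchemy-style model class that mixes in this classmethod; the model and column names are hypothetical.

Track.update(id=7, title='Remastered', plays=120)   # updates row 7 if it exists,
                                                    # otherwise falls back to get_or_create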
def _serialize(self, skip_empty=True):
"""
Serialise this instance into JSON-style request data.
Filters out:
* attribute names starting with ``_``
* attribute values that are ``None`` (unless ``skip_empty`` is ``False``)
* attribute values that are empty lists/tuples/dicts (unless ``skip_empty`` is ``False``)
* attribute names in ``Meta.serialize_skip``
* constants set on the model class
Inner :py:class:`Model` instances get :py:meth:`._serialize` called on them.
Date and datetime objects are converted into ISO 8601 strings.
:param bool skip_empty: whether to skip attributes where the value is ``None``
:rtype: dict
"""
skip = set(getattr(self._meta, 'serialize_skip', []))
r = {}
for k, v in self.__dict__.items():
if k.startswith('_'):
continue
elif k in skip:
continue
elif v is None and skip_empty:
continue
elif isinstance(v, (dict, list, tuple, set)) and len(v) == 0 and skip_empty:
continue
else:
r[k] = self._serialize_value(v)
return r | Serialise this instance into JSON-style request data.
Filters out:
* attribute names starting with ``_``
* attribute values that are ``None`` (unless ``skip_empty`` is ``False``)
* attribute values that are empty lists/tuples/dicts (unless ``skip_empty`` is ``False``)
* attribute names in ``Meta.serialize_skip``
* constants set on the model class
Inner :py:class:`Model` instances get :py:meth:`._serialize` called on them.
Date and datetime objects are converted into ISO 8601 strings.
:param bool skip_empty: whether to skip attributes where the value is ``None``
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Serialise this instance into JSON-style request data.
Filters out:
* attribute names starting with ``_``
* attribute values that are ``None`` (unless ``skip_empty`` is ``False``)
* attribute values that are empty lists/tuples/dicts (unless ``skip_empty`` is ``False``)
* attribute names in ``Meta.serialize_skip``
* constants set on the model class
Inner :py:class:`Model` instances get :py:meth:`._serialize` called on them.
Date and datetime objects are converted into ISO 8601 strings.
:param bool skip_empty: whether to skip attributes where the value is ``None``
:rtype: dict
### Response:
def _serialize(self, skip_empty=True):
"""
Serialise this instance into JSON-style request data.
Filters out:
* attribute names starting with ``_``
* attribute values that are ``None`` (unless ``skip_empty`` is ``False``)
* attribute values that are empty lists/tuples/dicts (unless ``skip_empty`` is ``False``)
* attribute names in ``Meta.serialize_skip``
* constants set on the model class
Inner :py:class:`Model` instances get :py:meth:`._serialize` called on them.
Date and datetime objects are converted into ISO 8601 strings.
:param bool skip_empty: whether to skip attributes where the value is ``None``
:rtype: dict
"""
skip = set(getattr(self._meta, 'serialize_skip', []))
r = {}
for k, v in self.__dict__.items():
if k.startswith('_'):
continue
elif k in skip:
continue
elif v is None and skip_empty:
continue
elif isinstance(v, (dict, list, tuple, set)) and len(v) == 0 and skip_empty:
continue
else:
r[k] = self._serialize_value(v)
return r |
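A hedged usage sketch; Contact is a hypothetical Model subclass used only to illustrate the filtering rules.

contact = Contact(name='Ada', email=None, tags=[], _cache={})   # hypothetical subclass
contact._serialize()
# -> {'name': 'Ada'}   (None values, empty containers and _-prefixed attributes are dropped)
contact._serialize(skip_empty=False)
# -> {'name': 'Ada', 'email': None, 'tags': []}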
def _validate_property_names(class_name, properties):
"""Validate that properties do not have names that may cause problems in the GraphQL schema."""
for property_name in properties:
if not property_name or property_name.startswith(ILLEGAL_PROPERTY_NAME_PREFIXES):
raise IllegalSchemaStateError(u'Class "{}" has a property with an illegal name: '
u'{}'.format(class_name, property_name)) | Validate that properties do not have names that may cause problems in the GraphQL schema. | Below is the the instruction that describes the task:
### Input:
Validate that properties do not have names that may cause problems in the GraphQL schema.
### Response:
def _validate_property_names(class_name, properties):
"""Validate that properties do not have names that may cause problems in the GraphQL schema."""
for property_name in properties:
if not property_name or property_name.startswith(ILLEGAL_PROPERTY_NAME_PREFIXES):
raise IllegalSchemaStateError(u'Class "{}" has a property with an illegal name: '
u'{}'.format(class_name, property_name)) |
def _handle_result_line(self, split_line):
"""
Parses the data line and adds the results to the dictionary.
:param split_line: a split data line to parse
:returns: the current result id and the dictionary of values obtained from the results
"""
values = {}
result_id = ''
if self._ar_keyword:
# Create a new entry in the values dictionary and store the results
values[self._ar_keyword] = {}
for idx, val in enumerate(split_line):
if self._columns[idx].lower() == 'sampleid':
result_id = val
else:
# columns with date in its name store only the date and
# columns with time in its name store date and time.
if val and ('date' in self._columns[idx].lower()
or 'time' in self._columns[idx].lower()):
val = self._date_to_bika_date(val, 'date' in self._columns[idx].lower())
values[self._ar_keyword][self._columns[idx]] = val
values[self._ar_keyword]['DefaultResult'] = 'FinalResult'
return result_id, values | Parses the data line and adds the results to the dictionary.
:param split_line: a split data line to parse
:returns: the current result id and the dictionary of values obtained from the results | Below is the the instruction that describes the task:
### Input:
Parses the data line and adds the results to the dictionary.
:param split_line: a split data line to parse
:returns: the current result id and the dictionary of values obtained from the results
### Response:
def _handle_result_line(self, split_line):
"""
Parses the data line and adds the results to the dictionary.
:param split_line: a split data line to parse
:returns: the current result id and the dictionary of values obtained from the results
"""
values = {}
result_id = ''
if self._ar_keyword:
# Create a new entry in the values dictionary and store the results
values[self._ar_keyword] = {}
for idx, val in enumerate(split_line):
if self._columns[idx].lower() == 'sampleid':
result_id = val
else:
# columns with date in its name store only the date and
# columns with time in its name store date and time.
if val and ('date' in self._columns[idx].lower()
or 'time' in self._columns[idx].lower()):
val = self._date_to_bika_date(val, 'date' in self._columns[idx].lower())
values[self._ar_keyword][self._columns[idx]] = val
values[self._ar_keyword]['DefaultResult'] = 'FinalResult'
return result_id, values |
def filter_db_names(paths: List[str]) -> List[str]:
"""Returns a filtered list of `paths`, where every name matches our format.
Args:
paths: A list of file names.
"""
return [
db_path
for db_path in paths
if VERSION_RE.match(os.path.basename(db_path))
] | Returns a filtered list of `paths`, where every name matches our format.
Args:
paths: A list of file names. | Below is the the instruction that describes the task:
### Input:
Returns a filtered list of `paths`, where every name matches our format.
Args:
paths: A list of file names.
### Response:
def filter_db_names(paths: List[str]) -> List[str]:
"""Returns a filtered list of `paths`, where every name matches our format.
Args:
paths: A list of file names.
"""
return [
db_path
for db_path in paths
if VERSION_RE.match(os.path.basename(db_path))
] |
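A usage sketch; the real VERSION_RE is module state not shown here, so the pattern below is only an assumed stand-in for illustration.

import re
VERSION_RE = re.compile(r'^v\d+\.db$')   # assumed stand-in for the module's pattern

filter_db_names(['/data/v1.db', '/data/v2.db', '/data/notes.txt'])
# -> ['/data/v1.db', '/data/v2.db']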
def copy(self, key=None):
"""
Return a new collection with the same items as this one.
If *key* is specified, create the new collection with the given
Redis key.
"""
other = self.__class__(
self.__iter__(),
self.maxlen,
redis=self.redis,
key=key,
writeback=self.writeback,
)
return other | Return a new collection with the same items as this one.
If *key* is specified, create the new collection with the given
Redis key. | Below is the the instruction that describes the task:
### Input:
Return a new collection with the same items as this one.
If *key* is specified, create the new collection with the given
Redis key.
### Response:
def copy(self, key=None):
"""
Return a new collection with the same items as this one.
If *key* is specified, create the new collection with the given
Redis key.
"""
other = self.__class__(
self.__iter__(),
self.maxlen,
redis=self.redis,
key=key,
writeback=self.writeback,
)
return other |
def intersect_boxes(box1, box2):
"""Takes two pyPdf boxes (such as page.mediaBox) and returns the pyPdf
box which is their intersection."""
if not box1 and not box2: return None
if not box1: return box2
if not box2: return box1
intersect = RectangleObject([0, 0, 0, 0]) # Note [llx,lly,urx,ury] == [l,b,r,t]
intersect.upperRight = (min(box1.upperRight[0], box2.upperRight[0]),
min(box1.upperRight[1], box2.upperRight[1]))
intersect.lowerLeft = (max(box1.lowerLeft[0], box2.lowerLeft[0]),
max(box1.lowerLeft[1], box2.lowerLeft[1]))
return intersect | Takes two pyPdf boxes (such as page.mediaBox) and returns the pyPdf
box which is their intersection. | Below is the the instruction that describes the task:
### Input:
Takes two pyPdf boxes (such as page.mediaBox) and returns the pyPdf
box which is their intersection.
### Response:
def intersect_boxes(box1, box2):
"""Takes two pyPdf boxes (such as page.mediaBox) and returns the pyPdf
box which is their intersection."""
if not box1 and not box2: return None
if not box1: return box2
if not box2: return box1
intersect = RectangleObject([0, 0, 0, 0]) # Note [llx,lly,urx,ury] == [l,b,r,t]
intersect.upperRight = (min(box1.upperRight[0], box2.upperRight[0]),
min(box1.upperRight[1], box2.upperRight[1]))
intersect.lowerLeft = (max(box1.lowerLeft[0], box2.lowerLeft[0]),
max(box1.lowerLeft[1], box2.lowerLeft[1]))
return intersect |
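A usage sketch; the RectangleObject import path assumes a PyPDF2-style package and may differ by version.

from PyPDF2.generic import RectangleObject   # import path is an assumption

page_box = RectangleObject([0, 0, 612, 792])     # US Letter media box
crop_box = RectangleObject([50, 50, 500, 700])
both = intersect_boxes(page_box, crop_box)
print(both.lowerLeft, both.upperRight)           # (50, 50) (500, 700)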
def dict(self):
"""The dict representation of this sentence."""
return {
'raw': self.raw,
'start_index': self.start_index,
'end_index': self.end_index,
'stripped': self.stripped,
'noun_phrases': self.noun_phrases,
'polarity': self.polarity,
'subjectivity': self.subjectivity,
} | The dict representation of this sentence. | Below is the the instruction that describes the task:
### Input:
The dict representation of this sentence.
### Response:
def dict(self):
"""The dict representation of this sentence."""
return {
'raw': self.raw,
'start_index': self.start_index,
'end_index': self.end_index,
'stripped': self.stripped,
'noun_phrases': self.noun_phrases,
'polarity': self.polarity,
'subjectivity': self.subjectivity,
} |
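A usage sketch, assuming this is the Sentence.dict property of a TextBlob-style library.

from textblob import TextBlob

blob = TextBlob("Python is a great language. Parsing text is easy.")
blob.sentences[0].dict
# -> {'raw': 'Python is a great language.', 'polarity': ..., 'noun_phrases': [...], ...}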
def parse_midi_file_header(self, fp):
"""Read the header of a MIDI file and return a tuple containing the
format type, number of tracks and parsed time division information."""
# Check header
try:
if fp.read(4) != 'MThd':
raise HeaderError('Not a valid MIDI file header. Byte %d.'
% self.bytes_read)
self.bytes_read += 4
except:
raise IOError("Couldn't read from file.")
# Parse chunk size
try:
chunk_size = self.bytes_to_int(fp.read(4))
self.bytes_read += 4
except:
raise IOError("Couldn't read chunk size from file. Byte %d."
% self.bytes_read)
# Expect chunk size to be at least 6
if chunk_size < 6:
return False
try:
format_type = self.bytes_to_int(fp.read(2))
self.bytes_read += 2
if format_type not in [0, 1, 2]:
raise FormatError('%d is not a valid MIDI format.'
% format_type)
except:
raise IOError("Couldn't read format type from file.")
try:
number_of_tracks = self.bytes_to_int(fp.read(2))
time_division = self.parse_time_division(fp.read(2))
self.bytes_read += 4
except:
raise IOError("Couldn't read number of tracks "
"and/or time division from tracks.")
chunk_size -= 6
if chunk_size % 2 == 1:
raise FormatError("Won't parse this.")
fp.read(chunk_size / 2)
self.bytes_read += chunk_size / 2
return (format_type, number_of_tracks, time_division) | Read the header of a MIDI file and return a tuple containing the
format type, number of tracks and parsed time division information. | Below is the the instruction that describes the task:
### Input:
Read the header of a MIDI file and return a tuple containing the
format type, number of tracks and parsed time division information.
### Response:
def parse_midi_file_header(self, fp):
"""Read the header of a MIDI file and return a tuple containing the
format type, number of tracks and parsed time division information."""
# Check header
try:
if fp.read(4) != 'MThd':
raise HeaderError('Not a valid MIDI file header. Byte %d.'
% self.bytes_read)
self.bytes_read += 4
except:
raise IOError("Couldn't read from file.")
# Parse chunk size
try:
chunk_size = self.bytes_to_int(fp.read(4))
self.bytes_read += 4
except:
raise IOError("Couldn't read chunk size from file. Byte %d."
% self.bytes_read)
# Expect chunk size to be at least 6
if chunk_size < 6:
return False
try:
format_type = self.bytes_to_int(fp.read(2))
self.bytes_read += 2
if format_type not in [0, 1, 2]:
raise FormatError('%d is not a valid MIDI format.'
% format_type)
except:
raise IOError("Couldn't read format type from file.")
try:
number_of_tracks = self.bytes_to_int(fp.read(2))
time_division = self.parse_time_division(fp.read(2))
self.bytes_read += 4
except:
raise IOError("Couldn't read number of tracks "
"and/or time division from tracks.")
chunk_size -= 6
if chunk_size % 2 == 1:
raise FormatError("Won't parse this.")
fp.read(chunk_size / 2)
self.bytes_read += chunk_size / 2
return (format_type, number_of_tracks, time_division) |
def get_ordering(self, reverseTime=False):
'''
This method provides the tuple for ordering of querysets. However, this will only
work if the annotations generated by the get_annotations() method above have been
added to the queryset. Otherwise, the use of this ordering tuple will fail because
the appropriate column names will not exist to sort with.
'''
# Reverse ordering can be optionally specified in the view class definition.
reverseTime = getattr(self,'reverse_time_ordering',reverseTime)
timeParameter = '-startTime' if reverseTime is True else 'startTime'
return ('nullParam', 'paramOne', 'paramTwo', timeParameter) | This method provides the tuple for ordering of querysets. However, this will only
work if the annotations generated by the get_annotations() method above have been
added to the queryset. Otherwise, the use of this ordering tuple will fail because
the appropriate column names will not exist to sort with. | Below is the the instruction that describes the task:
### Input:
This method provides the tuple for ordering of querysets. However, this will only
work if the annotations generated by the get_annotations() method above have been
added to the queryset. Otherwise, the use of this ordering tuple will fail because
the appropriate column names will not exist to sort with.
### Response:
def get_ordering(self, reverseTime=False):
'''
This method provides the tuple for ordering of querysets. However, this will only
work if the annotations generated by the get_annotations() method above have been
added to the queryset. Otherwise, the use of this ordering tuple will fail because
the appropriate column names will not exist to sort with.
'''
# Reverse ordering can be optionally specified in the view class definition.
reverseTime = getattr(self,'reverse_time_ordering',reverseTime)
timeParameter = '-startTime' if reverseTime is True else 'startTime'
return ('nullParam', 'paramOne', 'paramTwo', timeParameter) |
def extract_war_version(war):
'''
Extract the version from the war file name. There does not seem to be a
standard for encoding the version into the `war file name`_
.. _`war file name`: https://tomcat.apache.org/tomcat-6.0-doc/deployer-howto.html
Examples:
.. code-block:: bash
/path/salt-2015.8.6.war -> 2015.8.6
/path/V6R2013xD5.war -> None
'''
basename = os.path.basename(war)
war_package = os.path.splitext(basename)[0] # remove '.war'
version = re.findall("-([\\d.-]+)$", war_package) # try semver
return version[0] if version and len(version) == 1 else None | Extract the version from the war file name. There does not seem to be a
standard for encoding the version into the `war file name`_
.. _`war file name`: https://tomcat.apache.org/tomcat-6.0-doc/deployer-howto.html
Examples:
.. code-block:: bash
/path/salt-2015.8.6.war -> 2015.8.6
/path/V6R2013xD5.war -> None | Below is the the instruction that describes the task:
### Input:
Extract the version from the war file name. There does not seem to be a
standard for encoding the version into the `war file name`_
.. _`war file name`: https://tomcat.apache.org/tomcat-6.0-doc/deployer-howto.html
Examples:
.. code-block:: bash
/path/salt-2015.8.6.war -> 2015.8.6
/path/V6R2013xD5.war -> None
### Response:
def extract_war_version(war):
'''
Extract the version from the war file name. There does not seem to be a
standard for encoding the version into the `war file name`_
.. _`war file name`: https://tomcat.apache.org/tomcat-6.0-doc/deployer-howto.html
Examples:
.. code-block:: bash
/path/salt-2015.8.6.war -> 2015.8.6
/path/V6R2013xD5.war -> None
'''
basename = os.path.basename(war)
war_package = os.path.splitext(basename)[0] # remove '.war'
version = re.findall("-([\\d.-]+)$", war_package) # try semver
return version[0] if version and len(version) == 1 else None |
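Usage matching the docstring examples:

extract_war_version('/path/salt-2015.8.6.war')   # -> '2015.8.6'
extract_war_version('/path/V6R2013xD5.war')      # -> None (no trailing hyphenated version)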
def format_value(value):
"""
Integers are numeric values that do not include a decimal and are followed by a trailing i when inserted
(e.g. 1i, 345i, 2015i, -10i). Note that all values must have a trailing i.
If they do not they will be written as floats.
Floats are numeric values that are not followed by a trailing i. (e.g. 1, 1.0, -3.14, 6.0e5, 10).
Boolean values indicate true or false. Valid boolean strings for line protocol are
(t, T, true, True, TRUE, f, F, false, False and FALSE).
Strings are text values. All string field values must be surrounded in double-quotes ".
If the string contains a double-quote, the double-quote must be escaped with a backslash, e.g. \".
"""
if isinstance(value, basestring):
        value = value.replace('"', '\\"')  # escape embedded double quotes with a backslash
value = u'"{0}"'.format(value)
elif isinstance(value, bool):
value = str(value)
elif isinstance(value, int):
value = "{0}i".format(value)
elif isinstance(value, float):
value = str(value)
return value | Integers are numeric values that do not include a decimal and are followed by a trailing i when inserted
(e.g. 1i, 345i, 2015i, -10i). Note that all values must have a trailing i.
If they do not they will be written as floats.
Floats are numeric values that are not followed by a trailing i. (e.g. 1, 1.0, -3.14, 6.0e5, 10).
Boolean values indicate true or false. Valid boolean strings for line protocol are
(t, T, true, True, TRUE, f, F, false, False and FALSE).
Strings are text values. All string field values must be surrounded in double-quotes ".
If the string contains a double-quote, the double-quote must be escaped with a backslash, e.g. \". | Below is the the instruction that describes the task:
### Input:
Integers are numeric values that do not include a decimal and are followed by a trailing i when inserted
(e.g. 1i, 345i, 2015i, -10i). Note that all values must have a trailing i.
If they do not they will be written as floats.
Floats are numeric values that are not followed by a trailing i. (e.g. 1, 1.0, -3.14, 6.0e5, 10).
Boolean values indicate true or false. Valid boolean strings for line protocol are
(t, T, true, True, TRUE, f, F, false, False and FALSE).
Strings are text values. All string field values must be surrounded in double-quotes ".
If the string contains a double-quote, the double-quote must be escaped with a backslash, e.g. \".
### Response:
def format_value(value):
"""
Integers are numeric values that do not include a decimal and are followed by a trailing i when inserted
(e.g. 1i, 345i, 2015i, -10i). Note that all values must have a trailing i.
If they do not they will be written as floats.
Floats are numeric values that are not followed by a trailing i. (e.g. 1, 1.0, -3.14, 6.0e5, 10).
Boolean values indicate true or false. Valid boolean strings for line protocol are
(t, T, true, True, TRUE, f, F, false, False and FALSE).
Strings are text values. All string field values must be surrounded in double-quotes ".
If the string contains a double-quote, the double-quote must be escaped with a backslash, e.g. \".
"""
if isinstance(value, basestring):
        value = value.replace('"', '\\"')  # escape embedded double quotes with a backslash
value = u'"{0}"'.format(value)
elif isinstance(value, bool):
value = str(value)
elif isinstance(value, int):
value = "{0}i".format(value)
elif isinstance(value, float):
value = str(value)
return value |
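A few illustrative calls (note the isinstance chain above targets Python 2, where basestring exists):

format_value(42)         # -> '42i'    integers get the trailing i
format_value(3.14)       # -> '3.14'
format_value(True)       # -> 'True'   bool is checked before int, so no trailing i
format_value('us-west')  # -> '"us-west"'   strings are wrapped in double quotes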
def get_bucket(self, name):
"Find out which bucket a given tag name is in"
for bucket in self:
for k,v in self[bucket].items():
if k == name:
return bucket | Find out which bucket a given tag name is in | Below is the the instruction that describes the task:
### Input:
Find out which bucket a given tag name is in
### Response:
def get_bucket(self, name):
"Find out which bucket a given tag name is in"
for bucket in self:
for k,v in self[bucket].items():
if k == name:
return bucket |
def Query(r, what, fields, qfilter=None):
"""
Retrieves information about resources.
@type what: string
@param what: Resource name, one of L{constants.QR_VIA_RAPI}
@type fields: list of string
@param fields: Requested fields
@type qfilter: None or list
@param qfilter: Query filter
@rtype: string
@return: job id
"""
body = {
"fields": fields,
}
if qfilter is not None:
body["qfilter"] = body["filter"] = qfilter
return r.request("put", "/2/query/%s" % what, content=body) | Retrieves information about resources.
@type what: string
@param what: Resource name, one of L{constants.QR_VIA_RAPI}
@type fields: list of string
@param fields: Requested fields
@type qfilter: None or list
@param qfilter: Query filter
@rtype: string
@return: job id | Below is the the instruction that describes the task:
### Input:
Retrieves information about resources.
@type what: string
@param what: Resource name, one of L{constants.QR_VIA_RAPI}
@type fields: list of string
@param fields: Requested fields
@type qfilter: None or list
@param qfilter: Query filter
@rtype: string
@return: job id
### Response:
def Query(r, what, fields, qfilter=None):
"""
Retrieves information about resources.
@type what: string
@param what: Resource name, one of L{constants.QR_VIA_RAPI}
@type fields: list of string
@param fields: Requested fields
@type qfilter: None or list
@param qfilter: Query filter
@rtype: string
@return: job id
"""
body = {
"fields": fields,
}
if qfilter is not None:
body["qfilter"] = body["filter"] = qfilter
return r.request("put", "/2/query/%s" % what, content=body) |
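A hedged usage sketch; r is assumed to be an already-configured Ganeti RAPI client object, and the filter uses Ganeti's list-style query syntax.

job_id = Query(r, "instance", ["name", "oper_state"],
               qfilter=["=", "name", "web1.example.com"])
# job_id identifies the query job; fetch its result to obtain the requested fields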
def add_edge(self, info):
""" Handles adding an Edge to the graph.
"""
if not info.initialized:
return
graph = self._request_graph(info.ui.control)
if graph is None:
return
n_nodes = len(graph.nodes)
IDs = [v.ID for v in graph.nodes]
if n_nodes == 0:
tail_node = Node(ID=make_unique_name("node", IDs))
head_name = make_unique_name("node", IDs + [tail_node.ID])
head_node = Node(ID=head_name)
elif n_nodes == 1:
tail_node = graph.nodes[0]
head_node = Node(ID=make_unique_name("node", IDs))
else:
tail_node = graph.nodes[0]
head_node = graph.nodes[1]
edge = Edge(tail_node, head_node, _nodes=graph.nodes)
retval = edge.edit_traits(parent=info.ui.control, kind="livemodal")
if retval.result:
graph.edges.append(edge) | Handles adding an Edge to the graph. | Below is the the instruction that describes the task:
### Input:
Handles adding an Edge to the graph.
### Response:
def add_edge(self, info):
""" Handles adding an Edge to the graph.
"""
if not info.initialized:
return
graph = self._request_graph(info.ui.control)
if graph is None:
return
n_nodes = len(graph.nodes)
IDs = [v.ID for v in graph.nodes]
if n_nodes == 0:
tail_node = Node(ID=make_unique_name("node", IDs))
head_name = make_unique_name("node", IDs + [tail_node.ID])
head_node = Node(ID=head_name)
elif n_nodes == 1:
tail_node = graph.nodes[0]
head_node = Node(ID=make_unique_name("node", IDs))
else:
tail_node = graph.nodes[0]
head_node = graph.nodes[1]
edge = Edge(tail_node, head_node, _nodes=graph.nodes)
retval = edge.edit_traits(parent=info.ui.control, kind="livemodal")
if retval.result:
graph.edges.append(edge) |
def _subspan(self, s, span, nextspan):
"""Recursively subdivide spans based on a series of rules."""
text = s[span[0]:span[1]]
lowertext = text.lower()
# Skip if only a single character or a split sequence
if span[1] - span[0] < 2 or text in self.SPLIT or text in self.SPLIT_END_WORD or text in self.SPLIT_START_WORD or lowertext in self.NO_SPLIT:
return [span]
# Skip if it looks like URL
if text.startswith('http://') or text.startswith('ftp://') or text.startswith('www.'):
return [span]
# Split full stop at end of final token (allow certain characters to follow) unless ellipsis
if self.split_last_stop and nextspan is None and text not in self.NO_SPLIT_STOP and not text[-3:] == '...':
if text[-1] == '.':
return self._split_span(span, -1)
ind = text.rfind('.')
if ind > -1 and all(t in '\'‘’"“”)]}' for t in text[ind + 1:]):
return self._split_span(span, ind, 1)
# Split off certain sequences at the end of a word
for spl in self.SPLIT_END_WORD:
if text.endswith(spl) and len(text) > len(spl) and text[-len(spl) - 1].isalpha():
return self._split_span(span, -len(spl), 0)
# Split off certain sequences at the start of a word
for spl in self.SPLIT_START_WORD:
            if text.startswith(spl) and len(text) > len(spl) and text[len(spl)].isalpha():
return self._split_span(span, len(spl), 0)
# Split around certain sequences
for spl in self.SPLIT:
ind = text.find(spl)
if ind > -1:
return self._split_span(span, ind, len(spl))
# Split around certain sequences unless followed by a digit
for spl in self.SPLIT_NO_DIGIT:
ind = text.rfind(spl)
if ind > -1 and (len(text) <= ind + len(spl) or not text[ind + len(spl)].isdigit()):
return self._split_span(span, ind, len(spl))
# Characters to split around, but with exceptions
for i, char in enumerate(text):
if char == '-':
before = lowertext[:i]
after = lowertext[i+1:]
# By default we split on hyphens
split = True
if before in self.NO_SPLIT_PREFIX or after in self.NO_SPLIT_SUFFIX:
split = False # Don't split if prefix or suffix in list
elif not before.strip(self.NO_SPLIT_CHARS) or not after.strip(self.NO_SPLIT_CHARS):
split = False # Don't split if prefix or suffix entirely consist of certain characters
if split:
return self._split_span(span, i, 1)
# Split contraction words
for contraction in self.CONTRACTIONS:
if lowertext == contraction[0]:
return self._split_span(span, contraction[1])
return [span] | Recursively subdivide spans based on a series of rules. | Below is the the instruction that describes the task:
### Input:
Recursively subdivide spans based on a series of rules.
### Response:
def _subspan(self, s, span, nextspan):
"""Recursively subdivide spans based on a series of rules."""
text = s[span[0]:span[1]]
lowertext = text.lower()
# Skip if only a single character or a split sequence
if span[1] - span[0] < 2 or text in self.SPLIT or text in self.SPLIT_END_WORD or text in self.SPLIT_START_WORD or lowertext in self.NO_SPLIT:
return [span]
# Skip if it looks like URL
if text.startswith('http://') or text.startswith('ftp://') or text.startswith('www.'):
return [span]
# Split full stop at end of final token (allow certain characters to follow) unless ellipsis
if self.split_last_stop and nextspan is None and text not in self.NO_SPLIT_STOP and not text[-3:] == '...':
if text[-1] == '.':
return self._split_span(span, -1)
ind = text.rfind('.')
if ind > -1 and all(t in '\'‘’"“”)]}' for t in text[ind + 1:]):
return self._split_span(span, ind, 1)
# Split off certain sequences at the end of a word
for spl in self.SPLIT_END_WORD:
if text.endswith(spl) and len(text) > len(spl) and text[-len(spl) - 1].isalpha():
return self._split_span(span, -len(spl), 0)
# Split off certain sequences at the start of a word
for spl in self.SPLIT_START_WORD:
            if text.startswith(spl) and len(text) > len(spl) and text[len(spl)].isalpha():
return self._split_span(span, len(spl), 0)
# Split around certain sequences
for spl in self.SPLIT:
ind = text.find(spl)
if ind > -1:
return self._split_span(span, ind, len(spl))
# Split around certain sequences unless followed by a digit
for spl in self.SPLIT_NO_DIGIT:
ind = text.rfind(spl)
if ind > -1 and (len(text) <= ind + len(spl) or not text[ind + len(spl)].isdigit()):
return self._split_span(span, ind, len(spl))
# Characters to split around, but with exceptions
for i, char in enumerate(text):
if char == '-':
before = lowertext[:i]
after = lowertext[i+1:]
# By default we split on hyphens
split = True
if before in self.NO_SPLIT_PREFIX or after in self.NO_SPLIT_SUFFIX:
split = False # Don't split if prefix or suffix in list
elif not before.strip(self.NO_SPLIT_CHARS) or not after.strip(self.NO_SPLIT_CHARS):
split = False # Don't split if prefix or suffix entirely consist of certain characters
if split:
return self._split_span(span, i, 1)
# Split contraction words
for contraction in self.CONTRACTIONS:
if lowertext == contraction[0]:
return self._split_span(span, contraction[1])
return [span] |
def on_created(self, event):
'''Fired when something's been created'''
if self.trigger != "create":
return
action_input = ActionInput(event, "", self.name)
flows.Global.MESSAGE_DISPATCHER.send_message(action_input) | Fired when something's been created | Below is the the instruction that describes the task:
### Input:
Fired when something's been created
### Response:
def on_created(self, event):
'''Fired when something's been created'''
if self.trigger != "create":
return
action_input = ActionInput(event, "", self.name)
flows.Global.MESSAGE_DISPATCHER.send_message(action_input) |
def derivative_via_diff(cls, ops, kwargs):
"""Implementation of the :meth:`QuantumDerivative.create` interface via the
use of :meth:`QuantumExpression._diff`.
Thus, by having :meth:`.QuantumExpression.diff` delegate to
:meth:`.QuantumDerivative.create`, instead of
:meth:`.QuantumExpression._diff` directly, we get automatic caching of
derivatives
"""
assert len(ops) == 1
op = ops[0]
derivs = kwargs['derivs']
vals = kwargs['vals']
# both `derivs` and `vals` are guaranteed to be tuples, via the conversion
# that's happening in `QuantumDerivative.create`
for (sym, n) in derivs:
if sym.free_symbols.issubset(op.free_symbols):
for k in range(n):
op = op._diff(sym)
else:
return op.__class__._zero
if vals is not None:
try:
# for QuantumDerivative instance
return op.evaluate_at(vals)
except AttributeError:
# for explicit Expression
return op.substitute(vals)
else:
return op | Implementation of the :meth:`QuantumDerivative.create` interface via the
use of :meth:`QuantumExpression._diff`.
Thus, by having :meth:`.QuantumExpression.diff` delegate to
:meth:`.QuantumDerivative.create`, instead of
:meth:`.QuantumExpression._diff` directly, we get automatic caching of
derivatives | Below is the the instruction that describes the task:
### Input:
Implementation of the :meth:`QuantumDerivative.create` interface via the
use of :meth:`QuantumExpression._diff`.
Thus, by having :meth:`.QuantumExpression.diff` delegate to
:meth:`.QuantumDerivative.create`, instead of
:meth:`.QuantumExpression._diff` directly, we get automatic caching of
derivatives
### Response:
def derivative_via_diff(cls, ops, kwargs):
"""Implementation of the :meth:`QuantumDerivative.create` interface via the
use of :meth:`QuantumExpression._diff`.
Thus, by having :meth:`.QuantumExpression.diff` delegate to
:meth:`.QuantumDerivative.create`, instead of
:meth:`.QuantumExpression._diff` directly, we get automatic caching of
derivatives
"""
assert len(ops) == 1
op = ops[0]
derivs = kwargs['derivs']
vals = kwargs['vals']
# both `derivs` and `vals` are guaranteed to be tuples, via the conversion
# that's happening in `QuantumDerivative.create`
for (sym, n) in derivs:
if sym.free_symbols.issubset(op.free_symbols):
for k in range(n):
op = op._diff(sym)
else:
return op.__class__._zero
if vals is not None:
try:
# for QuantumDerivative instance
return op.evaluate_at(vals)
except AttributeError:
# for explicit Expression
return op.substitute(vals)
else:
return op |
def effective_balance(self, address: Address, block_identifier: BlockSpecification) -> Balance:
""" The user's balance with planned withdrawals deducted. """
fn = getattr(self.proxy.contract.functions, 'effectiveBalance')
balance = fn(address).call(block_identifier=block_identifier)
if balance == b'':
raise RuntimeError(f"Call to 'effectiveBalance' returned nothing")
return balance | The user's balance with planned withdrawals deducted. | Below is the the instruction that describes the task:
### Input:
The user's balance with planned withdrawals deducted.
### Response:
def effective_balance(self, address: Address, block_identifier: BlockSpecification) -> Balance:
""" The user's balance with planned withdrawals deducted. """
fn = getattr(self.proxy.contract.functions, 'effectiveBalance')
balance = fn(address).call(block_identifier=block_identifier)
if balance == b'':
raise RuntimeError(f"Call to 'effectiveBalance' returned nothing")
return balance |
def login(self, role, jwt, use_token=True, mount_point=DEFAULT_MOUNT_POINT):
"""Login to retrieve a Vault token via the GCP auth method.
This endpoint takes a signed JSON Web Token (JWT) and a role name for some entity. It verifies the JWT
signature with Google Cloud to authenticate that entity and then authorizes the entity for the given role.
Supported methods:
POST: /auth/{mount_point}/login. Produces: 200 application/json
:param role: The name of the role against which the login is being attempted.
:type role: str | unicode
:param jwt: A signed JSON web token
:type jwt: str | unicode
:param use_token: if True, uses the token in the response received from the auth request to set the "token"
            attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client attribute.
:type use_token: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
params = {
'role': role,
'jwt': jwt,
}
api_path = '/v1/auth/{mount_point}/login'.format(mount_point=mount_point)
response = self._adapter.login(
url=api_path,
use_token=use_token,
json=params,
)
return response | Login to retrieve a Vault token via the GCP auth method.
This endpoint takes a signed JSON Web Token (JWT) and a role name for some entity. It verifies the JWT
signature with Google Cloud to authenticate that entity and then authorizes the entity for the given role.
Supported methods:
POST: /auth/{mount_point}/login. Produces: 200 application/json
:param role: The name of the role against which the login is being attempted.
:type role: str | unicode
:param jwt: A signed JSON web token
:type jwt: str | unicode
:param use_token: if True, uses the token in the response received from the auth request to set the "token"
        attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client attribute.
:type use_token: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Login to retrieve a Vault token via the GCP auth method.
This endpoint takes a signed JSON Web Token (JWT) and a role name for some entity. It verifies the JWT
signature with Google Cloud to authenticate that entity and then authorizes the entity for the given role.
Supported methods:
POST: /auth/{mount_point}/login. Produces: 200 application/json
:param role: The name of the role against which the login is being attempted.
:type role: str | unicode
:param jwt: A signed JSON web token
:type jwt: str | unicode
:param use_token: if True, uses the token in the response received from the auth request to set the "token"
        attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client attribute.
:type use_token: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
### Response:
def login(self, role, jwt, use_token=True, mount_point=DEFAULT_MOUNT_POINT):
"""Login to retrieve a Vault token via the GCP auth method.
This endpoint takes a signed JSON Web Token (JWT) and a role name for some entity. It verifies the JWT
signature with Google Cloud to authenticate that entity and then authorizes the entity for the given role.
Supported methods:
POST: /auth/{mount_point}/login. Produces: 200 application/json
:param role: The name of the role against which the login is being attempted.
:type role: str | unicode
:param jwt: A signed JSON web token
:type jwt: str | unicode
:param use_token: if True, uses the token in the response received from the auth request to set the "token"
            attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client attribute.
:type use_token: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
params = {
'role': role,
'jwt': jwt,
}
api_path = '/v1/auth/{mount_point}/login'.format(mount_point=mount_point)
response = self._adapter.login(
url=api_path,
use_token=use_token,
json=params,
)
return response |
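A hedged usage sketch, assuming a recent hvac client where this method is reachable as client.auth.gcp.login; the URL, role and JWT are placeholders.

import hvac

client = hvac.Client(url='https://vault.example.com:8200')                   # placeholder URL
login_response = client.auth.gcp.login(role='my-gcp-role', jwt=signed_jwt)   # signed_jwt: placeholder
client.is_authenticated()                                                    # True once the token is applied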
def WriteSignedBinary(binary_urn,
binary_content,
private_key,
public_key,
chunk_size = 1024,
token = None):
"""Signs a binary and saves it to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: URN that should serve as a unique identifier for the binary.
binary_content: Contents of the binary, as raw bytes.
private_key: Key that should be used for signing the binary contents.
public_key: Key that should be used to verify the signature generated using
the private key.
chunk_size: Size, in bytes, of the individual blobs that the binary contents
will be split to before saving to the datastore.
token: ACL token to use with the legacy (non-relational) datastore.
"""
if _ShouldUseLegacyDatastore():
collects.GRRSignedBlob.NewFromContent(
binary_content,
binary_urn,
chunk_size=chunk_size,
token=token,
private_key=private_key,
public_key=public_key)
if data_store.RelationalDBEnabled():
blob_references = rdf_objects.BlobReferences()
for chunk_offset in range(0, len(binary_content), chunk_size):
chunk = binary_content[chunk_offset:chunk_offset + chunk_size]
blob_rdf = rdf_crypto.SignedBlob()
blob_rdf.Sign(chunk, private_key, verify_key=public_key)
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(
blob_rdf.SerializeToString())
blob_references.items.Append(
rdf_objects.BlobReference(
offset=chunk_offset, size=len(chunk), blob_id=blob_id))
data_store.REL_DB.WriteSignedBinaryReferences(
_SignedBinaryIDFromURN(binary_urn), blob_references) | Signs a binary and saves it to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: URN that should serve as a unique identifier for the binary.
binary_content: Contents of the binary, as raw bytes.
private_key: Key that should be used for signing the binary contents.
public_key: Key that should be used to verify the signature generated using
the private key.
chunk_size: Size, in bytes, of the individual blobs that the binary contents
will be split to before saving to the datastore.
token: ACL token to use with the legacy (non-relational) datastore. | Below is the the instruction that describes the task:
### Input:
Signs a binary and saves it to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: URN that should serve as a unique identifier for the binary.
binary_content: Contents of the binary, as raw bytes.
private_key: Key that should be used for signing the binary contents.
public_key: Key that should be used to verify the signature generated using
the private key.
chunk_size: Size, in bytes, of the individual blobs that the binary contents
will be split to before saving to the datastore.
token: ACL token to use with the legacy (non-relational) datastore.
### Response:
def WriteSignedBinary(binary_urn,
binary_content,
private_key,
public_key,
chunk_size = 1024,
token = None):
"""Signs a binary and saves it to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: URN that should serve as a unique identifier for the binary.
binary_content: Contents of the binary, as raw bytes.
private_key: Key that should be used for signing the binary contents.
public_key: Key that should be used to verify the signature generated using
the private key.
chunk_size: Size, in bytes, of the individual blobs that the binary contents
will be split to before saving to the datastore.
token: ACL token to use with the legacy (non-relational) datastore.
"""
if _ShouldUseLegacyDatastore():
collects.GRRSignedBlob.NewFromContent(
binary_content,
binary_urn,
chunk_size=chunk_size,
token=token,
private_key=private_key,
public_key=public_key)
if data_store.RelationalDBEnabled():
blob_references = rdf_objects.BlobReferences()
for chunk_offset in range(0, len(binary_content), chunk_size):
chunk = binary_content[chunk_offset:chunk_offset + chunk_size]
blob_rdf = rdf_crypto.SignedBlob()
blob_rdf.Sign(chunk, private_key, verify_key=public_key)
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(
blob_rdf.SerializeToString())
blob_references.items.Append(
rdf_objects.BlobReference(
offset=chunk_offset, size=len(chunk), blob_id=blob_id))
data_store.REL_DB.WriteSignedBinaryReferences(
_SignedBinaryIDFromURN(binary_urn), blob_references) |
def _create_sbatch(self, ostr):
"""Write sbatch template to output stream
:param ostr: opened file to write to
"""
properties = dict(
sbatch_arguments=self.sbatch_args, hpcbench_command=self.hpcbench_cmd
)
try:
self.sbatch_template.stream(**properties).dump(ostr)
except jinja2.exceptions.UndefinedError:
self.logger.error('Error while generating SBATCH template:')
self.logger.error('%%<--------' * 5)
for line in self.sbatch_template_str.splitlines():
self.logger.error(line)
self.logger.error('%%<--------' * 5)
self.logger.error('Template properties: %s', properties)
raise | Write sbatch template to output stream
:param ostr: opened file to write to | Below is the the instruction that describes the task:
### Input:
Write sbatch template to output stream
:param ostr: opened file to write to
### Response:
def _create_sbatch(self, ostr):
"""Write sbatch template to output stream
:param ostr: opened file to write to
"""
properties = dict(
sbatch_arguments=self.sbatch_args, hpcbench_command=self.hpcbench_cmd
)
try:
self.sbatch_template.stream(**properties).dump(ostr)
except jinja2.exceptions.UndefinedError:
self.logger.error('Error while generating SBATCH template:')
self.logger.error('%%<--------' * 5)
for line in self.sbatch_template_str.splitlines():
self.logger.error(line)
self.logger.error('%%<--------' * 5)
self.logger.error('Template properties: %s', properties)
raise |
def move(self):
"""Create a state change."""
k = random.choice(self.keys)
multiplier = random.choice((0.95, 1.05))
invalid_key = True
while invalid_key:
# make sure bias doesn't exceed 1.0
if k == "bias":
if self.state[k] > 0.909:
k = random.choice(self.keys)
continue
invalid_key = False
newval = self.state[k] * multiplier
self.state[k] = newval | Create a state change. | Below is the the instruction that describes the task:
### Input:
Create a state change.
### Response:
def move(self):
"""Create a state change."""
k = random.choice(self.keys)
multiplier = random.choice((0.95, 1.05))
invalid_key = True
while invalid_key:
# make sure bias doesn't exceed 1.0
if k == "bias":
if self.state[k] > 0.909:
k = random.choice(self.keys)
continue
invalid_key = False
newval = self.state[k] * multiplier
self.state[k] = newval |
def get_logs(self, resource_group, name, tail=1000):
"""
Get the tail from logs of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:param tail: the size of the tail
:type tail: int
:return: A list of log messages
:rtype: list[str]
"""
logs = self.connection.container.list_logs(resource_group, name, name, tail=tail)
return logs.content.splitlines(True) | Get the tail from logs of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:param tail: the size of the tail
:type tail: int
:return: A list of log messages
:rtype: list[str] | Below is the the instruction that describes the task:
### Input:
Get the tail from logs of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:param tail: the size of the tail
:type tail: int
:return: A list of log messages
:rtype: list[str]
### Response:
def get_logs(self, resource_group, name, tail=1000):
"""
Get the tail from logs of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:param tail: the size of the tail
:type tail: int
:return: A list of log messages
:rtype: list[str]
"""
logs = self.connection.container.list_logs(resource_group, name, name, tail=tail)
return logs.content.splitlines(True) |
def epcr_parse(self):
"""
Parse the ePCR outputs
"""
logging.info('Parsing ePCR outputs')
for sample in self.metadata:
if sample.general.bestassemblyfile != 'NA':
# Create a set to store all the unique results
toxin_set = set()
if os.path.isfile(sample[self.analysistype].resultsfile):
with open(sample[self.analysistype].resultsfile) as epcrresults:
for result in epcrresults:
# Only the lines without a # contain results
if "#" not in result:
# Split on \t
data = result.split('\t')
# The subtyping primer pair is the first entry on lines with results
vttype = data[0].split('_')[0]
# Add the verotoxin subtype to the set of detected subtypes
toxin_set.add(vttype)
# Create a string of the entries in the sorted list of toxins joined with ";"
sample[self.analysistype].toxinprofile = ";".join(sorted(list(toxin_set))) if toxin_set else 'ND'
else:
setattr(sample, self.analysistype, GenObject())
sample[self.analysistype].toxinprofile = 'NA' | Parse the ePCR outputs | Below is the the instruction that describes the task:
### Input:
Parse the ePCR outputs
### Response:
def epcr_parse(self):
"""
Parse the ePCR outputs
"""
logging.info('Parsing ePCR outputs')
for sample in self.metadata:
if sample.general.bestassemblyfile != 'NA':
# Create a set to store all the unique results
toxin_set = set()
if os.path.isfile(sample[self.analysistype].resultsfile):
with open(sample[self.analysistype].resultsfile) as epcrresults:
for result in epcrresults:
# Only the lines without a # contain results
if "#" not in result:
# Split on \t
data = result.split('\t')
# The subtyping primer pair is the first entry on lines with results
vttype = data[0].split('_')[0]
# Add the verotoxin subtype to the set of detected subtypes
toxin_set.add(vttype)
# Create a string of the entries in the sorted list of toxins joined with ";"
sample[self.analysistype].toxinprofile = ";".join(sorted(list(toxin_set))) if toxin_set else 'ND'
else:
setattr(sample, self.analysistype, GenObject())
sample[self.analysistype].toxinprofile = 'NA' |
def assemble_caption(begin_line, begin_index, end_line, end_index, lines):
"""
Take the caption of a picture and put it all together
in a nice way. If it spans multiple lines, put it on one line. If it
contains controlled characters, strip them out. If it has tags we don't
want to worry about, get rid of them, etc.
:param: begin_line (int): the index of the line where the caption begins
:param: begin_index (int): the index within the line where the caption
begins
:param: end_line (int): the index of the line where the caption ends
:param: end_index (int): the index within the line where the caption ends
:param: lines ([string, string, ...]): the line strings of the text
:return: caption (string): the caption, formatted and pieced together
"""
# stuff we don't like
label_head = '\\label{'
# reassemble that sucker
if end_line > begin_line:
# our caption spanned multiple lines
caption = lines[begin_line][begin_index:]
for included_line_index in range(begin_line + 1, end_line):
caption = caption + ' ' + lines[included_line_index]
caption = caption + ' ' + lines[end_line][:end_index]
caption = caption.replace('\n', ' ')
caption = caption.replace(' ', ' ')
else:
# it fit on one line
caption = lines[begin_line][begin_index:end_index]
# clean out a label tag, if there is one
label_begin = caption.find(label_head)
if label_begin > -1:
# we know that our caption is only one line, so if there's a label
# tag in it, it will be all on one line. so we make up some args
dummy_start, dummy_start_line, label_end, dummy_end = \
find_open_and_close_braces(0, label_begin, '{', [caption])
caption = caption[:label_begin] + caption[label_end + 1:]
caption = caption.strip()
if len(caption) > 1 and caption[0] == '{' and caption[-1] == '}':
caption = caption[1:-1]
return caption | Take the caption of a picture and put it all together
in a nice way. If it spans multiple lines, put it on one line. If it
contains controlled characters, strip them out. If it has tags we don't
want to worry about, get rid of them, etc.
:param: begin_line (int): the index of the line where the caption begins
:param: begin_index (int): the index within the line where the caption
begins
:param: end_line (int): the index of the line where the caption ends
:param: end_index (int): the index within the line where the caption ends
:param: lines ([string, string, ...]): the line strings of the text
:return: caption (string): the caption, formatted and pieced together | Below is the the instruction that describes the task:
### Input:
Take the caption of a picture and put it all together
in a nice way. If it spans multiple lines, put it on one line. If it
contains controlled characters, strip them out. If it has tags we don't
want to worry about, get rid of them, etc.
:param: begin_line (int): the index of the line where the caption begins
:param: begin_index (int): the index within the line where the caption
begins
:param: end_line (int): the index of the line where the caption ends
:param: end_index (int): the index within the line where the caption ends
:param: lines ([string, string, ...]): the line strings of the text
:return: caption (string): the caption, formatted and pieced together
### Response:
def assemble_caption(begin_line, begin_index, end_line, end_index, lines):
"""
Take the caption of a picture and put it all together
in a nice way. If it spans multiple lines, put it on one line. If it
contains controlled characters, strip them out. If it has tags we don't
want to worry about, get rid of them, etc.
:param: begin_line (int): the index of the line where the caption begins
:param: begin_index (int): the index within the line where the caption
begins
:param: end_line (int): the index of the line where the caption ends
:param: end_index (int): the index within the line where the caption ends
:param: lines ([string, string, ...]): the line strings of the text
:return: caption (string): the caption, formatted and pieced together
"""
# stuff we don't like
label_head = '\\label{'
# reassemble that sucker
if end_line > begin_line:
# our caption spanned multiple lines
caption = lines[begin_line][begin_index:]
for included_line_index in range(begin_line + 1, end_line):
caption = caption + ' ' + lines[included_line_index]
caption = caption + ' ' + lines[end_line][:end_index]
caption = caption.replace('\n', ' ')
caption = caption.replace(' ', ' ')
else:
# it fit on one line
caption = lines[begin_line][begin_index:end_index]
# clean out a label tag, if there is one
label_begin = caption.find(label_head)
if label_begin > -1:
# we know that our caption is only one line, so if there's a label
# tag in it, it will be all on one line. so we make up some args
dummy_start, dummy_start_line, label_end, dummy_end = \
find_open_and_close_braces(0, label_begin, '{', [caption])
caption = caption[:label_begin] + caption[label_end + 1:]
caption = caption.strip()
if len(caption) > 1 and caption[0] == '{' and caption[-1] == '}':
caption = caption[1:-1]
return caption |
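A hedged usage sketch, assuming assemble_caption is importable: a caption that fits on one line and carries no \label tag, so the find_open_and_close_braces helper is never reached.

lines = ["\\caption{A simple figure caption} trailing text"]
# the caption body occupies columns 9..31 of line 0, i.e. the slice [9:32]
caption = assemble_caption(0, 9, 0, 32, lines)
print(caption)  # -> A simple figure caption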
def eval(self, command):
"""
@summary: Evaluate Tcl command.
@param command: command to evaluate.
@return: command output.
"""
# Some operations (like take ownership) may take long time.
con_command_out = self._con.send_cmd(command, timeout=256)
if 'ERROR_SEND_CMD_EXIT_DUE_TO_TIMEOUT' in con_command_out:
raise Exception('{} - command timeout'.format(command))
command = command.replace('\\', '/')
con_command_out = con_command_out.replace('\\', '/')
command = command.replace('(', '\(').replace(')', '\)')
command = command.replace('{', '\{').replace('}', '\}')
m = re.search(command + '(.*)' + '%', con_command_out, re.DOTALL)
command_out = m.group(1).strip()
if 'couldn\'t read file' in command_out or 'RuntimeError' in command_out:
raise Exception(command_out)
return command_out | @summary: Evaluate Tcl command.
@param command: command to evaluate.
@return: command output. | Below is the the instruction that describes the task:
### Input:
@summary: Evaluate Tcl command.
@param command: command to evaluate.
@return: command output.
### Response:
def eval(self, command):
"""
@summary: Evaluate Tcl command.
@param command: command to evaluate.
@return: command output.
"""
# Some operations (like take ownership) may take long time.
con_command_out = self._con.send_cmd(command, timeout=256)
if 'ERROR_SEND_CMD_EXIT_DUE_TO_TIMEOUT' in con_command_out:
raise Exception('{} - command timeout'.format(command))
command = command.replace('\\', '/')
con_command_out = con_command_out.replace('\\', '/')
command = command.replace('(', '\(').replace(')', '\)')
command = command.replace('{', '\{').replace('}', '\}')
m = re.search(command + '(.*)' + '%', con_command_out, re.DOTALL)
command_out = m.group(1).strip()
if 'couldn\'t read file' in command_out or 'RuntimeError' in command_out:
raise Exception(command_out)
return command_out |
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
if self.ndim != 1:
raise TypeError('cannot replace {0} with method {1} on a {2}'
.format(to_replace, method, type(self).__name__))
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index,
dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result | Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method | Below is the the instruction that describes the task:
### Input:
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
### Response:
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
if self.ndim != 1:
raise TypeError('cannot replace {0} with method {1} on a {2}'
.format(to_replace, method, type(self).__name__))
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index,
dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result |
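For context, the public entry point that dispatches to this internal is Series.replace with a fill method; a hedged sketch (the method argument of replace has been deprecated in recent pandas releases, so this reflects the versions this helper comes from):

import pandas as pd

s = pd.Series([0, 1, 2, 1, 4])
# every 1 is replaced by the last preceding non-replaced value
print(s.replace(1, method='ffill').tolist())  # [0, 0, 2, 2, 4]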
def sort_list_of_dicts(lst_of_dct, keys, reverse=False, **sort_args):
"""
Sort list of dicts by one or multiple keys.
If the key is not available, sort these to the end.
:param lst_of_dct: input structure. List of dicts.
:param keys: one or more keys in list
:param reverse:
:param sort_args:
:return:
"""
if type(keys) != list:
keys = [keys]
# dcmdir = lst_of_dct[:]
# lst_of_dct.sort(key=lambda x: [x[key] for key in keys], reverse=reverse, **sort_args)
lst_of_dct.sort(key=lambda x: [((False, x[key]) if key in x else (True, 0)) for key in keys], reverse=reverse, **sort_args)
return lst_of_dct | Sort list of dicts by one or multiple keys.
If the key is not available, sort these to the end.
:param lst_of_dct: input structure. List of dicts.
:param keys: one or more keys in list
:param reverse:
:param sort_args:
:return: | Below is the the instruction that describes the task:
### Input:
Sort list of dicts by one or multiple keys.
If the key is not available, sort these to the end.
:param lst_of_dct: input structure. List of dicts.
:param keys: one or more keys in list
:param reverse:
:param sort_args:
:return:
### Response:
def sort_list_of_dicts(lst_of_dct, keys, reverse=False, **sort_args):
"""
Sort list of dicts by one or multiple keys.
If the key is not available, sort these to the end.
:param lst_of_dct: input structure. List of dicts.
:param keys: one or more keys in list
:param reverse:
:param sort_args:
:return:
"""
if type(keys) != list:
keys = [keys]
# dcmdir = lst_of_dct[:]
# lst_of_dct.sort(key=lambda x: [x[key] for key in keys], reverse=reverse, **sort_args)
lst_of_dct.sort(key=lambda x: [((False, x[key]) if key in x else (True, 0)) for key in keys], reverse=reverse, **sort_args)
return lst_of_dct |
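A hedged usage sketch, assuming the function is importable; note the sort happens in place and dicts missing the key end up last:

records = [
    {"name": "b", "age": 30},
    {"name": "a"},               # no "age" key -> pushed to the end
    {"name": "c", "age": 25},
]
sort_list_of_dicts(records, keys="age")
print([r["name"] for r in records])  # ['c', 'b', 'a']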
def assert_page_source_contains(self, expected_value, failure_message='Expected page source to contain: "{}"'):
"""
Asserts that the page source contains the string passed in expected_value
"""
assertion = lambda: expected_value in self.driver_wrapper.page_source()
self.webdriver_assert(assertion, unicode(failure_message).format(expected_value)) | Asserts that the page source contains the string passed in expected_value | Below is the the instruction that describes the task:
### Input:
Asserts that the page source contains the string passed in expected_value
### Response:
def assert_page_source_contains(self, expected_value, failure_message='Expected page source to contain: "{}"'):
"""
Asserts that the page source contains the string passed in expected_value
"""
assertion = lambda: expected_value in self.driver_wrapper.page_source()
self.webdriver_assert(assertion, unicode(failure_message).format(expected_value)) |
def is_sqlatype_numeric(coltype: Union[TypeEngine, VisitableType]) -> bool:
"""
Is the SQLAlchemy column type one that inherits from :class:`Numeric`,
such as :class:`Float`, :class:`Decimal`?
"""
coltype = _coltype_to_typeengine(coltype)
return isinstance(coltype, sqltypes.Numeric) | Is the SQLAlchemy column type one that inherits from :class:`Numeric`,
such as :class:`Float`, :class:`Decimal`? | Below is the the instruction that describes the task:
### Input:
Is the SQLAlchemy column type one that inherits from :class:`Numeric`,
such as :class:`Float`, :class:`Decimal`?
### Response:
def is_sqlatype_numeric(coltype: Union[TypeEngine, VisitableType]) -> bool:
"""
Is the SQLAlchemy column type one that inherits from :class:`Numeric`,
such as :class:`Float`, :class:`Decimal`?
"""
coltype = _coltype_to_typeengine(coltype)
return isinstance(coltype, sqltypes.Numeric) |
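A hedged usage sketch with stock SQLAlchemy types, assuming the helper is importable; per its signature it accepts either a type instance or the bare class:

from sqlalchemy import Float, Integer, Numeric

print(is_sqlatype_numeric(Numeric(10, 2)))  # True
print(is_sqlatype_numeric(Float))           # True  (Float subclasses Numeric)
print(is_sqlatype_numeric(Integer()))       # False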
def register_view(design_doc, full_set=True):
"""Model document decorator to register its design document view::
@register_view('dev_books')
class Book(Document):
__bucket_name__ = 'mybucket'
doc_type = 'book'
structure = {
# snip snip
}
:param design_doc: The name of the design document.
:type design_doc: basestring
:param full_set: Attach full_set param to development views.
:type full_set: bool
"""
def _injector(doc):
if not isinstance(doc, type) or not issubclass(doc, Document):
raise TypeError("Class must be a cbwrapper 'Document' subclass.")
doc.__view_name__ = design_doc
doc.full_set = full_set
ViewSync._documents.add(doc)
return doc
return _injector | Model document decorator to register its design document view::
@register_view('dev_books')
class Book(Document):
__bucket_name__ = 'mybucket'
doc_type = 'book'
structure = {
# snip snip
}
:param design_doc: The name of the design document.
:type design_doc: basestring
:param full_set: Attach full_set param to development views.
:type full_set: bool | Below is the the instruction that describes the task:
### Input:
Model document decorator to register its design document view::
@register_view('dev_books')
class Book(Document):
__bucket_name__ = 'mybucket'
doc_type = 'book'
structure = {
# snip snip
}
:param design_doc: The name of the design document.
:type design_doc: basestring
:param full_set: Attach full_set param to development views.
:type full_set: bool
### Response:
def register_view(design_doc, full_set=True):
"""Model document decorator to register its design document view::
@register_view('dev_books')
class Book(Document):
__bucket_name__ = 'mybucket'
doc_type = 'book'
structure = {
# snip snip
}
:param design_doc: The name of the design document.
:type design_doc: basestring
:param full_set: Attach full_set param to development views.
:type full_set: bool
"""
def _injector(doc):
if not isinstance(doc, type) or not issubclass(doc, Document):
raise TypeError("Class must be a cbwrapper 'Document' subclass.")
doc.__view_name__ = design_doc
doc.full_set = full_set
ViewSync._documents.add(doc)
return doc
return _injector |
def mcc(y, z):
"""Matthews correlation coefficient
"""
tp, tn, fp, fn = contingency_table(y, z)
return (tp * tn - fp * fn) / K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) | Matthews correlation coefficient | Below is the the instruction that describes the task:
### Input:
Matthews correlation coefficient
### Response:
def mcc(y, z):
"""Matthews correlation coefficient
"""
tp, tn, fp, fn = contingency_table(y, z)
return (tp * tn - fp * fn) / K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) |
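The entry above obtains the counts from a Keras-backend contingency_table helper; the same formula on hard-coded counts, as a plain-Python illustration:

import math

tp, tn, fp, fn = 40.0, 45.0, 5.0, 10.0
mcc_value = (tp * tn - fp * fn) / math.sqrt(
    (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
print(round(mcc_value, 2))  # 0.7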
def regex_extract(arg, pattern, index):
"""
Returns specified index, 0 indexed, from string based on regex pattern
given
Parameters
----------
pattern : string (regular expression string)
index : int, 0 indexed
Returns
-------
extracted : string
"""
return ops.RegexExtract(arg, pattern, index).to_expr() | Returns specified index, 0 indexed, from string based on regex pattern
given
Parameters
----------
pattern : string (regular expression string)
index : int, 0 indexed
Returns
-------
extracted : string | Below is the the instruction that describes the task:
### Input:
Returns specified index, 0 indexed, from string based on regex pattern
given
Parameters
----------
pattern : string (regular expression string)
index : int, 0 indexed
Returns
-------
extracted : string
### Response:
def regex_extract(arg, pattern, index):
"""
Returns specified index, 0 indexed, from string based on regex pattern
given
Parameters
----------
pattern : string (regular expression string)
index : int, 0 indexed
Returns
-------
extracted : string
"""
return ops.RegexExtract(arg, pattern, index).to_expr() |
def integer_id(self):
"""Return the integer id in the last (kind, id) pair, if any.
Returns:
An integer id, or None if the key has a string id or is incomplete.
"""
id = self.id()
if not isinstance(id, (int, long)):
id = None
return id | Return the integer id in the last (kind, id) pair, if any.
Returns:
An integer id, or None if the key has a string id or is incomplete. | Below is the the instruction that describes the task:
### Input:
Return the integer id in the last (kind, id) pair, if any.
Returns:
An integer id, or None if the key has a string id or is incomplete.
### Response:
def integer_id(self):
"""Return the integer id in the last (kind, id) pair, if any.
Returns:
An integer id, or None if the key has a string id or is incomplete.
"""
id = self.id()
if not isinstance(id, (int, long)):
id = None
return id |
def transform(self, m):
"""Replace rectangle with its transformation by matrix m."""
if not len(m) == 6:
raise ValueError("bad sequ. length")
self.x0, self.y0, self.x1, self.y1 = TOOLS._transform_rect(self, m)
return self | Replace rectangle with its transformation by matrix m. | Below is the the instruction that describes the task:
### Input:
Replace rectangle with its transformation by matrix m.
### Response:
def transform(self, m):
"""Replace rectangle with its transformation by matrix m."""
if not len(m) == 6:
raise ValueError("bad sequ. length")
self.x0, self.y0, self.x1, self.y1 = TOOLS._transform_rect(self, m)
return self |
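A hedged usage sketch with PyMuPDF, which this Rect method appears to come from; Matrix(2, 3) is a pure scaling matrix, and transform() mutates the rect in place and returns it:

import fitz  # PyMuPDF

rect = fitz.Rect(0, 0, 10, 10)
rect.transform(fitz.Matrix(2, 3))
print(rect)  # roughly Rect(0.0, 0.0, 20.0, 30.0)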
def get_external_logger(name=None, short_name=" ", log_to_file=True):
"""
Get a logger for external modules, whose logging should usually be on a less verbose level.
:param name: Name for logger
:param short_name: Shorthand name for logger
:param log_to_file: Boolean, True if logger should log to a file as well.
:return: Logger
"""
global LOGGERS
loggername = name
logger = _check_existing_logger(loggername, short_name)
if logger is not None:
return logger
logging_config = LOGGING_CONFIG.get(name, LOGGING_CONFIG.get("external"))
filename = logging_config.get("file", {}).get("name", loggername)
if not filename.endswith(".log"):
filename = str(filename) + ".log"
logger = _get_basic_logger(loggername, log_to_file, get_base_logfilename(filename))
cbh = logging.StreamHandler()
cbh.formatter = BenchFormatterWithType(COLOR_ON)
if VERBOSE_LEVEL == 1 and not SILENT_ON:
cbh.setLevel(logging.INFO)
elif VERBOSE_LEVEL >= 2 and not SILENT_ON:
cbh.setLevel(logging.DEBUG)
elif SILENT_ON:
cbh.setLevel(logging.ERROR)
else:
cbh.setLevel(getattr(logging, logging_config.get("level")))
logger.addHandler(cbh)
LOGGERS[loggername] = BenchLoggerAdapter(logger, {"source": short_name})
return LOGGERS[loggername] | Get a logger for external modules, whose logging should usually be on a less verbose level.
:param name: Name for logger
:param short_name: Shorthand name for logger
:param log_to_file: Boolean, True if logger should log to a file as well.
:return: Logger | Below is the the instruction that describes the task:
### Input:
Get a logger for external modules, whose logging should usually be on a less verbose level.
:param name: Name for logger
:param short_name: Shorthand name for logger
:param log_to_file: Boolean, True if logger should log to a file as well.
:return: Logger
### Response:
def get_external_logger(name=None, short_name=" ", log_to_file=True):
"""
Get a logger for external modules, whose logging should usually be on a less verbose level.
:param name: Name for logger
:param short_name: Shorthand name for logger
:param log_to_file: Boolean, True if logger should log to a file as well.
:return: Logger
"""
global LOGGERS
loggername = name
logger = _check_existing_logger(loggername, short_name)
if logger is not None:
return logger
logging_config = LOGGING_CONFIG.get(name, LOGGING_CONFIG.get("external"))
filename = logging_config.get("file", {}).get("name", loggername)
if not filename.endswith(".log"):
filename = str(filename) + ".log"
logger = _get_basic_logger(loggername, log_to_file, get_base_logfilename(filename))
cbh = logging.StreamHandler()
cbh.formatter = BenchFormatterWithType(COLOR_ON)
if VERBOSE_LEVEL == 1 and not SILENT_ON:
cbh.setLevel(logging.INFO)
elif VERBOSE_LEVEL >= 2 and not SILENT_ON:
cbh.setLevel(logging.DEBUG)
elif SILENT_ON:
cbh.setLevel(logging.ERROR)
else:
cbh.setLevel(getattr(logging, logging_config.get("level")))
logger.addHandler(cbh)
LOGGERS[loggername] = BenchLoggerAdapter(logger, {"source": short_name})
return LOGGERS[loggername] |
def fork_exec(args, stdin='', **kwargs):
"""
Do a fork-exec through the subprocess.Popen abstraction in a way
that takes a stdin and return stdout.
"""
as_bytes = isinstance(stdin, bytes)
source = stdin if as_bytes else stdin.encode(locale)
p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, **kwargs)
stdout, stderr = p.communicate(source)
if as_bytes:
return stdout, stderr
return (stdout.decode(locale), stderr.decode(locale)) | Do a fork-exec through the subprocess.Popen abstraction in a way
that takes a stdin and return stdout. | Below is the the instruction that describes the task:
### Input:
Do a fork-exec through the subprocess.Popen abstraction in a way
that takes a stdin and return stdout.
### Response:
def fork_exec(args, stdin='', **kwargs):
"""
Do a fork-exec through the subprocess.Popen abstraction in a way
that takes a stdin and return stdout.
"""
as_bytes = isinstance(stdin, bytes)
source = stdin if as_bytes else stdin.encode(locale)
p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, **kwargs)
stdout, stderr = p.communicate(source)
if as_bytes:
return stdout, stderr
return (stdout.decode(locale), stderr.decode(locale)) |
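A hedged usage sketch, assuming fork_exec is importable (its module defines the locale encoding it decodes with) and a POSIX cat binary is on PATH:

out, err = fork_exec(['cat'], stdin='hello world\n')
print(repr(out))  # 'hello world\n'
print(repr(err))  # ''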
def show(self, wait = False):
"""Show the window."""
self.tk.deiconify()
self._visible = True
self._modal = wait
if self._modal:
self.tk.grab_set() | Show the window. | Below is the the instruction that describes the task:
### Input:
Show the window.
### Response:
def show(self, wait = False):
"""Show the window."""
self.tk.deiconify()
self._visible = True
self._modal = wait
if self._modal:
self.tk.grab_set() |
def on_usb_device_attach(self, device, error, masked_interfaces, capture_filename):
"""Triggered when a request to capture a USB device (as a result
of matched USB filters or direct call to
:py:func:`IConsole.attach_usb_device` ) has completed.
A @c null @a error object means success, otherwise it
describes a failure.
in device of type :class:`IUSBDevice`
in error of type :class:`IVirtualBoxErrorInfo`
in masked_interfaces of type int
in capture_filename of type str
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation.
"""
if not isinstance(device, IUSBDevice):
raise TypeError("device can only be an instance of type IUSBDevice")
if not isinstance(error, IVirtualBoxErrorInfo):
raise TypeError("error can only be an instance of type IVirtualBoxErrorInfo")
if not isinstance(masked_interfaces, baseinteger):
raise TypeError("masked_interfaces can only be an instance of type baseinteger")
if not isinstance(capture_filename, basestring):
raise TypeError("capture_filename can only be an instance of type basestring")
self._call("onUSBDeviceAttach",
in_p=[device, error, masked_interfaces, capture_filename]) | Triggered when a request to capture a USB device (as a result
of matched USB filters or direct call to
:py:func:`IConsole.attach_usb_device` ) has completed.
A @c null @a error object means success, otherwise it
describes a failure.
in device of type :class:`IUSBDevice`
in error of type :class:`IVirtualBoxErrorInfo`
in masked_interfaces of type int
in capture_filename of type str
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation. | Below is the the instruction that describes the task:
### Input:
Triggered when a request to capture a USB device (as a result
of matched USB filters or direct call to
:py:func:`IConsole.attach_usb_device` ) has completed.
A @c null @a error object means success, otherwise it
describes a failure.
in device of type :class:`IUSBDevice`
in error of type :class:`IVirtualBoxErrorInfo`
in masked_interfaces of type int
in capture_filename of type str
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation.
### Response:
def on_usb_device_attach(self, device, error, masked_interfaces, capture_filename):
"""Triggered when a request to capture a USB device (as a result
of matched USB filters or direct call to
:py:func:`IConsole.attach_usb_device` ) has completed.
A @c null @a error object means success, otherwise it
describes a failure.
in device of type :class:`IUSBDevice`
in error of type :class:`IVirtualBoxErrorInfo`
in masked_interfaces of type int
in capture_filename of type str
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation.
"""
if not isinstance(device, IUSBDevice):
raise TypeError("device can only be an instance of type IUSBDevice")
if not isinstance(error, IVirtualBoxErrorInfo):
raise TypeError("error can only be an instance of type IVirtualBoxErrorInfo")
if not isinstance(masked_interfaces, baseinteger):
raise TypeError("masked_interfaces can only be an instance of type baseinteger")
if not isinstance(capture_filename, basestring):
raise TypeError("capture_filename can only be an instance of type basestring")
self._call("onUSBDeviceAttach",
in_p=[device, error, masked_interfaces, capture_filename]) |
def SetType(self, vtype):
'''
Sets the type, i.e duration of the note. Types are given as keys inside options
:param vtype: str - see keys in options for full list
:return: None, side effects modifying the class
'''
self.val_type = vtype
options = {
"128th": 128,
"64th": 64,
"32nd": 32,
"16th": 16,
"eighth": 8,
"quarter": 4,
"half": 2,
"whole": 1,
"h": 8,
"long": "\\longa",
"breve": "\\breve"}
if vtype in options:
self.duration = options[self.val_type] | Sets the type, i.e duration of the note. Types are given as keys inside options
:param vtype: str - see keys in options for full list
:return: None, side effects modifying the class | Below is the the instruction that describes the task:
### Input:
Sets the type, i.e duration of the note. Types are given as keys inside options
:param vtype: str - see keys in options for full list
:return: None, side effects modifying the class
### Response:
def SetType(self, vtype):
'''
Sets the type, i.e duration of the note. Types are given as keys inside options
:param vtype: str - see keys in options for full list
:return: None, side effects modifying the class
'''
self.val_type = vtype
options = {
"128th": 128,
"64th": 64,
"32nd": 32,
"16th": 16,
"eighth": 8,
"quarter": 4,
"half": 2,
"whole": 1,
"h": 8,
"long": "\\longa",
"breve": "\\breve"}
if vtype in options:
self.duration = options[self.val_type] |
def pool(n=None, dummy=False):
"""
create a multiprocessing pool that responds to interrupts.
"""
if dummy:
from multiprocessing.dummy import Pool
else:
from multiprocessing import Pool
if n is None:
import multiprocessing
n = multiprocessing.cpu_count() - 1
return Pool(n) | create a multiprocessing pool that responds to interrupts. | Below is the the instruction that describes the task:
### Input:
create a multiprocessing pool that responds to interrupts.
### Response:
def pool(n=None, dummy=False):
"""
create a multiprocessing pool that responds to interrupts.
"""
if dummy:
from multiprocessing.dummy import Pool
else:
from multiprocessing import Pool
if n is None:
import multiprocessing
n = multiprocessing.cpu_count() - 1
return Pool(n) |
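A hedged usage sketch, assuming the helper is importable; dummy=True swaps in the thread-backed multiprocessing.dummy.Pool, which also lets the lambda below work (a process pool would need a picklable function):

p = pool(n=2, dummy=True)
try:
    squares = p.map(lambda x: x * x, range(5))
finally:
    p.close()
    p.join()
print(squares)  # [0, 1, 4, 9, 16]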