code | docstring | text
---|---|---|
def positive_nonzero_int(string):
"""Convert string to positive integer greater than zero."""
error_msg = 'Positive non-zero integer required, {string} given.'.format(string=string)
try:
value = int(string)
except ValueError:
raise ArgumentTypeError(error_msg)
if value <= 0:
raise ArgumentTypeError(error_msg)
return value | Convert string to positive integer greater than zero. | Below is the instruction that describes the task:
### Input:
Convert string to positive integer greater than zero.
### Response:
def positive_nonzero_int(string):
"""Convert string to positive integer greater than zero."""
error_msg = 'Positive non-zero integer required, {string} given.'.format(string=string)
try:
value = int(string)
except ValueError:
raise ArgumentTypeError(error_msg)
if value <= 0:
raise ArgumentTypeError(error_msg)
return value |
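As a hedged illustration of how a validator like this is typically used, the sketch below wires it into argparse as a `type=` callback; the parser and the `--workers` flag are made up for the example, not taken from the original project.

```python
from argparse import ArgumentParser, ArgumentTypeError

def positive_nonzero_int(string):
    """Convert string to positive integer greater than zero."""
    error_msg = 'Positive non-zero integer required, {string} given.'.format(string=string)
    try:
        value = int(string)
    except ValueError:
        raise ArgumentTypeError(error_msg)
    if value <= 0:
        raise ArgumentTypeError(error_msg)
    return value

parser = ArgumentParser(description='Demo of a custom argparse type.')
parser.add_argument('--workers', type=positive_nonzero_int, default=1)

print(parser.parse_args(['--workers', '4']).workers)  # 4
# parser.parse_args(['--workers', '0']) would exit with the "Positive non-zero integer required" error
```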
def urljoin(*fragments):
"""Concatenate multi part strings into urls."""
# Strip possible already existent final slashes of fragments except for the last one
parts = [fragment.rstrip('/') for fragment in fragments[:len(fragments) - 1]]
parts.append(fragments[-1])
return '/'.join(parts) | Concatenate multi part strings into urls. | Below is the instruction that describes the task:
### Input:
Concatenate multi part strings into urls.
### Response:
def urljoin(*fragments):
"""Concatenate multi part strings into urls."""
# Strip possible already existent final slashes of fragments except for the last one
parts = [fragment.rstrip('/') for fragment in fragments[:len(fragments) - 1]]
parts.append(fragments[-1])
return '/'.join(parts) |
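A quick usage sketch (illustrative URLs only): only the fragments before the last one have trailing slashes stripped, so a trailing slash on the final fragment is preserved.

```python
def urljoin(*fragments):
    """Concatenate multi part strings into urls."""
    parts = [fragment.rstrip('/') for fragment in fragments[:len(fragments) - 1]]
    parts.append(fragments[-1])
    return '/'.join(parts)

print(urljoin('https://api.example.com/', 'v2/', 'users', '42'))
# -> https://api.example.com/v2/users/42
print(urljoin('https://api.example.com', 'v2/'))
# -> https://api.example.com/v2/  (final fragment keeps its slash)
```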
def _get_stream_for_parsing(self):
"""This is the same as accessing :attr:`stream` with the difference
that if it finds cached data from calling :meth:`get_data` first it
will create a new stream out of the cached data.
.. versionadded:: 0.9.3
"""
cached_data = getattr(self, '_cached_data', None)
if cached_data is not None:
return BytesIO(cached_data)
return self.stream | This is the same as accessing :attr:`stream` with the difference
that if it finds cached data from calling :meth:`get_data` first it
will create a new stream out of the cached data.
.. versionadded:: 0.9.3 | Below is the instruction that describes the task:
### Input:
This is the same as accessing :attr:`stream` with the difference
that if it finds cached data from calling :meth:`get_data` first it
will create a new stream out of the cached data.
.. versionadded:: 0.9.3
### Response:
def _get_stream_for_parsing(self):
"""This is the same as accessing :attr:`stream` with the difference
that if it finds cached data from calling :meth:`get_data` first it
will create a new stream out of the cached data.
.. versionadded:: 0.9.3
"""
cached_data = getattr(self, '_cached_data', None)
if cached_data is not None:
return BytesIO(cached_data)
return self.stream |
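The method belongs to a request object, so it is not runnable in isolation; the sketch below is a minimal stand-in class, with assumed attribute names, that shows the caching behaviour: once `get_data()` has buffered the body, later consumers receive a fresh `BytesIO` over the cached bytes instead of the already-exhausted stream.

```python
from io import BytesIO

class FakeRequest:
    """Illustration only -- a stand-in, not the real request class."""

    def __init__(self, body):
        self.stream = BytesIO(body)
        self._cached_data = None

    def get_data(self):
        if self._cached_data is None:
            self._cached_data = self.stream.read()  # exhausts the raw stream
        return self._cached_data

    def _get_stream_for_parsing(self):
        cached_data = getattr(self, '_cached_data', None)
        if cached_data is not None:
            return BytesIO(cached_data)
        return self.stream

req = FakeRequest(b'a=1&b=2')
req.get_data()                               # buffers the body, raw stream is now empty
print(req._get_stream_for_parsing().read())  # b'a=1&b=2' -- served from the cache
```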
def _get_recurse_set(recurse):
'''
Convert *recurse* definition to a set of strings.
Raises TypeError or ValueError when *recurse* has wrong structure.
'''
if not recurse:
return set()
if not isinstance(recurse, list):
raise TypeError('"recurse" must be formed as a list of strings')
try:
recurse_set = set(recurse)
except TypeError: # non-hashable elements
recurse_set = None
if recurse_set is None or not set(_RECURSE_TYPES) >= recurse_set:
raise ValueError('Types for "recurse" limited to {0}.'.format(
', '.join('"{0}"'.format(rtype) for rtype in _RECURSE_TYPES)))
if 'ignore_files' in recurse_set and 'ignore_dirs' in recurse_set:
raise ValueError('Must not specify "recurse" options "ignore_files"'
' and "ignore_dirs" at the same time.')
return recurse_set | Convert *recurse* definition to a set of strings.
Raises TypeError or ValueError when *recurse* has wrong structure. | Below is the instruction that describes the task:
### Input:
Convert *recurse* definition to a set of strings.
Raises TypeError or ValueError when *recurse* has wrong structure.
### Response:
def _get_recurse_set(recurse):
'''
Convert *recurse* definition to a set of strings.
Raises TypeError or ValueError when *recurse* has wrong structure.
'''
if not recurse:
return set()
if not isinstance(recurse, list):
raise TypeError('"recurse" must be formed as a list of strings')
try:
recurse_set = set(recurse)
except TypeError: # non-hashable elements
recurse_set = None
if recurse_set is None or not set(_RECURSE_TYPES) >= recurse_set:
raise ValueError('Types for "recurse" limited to {0}.'.format(
', '.join('"{0}"'.format(rtype) for rtype in _RECURSE_TYPES)))
if 'ignore_files' in recurse_set and 'ignore_dirs' in recurse_set:
raise ValueError('Must not specify "recurse" options "ignore_files"'
' and "ignore_dirs" at the same time.')
return recurse_set |
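A standalone sketch of calling the validator. `_RECURSE_TYPES` is module-level state that is not shown above, so the tuple below is an assumption made for illustration; the real values in the source module may differ.

```python
_RECURSE_TYPES = ('user', 'group', 'mode', 'ignore_files', 'ignore_dirs')  # assumed values

def _get_recurse_set(recurse):
    if not recurse:
        return set()
    if not isinstance(recurse, list):
        raise TypeError('"recurse" must be formed as a list of strings')
    try:
        recurse_set = set(recurse)
    except TypeError:  # non-hashable elements
        recurse_set = None
    if recurse_set is None or not set(_RECURSE_TYPES) >= recurse_set:
        raise ValueError('Types for "recurse" limited to {0}.'.format(
            ', '.join('"{0}"'.format(rtype) for rtype in _RECURSE_TYPES)))
    if 'ignore_files' in recurse_set and 'ignore_dirs' in recurse_set:
        raise ValueError('Must not specify "recurse" options "ignore_files"'
                         ' and "ignore_dirs" at the same time.')
    return recurse_set

print(_get_recurse_set(['user', 'mode']))  # {'user', 'mode'} (set order may vary)
print(_get_recurse_set(None))              # set()
# _get_recurse_set(['user', 'bogus'])                # ValueError: Types for "recurse" limited to ...
# _get_recurse_set(['ignore_files', 'ignore_dirs'])  # ValueError: mutually exclusive options
```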
def pairwise_indices(self, alpha=0.05, only_larger=True, hs_dims=None):
"""Indices of columns where p < alpha for column-comparison t-tests
Returns an array of tuples of columns that are significant at p<alpha,
from a series of pairwise t-tests.
The `only_larger` argument returns indices strictly on the test statistic. If
False, however, only the indices of values *significantly smaller* than
each cell are indicated.
"""
return PairwiseSignificance(
self, alpha=alpha, only_larger=only_larger, hs_dims=hs_dims
).pairwise_indices | Indices of columns where p < alpha for column-comparison t-tests
Returns an array of tuples of columns that are significant at p<alpha,
from a series of pairwise t-tests.
The `only_larger` argument returns indices strictly on the test statistic. If
False, however, only the indices of values *significantly smaller* than
each cell are indicated. | Below is the instruction that describes the task:
### Input:
Indices of columns where p < alpha for column-comparison t-tests
Returns an array of tuples of columns that are significant at p<alpha,
from a series of pairwise t-tests.
The `only_larger` argument returns indices strictly on the test statistic. If
False, however, only the indices of values *significantly smaller* than
each cell are indicated.
### Response:
def pairwise_indices(self, alpha=0.05, only_larger=True, hs_dims=None):
"""Indices of columns where p < alpha for column-comparison t-tests
Returns an array of tuples of columns that are significant at p<alpha,
from a series of pairwise t-tests.
The `only_larger` argument returns indices strictly on the test statistic. If
False, however, only the indices of values *significantly smaller* than
each cell are indicated.
"""
return PairwiseSignificance(
self, alpha=alpha, only_larger=only_larger, hs_dims=hs_dims
).pairwise_indices |
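The heavy lifting happens in crunch-cube's `PairwiseSignificance` helper, which is not shown here. As a rough, library-agnostic illustration of the underlying idea only (pairwise column t-tests filtered by an alpha threshold), a SciPy sketch could look like this; it is not the project's implementation.

```python
import numpy as np
from scipy import stats

def pairwise_significant_indices(columns, alpha=0.05):
    """For each column, indices of the other columns it differs from at p < alpha."""
    result = []
    for i, col_i in enumerate(columns):
        hits = tuple(
            j for j, col_j in enumerate(columns)
            if j != i and stats.ttest_ind(col_i, col_j).pvalue < alpha
        )
        result.append(hits)
    return result

rng = np.random.default_rng(0)
cols = [rng.normal(0, 1, 200), rng.normal(0, 1, 200), rng.normal(2, 1, 200)]
print(pairwise_significant_indices(cols))  # something like [(2,), (2,), (0, 1)]
```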
def put(self, url, body=None, **kwargs):
"""
Send a PUT request.
:param str url: Sub URL for the request. You MUST NOT include the base URL or the API version prefix.
:param dict body: (optional) Dictionary of body attributes that will be wrapped with envelope and json encoded.
:param dict **kwargs: (optional) Other parameters which are directly passed to :func:`requests.request`.
:return: Tuple of three elements: (http status code, headers, response - either parsed json or plain text)
:rtype: tuple
"""
return self.request('put', url, body=body, **kwargs) | Send a PUT request.
:param str url: Sub URL for the request. You MUST NOT include the base URL or the API version prefix.
:param dict body: (optional) Dictionary of body attributes that will be wrapped with envelope and json encoded.
:param dict **kwargs: (optional) Other parameters which are directly passed to :func:`requests.request`.
:return: Tuple of three elements: (http status code, headers, response - either parsed json or plain text)
:rtype: tuple | Below is the instruction that describes the task:
### Input:
Send a PUT request.
:param str url: Sub URL for the request. You MUST NOT include the base URL or the API version prefix.
:param dict body: (optional) Dictionary of body attributes that will be wrapped with envelope and json encoded.
:param dict **kwargs: (optional) Other parameters which are directly passed to :func:`requests.request`.
:return: Tuple of three elements: (http status code, headers, response - either parsed json or plain text)
:rtype: tuple
### Response:
def put(self, url, body=None, **kwargs):
"""
Send a PUT request.
:param str url: Sub URL for the request. You MUST NOT include the base URL or the API version prefix.
:param dict body: (optional) Dictionary of body attributes that will be wrapped with envelope and json encoded.
:param dict **kwargs: (optional) Other parameters which are directly passed to :func:`requests.request`.
:return: Tuple of three elements: (http status code, headers, response - either parsed json or plain text)
:rtype: tuple
"""
return self.request('put', url, body=body, **kwargs) |
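`put` is a thin wrapper around the client's own `request` method, which is not shown. The stand-in below (illustrative, built directly on the `requests` library) returns the same `(status, headers, body)` tuple shape so the wrapper reads the same way.

```python
import requests

class MiniClient:
    """Illustration only -- not the original API client."""

    def __init__(self, base_url):
        self.base_url = base_url.rstrip('/')

    def request(self, method, url, body=None, **kwargs):
        resp = requests.request(method, self.base_url + url, json=body, **kwargs)
        try:
            payload = resp.json()
        except ValueError:
            payload = resp.text
        return resp.status_code, resp.headers, payload

    def put(self, url, body=None, **kwargs):
        return self.request('put', url, body=body, **kwargs)

# client = MiniClient('https://api.example.com')
# status, headers, data = client.put('/widgets/1', body={'name': 'renamed'})
```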
def XYZ_to_galcenrect(X,Y,Z,Xsun=1.,Zsun=0.,_extra_rot=True):
"""
NAME:
XYZ_to_galcenrect
PURPOSE:
transform XYZ coordinates (wrt Sun) to rectangular Galactocentric coordinates
INPUT:
X - X
Y - Y
Z - Z
Xsun - cylindrical distance to the GC
Zsun - Sun's height above the midplane
_extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition
OUTPUT:
(Xg, Yg, Zg)
HISTORY:
2010-09-24 - Written - Bovy (NYU)
2016-05-12 - Edited to properly take into account the Sun's vertical position; dropped Ysun keyword - Bovy (UofT)
2018-04-18 - Tweaked to be consistent with astropy's Galactocentric frame - Bovy (UofT)
"""
if _extra_rot:
X,Y,Z= nu.dot(galcen_extra_rot,nu.array([X,Y,Z]))
dgc= nu.sqrt(Xsun**2.+Zsun**2.)
costheta, sintheta= Xsun/dgc, Zsun/dgc
return nu.dot(nu.array([[costheta,0.,-sintheta],
[0.,1.,0.],
[sintheta,0.,costheta]]),
nu.array([-X+dgc,Y,nu.sign(Xsun)*Z])).T | NAME:
XYZ_to_galcenrect
PURPOSE:
transform XYZ coordinates (wrt Sun) to rectangular Galactocentric coordinates
INPUT:
X - X
Y - Y
Z - Z
Xsun - cylindrical distance to the GC
Zsun - Sun's height above the midplane
_extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition
OUTPUT:
(Xg, Yg, Zg)
HISTORY:
2010-09-24 - Written - Bovy (NYU)
2016-05-12 - Edited to properly take into account the Sun's vertical position; dropped Ysun keyword - Bovy (UofT)
2018-04-18 - Tweaked to be consistent with astropy's Galactocentric frame - Bovy (UofT) | Below is the instruction that describes the task:
### Input:
NAME:
XYZ_to_galcenrect
PURPOSE:
transform XYZ coordinates (wrt Sun) to rectangular Galactocentric coordinates
INPUT:
X - X
Y - Y
Z - Z
Xsun - cylindrical distance to the GC
Zsun - Sun's height above the midplane
_extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition
OUTPUT:
(Xg, Yg, Zg)
HISTORY:
2010-09-24 - Written - Bovy (NYU)
2016-05-12 - Edited to properly take into account the Sun's vertical position; dropped Ysun keyword - Bovy (UofT)
2018-04-18 - Tweaked to be consistent with astropy's Galactocentric frame - Bovy (UofT)
### Response:
def XYZ_to_galcenrect(X,Y,Z,Xsun=1.,Zsun=0.,_extra_rot=True):
"""
NAME:
XYZ_to_galcenrect
PURPOSE:
transform XYZ coordinates (wrt Sun) to rectangular Galactocentric coordinates
INPUT:
X - X
Y - Y
Z - Z
Xsun - cylindrical distance to the GC
Zsun - Sun's height above the midplane
_extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition
OUTPUT:
(Xg, Yg, Zg)
HISTORY:
2010-09-24 - Written - Bovy (NYU)
2016-05-12 - Edited to properly take into account the Sun's vertical position; dropped Ysun keyword - Bovy (UofT)
2018-04-18 - Tweaked to be consistent with astropy's Galactocentric frame - Bovy (UofT)
"""
if _extra_rot:
X,Y,Z= nu.dot(galcen_extra_rot,nu.array([X,Y,Z]))
dgc= nu.sqrt(Xsun**2.+Zsun**2.)
costheta, sintheta= Xsun/dgc, Zsun/dgc
return nu.dot(nu.array([[costheta,0.,-sintheta],
[0.,1.,0.],
[sintheta,0.,costheta]]),
nu.array([-X+dgc,Y,nu.sign(Xsun)*Z])).T |
def walk(textRoot, currentTag, level, prefix=None, postfix=None, unwrapUntilPara=False):
'''
.. note::
This method does not cover all possible input doxygen types! This means that
when an unsupported / unrecognized doxygen tag appears in the xml listing, the
**raw xml will appear on the file page being documented**. This traverser is
greedily designed to work for what testing revealed as the *bare minimum*
required. **Please** see the :ref:`Doxygen ALIASES <doxygen_aliases>` section
for how to bypass invalid documentation coming from Exhale.
Recursive traverser method to parse the input parsed xml tree and convert the nodes
into raw reStructuredText from the input doxygen format. **Not all doxygen markup
types are handled**. The current supported doxygen xml markup tags are:
- ``para``
- ``orderedlist``
- ``itemizedlist``
- ``verbatim`` (specifically: ``embed:rst:leading-asterisk``)
- ``formula``
- ``ref``
- ``emphasis`` (e.g., using `em`_)
- ``computeroutput`` (e.g., using `c`_)
- ``bold`` (e.g., using `b`_)
.. _em: http://www.doxygen.nl/manual/commands.html#cmdem
.. _c: http://www.doxygen.nl/manual/commands.html#cmdc
.. _b: http://www.doxygen.nl/manual/commands.html#cmdb
The goal of this method is to "explode" input ``xml`` data into raw reStructuredText
to put at the top of the file pages. Wielding beautiful soup, this essentially
means that you need to expand every non ``para`` tag into a ``para``. So if an
ordered list appears in the xml, then the raw listing must be built up from the
child nodes. After this is finished, though, the :meth:`bs4.BeautifulSoup.get_text`
method will happily remove all remaining ``para`` tags to produce the final
reStructuredText **provided that** the original "exploded" tags (such as the ordered
list definition and its ``listitem`` children) have been *removed* from the soup.
**Parameters**
``textRoot`` (:class:`~exhale.graph.ExhaleRoot`)
The text root object that is calling this method. This parameter is
necessary in order to retrieve / convert the doxygen ``\\ref SomeClass`` tag
and link it to the appropriate node page. The ``textRoot`` object is not
modified by executing this method.
``currentTag`` (:class:`bs4.element.Tag`)
The current xml tag being processed, either to have its contents directly
modified or unraveled.
``level`` (int)
.. warning::
This variable does **not** represent "recursion depth" (as one would
typically see with a variable like this)!
The **block** level of indentation currently being parsed. Because we are
parsing a tree in order to generate raw reStructuredText code, we need to
maintain a notion of "block level". This means tracking when there are
nested structures such as a list within a list:
.. code-block:: rst
1. This is an outer ordered list.
- There is a nested unordered list.
- It is a child of the outer list.
2. This is another item in the outer list.
The outer ordered (numbers ``1`` and ``2``) list is at indentation level
``0``, and the inner unordered (``-``) list is at indentation level ``1``.
Meaning that level is used as
.. code-block:: py
indent = " " * level
# ... later ...
some_text = "\\n{indent}{text}".format(indent=indent, text=some_text)
to indent the ordered / unordered lists accordingly.
'''
if not currentTag:
return
if prefix:
currentTag.insert_before(prefix)
if postfix:
currentTag.insert_after(postfix)
children = currentTag.findChildren(recursive=False)
indent = " " * level
if currentTag.name == "orderedlist":
idx = 1
for child in children:
walk(textRoot, child, level + 1, "\n{0}{1}. ".format(indent, idx), None, True)
idx += 1
child.unwrap()
currentTag.unwrap()
elif currentTag.name == "itemizedlist":
for child in children:
walk(textRoot, child, level + 1, "\n{0}- ".format(indent), None, True)
child.unwrap()
currentTag.unwrap()
elif currentTag.name == "verbatim":
# TODO: find relevant section in breathe.sphinxrenderer and include the versions
# for both leading /// as well as just plain embed:rst.
leading_asterisk = "embed:rst:leading-asterisk\n*"
if currentTag.string.startswith(leading_asterisk):
cont = currentTag.string.replace(leading_asterisk, "")
cont = textwrap.dedent(cont.replace("\n*", "\n"))
currentTag.string = cont
elif currentTag.name == "formula":
currentTag.string = ":math:`{0}`".format(currentTag.string[1:-1])
elif currentTag.name == "ref":
signal = None
if "refid" not in currentTag.attrs:
signal = "No 'refid' in `ref` tag attributes of file documentation. Attributes were: {0}".format(
currentTag.attrs
)
else:
refid = currentTag.attrs["refid"]
if refid not in textRoot.node_by_refid:
signal = "Found unknown 'refid' of [{0}] in file level documentation.".format(refid)
else:
currentTag.string = ":ref:`{0}`".format(textRoot.node_by_refid[refid].link_name)
if signal:
# << verboseBuild
utils.verbose_log(signal, utils.AnsiColors.BOLD_YELLOW)
elif currentTag.name == "emphasis":
currentTag.string = "*{0}*".format(currentTag.string)
elif currentTag.name == "computeroutput":
currentTag.string = "``{0}``".format(currentTag.string)
elif currentTag.name == "bold":
currentTag.string = "**{0}**".format(currentTag.string)
else:
ctr = 0
for child in children:
c_prefix = None
c_postfix = None
if ctr > 0 and child.name == "para":
c_prefix = "\n{0}".format(indent)
walk(textRoot, child, level, c_prefix, c_postfix)
ctr += 1 | .. note::
This method does not cover all possible input doxygen types! This means that
when an unsupported / unrecognized doxygen tag appears in the xml listing, the
**raw xml will appear on the file page being documented**. This traverser is
greedily designed to work for what testing revealed as the *bare minimum*
required. **Please** see the :ref:`Doxygen ALIASES <doxygen_aliases>` section
for how to bypass invalid documentation coming from Exhale.
Recursive traverser method to parse the input parsed xml tree and convert the nodes
into raw reStructuredText from the input doxygen format. **Not all doxygen markup
types are handled**. The current supported doxygen xml markup tags are:
- ``para``
- ``orderedlist``
- ``itemizedlist``
- ``verbatim`` (specifically: ``embed:rst:leading-asterisk``)
- ``formula``
- ``ref``
- ``emphasis`` (e.g., using `em`_)
- ``computeroutput`` (e.g., using `c`_)
- ``bold`` (e.g., using `b`_)
.. _em: http://www.doxygen.nl/manual/commands.html#cmdem
.. _c: http://www.doxygen.nl/manual/commands.html#cmdc
.. _b: http://www.doxygen.nl/manual/commands.html#cmdb
The goal of this method is to "explode" input ``xml`` data into raw reStructuredText
to put at the top of the file pages. Wielding beautiful soup, this essentially
means that you need to expand every non ``para`` tag into a ``para``. So if an
ordered list appears in the xml, then the raw listing must be built up from the
child nodes. After this is finished, though, the :meth:`bs4.BeautifulSoup.get_text`
method will happily remove all remaining ``para`` tags to produce the final
reStructuredText **provided that** the original "exploded" tags (such as the ordered
list definition and its ``listitem`` children) have been *removed* from the soup.
**Parameters**
``textRoot`` (:class:`~exhale.graph.ExhaleRoot`)
The text root object that is calling this method. This parameter is
necessary in order to retrieve / convert the doxygen ``\\ref SomeClass`` tag
and link it to the appropriate node page. The ``textRoot`` object is not
modified by executing this method.
``currentTag`` (:class:`bs4.element.Tag`)
The current xml tag being processed, either to have its contents directly
modified or unraveled.
``level`` (int)
.. warning::
This variable does **not** represent "recursion depth" (as one would
typically see with a variable like this)!
The **block** level of indentation currently being parsed. Because we are
parsing a tree in order to generate raw reStructuredText code, we need to
maintain a notion of "block level". This means tracking when there are
nested structures such as a list within a list:
.. code-block:: rst
1. This is an outer ordered list.
- There is a nested unordered list.
- It is a child of the outer list.
2. This is another item in the outer list.
The outer ordered (numbers ``1`` and ``2``) list is at indentation level
``0``, and the inner unordered (``-``) list is at indentation level ``1``.
Meaning that level is used as
.. code-block:: py
indent = " " * level
# ... later ...
some_text = "\\n{indent}{text}".format(indent=indent, text=some_text)
to indent the ordered / unordered lists accordingly. | Below is the instruction that describes the task:
### Input:
.. note::
This method does not cover all possible input doxygen types! This means that
when an unsupported / unrecognized doxygen tag appears in the xml listing, the
**raw xml will appear on the file page being documented**. This traverser is
greedily designed to work for what testing revealed as the *bare minimum*
required. **Please** see the :ref:`Doxygen ALIASES <doxygen_aliases>` section
for how to bypass invalid documentation coming from Exhale.
Recursive traverser method to parse the input parsed xml tree and convert the nodes
into raw reStructuredText from the input doxygen format. **Not all doxygen markup
types are handled**. The current supported doxygen xml markup tags are:
- ``para``
- ``orderedlist``
- ``itemizedlist``
- ``verbatim`` (specifically: ``embed:rst:leading-asterisk``)
- ``formula``
- ``ref``
- ``emphasis`` (e.g., using `em`_)
- ``computeroutput`` (e.g., using `c`_)
- ``bold`` (e.g., using `b`_)
.. _em: http://www.doxygen.nl/manual/commands.html#cmdem
.. _c: http://www.doxygen.nl/manual/commands.html#cmdc
.. _b: http://www.doxygen.nl/manual/commands.html#cmdb
The goal of this method is to "explode" input ``xml`` data into raw reStructuredText
to put at the top of the file pages. Wielding beautiful soup, this essentially
means that you need to expand every non ``para`` tag into a ``para``. So if an
ordered list appears in the xml, then the raw listing must be built up from the
child nodes. After this is finished, though, the :meth:`bs4.BeautifulSoup.get_text`
method will happily remove all remaining ``para`` tags to produce the final
reStructuredText **provided that** the original "exploded" tags (such as the ordered
list definition and its ``listitem`` children) have been *removed* from the soup.
**Parameters**
``textRoot`` (:class:`~exhale.graph.ExhaleRoot`)
The text root object that is calling this method. This parameter is
necessary in order to retrieve / convert the doxygen ``\\ref SomeClass`` tag
and link it to the appropriate node page. The ``textRoot`` object is not
modified by executing this method.
``currentTag`` (:class:`bs4.element.Tag`)
The current xml tag being processed, either to have its contents directly
modified or unraveled.
``level`` (int)
.. warning::
This variable does **not** represent "recursion depth" (as one would
typically see with a variable like this)!
The **block** level of indentation currently being parsed. Because we are
parsing a tree in order to generate raw reStructuredText code, we need to
maintain a notion of "block level". This means tracking when there are
nested structures such as a list within a list:
.. code-block:: rst
1. This is an outer ordered list.
- There is a nested unordered list.
- It is a child of the outer list.
2. This is another item in the outer list.
The outer ordered (numbers ``1`` and ``2``) list is at indentation level
``0``, and the inner unordered (``-``) list is at indentation level ``1``.
Meaning that level is used as
.. code-block:: py
indent = " " * level
# ... later ...
some_text = "\\n{indent}{text}".format(indent=indent, text=some_text)
to indent the ordered / unordered lists accordingly.
### Response:
def walk(textRoot, currentTag, level, prefix=None, postfix=None, unwrapUntilPara=False):
'''
.. note::
This method does not cover all possible input doxygen types! This means that
when an unsupported / unrecognized doxygen tag appears in the xml listing, the
**raw xml will appear on the file page being documented**. This traverser is
greedily designed to work for what testing revealed as the *bare minimum*
required. **Please** see the :ref:`Doxygen ALIASES <doxygen_aliases>` section
for how to bypass invalid documentation coming from Exhale.
Recursive traverser method to parse the input parsed xml tree and convert the nodes
into raw reStructuredText from the input doxygen format. **Not all doxygen markup
types are handled**. The current supported doxygen xml markup tags are:
- ``para``
- ``orderedlist``
- ``itemizedlist``
- ``verbatim`` (specifically: ``embed:rst:leading-asterisk``)
- ``formula``
- ``ref``
- ``emphasis`` (e.g., using `em`_)
- ``computeroutput`` (e.g., using `c`_)
- ``bold`` (e.g., using `b`_)
.. _em: http://www.doxygen.nl/manual/commands.html#cmdem
.. _c: http://www.doxygen.nl/manual/commands.html#cmdc
.. _b: http://www.doxygen.nl/manual/commands.html#cmdb
The goal of this method is to "explode" input ``xml`` data into raw reStructuredText
to put at the top of the file pages. Wielding beautiful soup, this essentially
means that you need to expand every non ``para`` tag into a ``para``. So if an
ordered list appears in the xml, then the raw listing must be built up from the
child nodes. After this is finished, though, the :meth:`bs4.BeautifulSoup.get_text`
method will happily remove all remaining ``para`` tags to produce the final
reStructuredText **provided that** the original "exploded" tags (such as the ordered
list definition and its ``listitem`` children) have been *removed* from the soup.
**Parameters**
``textRoot`` (:class:`~exhale.graph.ExhaleRoot`)
The text root object that is calling this method. This parameter is
necessary in order to retrieve / convert the doxygen ``\\ref SomeClass`` tag
and link it to the appropriate node page. The ``textRoot`` object is not
modified by executing this method.
``currentTag`` (:class:`bs4.element.Tag`)
The current xml tag being processed, either to have its contents directly
modified or unraveled.
``level`` (int)
.. warning::
This variable does **not** represent "recursion depth" (as one would
typically see with a variable like this)!
The **block** level of indentation currently being parsed. Because we are
parsing a tree in order to generate raw reStructuredText code, we need to
maintain a notion of "block level". This means tracking when there are
nested structures such as a list within a list:
.. code-block:: rst
1. This is an outer ordered list.
- There is a nested unordered list.
- It is a child of the outer list.
2. This is another item in the outer list.
The outer ordered (numbers ``1`` and ``2``) list is at indentation level
``0``, and the inner unordered (``-``) list is at indentation level ``1``.
Meaning that level is used as
.. code-block:: py
indent = " " * level
# ... later ...
some_text = "\\n{indent}{text}".format(indent=indent, text=some_text)
to indent the ordered / unordered lists accordingly.
'''
if not currentTag:
return
if prefix:
currentTag.insert_before(prefix)
if postfix:
currentTag.insert_after(postfix)
children = currentTag.findChildren(recursive=False)
indent = " " * level
if currentTag.name == "orderedlist":
idx = 1
for child in children:
walk(textRoot, child, level + 1, "\n{0}{1}. ".format(indent, idx), None, True)
idx += 1
child.unwrap()
currentTag.unwrap()
elif currentTag.name == "itemizedlist":
for child in children:
walk(textRoot, child, level + 1, "\n{0}- ".format(indent), None, True)
child.unwrap()
currentTag.unwrap()
elif currentTag.name == "verbatim":
# TODO: find relevant section in breathe.sphinxrenderer and include the versions
# for both leading /// as well as just plain embed:rst.
leading_asterisk = "embed:rst:leading-asterisk\n*"
if currentTag.string.startswith(leading_asterisk):
cont = currentTag.string.replace(leading_asterisk, "")
cont = textwrap.dedent(cont.replace("\n*", "\n"))
currentTag.string = cont
elif currentTag.name == "formula":
currentTag.string = ":math:`{0}`".format(currentTag.string[1:-1])
elif currentTag.name == "ref":
signal = None
if "refid" not in currentTag.attrs:
signal = "No 'refid' in `ref` tag attributes of file documentation. Attributes were: {0}".format(
currentTag.attrs
)
else:
refid = currentTag.attrs["refid"]
if refid not in textRoot.node_by_refid:
signal = "Found unknown 'refid' of [{0}] in file level documentation.".format(refid)
else:
currentTag.string = ":ref:`{0}`".format(textRoot.node_by_refid[refid].link_name)
if signal:
# << verboseBuild
utils.verbose_log(signal, utils.AnsiColors.BOLD_YELLOW)
elif currentTag.name == "emphasis":
currentTag.string = "*{0}*".format(currentTag.string)
elif currentTag.name == "computeroutput":
currentTag.string = "``{0}``".format(currentTag.string)
elif currentTag.name == "bold":
currentTag.string = "**{0}**".format(currentTag.string)
else:
ctr = 0
for child in children:
c_prefix = None
c_postfix = None
if ctr > 0 and child.name == "para":
c_prefix = "\n{0}".format(indent)
walk(textRoot, child, level, c_prefix, c_postfix)
ctr += 1 |
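The traverser only makes sense inside Exhale's graph, but the core BeautifulSoup trick it relies on (prefix each list item, unwrap the wrappers, then let `get_text()` flatten what remains) can be demonstrated standalone on a tiny doxygen-like snippet. Everything below is illustrative and independent of Exhale:

```python
from bs4 import BeautifulSoup

xml = (
    "<para>Steps:"
    "<orderedlist>"
    "<listitem><para>configure</para></listitem>"
    "<listitem><para>build</para></listitem>"
    "</orderedlist>"
    "</para>"
)
soup = BeautifulSoup(xml, "html.parser")

ordered = soup.find("orderedlist")
for idx, item in enumerate(ordered.find_all("listitem", recursive=False), start=1):
    item.insert_before("\n{0}. ".format(idx))  # same prefix trick as walk()
    item.unwrap()                              # drop the <listitem> wrapper, keep its children
ordered.unwrap()                               # drop the <orderedlist> wrapper

print(soup.get_text())
# Steps:
# 1. configure
# 2. build
```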
def list_dscp_marking_rules(self, policy_id,
retrieve_all=True, **_params):
"""Fetches a list of all DSCP marking rules for the given policy."""
return self.list('dscp_marking_rules',
self.qos_dscp_marking_rules_path % policy_id,
retrieve_all, **_params) | Fetches a list of all DSCP marking rules for the given policy. | Below is the instruction that describes the task:
### Input:
Fetches a list of all DSCP marking rules for the given policy.
### Response:
def list_dscp_marking_rules(self, policy_id,
retrieve_all=True, **_params):
"""Fetches a list of all DSCP marking rules for the given policy."""
return self.list('dscp_marking_rules',
self.qos_dscp_marking_rules_path % policy_id,
retrieve_all, **_params) |
def commit(self, *args, **kwargs):
"""Store changes on current instance in database and index it."""
return super(Deposit, self).commit(*args, **kwargs) | Store changes on current instance in database and index it. | Below is the instruction that describes the task:
### Input:
Store changes on current instance in database and index it.
### Response:
def commit(self, *args, **kwargs):
"""Store changes on current instance in database and index it."""
return super(Deposit, self).commit(*args, **kwargs) |
def tokenize(text=""):
"""
Tokenize text into words.
@param text: the input text.
@type text: unicode.
@return: list of words.
@rtype: list.
"""
if text == '':
return []
else:
# split tokens
mylist = TOKEN_PATTERN.split(text)
# don't remove newline \n
mylist = [TOKEN_REPLACE.sub('', x) for x in mylist if x]
# remove empty substring
mylist = [x for x in mylist if x]
return mylist | Tokenize text into words.
@param text: the input text.
@type text: unicode.
@return: list of words.
@rtype: list. | Below is the instruction that describes the task:
### Input:
Tokenize text into words.
@param text: the input text.
@type text: unicode.
@return: list of words.
@rtype: list.
### Response:
def tokenize(text=""):
"""
Tokenize text into words.
@param text: the input text.
@type text: unicode.
@return: list of words.
@rtype: list.
"""
if text == '':
return []
else:
# split tokens
mylist = TOKEN_PATTERN.split(text)
# don't remove newline \n
mylist = [TOKEN_REPLACE.sub('', x) for x in mylist if x]
# remove empty substring
mylist = [x for x in mylist if x]
return mylist |
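`TOKEN_PATTERN` and `TOKEN_REPLACE` are module-level compiled regexes that are not shown above. The sketch below mirrors the function with simplified stand-in patterns just to make the flow runnable; the original module's patterns are more elaborate.

```python
import re

TOKEN_PATTERN = re.compile(r"\W+", re.UNICODE)  # stand-in: split on runs of non-word characters
TOKEN_REPLACE = re.compile(r"\s+")              # stand-in: strip stray whitespace from tokens

def tokenize(text=""):
    if text == '':
        return []
    mylist = TOKEN_PATTERN.split(text)                         # split into candidate tokens
    mylist = [TOKEN_REPLACE.sub('', x) for x in mylist if x]   # clean each token
    return [x for x in mylist if x]                            # drop empty leftovers

print(tokenize("hello, world!"))  # ['hello', 'world']
print(tokenize(""))               # []
```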
async def pause(self, *, device: Optional[SomeDevice] = None):
"""Pause playback on the user’s account.
Parameters
----------
device : Optional[:obj:`SomeDevice`]
The Device object or id of the device this command is targeting.
If not supplied, the user’s currently active device is the target.
"""
await self._user.http.pause_playback(device_id=str(device)) | Pause playback on the user’s account.
Parameters
----------
device : Optional[:obj:`SomeDevice`]
The Device object or id of the device this command is targeting.
If not supplied, the user’s currently active device is the target. | Below is the instruction that describes the task:
### Input:
Pause playback on the user’s account.
Parameters
----------
device : Optional[:obj:`SomeDevice`]
The Device object or id of the device this command is targeting.
If not supplied, the user’s currently active device is the target.
### Response:
async def pause(self, *, device: Optional[SomeDevice] = None):
"""Pause playback on the user’s account.
Parameters
----------
device : Optional[:obj:`SomeDevice`]
The Device object or id of the device this command is targeting.
If not supplied, the user’s currently active device is the target.
"""
await self._user.http.pause_playback(device_id=str(device)) |
def include_flags(self, arch):
'''Returns a string with the include folders'''
openssl_includes = join(self.get_build_dir(arch.arch), 'include')
return (' -I' + openssl_includes +
' -I' + join(openssl_includes, 'internal') +
' -I' + join(openssl_includes, 'openssl')) | Returns a string with the include folders | Below is the instruction that describes the task:
### Input:
Returns a string with the include folders
### Response:
def include_flags(self, arch):
'''Returns a string with the include folders'''
openssl_includes = join(self.get_build_dir(arch.arch), 'include')
return (' -I' + openssl_includes +
' -I' + join(openssl_includes, 'internal') +
' -I' + join(openssl_includes, 'openssl')) |
def get_cid():
"""Return the currently set correlation id (if any).
If no correlation id has been set and ``CID_GENERATE`` is enabled
in the settings, a new correlation id is set and returned.
FIXME (dbaty): in version 2, just `return getattr(_thread_locals, 'CID', None)`
We want the simplest thing here and let `generate_new_cid` do the job.
"""
cid = getattr(_thread_locals, 'CID', None)
if cid is None and getattr(settings, 'CID_GENERATE', False):
cid = str(uuid.uuid4())
set_cid(cid)
return cid | Return the currently set correlation id (if any).
If no correlation id has been set and ``CID_GENERATE`` is enabled
in the settings, a new correlation id is set and returned.
FIXME (dbaty): in version 2, just `return getattr(_thread_locals, 'CID', None)`
We want the simplest thing here and let `generate_new_cid` do the job. | Below is the instruction that describes the task:
### Input:
Return the currently set correlation id (if any).
If no correlation id has been set and ``CID_GENERATE`` is enabled
in the settings, a new correlation id is set and returned.
FIXME (dbaty): in version 2, just `return getattr(_thread_locals, 'CID', None)`
We want the simplest thing here and let `generate_new_cid` do the job.
### Response:
def get_cid():
"""Return the currently set correlation id (if any).
If no correlation id has been set and ``CID_GENERATE`` is enabled
in the settings, a new correlation id is set and returned.
FIXME (dbaty): in version 2, just `return getattr(_thread_locals, 'CID', None)`
We want the simplest thing here and let `generate_new_cid` do the job.
"""
cid = getattr(_thread_locals, 'CID', None)
if cid is None and getattr(settings, 'CID_GENERATE', False):
cid = str(uuid.uuid4())
set_cid(cid)
return cid |
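A self-contained sketch of the thread-local pattern this relies on. `_thread_locals`, `settings`, and `set_cid` live elsewhere in the package, so the stand-ins below are assumptions made for illustration:

```python
import threading
import uuid

_thread_locals = threading.local()

class settings:  # stand-in for django.conf.settings
    CID_GENERATE = True

def set_cid(cid):
    _thread_locals.CID = cid

def get_cid():
    cid = getattr(_thread_locals, 'CID', None)
    if cid is None and getattr(settings, 'CID_GENERATE', False):
        cid = str(uuid.uuid4())
        set_cid(cid)
    return cid

first = get_cid()
print(first == get_cid())  # True -- the generated id is cached on the current thread
```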
def ec2_security_group_security_group_id(self, lookup, default=None):
"""
Args:
lookup: the friendly name of a security group to look up
default: the optional value to return if lookup failed; returns None if not set
Returns:
Security group ID if target found or default/None if no match
"""
try:
response = EFAwsResolver.__CLIENTS["ec2"].describe_security_groups(Filters=[{
'Name':'group-name', 'Values':[lookup]
}])
except:
return default
if len(response["SecurityGroups"]) > 0:
return response["SecurityGroups"][0]["GroupId"]
else:
return default | Args:
lookup: the friendly name of a security group to look up
default: the optional value to return if lookup failed; returns None if not set
Returns:
Security group ID if target found or default/None if no match | Below is the instruction that describes the task:
### Input:
Args:
lookup: the friendly name of a security group to look up
default: the optional value to return if lookup failed; returns None if not set
Returns:
Security group ID if target found or default/None if no match
### Response:
def ec2_security_group_security_group_id(self, lookup, default=None):
"""
Args:
lookup: the friendly name of a security group to look up
default: the optional value to return if lookup failed; returns None if not set
Returns:
Security group ID if target found or default/None if no match
"""
try:
response = EFAwsResolver.__CLIENTS["ec2"].describe_security_groups(Filters=[{
'Name':'group-name', 'Values':[lookup]
}])
except:
return default
if len(response["SecurityGroups"]) > 0:
return response["SecurityGroups"][0]["GroupId"]
else:
return default |
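The underlying boto3 call can be tried standalone (requires AWS credentials; `my-sg` is a placeholder group name):

```python
import boto3

ec2 = boto3.client("ec2")
response = ec2.describe_security_groups(
    Filters=[{"Name": "group-name", "Values": ["my-sg"]}]  # placeholder lookup value
)
groups = response["SecurityGroups"]
print(groups[0]["GroupId"] if groups else None)
```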
def _trim_front(strings):
"""
Trims leading spaces that are common to all strings.
"""
trimmed = strings
while len(strings) > 0 and all(x[0] == ' ' for x in trimmed):
trimmed = [x[1:] for x in trimmed]
return trimmed | Trims leading spaces that are common to all strings. | Below is the instruction that describes the task:
### Input:
Trims leading spaces that are common to all strings.
### Response:
def _trim_front(strings):
"""
Trims leading spaces that are common to all strings.
"""
trimmed = strings
while len(strings) > 0 and all(x[0] == ' ' for x in trimmed):
trimmed = [x[1:] for x in trimmed]
return trimmed |
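A worked example of the common-leading-space trimming; the loop stops as soon as any string no longer starts with a space.

```python
def _trim_front(strings):
    trimmed = strings
    while len(strings) > 0 and all(x[0] == ' ' for x in trimmed):
        trimmed = [x[1:] for x in trimmed]
    return trimmed

print(_trim_front(['  1.0', '  2.5']))  # ['1.0', '2.5']   -- two shared leading spaces removed
print(_trim_front(['  1.0', ' 2.5']))   # [' 1.0', '2.5']  -- stops once '2.5' has no leading space
```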
def get_program_course_keys(self, program_uuid):
"""
Get a list of the course IDs (not course run IDs) contained in the program.
Arguments:
program_uuid (str): Program UUID in string form
Returns:
list(str): List of course keys in string form that are included in the program
"""
program_details = self.get_program_by_uuid(program_uuid)
if not program_details:
return []
return [course['key'] for course in program_details.get('courses', [])] | Get a list of the course IDs (not course run IDs) contained in the program.
Arguments:
program_uuid (str): Program UUID in string form
Returns:
list(str): List of course keys in string form that are included in the program | Below is the instruction that describes the task:
### Input:
Get a list of the course IDs (not course run IDs) contained in the program.
Arguments:
program_uuid (str): Program UUID in string form
Returns:
list(str): List of course keys in string form that are included in the program
### Response:
def get_program_course_keys(self, program_uuid):
"""
Get a list of the course IDs (not course run IDs) contained in the program.
Arguments:
program_uuid (str): Program UUID in string form
Returns:
list(str): List of course keys in string form that are included in the program
"""
program_details = self.get_program_by_uuid(program_uuid)
if not program_details:
return []
return [course['key'] for course in program_details.get('courses', [])] |
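`get_program_by_uuid` is another method on the same client and is not shown; given the kind of payload it returns, the extraction itself is just a list comprehension. The dictionary below is an illustrative shape, not real catalog data.

```python
program_details = {
    "uuid": "aaaaaaaa-0000-0000-0000-000000000000",  # placeholder
    "courses": [
        {"key": "edX+DemoX", "title": "Demo Course"},
        {"key": "edX+OtherX", "title": "Another Course"},
    ],
}

course_keys = [course["key"] for course in program_details.get("courses", [])]
print(course_keys)  # ['edX+DemoX', 'edX+OtherX']
```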
def copyDirectoryToHdfs(localDirectory, hdfsDirectory, hdfsClient):
'''Copy directory from local to HDFS'''
if not os.path.exists(localDirectory):
raise Exception('Local Directory does not exist!')
hdfsClient.mkdirs(hdfsDirectory)
result = True
for file in os.listdir(localDirectory):
file_path = os.path.join(localDirectory, file)
if os.path.isdir(file_path):
hdfs_directory = os.path.join(hdfsDirectory, file)
try:
result = result and copyDirectoryToHdfs(file_path, hdfs_directory, hdfsClient)
except Exception as exception:
nni_log(LogType.Error, 'Copy local directory {0} to hdfs directory {1} error: {2}'.format(file_path, hdfs_directory, str(exception)))
result = False
else:
hdfs_file_path = os.path.join(hdfsDirectory, file)
try:
result = result and copyFileToHdfs(file_path, hdfs_file_path, hdfsClient)
except Exception as exception:
nni_log(LogType.Error, 'Copy local file {0} to hdfs {1} error: {2}'.format(file_path, hdfs_file_path, str(exception)))
result = False
return result | Copy directory from local to HDFS | Below is the instruction that describes the task:
### Input:
Copy directory from local to HDFS
### Response:
def copyDirectoryToHdfs(localDirectory, hdfsDirectory, hdfsClient):
'''Copy directory from local to HDFS'''
if not os.path.exists(localDirectory):
raise Exception('Local Directory does not exist!')
hdfsClient.mkdirs(hdfsDirectory)
result = True
for file in os.listdir(localDirectory):
file_path = os.path.join(localDirectory, file)
if os.path.isdir(file_path):
hdfs_directory = os.path.join(hdfsDirectory, file)
try:
result = result and copyDirectoryToHdfs(file_path, hdfs_directory, hdfsClient)
except Exception as exception:
nni_log(LogType.Error, 'Copy local directory {0} to hdfs directory {1} error: {2}'.format(file_path, hdfs_directory, str(exception)))
result = False
else:
hdfs_file_path = os.path.join(hdfsDirectory, file)
try:
result = result and copyFileToHdfs(file_path, hdfs_file_path, hdfsClient)
except Exception as exception:
nni_log(LogType.Error, 'Copy local file {0} to hdfs {1} error: {2}'.format(file_path, hdfs_file_path, str(exception)))
result = False
return result |
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number)) | Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number`` | Below is the instruction that describes the task:
### Input:
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
### Response:
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number)) |
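A hedged usage sketch with the `rejson` Python client; it assumes a Redis server with the RedisJSON module loaded, and the host, key, and path below are placeholders.

```python
from rejson import Client, Path

rj = Client(host="localhost", port=6379, decode_responses=True)  # assumed local server

rj.jsonset("counter", Path.rootPath(), {"hits": 10})
rj.jsonnumincrby("counter", Path(".hits"), 5)
print(rj.jsonget("counter", Path(".hits")))  # 15
```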
def create_tasks(self, wfk_file, scr_input):
"""
Create the SCR tasks and register them in self.
Args:
wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
scr_input: Input for the screening calculation.
"""
assert len(self) == 0
wfk_file = self.wfk_file = os.path.abspath(wfk_file)
# Build a temporary work in the tmpdir that will use a shell manager
# to run ABINIT in order to get the list of q-points for the screening.
shell_manager = self.manager.to_shell_manager(mpi_procs=1)
w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager)
fake_input = scr_input.deepcopy()
fake_task = w.register(fake_input)
w.allocate()
w.build()
# Create the symbolic link and add the magic value
# nqpdm = -1 to the input to get the list of q-points.
fake_task.inlink_file(wfk_file)
fake_task.set_vars({"nqptdm": -1})
fake_task.start_and_wait()
# Parse the section with the q-points
with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader:
qpoints = reader.read_value("reduced_coordinates_of_kpoints")
#print("qpoints)
# Now we can register the task for the different q-points
for qpoint in qpoints:
qptdm_input = scr_input.deepcopy()
qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)
new_task = self.register_scr_task(qptdm_input, manager=self.manager)
# Add the garbage collector.
if self.flow.gc is not None:
new_task.set_gc(self.flow.gc)
self.allocate() | Create the SCR tasks and register them in self.
Args:
wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
scr_input: Input for the screening calculation. | Below is the instruction that describes the task:
### Input:
Create the SCR tasks and register them in self.
Args:
wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
scr_input: Input for the screening calculation.
### Response:
def create_tasks(self, wfk_file, scr_input):
"""
Create the SCR tasks and register them in self.
Args:
wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
scr_input: Input for the screening calculation.
"""
assert len(self) == 0
wfk_file = self.wfk_file = os.path.abspath(wfk_file)
# Build a temporary work in the tmpdir that will use a shell manager
# to run ABINIT in order to get the list of q-points for the screening.
shell_manager = self.manager.to_shell_manager(mpi_procs=1)
w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager)
fake_input = scr_input.deepcopy()
fake_task = w.register(fake_input)
w.allocate()
w.build()
# Create the symbolic link and add the magic value
# nqpdm = -1 to the input to get the list of q-points.
fake_task.inlink_file(wfk_file)
fake_task.set_vars({"nqptdm": -1})
fake_task.start_and_wait()
# Parse the section with the q-points
with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader:
qpoints = reader.read_value("reduced_coordinates_of_kpoints")
#print("qpoints)
# Now we can register the task for the different q-points
for qpoint in qpoints:
qptdm_input = scr_input.deepcopy()
qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)
new_task = self.register_scr_task(qptdm_input, manager=self.manager)
# Add the garbage collector.
if self.flow.gc is not None:
new_task.set_gc(self.flow.gc)
self.allocate() |
def Close(self):
"""Closes the file system.
Raises:
IOError: if the file system object was not opened or the close failed.
OSError: if the file system object was not opened or the close failed.
"""
if not self._is_open:
raise IOError('Not opened.')
if not self._is_cached:
close_file_system = True
elif self._resolver_context.ReleaseFileSystem(self):
self._is_cached = False
close_file_system = True
else:
close_file_system = False
if close_file_system:
self._Close()
self._is_open = False
self._path_spec = None | Closes the file system.
Raises:
IOError: if the file system object was not opened or the close failed.
OSError: if the file system object was not opened or the close failed. | Below is the instruction that describes the task:
### Input:
Closes the file system.
Raises:
IOError: if the file system object was not opened or the close failed.
OSError: if the file system object was not opened or the close failed.
### Response:
def Close(self):
"""Closes the file system.
Raises:
IOError: if the file system object was not opened or the close failed.
OSError: if the file system object was not opened or the close failed.
"""
if not self._is_open:
raise IOError('Not opened.')
if not self._is_cached:
close_file_system = True
elif self._resolver_context.ReleaseFileSystem(self):
self._is_cached = False
close_file_system = True
else:
close_file_system = False
if close_file_system:
self._Close()
self._is_open = False
self._path_spec = None |
def searchproject(self, search, page=1, per_page=20):
"""
Search for projects by name which are accessible to the authenticated user
:param search: Query to search for
:param page: Page number
:param per_page: Records per page
:return: list of results
"""
data = {'page': page, 'per_page': per_page}
request = requests.get("{0}/{1}".format(self.search_url, search), params=data,
verify=self.verify_ssl, auth=self.auth, headers=self.headers, timeout=self.timeout)
if request.status_code == 200:
return request.json()
else:
return False | Search for projects by name which are accessible to the authenticated user
:param search: Query to search for
:param page: Page number
:param per_page: Records per page
:return: list of results | Below is the instruction that describes the task:
### Input:
Search for projects by name which are accessible to the authenticated user
:param search: Query to search for
:param page: Page number
:param per_page: Records per page
:return: list of results
### Response:
def searchproject(self, search, page=1, per_page=20):
"""
Search for projects by name which are accessible to the authenticated user
:param search: Query to search for
:param page: Page number
:param per_page: Records per page
:return: list of results
"""
data = {'page': page, 'per_page': per_page}
request = requests.get("{0}/{1}".format(self.search_url, search), params=data,
verify=self.verify_ssl, auth=self.auth, headers=self.headers, timeout=self.timeout)
if request.status_code == 200:
return request.json()
else:
return False |
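Stripped of the client plumbing, this is a plain authenticated GET against a GitLab search endpoint. A hedged `requests` sketch follows; the host, token, and API path are placeholders, and the exact `search_url` the client uses is configured elsewhere.

```python
import requests

GITLAB = "https://gitlab.example.com"        # placeholder host
HEADERS = {"PRIVATE-TOKEN": "<your-token>"}  # placeholder token

def search_projects(query, page=1, per_page=20):
    resp = requests.get(
        "{0}/api/v4/projects".format(GITLAB),
        params={"search": query, "page": page, "per_page": per_page},
        headers=HEADERS,
        timeout=10,
    )
    return resp.json() if resp.status_code == 200 else False

# projects = search_projects("my-repo")
```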
def blocks(self):
"""
The RDD of sub-matrix blocks
((blockRowIndex, blockColIndex), sub-matrix) that form this
distributed matrix.
>>> mat = BlockMatrix(
... sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2)
>>> blocks = mat.blocks
>>> blocks.first()
((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0))
"""
# We use DataFrames for serialization of sub-matrix blocks
# from Java, so we first convert the RDD of blocks to a
# DataFrame on the Scala/Java side. Then we map each Row in
# the DataFrame back to a sub-matrix block on this side.
blocks_df = callMLlibFunc("getMatrixBlocks", self._java_matrix_wrapper._java_model)
blocks = blocks_df.rdd.map(lambda row: ((row[0][0], row[0][1]), row[1]))
return blocks | The RDD of sub-matrix blocks
((blockRowIndex, blockColIndex), sub-matrix) that form this
distributed matrix.
>>> mat = BlockMatrix(
... sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2)
>>> blocks = mat.blocks
>>> blocks.first()
((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0)) | Below is the instruction that describes the task:
### Input:
The RDD of sub-matrix blocks
((blockRowIndex, blockColIndex), sub-matrix) that form this
distributed matrix.
>>> mat = BlockMatrix(
... sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2)
>>> blocks = mat.blocks
>>> blocks.first()
((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0))
### Response:
def blocks(self):
"""
The RDD of sub-matrix blocks
((blockRowIndex, blockColIndex), sub-matrix) that form this
distributed matrix.
>>> mat = BlockMatrix(
... sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2)
>>> blocks = mat.blocks
>>> blocks.first()
((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0))
"""
# We use DataFrames for serialization of sub-matrix blocks
# from Java, so we first convert the RDD of blocks to a
# DataFrame on the Scala/Java side. Then we map each Row in
# the DataFrame back to a sub-matrix block on this side.
blocks_df = callMLlibFunc("getMatrixBlocks", self._java_matrix_wrapper._java_model)
blocks = blocks_df.rdd.map(lambda row: ((row[0][0], row[0][1]), row[1]))
return blocks |
def remove(self, path, recursive=True):
"""
Remove a file or directory from S3.
:param path: File or directory to remove
:param recursive: Boolean indicator to remove object and children
:return: Boolean indicator denoting success of the removal of 1 or more files
"""
if not self.exists(path):
logger.debug('Could not delete %s; path does not exist', path)
return False
(bucket, key) = self._path_to_bucket_and_key(path)
s3_bucket = self.s3.Bucket(bucket)
# root
if self._is_root(key):
raise InvalidDeleteException('Cannot delete root of bucket at path %s' % path)
# file
if self._exists(bucket, key):
self.s3.meta.client.delete_object(Bucket=bucket, Key=key)
logger.debug('Deleting %s from bucket %s', key, bucket)
return True
if self.isdir(path) and not recursive:
raise InvalidDeleteException('Path %s is a directory. Must use recursive delete' % path)
delete_key_list = [{'Key': obj.key} for obj in s3_bucket.objects.filter(Prefix=self._add_path_delimiter(key))]
# delete the directory marker file if it exists
if self._exists(bucket, '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)):
delete_key_list.append({'Key': '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)})
if len(delete_key_list) > 0:
n = 1000
for i in range(0, len(delete_key_list), n):
self.s3.meta.client.delete_objects(Bucket=bucket, Delete={'Objects': delete_key_list[i: i + n]})
return True
return False | Remove a file or directory from S3.
:param path: File or directory to remove
:param recursive: Boolean indicator to remove object and children
:return: Boolean indicator denoting success of the removal of 1 or more files | Below is the instruction that describes the task:
### Input:
Remove a file or directory from S3.
:param path: File or directory to remove
:param recursive: Boolean indicator to remove object and children
:return: Boolean indicator denoting success of the removal of 1 or more files
### Response:
def remove(self, path, recursive=True):
"""
Remove a file or directory from S3.
:param path: File or directory to remove
:param recursive: Boolean indicator to remove object and children
:return: Boolean indicator denoting success of the removal of 1 or more files
"""
if not self.exists(path):
logger.debug('Could not delete %s; path does not exist', path)
return False
(bucket, key) = self._path_to_bucket_and_key(path)
s3_bucket = self.s3.Bucket(bucket)
# root
if self._is_root(key):
raise InvalidDeleteException('Cannot delete root of bucket at path %s' % path)
# file
if self._exists(bucket, key):
self.s3.meta.client.delete_object(Bucket=bucket, Key=key)
logger.debug('Deleting %s from bucket %s', key, bucket)
return True
if self.isdir(path) and not recursive:
raise InvalidDeleteException('Path %s is a directory. Must use recursive delete' % path)
delete_key_list = [{'Key': obj.key} for obj in s3_bucket.objects.filter(Prefix=self._add_path_delimiter(key))]
# delete the directory marker file if it exists
if self._exists(bucket, '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)):
delete_key_list.append({'Key': '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)})
if len(delete_key_list) > 0:
n = 1000
for i in range(0, len(delete_key_list), n):
self.s3.meta.client.delete_objects(Bucket=bucket, Delete={'Objects': delete_key_list[i: i + n]})
return True
return False |
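The batching detail (S3's `delete_objects` accepts at most 1000 keys per call) can be shown standalone with boto3. Bucket and prefix are placeholders, and the snippet really deletes objects, so treat it as a sketch only.

```python
import boto3

s3 = boto3.resource("s3")
bucket_name, prefix = "my-bucket", "logs/2019/"  # placeholders

bucket = s3.Bucket(bucket_name)
keys = [{"Key": obj.key} for obj in bucket.objects.filter(Prefix=prefix)]

for i in range(0, len(keys), 1000):  # S3 limit: at most 1000 keys per delete_objects call
    s3.meta.client.delete_objects(
        Bucket=bucket_name,
        Delete={"Objects": keys[i:i + 1000]},
    )
```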
def update_dataset(self, dataset, fields, retry=DEFAULT_RETRY):
"""Change some fields of a dataset.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None`` in
``dataset``, it will be deleted.
If ``dataset.etag`` is not ``None``, the update will only
succeed if the dataset on the server has the same ETag. Thus
reading a dataset with ``get_dataset``, changing its fields,
and then passing it to ``update_dataset`` will ensure that the changes
will only be saved if no modifications to the dataset occurred
since the read.
Args:
dataset (google.cloud.bigquery.dataset.Dataset):
The dataset to update.
fields (Sequence[str]):
The properties of ``dataset`` to change (e.g. "friendly_name").
retry (google.api_core.retry.Retry, optional):
How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
The modified ``Dataset`` instance.
"""
partial = dataset._build_resource(fields)
if dataset.etag is not None:
headers = {"If-Match": dataset.etag}
else:
headers = None
api_response = self._call_api(
retry, method="PATCH", path=dataset.path, data=partial, headers=headers
)
return Dataset.from_api_repr(api_response) | Change some fields of a dataset.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None`` in
``dataset``, it will be deleted.
If ``dataset.etag`` is not ``None``, the update will only
succeed if the dataset on the server has the same ETag. Thus
reading a dataset with ``get_dataset``, changing its fields,
and then passing it to ``update_dataset`` will ensure that the changes
will only be saved if no modifications to the dataset occurred
since the read.
Args:
dataset (google.cloud.bigquery.dataset.Dataset):
The dataset to update.
fields (Sequence[str]):
The properties of ``dataset`` to change (e.g. "friendly_name").
retry (google.api_core.retry.Retry, optional):
How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
The modified ``Dataset`` instance. | Below is the instruction that describes the task:
### Input:
Change some fields of a dataset.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None`` in
``dataset``, it will be deleted.
If ``dataset.etag`` is not ``None``, the update will only
succeed if the dataset on the server has the same ETag. Thus
reading a dataset with ``get_dataset``, changing its fields,
and then passing it to ``update_dataset`` will ensure that the changes
will only be saved if no modifications to the dataset occurred
since the read.
Args:
dataset (google.cloud.bigquery.dataset.Dataset):
The dataset to update.
fields (Sequence[str]):
The properties of ``dataset`` to change (e.g. "friendly_name").
retry (google.api_core.retry.Retry, optional):
How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
The modified ``Dataset`` instance.
### Response:
def update_dataset(self, dataset, fields, retry=DEFAULT_RETRY):
"""Change some fields of a dataset.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None`` in
``dataset``, it will be deleted.
If ``dataset.etag`` is not ``None``, the update will only
succeed if the dataset on the server has the same ETag. Thus
reading a dataset with ``get_dataset``, changing its fields,
and then passing it to ``update_dataset`` will ensure that the changes
will only be saved if no modifications to the dataset occurred
since the read.
Args:
dataset (google.cloud.bigquery.dataset.Dataset):
The dataset to update.
fields (Sequence[str]):
The properties of ``dataset`` to change (e.g. "friendly_name").
retry (google.api_core.retry.Retry, optional):
How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
The modified ``Dataset`` instance.
"""
partial = dataset._build_resource(fields)
if dataset.etag is not None:
headers = {"If-Match": dataset.etag}
else:
headers = None
api_response = self._call_api(
retry, method="PATCH", path=dataset.path, data=partial, headers=headers
)
return Dataset.from_api_repr(api_response) |
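A minimal usage sketch of the read-modify-update flow described above (the client construction and the dataset ID string are illustrative assumptions, not part of this entry):
from google.cloud import bigquery
client = bigquery.Client()
dataset = client.get_dataset("my_project.my_dataset")       # read: captures the current ETag
dataset.description = "Nightly reporting tables"            # change one field locally
dataset = client.update_dataset(dataset, ["description"])   # only the listed field is patched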
def check_ethinca_against_bank_params(ethincaParams, metricParams):
"""
Cross-check the ethinca and bank layout metric calculation parameters
and set the ethinca metric PN order equal to the bank PN order if not
previously set.
Parameters
----------
ethincaParams: instance of ethincaParameters
metricParams: instance of metricParameters
"""
if ethincaParams.doEthinca:
if metricParams.f0 != metricParams.fLow:
raise ValueError("If calculating ethinca metric, f0 and f-low "
"must be equal!")
if ethincaParams.fLow is not None and (
ethincaParams.fLow != metricParams.fLow):
raise ValueError("Ethinca metric calculation does not currently "
"support a f-low value different from the bank "
"metric!")
if ethincaParams.pnOrder is None:
ethincaParams.pnOrder = metricParams.pnOrder
else: pass | Cross-check the ethinca and bank layout metric calculation parameters
and set the ethinca metric PN order equal to the bank PN order if not
previously set.
Parameters
----------
ethincaParams: instance of ethincaParameters
metricParams: instance of metricParameters | Below is the the instruction that describes the task:
### Input:
Cross-check the ethinca and bank layout metric calculation parameters
and set the ethinca metric PN order equal to the bank PN order if not
previously set.
Parameters
----------
ethincaParams: instance of ethincaParameters
metricParams: instance of metricParameters
### Response:
def check_ethinca_against_bank_params(ethincaParams, metricParams):
"""
Cross-check the ethinca and bank layout metric calculation parameters
and set the ethinca metric PN order equal to the bank PN order if not
previously set.
Parameters
----------
ethincaParams: instance of ethincaParameters
metricParams: instance of metricParameters
"""
if ethincaParams.doEthinca:
if metricParams.f0 != metricParams.fLow:
raise ValueError("If calculating ethinca metric, f0 and f-low "
"must be equal!")
if ethincaParams.fLow is not None and (
ethincaParams.fLow != metricParams.fLow):
raise ValueError("Ethinca metric calculation does not currently "
"support a f-low value different from the bank "
"metric!")
if ethincaParams.pnOrder is None:
ethincaParams.pnOrder = metricParams.pnOrder
else: pass |
def discard(self, msg, reason, logMethod=logging.error, cliOutput=False):
"""
Discard a message and log a reason using the specified `logMethod`.
:param msg: the message to discard
:param reason: the reason why this message is being discarded
:param logMethod: the logging function to be used
:param cliOutput: if truthy, informs a CLI that the logged msg should
be printed
"""
reason = "" if not reason else " because {}".format(reason)
logMethod("{} discarding message {}{}".format(self, msg, reason),
extra={"cli": cliOutput}) | Discard a message and log a reason using the specified `logMethod`.
:param msg: the message to discard
:param reason: the reason why this message is being discarded
:param logMethod: the logging function to be used
:param cliOutput: if truthy, informs a CLI that the logged msg should
be printed | Below is the the instruction that describes the task:
### Input:
Discard a message and log a reason using the specified `logMethod`.
:param msg: the message to discard
:param reason: the reason why this message is being discarded
:param logMethod: the logging function to be used
:param cliOutput: if truthy, informs a CLI that the logged msg should
be printed
### Response:
def discard(self, msg, reason, logMethod=logging.error, cliOutput=False):
"""
Discard a message and log a reason using the specified `logMethod`.
:param msg: the message to discard
:param reason: the reason why this message is being discarded
:param logMethod: the logging function to be used
:param cliOutput: if truthy, informs a CLI that the logged msg should
be printed
"""
reason = "" if not reason else " because {}".format(reason)
logMethod("{} discarding message {}{}".format(self, msg, reason),
extra={"cli": cliOutput}) |
def automatic_density_by_vol(structure, kppvol, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom^3 of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
Returns:
Kpoints
"""
vol = structure.lattice.reciprocal_lattice.volume
kppa = kppvol * vol * structure.num_sites
return Kpoints.automatic_density(structure, kppa,
force_gamma=force_gamma) | Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom^3 of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
Returns:
Kpoints | Below is the the instruction that describes the task:
### Input:
Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom^3 of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
Returns:
Kpoints
### Response:
def automatic_density_by_vol(structure, kppvol, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom^3 of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
Returns:
Kpoints
"""
vol = structure.lattice.reciprocal_lattice.volume
kppa = kppvol * vol * structure.num_sites
return Kpoints.automatic_density(structure, kppa,
force_gamma=force_gamma) |
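A brief usage sketch (the POSCAR file and the kppvol value are placeholder assumptions; the import paths assume a recent pymatgen layout). The requested density works out to kppa = kppvol * reciprocal_volume * num_sites before the mesh is rounded:
from pymatgen.core import Structure
from pymatgen.io.vasp.inputs import Kpoints
structure = Structure.from_file("POSCAR")
kpoints = Kpoints.automatic_density_by_vol(structure, kppvol=100)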
def result_for_solid(self, name):
'''Get a :py:class:`SolidExecutionResult` for a given solid name.
'''
check.str_param(name, 'name')
if not self.pipeline.has_solid(name):
raise DagsterInvariantViolationError(
                'Tried to get result for solid {name} in {pipeline}. No such solid.'.format(
name=name, pipeline=self.pipeline.display_name
)
)
if name not in self.solid_result_dict:
raise DagsterInvariantViolationError(
'Did not find result for solid {name} in pipeline execution result'.format(
name=name
)
)
return self.solid_result_dict[name] | Get a :py:class:`SolidExecutionResult` for a given solid name. | Below is the the instruction that describes the task:
### Input:
Get a :py:class:`SolidExecutionResult` for a given solid name.
### Response:
def result_for_solid(self, name):
'''Get a :py:class:`SolidExecutionResult` for a given solid name.
'''
check.str_param(name, 'name')
if not self.pipeline.has_solid(name):
raise DagsterInvariantViolationError(
                'Tried to get result for solid {name} in {pipeline}. No such solid.'.format(
name=name, pipeline=self.pipeline.display_name
)
)
if name not in self.solid_result_dict:
raise DagsterInvariantViolationError(
'Did not find result for solid {name} in pipeline execution result'.format(
name=name
)
)
return self.solid_result_dict[name] |
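An illustrative call pattern (the pipeline factory and solid name are invented; the execute_pipeline entry point is the classic dagster API and is an assumption here, not shown in this entry):
from dagster import execute_pipeline
result = execute_pipeline(define_my_pipeline())      # hypothetical pipeline definition
adder_result = result.result_for_solid('add_one')    # raises DagsterInvariantViolationError if missing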
def _is_error(self):
'''
        Is this an error code?
:return:
'''
if self.exit_code:
msg = self.SUCCESS_EXIT_CODES.get(self.exit_code)
if msg:
log.info(msg)
msg = self.WARNING_EXIT_CODES.get(self.exit_code)
if msg:
log.warning(msg)
        return self.exit_code not in self.SUCCESS_EXIT_CODES and self.exit_code not in self.WARNING_EXIT_CODES | Is this an error code?
:return: | Below is the the instruction that describes the task:
### Input:
Is this an error code?
:return:
### Response:
def _is_error(self):
'''
        Is this an error code?
:return:
'''
if self.exit_code:
msg = self.SUCCESS_EXIT_CODES.get(self.exit_code)
if msg:
log.info(msg)
msg = self.WARNING_EXIT_CODES.get(self.exit_code)
if msg:
log.warning(msg)
return self.exit_code not in self.SUCCESS_EXIT_CODES and self.exit_code not in self.WARNING_EXIT_CODES |
def displayMousePosition(xOffset=0, yOffset=0):
"""This function is meant to be run from the command line. It will
automatically display the location and RGB of the mouse cursor."""
print('Press Ctrl-C to quit.')
if xOffset != 0 or yOffset != 0:
print('xOffset: %s yOffset: %s' % (xOffset, yOffset))
resolution = size()
try:
while True:
# Get and print the mouse coordinates.
x, y = position()
positionStr = 'X: ' + str(x - xOffset).rjust(4) + ' Y: ' + str(y - yOffset).rjust(4)
if (x - xOffset) < 0 or (y - yOffset) < 0 or (x - xOffset) >= resolution[0] or (y - yOffset) >= resolution[1]:
pixelColor = ('NaN', 'NaN', 'NaN')
else:
pixelColor = pyscreeze.screenshot().getpixel((x, y))
positionStr += ' RGB: (' + str(pixelColor[0]).rjust(3)
positionStr += ', ' + str(pixelColor[1]).rjust(3)
positionStr += ', ' + str(pixelColor[2]).rjust(3) + ')'
sys.stdout.write(positionStr)
sys.stdout.write('\b' * len(positionStr))
sys.stdout.flush()
except KeyboardInterrupt:
sys.stdout.write('\n')
sys.stdout.flush() | This function is meant to be run from the command line. It will
automatically display the location and RGB of the mouse cursor. | Below is the the instruction that describes the task:
### Input:
This function is meant to be run from the command line. It will
automatically display the location and RGB of the mouse cursor.
### Response:
def displayMousePosition(xOffset=0, yOffset=0):
"""This function is meant to be run from the command line. It will
automatically display the location and RGB of the mouse cursor."""
print('Press Ctrl-C to quit.')
if xOffset != 0 or yOffset != 0:
print('xOffset: %s yOffset: %s' % (xOffset, yOffset))
resolution = size()
try:
while True:
# Get and print the mouse coordinates.
x, y = position()
positionStr = 'X: ' + str(x - xOffset).rjust(4) + ' Y: ' + str(y - yOffset).rjust(4)
if (x - xOffset) < 0 or (y - yOffset) < 0 or (x - xOffset) >= resolution[0] or (y - yOffset) >= resolution[1]:
pixelColor = ('NaN', 'NaN', 'NaN')
else:
pixelColor = pyscreeze.screenshot().getpixel((x, y))
positionStr += ' RGB: (' + str(pixelColor[0]).rjust(3)
positionStr += ', ' + str(pixelColor[1]).rjust(3)
positionStr += ', ' + str(pixelColor[2]).rjust(3) + ')'
sys.stdout.write(positionStr)
sys.stdout.write('\b' * len(positionStr))
sys.stdout.flush()
except KeyboardInterrupt:
sys.stdout.write('\n')
sys.stdout.flush() |
def necessary(self) -> bool:
"""
Is any special handling (e.g. the addition of
        :class:`ReverseProxiedMiddleware`) necessary for this config?
"""
return any([
self.trusted_proxy_headers,
self.http_host,
self.remote_addr,
self.script_name,
self.server_name,
self.server_port,
self.url_scheme,
self.rewrite_path_info,
]) | Is any special handling (e.g. the addition of
:class:`ReverseProxiedMiddleware`) necessary for this config? | Below is the the instruction that describes the task:
### Input:
Is any special handling (e.g. the addition of
:class:`ReverseProxiedMiddleware`) necessary for this config?
### Response:
def necessary(self) -> bool:
"""
Is any special handling (e.g. the addition of
        :class:`ReverseProxiedMiddleware`) necessary for this config?
"""
return any([
self.trusted_proxy_headers,
self.http_host,
self.remote_addr,
self.script_name,
self.server_name,
self.server_port,
self.url_scheme,
self.rewrite_path_info,
]) |
def _populate_commands(self):
""" Create an instance of each of the debugger
commands. Commands are found by importing files in the
directory 'command'. Some files are excluded via an array set
in __init__. For each of the remaining files, we import them
and scan for class names inside those files and for each class
name, we will create an instance of that class. The set of
DebuggerCommand class instances form set of possible debugger
commands."""
cmd_instances = []
from trepan.bwprocessor import command as Mcommand
eval_cmd_template = 'command_mod.%s(self)'
for mod_name in Mcommand.__modules__:
import_name = "command." + mod_name
try:
command_mod = getattr(__import__(import_name), mod_name)
except:
print('Error importing %s: %s' % (mod_name, sys.exc_info()[0]))
continue
classnames = [ tup[0] for tup in
inspect.getmembers(command_mod, inspect.isclass)
if ('DebuggerCommand' != tup[0] and
tup[0].endswith('Command')) ]
for classname in classnames:
eval_cmd = eval_cmd_template % classname
try:
instance = eval(eval_cmd)
cmd_instances.append(instance)
except:
print('Error loading %s from %s: %s' %
(classname, mod_name, sys.exc_info()[0]))
pass
pass
pass
return cmd_instances | Create an instance of each of the debugger
commands. Commands are found by importing files in the
directory 'command'. Some files are excluded via an array set
in __init__. For each of the remaining files, we import them
and scan for class names inside those files and for each class
name, we will create an instance of that class. The set of
DebuggerCommand class instances form the set of possible debugger
commands. | Below is the the instruction that describes the task:
### Input:
Create an instance of each of the debugger
commands. Commands are found by importing files in the
directory 'command'. Some files are excluded via an array set
in __init__. For each of the remaining files, we import them
and scan for class names inside those files and for each class
name, we will create an instance of that class. The set of
DebuggerCommand class instances form the set of possible debugger
commands.
### Response:
def _populate_commands(self):
""" Create an instance of each of the debugger
commands. Commands are found by importing files in the
directory 'command'. Some files are excluded via an array set
in __init__. For each of the remaining files, we import them
and scan for class names inside those files and for each class
name, we will create an instance of that class. The set of
        DebuggerCommand class instances form the set of possible debugger
commands."""
cmd_instances = []
from trepan.bwprocessor import command as Mcommand
eval_cmd_template = 'command_mod.%s(self)'
for mod_name in Mcommand.__modules__:
import_name = "command." + mod_name
try:
command_mod = getattr(__import__(import_name), mod_name)
except:
print('Error importing %s: %s' % (mod_name, sys.exc_info()[0]))
continue
classnames = [ tup[0] for tup in
inspect.getmembers(command_mod, inspect.isclass)
if ('DebuggerCommand' != tup[0] and
tup[0].endswith('Command')) ]
for classname in classnames:
eval_cmd = eval_cmd_template % classname
try:
instance = eval(eval_cmd)
cmd_instances.append(instance)
except:
print('Error loading %s from %s: %s' %
(classname, mod_name, sys.exc_info()[0]))
pass
pass
pass
return cmd_instances |
def unschedule(self):
"""
        Given a list of scheduled functions,
tear down their regular execution.
"""
# Run even if events are not defined to remove previously existing ones (thus default to []).
events = self.stage_config.get('events', [])
if not isinstance(events, list): # pragma: no cover
print("Events must be supplied as a list.")
return
function_arn = None
try:
function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
function_arn = function_response['Configuration']['FunctionArn']
except botocore.exceptions.ClientError as e: # pragma: no cover
raise ClickException("Function does not exist, you should deploy first. Ex: zappa deploy {}. "
"Proceeding to unschedule CloudWatch based events.".format(self.api_stage))
print("Unscheduling..")
self.zappa.unschedule_events(
lambda_name=self.lambda_name,
lambda_arn=function_arn,
events=events,
)
# Remove async task SNS
if self.stage_config.get('async_source', None) == 'sns' \
and self.stage_config.get('async_resources', True):
removed_arns = self.zappa.remove_async_sns_topic(self.lambda_name)
            click.echo('SNS Topic removed: %s' % ', '.join(removed_arns)) | Given a list of scheduled functions,
tear down their regular execution. | Below is the the instruction that describes the task:
### Input:
Given a list of scheduled functions,
tear down their regular execution.
### Response:
def unschedule(self):
"""
        Given a list of scheduled functions,
tear down their regular execution.
"""
# Run even if events are not defined to remove previously existing ones (thus default to []).
events = self.stage_config.get('events', [])
if not isinstance(events, list): # pragma: no cover
print("Events must be supplied as a list.")
return
function_arn = None
try:
function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
function_arn = function_response['Configuration']['FunctionArn']
except botocore.exceptions.ClientError as e: # pragma: no cover
raise ClickException("Function does not exist, you should deploy first. Ex: zappa deploy {}. "
"Proceeding to unschedule CloudWatch based events.".format(self.api_stage))
print("Unscheduling..")
self.zappa.unschedule_events(
lambda_name=self.lambda_name,
lambda_arn=function_arn,
events=events,
)
# Remove async task SNS
if self.stage_config.get('async_source', None) == 'sns' \
and self.stage_config.get('async_resources', True):
removed_arns = self.zappa.remove_async_sns_topic(self.lambda_name)
click.echo('SNS Topic removed: %s' % ', '.join(removed_arns)) |
def respond_fw_config(self, msg):
"""Respond to a firmware config request."""
(req_fw_type,
req_fw_ver,
req_blocks,
req_crc,
bloader_ver) = fw_hex_to_int(msg.payload, 5)
_LOGGER.debug(
'Received firmware config request with firmware type %s, '
'firmware version %s, %s blocks, CRC %s, bootloader %s',
req_fw_type, req_fw_ver, req_blocks, req_crc, bloader_ver)
fw_type, fw_ver, fware = self._get_fw(
msg, (self.requested, self.unstarted))
if fware is None:
return None
if fw_type != req_fw_type:
_LOGGER.warning(
'Firmware type %s of update is not identical to existing '
'firmware type %s for node %s',
fw_type, req_fw_type, msg.node_id)
_LOGGER.info(
'Updating node %s to firmware type %s version %s from type %s '
'version %s', msg.node_id, fw_type, fw_ver, req_fw_type,
req_fw_ver)
msg = msg.copy(sub_type=self._const.Stream.ST_FIRMWARE_CONFIG_RESPONSE)
msg.payload = fw_int_to_hex(
fw_type, fw_ver, fware['blocks'], fware['crc'])
return msg | Respond to a firmware config request. | Below is the the instruction that describes the task:
### Input:
Respond to a firmware config request.
### Response:
def respond_fw_config(self, msg):
"""Respond to a firmware config request."""
(req_fw_type,
req_fw_ver,
req_blocks,
req_crc,
bloader_ver) = fw_hex_to_int(msg.payload, 5)
_LOGGER.debug(
'Received firmware config request with firmware type %s, '
'firmware version %s, %s blocks, CRC %s, bootloader %s',
req_fw_type, req_fw_ver, req_blocks, req_crc, bloader_ver)
fw_type, fw_ver, fware = self._get_fw(
msg, (self.requested, self.unstarted))
if fware is None:
return None
if fw_type != req_fw_type:
_LOGGER.warning(
'Firmware type %s of update is not identical to existing '
'firmware type %s for node %s',
fw_type, req_fw_type, msg.node_id)
_LOGGER.info(
'Updating node %s to firmware type %s version %s from type %s '
'version %s', msg.node_id, fw_type, fw_ver, req_fw_type,
req_fw_ver)
msg = msg.copy(sub_type=self._const.Stream.ST_FIRMWARE_CONFIG_RESPONSE)
msg.payload = fw_int_to_hex(
fw_type, fw_ver, fware['blocks'], fware['crc'])
return msg |
def read_log(self, logfile):
"""The read_log method returns a memory efficient generator for rows in a Bro log.
Usage:
rows = my_bro_reader.read_log(logfile)
for row in rows:
do something with row
Args:
logfile: The Bro Log file.
"""
# Make sure we're at the beginning
logfile.seek(0)
# First parse the header of the bro log
field_names, _ = self._parse_bro_header(logfile)
# Note: SO stupid to write a csv reader, but csv.DictReader on Bro
# files was doing something weird with generator output that
# affected zeroRPC and gave 'could not route _zpc_more' error.
# So wrote my own, put a sleep at the end, seems to fix it.
while 1:
_line = next(logfile).strip()
if not _line.startswith('#close'):
yield self._cast_dict(dict(zip(field_names, _line.split(self.delimiter))))
else:
time.sleep(.1) # Give time for zeroRPC to finish messages
break | The read_log method returns a memory efficient generator for rows in a Bro log.
Usage:
rows = my_bro_reader.read_log(logfile)
for row in rows:
do something with row
Args:
logfile: The Bro Log file. | Below is the the instruction that describes the task:
### Input:
The read_log method returns a memory efficient generator for rows in a Bro log.
Usage:
rows = my_bro_reader.read_log(logfile)
for row in rows:
do something with row
Args:
logfile: The Bro Log file.
### Response:
def read_log(self, logfile):
"""The read_log method returns a memory efficient generator for rows in a Bro log.
Usage:
rows = my_bro_reader.read_log(logfile)
for row in rows:
do something with row
Args:
logfile: The Bro Log file.
"""
# Make sure we're at the beginning
logfile.seek(0)
# First parse the header of the bro log
field_names, _ = self._parse_bro_header(logfile)
# Note: SO stupid to write a csv reader, but csv.DictReader on Bro
# files was doing something weird with generator output that
# affected zeroRPC and gave 'could not route _zpc_more' error.
# So wrote my own, put a sleep at the end, seems to fix it.
while 1:
_line = next(logfile).strip()
if not _line.startswith('#close'):
yield self._cast_dict(dict(zip(field_names, _line.split(self.delimiter))))
else:
time.sleep(.1) # Give time for zeroRPC to finish messages
break |
def set_parent(self, parent):
"""Set the parent of the treeitem
:param parent: parent treeitem
:type parent: :class:`TreeItem` | None
:returns: None
:rtype: None
:raises: None
"""
if self._parent == parent:
return
if self._parent:
self._parent.remove_child(self)
self._parent = parent
if parent:
parent.add_child(self) | Set the parent of the treeitem
:param parent: parent treeitem
:type parent: :class:`TreeItem` | None
:returns: None
:rtype: None
:raises: None | Below is the the instruction that describes the task:
### Input:
Set the parent of the treeitem
:param parent: parent treeitem
:type parent: :class:`TreeItem` | None
:returns: None
:rtype: None
:raises: None
### Response:
def set_parent(self, parent):
"""Set the parent of the treeitem
:param parent: parent treeitem
:type parent: :class:`TreeItem` | None
:returns: None
:rtype: None
:raises: None
"""
if self._parent == parent:
return
if self._parent:
self._parent.remove_child(self)
self._parent = parent
if parent:
parent.add_child(self) |
def add_cyclic_datepart(df:DataFrame, field_name:str, prefix:str=None, drop:bool=True, time:bool=False, add_linear:bool=False):
"Helper function that adds trigonometric date/time features to a date in the column `field_name` of `df`."
make_date(df, field_name)
field = df[field_name]
prefix = ifnone(prefix, re.sub('[Dd]ate$', '', field_name))
series = field.apply(partial(cyclic_dt_features, time=time, add_linear=add_linear))
columns = [prefix + c for c in cyclic_dt_feat_names(time, add_linear)]
df_feats = pd.DataFrame([item for item in series], columns=columns, index=series.index)
df = pd.concat([df, df_feats], axis=1)
if drop: df.drop(field_name, axis=1, inplace=True)
return df | Helper function that adds trigonometric date/time features to a date in the column `field_name` of `df`. | Below is the the instruction that describes the task:
### Input:
Helper function that adds trigonometric date/time features to a date in the column `field_name` of `df`.
### Response:
def add_cyclic_datepart(df:DataFrame, field_name:str, prefix:str=None, drop:bool=True, time:bool=False, add_linear:bool=False):
"Helper function that adds trigonometric date/time features to a date in the column `field_name` of `df`."
make_date(df, field_name)
field = df[field_name]
prefix = ifnone(prefix, re.sub('[Dd]ate$', '', field_name))
series = field.apply(partial(cyclic_dt_features, time=time, add_linear=add_linear))
columns = [prefix + c for c in cyclic_dt_feat_names(time, add_linear)]
df_feats = pd.DataFrame([item for item in series], columns=columns, index=series.index)
df = pd.concat([df, df_feats], axis=1)
if drop: df.drop(field_name, axis=1, inplace=True)
return df |
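A small usage sketch (the frame and column name are made up; the emitted column names depend on cyclic_dt_feat_names and are only indicative):
import pandas as pd
df = pd.DataFrame({'saledate': pd.to_datetime(['2017-01-15', '2017-06-30'])})
df = add_cyclic_datepart(df, 'saledate')
# 'saledate' is dropped and replaced by sin/cos feature pairs (e.g. saleweekday_cos, saleweekday_sin, ...)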
def fit(self, x, y, **kwargs):
"""
Fit a naive model
:param x: Predictors to use for fitting the data (this will not be used in naive models)
:param y: Outcome
"""
self.mean = numpy.mean(y)
return {} | Fit a naive model
:param x: Predictors to use for fitting the data (this will not be used in naive models)
:param y: Outcome | Below is the the instruction that describes the task:
### Input:
Fit a naive model
:param x: Predictors to use for fitting the data (this will not be used in naive models)
:param y: Outcome
### Response:
def fit(self, x, y, **kwargs):
"""
Fit a naive model
:param x: Predictors to use for fitting the data (this will not be used in naive models)
:param y: Outcome
"""
self.mean = numpy.mean(y)
return {} |
def feature_extractor(self):
"""feature_extractor() -> extractor
Returns the feature extractor used to extract the positive and negative features.
        This feature extractor is stored to file when the :py:meth:`extract` method ran, so this function reads that file (from the ``feature_directory`` set in the constructor) and returns its content.
**Returns:**
``extractor`` : :py:class:`FeatureExtractor`
The feature extractor used to extract the features stored in the ``feature_directory``
"""
extractor_file = os.path.join(self.feature_directory, "Extractor.hdf5")
if not os.path.exists(extractor_file):
raise IOError("Could not found extractor file %s. Did you already run the extraction process? Did you specify the correct `feature_directory` in the constructor?" % extractor_file)
hdf5 = bob.io.base.HDF5File(extractor_file)
return FeatureExtractor(hdf5) | feature_extractor() -> extractor
Returns the feature extractor used to extract the positive and negative features.
This feature extractor is stored to file when the :py:meth:`extract` method ran, so this function reads that file (from the ``feature_directory`` set in the constructor) and returns its content.
**Returns:**
``extractor`` : :py:class:`FeatureExtractor`
The feature extractor used to extract the features stored in the ``feature_directory`` | Below is the the instruction that describes the task:
### Input:
feature_extractor() -> extractor
Returns the feature extractor used to extract the positive and negative features.
This feature extractor is stored to file when the :py:meth:`extract` method ran, so this function reads that file (from the ``feature_directory`` set in the constructor) and returns its content.
**Returns:**
``extractor`` : :py:class:`FeatureExtractor`
The feature extractor used to extract the features stored in the ``feature_directory``
### Response:
def feature_extractor(self):
"""feature_extractor() -> extractor
Returns the feature extractor used to extract the positive and negative features.
        This feature extractor is stored to file when the :py:meth:`extract` method ran, so this function reads that file (from the ``feature_directory`` set in the constructor) and returns its content.
**Returns:**
``extractor`` : :py:class:`FeatureExtractor`
The feature extractor used to extract the features stored in the ``feature_directory``
"""
extractor_file = os.path.join(self.feature_directory, "Extractor.hdf5")
if not os.path.exists(extractor_file):
raise IOError("Could not found extractor file %s. Did you already run the extraction process? Did you specify the correct `feature_directory` in the constructor?" % extractor_file)
hdf5 = bob.io.base.HDF5File(extractor_file)
return FeatureExtractor(hdf5) |
def raw_to_central(n_counter, species, k_counter):
"""
Expresses central moments in terms of raw moments (and other central moments).
Based on equation 8 in the paper:
.. math::
\mathbf{M_{x^n}} = \sum_{k_1=0}^{n_1} ... \sum_{k_d=0}^{n_d} \mathbf{{n \choose k}} (-1)^{\mathbf{n-k}} \mu^{\mathbf{n-k}} \langle \mathbf{x^k} \\rangle
The term :math:`\mu^{\mathbf{n-k}}`, so called alpha term is expressed with respect to `species` values that
are equivalent to :math:`\mu_i` in the paper.
The last term, the beta term, :math:`\langle \mathbf{x^n} \\rangle` is simply obtained
from k_counter as it contains the symbols for raw moments.
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:param species: the symbols for species means
:param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments
:type k_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a vector of central moments expressed in terms of raw moment
"""
# create empty output
central_in_terms_of_raw = []
    # This loop loops through the :math:`[n_1, ..., n_d]` vectors of the sums in the beginning of the equation
# i.e. :math:`\sum_{k1=0}^n_1 ... \sum_{kd=0}^n_d` part of the equation.
    # Note, this is not the sum over k's in that equation, or at least I think it's not
for n_iter in n_counter: #loop through all n1,...,nd combinations
# nothing to do for 0th order central moment
if n_iter.order == 0:
continue
        # n_vec is the vector :math:`[n_1, ... n_d]` in equation 8
n_vec = n_iter.n_vector
# k_lower contains the elements of `k_counter` that are lower than or equal to the current n_vec
# This generates the list of possible k values to satisfy ns in the equation.
        # `k_vec` iterators below are the vector :math:`[k_1, ..., k_d]`
k_lower = [k for k in k_counter if n_iter >= k]
# (n k) binomial term in equation 9
n_choose_k_vec = [make_k_chose_e(k_vec.n_vector, n_vec) for k_vec in k_lower]
# (-1)^(n-k) term in equation 9
minus_one_pow_n_min_k_vec = [_make_min_one_pow_n_minus_k(n_vec, k_vec.n_vector) for k_vec in k_lower ]
# alpha term in equation 9
alpha_vec = [_make_alpha(n_vec, k_vec.n_vector, species) for k_vec in k_lower]
# beta term in equation 9
beta_vec = [k_vec.symbol for k_vec in k_lower]
# let us multiply all terms
product = [(n * m * a * b) for (n, m, a, b) in zip(n_choose_k_vec, minus_one_pow_n_min_k_vec, alpha_vec, beta_vec)]
# and store the product
central_in_terms_of_raw.append(sum(product))
return sp.Matrix(central_in_terms_of_raw) | Expresses central moments in terms of raw moments (and other central moments).
Based on equation 8 in the paper:
.. math::
\mathbf{M_{x^n}} = \sum_{k_1=0}^{n_1} ... \sum_{k_d=0}^{n_d} \mathbf{{n \choose k}} (-1)^{\mathbf{n-k}} \mu^{\mathbf{n-k}} \langle \mathbf{x^k} \\rangle
The term :math:`\mu^{\mathbf{n-k}}`, so called alpha term is expressed with respect to `species` values that
are equivalent to :math:`\mu_i` in the paper.
The last term, the beta term, :math:`\langle \mathbf{x^n} \\rangle` is simply obtained
from k_counter as it contains the symbols for raw moments.
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:param species: the symbols for species means
:param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments
:type k_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a vector of central moments expressed in terms of raw moment | Below is the the instruction that describes the task:
### Input:
Expresses central moments in terms of raw moments (and other central moments).
Based on equation 8 in the paper:
.. math::
\mathbf{M_{x^n}} = \sum_{k_1=0}^{n_1} ... \sum_{k_d=0}^{n_d} \mathbf{{n \choose k}} (-1)^{\mathbf{n-k}} \mu^{\mathbf{n-k}} \langle \mathbf{x^k} \\rangle
The term :math:`\mu^{\mathbf{n-k}}`, so called alpha term is expressed with respect to `species` values that
are equivalent to :math:`\mu_i` in the paper.
The last term, the beta term, :math:`\langle \mathbf{x^n} \\rangle` is simply obtained
from k_counter as it contains the symbols for raw moments.
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:param species: the symbols for species means
:param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments
:type k_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a vector of central moments expressed in terms of raw moment
### Response:
def raw_to_central(n_counter, species, k_counter):
"""
Expresses central moments in terms of raw moments (and other central moments).
Based on equation 8 in the paper:
.. math::
\mathbf{M_{x^n}} = \sum_{k_1=0}^{n_1} ... \sum_{k_d=0}^{n_d} \mathbf{{n \choose k}} (-1)^{\mathbf{n-k}} \mu^{\mathbf{n-k}} \langle \mathbf{x^k} \\rangle
The term :math:`\mu^{\mathbf{n-k}}`, so called alpha term is expressed with respect to `species` values that
are equivalent to :math:`\mu_i` in the paper.
The last term, the beta term, :math:`\langle \mathbf{x^n} \\rangle` is simply obtained
from k_counter as it contains the symbols for raw moments.
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:param species: the symbols for species means
:param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments
:type k_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a vector of central moments expressed in terms of raw moment
"""
# create empty output
central_in_terms_of_raw = []
    # This loop loops through the :math:`[n_1, ..., n_d]` vectors of the sums in the beginning of the equation
# i.e. :math:`\sum_{k1=0}^n_1 ... \sum_{kd=0}^n_d` part of the equation.
    # Note, this is not the sum over k's in that equation, or at least I think it's not
for n_iter in n_counter: #loop through all n1,...,nd combinations
# nothing to do for 0th order central moment
if n_iter.order == 0:
continue
        # n_vec is the vector :math:`[n_1, ... n_d]` in equation 8
n_vec = n_iter.n_vector
# k_lower contains the elements of `k_counter` that are lower than or equal to the current n_vec
# This generates the list of possible k values to satisfy ns in the equation.
        # `k_vec` iterators below are the vector :math:`[k_1, ..., k_d]`
k_lower = [k for k in k_counter if n_iter >= k]
# (n k) binomial term in equation 9
n_choose_k_vec = [make_k_chose_e(k_vec.n_vector, n_vec) for k_vec in k_lower]
# (-1)^(n-k) term in equation 9
minus_one_pow_n_min_k_vec = [_make_min_one_pow_n_minus_k(n_vec, k_vec.n_vector) for k_vec in k_lower ]
# alpha term in equation 9
alpha_vec = [_make_alpha(n_vec, k_vec.n_vector, species) for k_vec in k_lower]
# beta term in equation 9
beta_vec = [k_vec.symbol for k_vec in k_lower]
# let us multiply all terms
product = [(n * m * a * b) for (n, m, a, b) in zip(n_choose_k_vec, minus_one_pow_n_min_k_vec, alpha_vec, beta_vec)]
# and store the product
central_in_terms_of_raw.append(sum(product))
return sp.Matrix(central_in_terms_of_raw) |
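A worked one-species check of equation 8 above (a sketch with n = 2, writing :math:`\mu` for the mean): the k = 0, 1, 2 terms give
.. math::
    M_{x^2} = \mu^2 - 2\mu\langle x \rangle + \langle x^2 \rangle = \langle x^2 \rangle - \mu^2
so the second central moment collapses to the familiar variance, since :math:`\langle x \rangle = \mu`.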
def def_variables(s):
"""
    Declare Variable symbols from a string or list of names and register them in the caller's global namespace.
"""
frame = inspect.currentframe().f_back
try:
if isinstance(s,str):
s = re.split('\s|,', s)
res = []
for t in s:
# skip empty stringG
if not t:
continue
if t.count("@") > 0:
sym = IndexedSymbol(t,Variable)
t = t.strip('@')
else:
sym = Variable(t)
frame.f_globals[t] = sym
res.append(sym)
if frame.f_globals.get('variables_order'):
# we should avoid to declare symbols twice !
frame.f_globals['variables_order'].extend(res)
else:
frame.f_globals['variables_order'] = res
return res
finally:
        del frame | Declare Variable symbols from a string or list of names and register them in the caller's global namespace.
### Input:
Declare Variable symbols from a string or list of names and register them in the caller's global namespace.
### Response:
def def_variables(s):
"""
    Declare Variable symbols from a string or list of names and register them in the caller's global namespace.
"""
frame = inspect.currentframe().f_back
try:
if isinstance(s,str):
s = re.split('\s|,', s)
res = []
for t in s:
# skip empty stringG
if not t:
continue
if t.count("@") > 0:
sym = IndexedSymbol(t,Variable)
t = t.strip('@')
else:
sym = Variable(t)
frame.f_globals[t] = sym
res.append(sym)
if frame.f_globals.get('variables_order'):
# we should avoid to declare symbols twice !
frame.f_globals['variables_order'].extend(res)
else:
frame.f_globals['variables_order'] = res
return res
finally:
del frame |
def _calcEnergyBendStretchTwist(self, diff, es, which):
r"""Calculate energy for ``esType='BST'`` using a difference vector.
It is called in :meth:`dnaEY.getGlobalDeformationEnergy` for energy calculation of each frame.
Parameters
----------
diff : numpy.ndarray
Array of difference between minimum and current parameter values.
.. math::
\mathbf{x} = \begin{bmatrix}
(\theta^{x}_{i} - \theta^{x}_0) & (\theta^{y}_{i} - \theta^{y}_0) & (L_i - L_0) & (\phi_i - \phi_0)
\end{bmatrix}
es : numpy.ndarray
Elastic matrix. See in :meth:`dnaEY.getStretchTwistBendModulus` about elastic matrix.
which : str
For which type of motions, energy will be calculated.
see ``which`` parameter in :meth:`dnaEY.getGlobalDeformationEnergy` for keywords.
Return
------
energy : float
Deformation free energy value
"""
if which not in self.enGlobalTypes:
raise ValueError('{0} is not a supported energy keywords.\n Use any of the following: \n {1}'.format(
which, self.enGlobalTypes))
energy = None
if which == 'full':
temp = np.matrix(diff)
energy = 0.5 * ((temp * es) * temp.T)
energy = energy[0,0]
if which == 'diag':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1])
+ (diff[2] ** 2 * es[2][2])
+ (diff[3] ** 2 * es[3][3]))
if which == 'bend':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1])
+ (diff[0] * diff[1] * es[0][1]))
if which == 'b1':
energy = 0.5 * (diff[0] ** 2 * es[0][0])
if which == 'b2':
energy = 0.5 * (diff[1] ** 2 * es[1][1])
if which == 'stretch':
energy = 0.5 * (diff[2] ** 2 * es[2][2])
if which == 'twist':
energy = 0.5 * (diff[3] ** 2 * es[3][3])
if which == 'st_coupling':
energy = 0.5 * (diff[2] * diff[3] * es[2][3])
if which == 'bs_coupling':
energy = 0.5 * ((diff[0] * diff[2] * es[0][2])
+ (diff[1] * diff[2] * es[1][2]))
if which == 'bt_coupling':
energy = 0.5 * ((diff[0] * diff[3] * es[0][3])
+ (diff[1] * diff[3] * es[1][3]))
if which == 'bb_coupling':
energy = 0.5 * (diff[0] * diff[1] * es[0][1])
if which == 'st':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1])
+ (diff[2] ** 2 * es[2][2])
+ (diff[3] ** 2 * es[3][3])
+ (diff[2] * diff[3] * es[2][3]))
if which == 'bs':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1])
+ (diff[2] ** 2 * es[2][2])
+ (diff[0] * diff[2] * es[0][2])
+ (diff[1] * diff[2] * es[1][2]))
if which == 'bt':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1])
+ (diff[3] ** 2 * es[3][3])
+ (diff[0] * diff[3] * es[0][3])
+ (diff[1] * diff[3] * es[1][3]))
return energy | r"""Calculate energy for ``esType='BST'`` using a difference vector.
It is called in :meth:`dnaEY.getGlobalDeformationEnergy` for energy calculation of each frame.
Parameters
----------
diff : numpy.ndarray
Array of difference between minimum and current parameter values.
.. math::
\mathbf{x} = \begin{bmatrix}
(\theta^{x}_{i} - \theta^{x}_0) & (\theta^{y}_{i} - \theta^{y}_0) & (L_i - L_0) & (\phi_i - \phi_0)
\end{bmatrix}
es : numpy.ndarray
Elastic matrix. See in :meth:`dnaEY.getStretchTwistBendModulus` about elastic matrix.
which : str
For which type of motions, energy will be calculated.
see ``which`` parameter in :meth:`dnaEY.getGlobalDeformationEnergy` for keywords.
Return
------
energy : float
Deformation free energy value | Below is the the instruction that describes the task:
### Input:
r"""Calculate energy for ``esType='BST'`` using a difference vector.
It is called in :meth:`dnaEY.getGlobalDeformationEnergy` for energy calculation of each frame.
Parameters
----------
diff : numpy.ndarray
Array of difference between minimum and current parameter values.
.. math::
\mathbf{x} = \begin{bmatrix}
(\theta^{x}_{i} - \theta^{x}_0) & (\theta^{y}_{i} - \theta^{y}_0) & (L_i - L_0) & (\phi_i - \phi_0)
\end{bmatrix}
es : numpy.ndarray
Elastic matrix. See in :meth:`dnaEY.getStretchTwistBendModulus` about elastic matrix.
which : str
For which type of motions, energy will be calculated.
see ``which`` parameter in :meth:`dnaEY.getGlobalDeformationEnergy` for keywords.
Return
------
energy : float
Deformation free energy value
### Response:
def _calcEnergyBendStretchTwist(self, diff, es, which):
r"""Calculate energy for ``esType='BST'`` using a difference vector.
It is called in :meth:`dnaEY.getGlobalDeformationEnergy` for energy calculation of each frame.
Parameters
----------
diff : numpy.ndarray
Array of difference between minimum and current parameter values.
.. math::
\mathbf{x} = \begin{bmatrix}
(\theta^{x}_{i} - \theta^{x}_0) & (\theta^{y}_{i} - \theta^{y}_0) & (L_i - L_0) & (\phi_i - \phi_0)
\end{bmatrix}
es : numpy.ndarray
Elastic matrix. See in :meth:`dnaEY.getStretchTwistBendModulus` about elastic matrix.
which : str
For which type of motions, energy will be calculated.
see ``which`` parameter in :meth:`dnaEY.getGlobalDeformationEnergy` for keywords.
Return
------
energy : float
Deformation free energy value
"""
if which not in self.enGlobalTypes:
raise ValueError('{0} is not a supported energy keywords.\n Use any of the following: \n {1}'.format(
which, self.enGlobalTypes))
energy = None
if which == 'full':
temp = np.matrix(diff)
energy = 0.5 * ((temp * es) * temp.T)
energy = energy[0,0]
if which == 'diag':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1])
+ (diff[2] ** 2 * es[2][2])
+ (diff[3] ** 2 * es[3][3]))
if which == 'bend':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1])
+ (diff[0] * diff[1] * es[0][1]))
if which == 'b1':
energy = 0.5 * (diff[0] ** 2 * es[0][0])
if which == 'b2':
energy = 0.5 * (diff[1] ** 2 * es[1][1])
if which == 'stretch':
energy = 0.5 * (diff[2] ** 2 * es[2][2])
if which == 'twist':
energy = 0.5 * (diff[3] ** 2 * es[3][3])
if which == 'st_coupling':
energy = 0.5 * (diff[2] * diff[3] * es[2][3])
if which == 'bs_coupling':
energy = 0.5 * ((diff[0] * diff[2] * es[0][2])
+ (diff[1] * diff[2] * es[1][2]))
if which == 'bt_coupling':
energy = 0.5 * ((diff[0] * diff[3] * es[0][3])
+ (diff[1] * diff[3] * es[1][3]))
if which == 'bb_coupling':
energy = 0.5 * (diff[0] * diff[1] * es[0][1])
if which == 'st':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1])
+ (diff[2] ** 2 * es[2][2])
+ (diff[3] ** 2 * es[3][3])
+ (diff[2] * diff[3] * es[2][3]))
if which == 'bs':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1])
+ (diff[2] ** 2 * es[2][2])
+ (diff[0] * diff[2] * es[0][2])
+ (diff[1] * diff[2] * es[1][2]))
if which == 'bt':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1])
+ (diff[3] ** 2 * es[3][3])
+ (diff[0] * diff[3] * es[0][3])
+ (diff[1] * diff[3] * es[1][3]))
return energy |
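A tiny numerical sketch of the which='full' branch above (the elastic matrix and difference vector are invented illustrative values, not taken from any trajectory):
import numpy as np
es = np.diag([1.0, 1.0, 0.5, 2.0])          # stand-in 4x4 elastic matrix
diff = np.array([0.1, -0.2, 0.05, 0.3])     # [d(theta_x), d(theta_y), dL, d(phi)]
energy = 0.5 * diff @ es @ diff             # the same quadratic form 0.5 * x^T * E * x as which='full'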
def _create_cat_table(self,data):
"""
Create table one for categorical data.
Returns
----------
table : pandas DataFrame
A table summarising the categorical variables.
"""
table = self.cat_describe['t1_summary'].copy()
# add the total count of null values across all levels
isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
isnull.index.rename('variable', inplace=True)
try:
table = table.join(isnull)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(isnull)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table | Create table one for categorical data.
Returns
----------
table : pandas DataFrame
A table summarising the categorical variables. | Below is the the instruction that describes the task:
### Input:
Create table one for categorical data.
Returns
----------
table : pandas DataFrame
A table summarising the categorical variables.
### Response:
def _create_cat_table(self,data):
"""
Create table one for categorical data.
Returns
----------
table : pandas DataFrame
A table summarising the categorical variables.
"""
table = self.cat_describe['t1_summary'].copy()
# add the total count of null values across all levels
isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
isnull.index.rename('variable', inplace=True)
try:
table = table.join(isnull)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(isnull)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table |
def run(self, path):
'''Test a bunch of files and return a summary JSON report'''
SEPARATOR = '=' * 40
summary = {}
res = True
for _f in utils.get_files_by_path(path):
L.info(SEPARATOR)
status, summ = self._check_file(_f)
res &= status
if summ is not None:
summary.update(summ)
L.info(SEPARATOR)
status = 'PASS' if res else 'FAIL'
return {'files': summary, 'STATUS': status} | Test a bunch of files and return a summary JSON report | Below is the the instruction that describes the task:
### Input:
Test a bunch of files and return a summary JSON report
### Response:
def run(self, path):
'''Test a bunch of files and return a summary JSON report'''
SEPARATOR = '=' * 40
summary = {}
res = True
for _f in utils.get_files_by_path(path):
L.info(SEPARATOR)
status, summ = self._check_file(_f)
res &= status
if summ is not None:
summary.update(summ)
L.info(SEPARATOR)
status = 'PASS' if res else 'FAIL'
return {'files': summary, 'STATUS': status} |
def mip_bipartitions(mechanism, purview, node_labels=None):
r"""Return an generator of all |small_phi| bipartitions of a mechanism over
a purview.
Excludes all bipartitions where one half is entirely empty, *e.g*::
A ∅
─── ✕ ───
B ∅
is not valid, but ::
A ∅
─── ✕ ───
∅ B
is.
Args:
mechanism (tuple[int]): The mechanism to partition
purview (tuple[int]): The purview to partition
Yields:
Bipartition: Where each bipartition is::
bipart[0].mechanism bipart[1].mechanism
─────────────────── ✕ ───────────────────
bipart[0].purview bipart[1].purview
Example:
>>> mechanism = (0,)
>>> purview = (2, 3)
>>> for partition in mip_bipartitions(mechanism, purview):
... print(partition, '\n') # doctest: +NORMALIZE_WHITESPACE
∅ 0
─── ✕ ───
2 3
<BLANKLINE>
∅ 0
─── ✕ ───
3 2
<BLANKLINE>
∅ 0
─── ✕ ───
2,3 ∅
"""
numerators = bipartition(mechanism)
denominators = directed_bipartition(purview)
for n, d in product(numerators, denominators):
if (n[0] or d[0]) and (n[1] or d[1]):
yield Bipartition(Part(n[0], d[0]), Part(n[1], d[1]),
node_labels=node_labels) | r"""Return an generator of all |small_phi| bipartitions of a mechanism over
a purview.
Excludes all bipartitions where one half is entirely empty, *e.g*::
A ∅
─── ✕ ───
B ∅
is not valid, but ::
A ∅
─── ✕ ───
∅ B
is.
Args:
mechanism (tuple[int]): The mechanism to partition
purview (tuple[int]): The purview to partition
Yields:
Bipartition: Where each bipartition is::
bipart[0].mechanism bipart[1].mechanism
─────────────────── ✕ ───────────────────
bipart[0].purview bipart[1].purview
Example:
>>> mechanism = (0,)
>>> purview = (2, 3)
>>> for partition in mip_bipartitions(mechanism, purview):
... print(partition, '\n') # doctest: +NORMALIZE_WHITESPACE
∅ 0
─── ✕ ───
2 3
<BLANKLINE>
∅ 0
─── ✕ ───
3 2
<BLANKLINE>
∅ 0
─── ✕ ───
2,3 ∅ | Below is the the instruction that describes the task:
### Input:
r"""Return an generator of all |small_phi| bipartitions of a mechanism over
a purview.
Excludes all bipartitions where one half is entirely empty, *e.g*::
A ∅
─── ✕ ───
B ∅
is not valid, but ::
A ∅
─── ✕ ───
∅ B
is.
Args:
mechanism (tuple[int]): The mechanism to partition
purview (tuple[int]): The purview to partition
Yields:
Bipartition: Where each bipartition is::
bipart[0].mechanism bipart[1].mechanism
─────────────────── ✕ ───────────────────
bipart[0].purview bipart[1].purview
Example:
>>> mechanism = (0,)
>>> purview = (2, 3)
>>> for partition in mip_bipartitions(mechanism, purview):
... print(partition, '\n') # doctest: +NORMALIZE_WHITESPACE
∅ 0
─── ✕ ───
2 3
<BLANKLINE>
∅ 0
─── ✕ ───
3 2
<BLANKLINE>
∅ 0
─── ✕ ───
2,3 ∅
### Response:
def mip_bipartitions(mechanism, purview, node_labels=None):
r"""Return an generator of all |small_phi| bipartitions of a mechanism over
a purview.
Excludes all bipartitions where one half is entirely empty, *e.g*::
A ∅
─── ✕ ───
B ∅
is not valid, but ::
A ∅
─── ✕ ───
∅ B
is.
Args:
mechanism (tuple[int]): The mechanism to partition
purview (tuple[int]): The purview to partition
Yields:
Bipartition: Where each bipartition is::
bipart[0].mechanism bipart[1].mechanism
─────────────────── ✕ ───────────────────
bipart[0].purview bipart[1].purview
Example:
>>> mechanism = (0,)
>>> purview = (2, 3)
>>> for partition in mip_bipartitions(mechanism, purview):
... print(partition, '\n') # doctest: +NORMALIZE_WHITESPACE
∅ 0
─── ✕ ───
2 3
<BLANKLINE>
∅ 0
─── ✕ ───
3 2
<BLANKLINE>
∅ 0
─── ✕ ───
2,3 ∅
"""
numerators = bipartition(mechanism)
denominators = directed_bipartition(purview)
for n, d in product(numerators, denominators):
if (n[0] or d[0]) and (n[1] or d[1]):
yield Bipartition(Part(n[0], d[0]), Part(n[1], d[1]),
node_labels=node_labels) |
def get_driver(driver='ASCII_RS232', *args, **keywords):
""" Gets a driver for a Parker Motion Gemini drive.
Gets and connects a particular driver in ``drivers`` to a Parker
Motion Gemini GV-6 or GT-6 servo/stepper motor drive.
The only driver currently supported is the ``'ASCII_RS232'`` driver
which corresponds to ``drivers.ASCII_RS232``.
Parameters
----------
driver : str, optional
The driver to communicate to the particular driver with, which
includes the hardware connection and possibly the communications
protocol. The only driver currently supported is the
``'ASCII_RS232'`` driver which corresponds to
``drivers.ASCII_RS232``.
*args : additional positional arguments
Additional positional arguments to pass onto the constructor for
the driver.
**keywords : additional keyword arguments
Additional keyword arguments to pass onto the constructor for
the driver.
Returns
-------
drivers : drivers
The connected drivers class that is connected to the drive.
Raises
------
NotImplementedError
If the `driver` is not supported.
See Also
--------
drivers
drivers.ASCII_RS232
"""
if driver.upper() == 'ASCII_RS232':
return drivers.ASCII_RS232(*args, **keywords)
else:
raise NotImplementedError('Driver not supported: '
+ str(driver)) | Gets a driver for a Parker Motion Gemini drive.
Gets and connects a particular driver in ``drivers`` to a Parker
Motion Gemini GV-6 or GT-6 servo/stepper motor drive.
The only driver currently supported is the ``'ASCII_RS232'`` driver
which corresponds to ``drivers.ASCII_RS232``.
Parameters
----------
driver : str, optional
The driver to communicate to the particular driver with, which
includes the hardware connection and possibly the communications
protocol. The only driver currently supported is the
``'ASCII_RS232'`` driver which corresponds to
``drivers.ASCII_RS232``.
*args : additional positional arguments
Additional positional arguments to pass onto the constructor for
the driver.
**keywords : additional keyword arguments
Additional keyword arguments to pass onto the constructor for
the driver.
Returns
-------
drivers : drivers
The connected drivers class that is connected to the drive.
Raises
------
NotImplementedError
If the `driver` is not supported.
See Also
--------
drivers
drivers.ASCII_RS232 | Below is the the instruction that describes the task:
### Input:
Gets a driver for a Parker Motion Gemini drive.
Gets and connects a particular driver in ``drivers`` to a Parker
Motion Gemini GV-6 or GT-6 servo/stepper motor drive.
The only driver currently supported is the ``'ASCII_RS232'`` driver
which corresponds to ``drivers.ASCII_RS232``.
Parameters
----------
driver : str, optional
The driver to communicate to the particular driver with, which
includes the hardware connection and possibly the communications
protocol. The only driver currently supported is the
``'ASCII_RS232'`` driver which corresponds to
``drivers.ASCII_RS232``.
*args : additional positional arguments
Additional positional arguments to pass onto the constructor for
the driver.
**keywords : additional keyword arguments
Additional keyword arguments to pass onto the constructor for
the driver.
Returns
-------
drivers : drivers
The connected drivers class that is connected to the drive.
Raises
------
NotImplementedError
If the `driver` is not supported.
See Also
--------
drivers
drivers.ASCII_RS232
### Response:
def get_driver(driver='ASCII_RS232', *args, **keywords):
""" Gets a driver for a Parker Motion Gemini drive.
Gets and connects a particular driver in ``drivers`` to a Parker
Motion Gemini GV-6 or GT-6 servo/stepper motor drive.
The only driver currently supported is the ``'ASCII_RS232'`` driver
which corresponds to ``drivers.ASCII_RS232``.
Parameters
----------
driver : str, optional
The driver to communicate to the particular driver with, which
includes the hardware connection and possibly the communications
protocol. The only driver currently supported is the
``'ASCII_RS232'`` driver which corresponds to
``drivers.ASCII_RS232``.
*args : additional positional arguments
Additional positional arguments to pass onto the constructor for
the driver.
**keywords : additional keyword arguments
Additional keyword arguments to pass onto the constructor for
the driver.
Returns
-------
drivers : drivers
The connected drivers class that is connected to the drive.
Raises
------
NotImplementedError
If the `driver` is not supported.
See Also
--------
drivers
drivers.ASCII_RS232
"""
if driver.upper() == 'ASCII_RS232':
return drivers.ASCII_RS232(*args, **keywords)
else:
raise NotImplementedError('Driver not supported: '
+ str(driver)) |
def get(self, *, kind: Type=None, tag: Hashable=None, **kwargs) -> Iterator:
"""
Get an iterator of GameObjects by kind or tag.
kind: Any type. Pass to get a subset of contained GameObjects with the
given type.
tag: Any Hashable object. Pass to get a subset of contained GameObjects
with the given tag.
Pass both kind and tag to get objects that are both that type and that
tag.
Examples:
            scene.get(kind=MyGameObject)
scene.get(tag="red")
            scene.get(kind=MyGameObject, tag="red")
"""
return self.game_objects.get(kind=kind, tag=tag, **kwargs) | Get an iterator of GameObjects by kind or tag.
kind: Any type. Pass to get a subset of contained GameObjects with the
given type.
tag: Any Hashable object. Pass to get a subset of contained GameObjects
with the given tag.
Pass both kind and tag to get objects that are both that type and that
tag.
Examples:
scene.get(kind=MyGameObject)
scene.get(tag="red")
scene.get(kind=MyGameObject, tag="red")
### Input:
Get an iterator of GameObjects by kind or tag.
kind: Any type. Pass to get a subset of contained GameObjects with the
given type.
tag: Any Hashable object. Pass to get a subset of contained GameObjects
with the given tag.
Pass both kind and tag to get objects that are both that type and that
tag.
Examples:
scene.get(kind=MyGameObject)
scene.get(tag="red")
scene.get(kind=MyGameObject, tag="red")
### Response:
def get(self, *, kind: Type=None, tag: Hashable=None, **kwargs) -> Iterator:
"""
Get an iterator of GameObjects by kind or tag.
kind: Any type. Pass to get a subset of contained GameObjects with the
given type.
tag: Any Hashable object. Pass to get a subset of contained GameObjects
with the given tag.
Pass both kind and tag to get objects that are both that type and that
tag.
Examples:
            scene.get(kind=MyGameObject)
scene.get(tag="red")
            scene.get(kind=MyGameObject, tag="red")
"""
return self.game_objects.get(kind=kind, tag=tag, **kwargs) |
def sort_references_dict(refs):
"""Sorts a reference dictionary into a standard order
The keys of the references are also sorted, and the keys for the data for each
reference are put in a more canonical order.
"""
if _use_odict:
refs_sorted = OrderedDict()
else:
refs_sorted = dict()
    # We insert this first. That is ok - it will be overwritten
# with the sorted version later
refs_sorted['molssi_bse_schema'] = refs['molssi_bse_schema']
# This sorts the entries by reference key (author1985a, etc)
for k, v in sorted(refs.items()):
refs_sorted[k] = sort_single_reference(v)
return refs_sorted | Sorts a reference dictionary into a standard order
The keys of the references are also sorted, and the keys for the data for each
reference are put in a more canonical order. | Below is the the instruction that describes the task:
### Input:
Sorts a reference dictionary into a standard order
The keys of the references are also sorted, and the keys for the data for each
reference are put in a more canonical order.
### Response:
def sort_references_dict(refs):
"""Sorts a reference dictionary into a standard order
The keys of the references are also sorted, and the keys for the data for each
reference are put in a more canonical order.
"""
if _use_odict:
refs_sorted = OrderedDict()
else:
refs_sorted = dict()
    # We insert this first; that is OK - it will be overwritten
    # with the sorted version later
refs_sorted['molssi_bse_schema'] = refs['molssi_bse_schema']
# This sorts the entries by reference key (author1985a, etc)
for k, v in sorted(refs.items()):
refs_sorted[k] = sort_single_reference(v)
return refs_sorted |
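A minimal, self-contained sketch of the ordering idea used above: the schema key is written first and then re-assigned inside the sorted loop, so it keeps the first position while the reference keys follow in sorted order. The sample reference entries below are invented for illustration.
from collections import OrderedDict

def sort_refs_sketch(refs):
    out = OrderedDict()
    # Write the schema entry first; re-assigning it in the loop below
    # leaves it in the first position.
    out['molssi_bse_schema'] = refs['molssi_bse_schema']
    for key, value in sorted(refs.items()):
        out[key] = value
    return out

refs = {
    'smith2001a': {'authors': ['Smith, J.'], 'year': 2001},
    'molssi_bse_schema': {'schema_type': 'references', 'schema_version': '0.1'},
    'doe1999a': {'authors': ['Doe, A.'], 'year': 1999},
}
print(list(sort_refs_sketch(refs)))
# ['molssi_bse_schema', 'doe1999a', 'smith2001a']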
def check_debug():
"""Check that Django's template debugging is enabled.
Django's built-in "template debugging" records information the plugin needs
to do its work. Check that the setting is correct, and raise an exception
if it is not.
Returns True if the debug check was performed, False otherwise
"""
from django.conf import settings
if not settings.configured:
return False
# I _think_ this check is all that's needed and the 3 "hasattr" checks
# below can be removed, but it's not clear how to verify that
from django.apps import apps
if not apps.ready:
return False
# django.template.backends.django gets loaded lazily, so return false
# until they've been loaded
if not hasattr(django.template, "backends"):
return False
if not hasattr(django.template.backends, "django"):
return False
if not hasattr(django.template.backends.django, "DjangoTemplates"):
raise DjangoTemplatePluginException("Can't use non-Django templates.")
for engine in django.template.engines.all():
if not isinstance(engine, django.template.backends.django.DjangoTemplates):
raise DjangoTemplatePluginException(
"Can't use non-Django templates."
)
if not engine.engine.debug:
raise DjangoTemplatePluginException(
"Template debugging must be enabled in settings."
)
return True | Check that Django's template debugging is enabled.
Django's built-in "template debugging" records information the plugin needs
to do its work. Check that the setting is correct, and raise an exception
if it is not.
Returns True if the debug check was performed, False otherwise | Below is the the instruction that describes the task:
### Input:
Check that Django's template debugging is enabled.
Django's built-in "template debugging" records information the plugin needs
to do its work. Check that the setting is correct, and raise an exception
if it is not.
Returns True if the debug check was performed, False otherwise
### Response:
def check_debug():
"""Check that Django's template debugging is enabled.
Django's built-in "template debugging" records information the plugin needs
to do its work. Check that the setting is correct, and raise an exception
if it is not.
Returns True if the debug check was performed, False otherwise
"""
from django.conf import settings
if not settings.configured:
return False
# I _think_ this check is all that's needed and the 3 "hasattr" checks
# below can be removed, but it's not clear how to verify that
from django.apps import apps
if not apps.ready:
return False
# django.template.backends.django gets loaded lazily, so return false
# until they've been loaded
if not hasattr(django.template, "backends"):
return False
if not hasattr(django.template.backends, "django"):
return False
if not hasattr(django.template.backends.django, "DjangoTemplates"):
raise DjangoTemplatePluginException("Can't use non-Django templates.")
for engine in django.template.engines.all():
if not isinstance(engine, django.template.backends.django.DjangoTemplates):
raise DjangoTemplatePluginException(
"Can't use non-Django templates."
)
if not engine.engine.debug:
raise DjangoTemplatePluginException(
"Template debugging must be enabled in settings."
)
return True |
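For context, an illustrative fragment of the setting the last check above looks for: Django's ``DjangoTemplates`` backend accepts a ``debug`` flag in ``OPTIONS``, which is what ``engine.engine.debug`` reflects. This is only a fragment, not a complete settings module.
# settings.py (fragment)
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            # check_debug() raises unless every engine has debug enabled.
            "debug": True,
        },
    },
]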
def prep_for_graph(data_frame, series=None, delta_series=None, smoothing=None,
outlier_stddev=None):
"""Prepare a dataframe for graphing by calculating deltas for
series that need them, resampling, and removing outliers.
"""
series = series or []
delta_series = delta_series or []
graph = calc_deltas(data_frame, delta_series)
for s in series + delta_series:
if smoothing:
graph[s] = graph[s].resample(smoothing)
if outlier_stddev:
graph[s] = remove_outliers(graph[s], outlier_stddev)
return graph[series + delta_series] | Prepare a dataframe for graphing by calculating deltas for
series that need them, resampling, and removing outliers. | Below is the the instruction that describes the task:
### Input:
Prepare a dataframe for graphing by calculating deltas for
series that need them, resampling, and removing outliers.
### Response:
def prep_for_graph(data_frame, series=None, delta_series=None, smoothing=None,
outlier_stddev=None):
"""Prepare a dataframe for graphing by calculating deltas for
series that need them, resampling, and removing outliers.
"""
series = series or []
delta_series = delta_series or []
graph = calc_deltas(data_frame, delta_series)
for s in series + delta_series:
if smoothing:
graph[s] = graph[s].resample(smoothing)
if outlier_stddev:
graph[s] = remove_outliers(graph[s], outlier_stddev)
return graph[series + delta_series] |
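Since ``calc_deltas`` and ``remove_outliers`` are not shown, here is a self-contained sketch of the same three steps on a toy counter series; note that current pandas needs an explicit aggregation (for example ``.mean()``) after ``.resample()``, unlike the older API used above.
import numpy as np
import pandas as pd

# Toy cumulative counter sampled once per second for ten minutes.
idx = pd.date_range("2024-01-01", periods=600, freq="s")
df = pd.DataFrame({"bytes_total": np.random.randint(0, 100, 600).cumsum()}, index=idx)

# Delta series: turn the counter into per-sample differences.
deltas = df["bytes_total"].diff()

# Smoothing: downsample to one-minute means.
smoothed = deltas.resample("1min").mean()

# Outlier removal: keep points within 2 standard deviations of the mean.
keep = (smoothed - smoothed.mean()).abs() <= 2 * smoothed.std()
print(smoothed[keep])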
def to_python(self):
"""The string ``'True'`` (case insensitive) will be converted
to ``True``, as will any positive integers.
"""
if isinstance(self.data, str):
return self.data.strip().lower() == 'true'
if isinstance(self.data, int):
return self.data > 0
return bool(self.data) | The string ``'True'`` (case insensitive) will be converted
to ``True``, as will any positive integers. | Below is the the instruction that describes the task:
### Input:
The string ``'True'`` (case insensitive) will be converted
to ``True``, as will any positive integers.
### Response:
def to_python(self):
"""The string ``'True'`` (case insensitive) will be converted
to ``True``, as will any positive integers.
"""
if isinstance(self.data, str):
return self.data.strip().lower() == 'true'
if isinstance(self.data, int):
return self.data > 0
return bool(self.data) |
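A standalone restatement of the conversion rules above (the ``to_bool`` name is invented here), with checks covering the edge cases of whitespace, case and non-positive integers.
def to_bool(data):
    # Same rules as to_python(): 'true' (any case) -> True, ints -> positive only.
    if isinstance(data, str):
        return data.strip().lower() == 'true'
    if isinstance(data, int):
        return data > 0
    return bool(data)

assert to_bool(' TRUE ') is True
assert to_bool('false') is False
assert to_bool('yes') is False      # only the literal string 'true' converts
assert to_bool(3) is True
assert to_bool(0) is False
assert to_bool(None) is False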
def ip_hide_ext_community_list_holder_extcommunity_list_ext_community_action(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
hide_ext_community_list_holder = ET.SubElement(ip, "hide-ext-community-list-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
extcommunity_list = ET.SubElement(hide_ext_community_list_holder, "extcommunity-list")
extcommunity_list_num_key = ET.SubElement(extcommunity_list, "extcommunity-list-num")
extcommunity_list_num_key.text = kwargs.pop('extcommunity_list_num')
ext_community_action = ET.SubElement(extcommunity_list, "ext-community-action")
ext_community_action.text = kwargs.pop('ext_community_action')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def ip_hide_ext_community_list_holder_extcommunity_list_ext_community_action(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
hide_ext_community_list_holder = ET.SubElement(ip, "hide-ext-community-list-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
extcommunity_list = ET.SubElement(hide_ext_community_list_holder, "extcommunity-list")
extcommunity_list_num_key = ET.SubElement(extcommunity_list, "extcommunity-list-num")
extcommunity_list_num_key.text = kwargs.pop('extcommunity_list_num')
ext_community_action = ET.SubElement(extcommunity_list, "ext-community-action")
ext_community_action.text = kwargs.pop('ext_community_action')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def resource_request_send(self, request_id, uri_type, uri, transfer_type, storage, force_mavlink1=False):
'''
The autopilot is requesting a resource (file, binary, other type of
data)
request_id : Request ID. This ID should be re-used when sending back URI contents (uint8_t)
uri_type : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t)
uri : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t)
transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t)
storage : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t)
'''
return self.send(self.resource_request_encode(request_id, uri_type, uri, transfer_type, storage), force_mavlink1=force_mavlink1) | The autopilot is requesting a resource (file, binary, other type of
data)
request_id : Request ID. This ID should be re-used when sending back URI contents (uint8_t)
uri_type : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t)
uri : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t)
transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t)
storage : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t) | Below is the the instruction that describes the task:
### Input:
The autopilot is requesting a resource (file, binary, other type of
data)
request_id : Request ID. This ID should be re-used when sending back URI contents (uint8_t)
uri_type : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t)
uri : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t)
transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t)
storage : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t)
### Response:
def resource_request_send(self, request_id, uri_type, uri, transfer_type, storage, force_mavlink1=False):
'''
The autopilot is requesting a resource (file, binary, other type of
data)
request_id : Request ID. This ID should be re-used when sending back URI contents (uint8_t)
uri_type : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t)
uri : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t)
transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t)
storage : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t)
'''
return self.send(self.resource_request_encode(request_id, uri_type, uri, transfer_type, storage), force_mavlink1=force_mavlink1) |
def update_repodata(self, channels=None):
"""Update repodata from channels or use condarc channels if None."""
norm_channels = self.conda_get_condarc_channels(channels=channels,
normalize=True)
repodata_urls = self._set_repo_urls_from_channels(norm_channels)
self._check_repos(repodata_urls) | Update repodata from channels or use condarc channels if None. | Below is the the instruction that describes the task:
### Input:
Update repodata from channels or use condarc channels if None.
### Response:
def update_repodata(self, channels=None):
"""Update repodata from channels or use condarc channels if None."""
norm_channels = self.conda_get_condarc_channels(channels=channels,
normalize=True)
repodata_urls = self._set_repo_urls_from_channels(norm_channels)
self._check_repos(repodata_urls) |
def _user_config_file():
"""
Check that the config file is present and readable. If not,
copy a template in place.
"""
config_file = Constants.USER_CONFIG
if os.path.exists(config_file) and os.access(config_file, os.R_OK):
return config_file
elif os.path.exists(config_file) and not os.access(config_file, os.R_OK):
raise IOError("Can not read %s" % config_file)
else:
shutil.copy(Constants.EXAMPLE_USER_CONFIG, config_file)
raise JuicerConfigError("Default config file created.\nCheck man 5 juicer.conf.") | Check that the config file is present and readable. If not,
copy a template in place. | Below is the the instruction that describes the task:
### Input:
Check that the config file is present and readable. If not,
copy a template in place.
### Response:
def _user_config_file():
"""
Check that the config file is present and readable. If not,
copy a template in place.
"""
config_file = Constants.USER_CONFIG
if os.path.exists(config_file) and os.access(config_file, os.R_OK):
return config_file
elif os.path.exists(config_file) and not os.access(config_file, os.R_OK):
raise IOError("Can not read %s" % config_file)
else:
shutil.copy(Constants.EXAMPLE_USER_CONFIG, config_file)
raise JuicerConfigError("Default config file created.\nCheck man 5 juicer.conf.") |
def pre_execute(self, execution, context):
"""Make sure the named directory is created if possible"""
path = self._fspath
if path:
path = path.format(
benchmark=context.benchmark,
api=execution['category'],
**execution.get('metas', {})
)
if self.clean_path:
shutil.rmtree(path, ignore_errors=True)
if execution['metas']['file_mode'] == 'onefile':
path = osp.dirname(path)
if not osp.exists(path):
os.makedirs(path) | Make sure the named directory is created if possible | Below is the the instruction that describes the task:
### Input:
Make sure the named directory is created if possible
### Response:
def pre_execute(self, execution, context):
"""Make sure the named directory is created if possible"""
path = self._fspath
if path:
path = path.format(
benchmark=context.benchmark,
api=execution['category'],
**execution.get('metas', {})
)
if self.clean_path:
shutil.rmtree(path, ignore_errors=True)
if execution['metas']['file_mode'] == 'onefile':
path = osp.dirname(path)
if not osp.exists(path):
os.makedirs(path) |
def read(
stream,
resolver: Resolver = None,
data_readers: DataReaders = None,
eof: Any = EOF,
is_eof_error: bool = False,
) -> Iterable[ReaderForm]:
"""Read the contents of a stream as a Lisp expression.
Callers may optionally specify a namespace resolver, which will be used
to adjudicate the fully-qualified name of symbols appearing inside of
a syntax quote.
Callers may optionally specify a map of custom data readers that will
be used to resolve values in reader macros. Data reader tags specified
by callers must be namespaced symbols; non-namespaced symbols are
reserved by the reader. Data reader functions must be functions taking
one argument and returning a value.
The caller is responsible for closing the input stream."""
reader = StreamReader(stream)
ctx = ReaderContext(reader, resolver=resolver, data_readers=data_readers, eof=eof)
while True:
expr = _read_next(ctx)
if expr is ctx.eof:
if is_eof_error:
raise EOFError
return
if expr is COMMENT or isinstance(expr, Comment):
continue
yield expr | Read the contents of a stream as a Lisp expression.
Callers may optionally specify a namespace resolver, which will be used
to adjudicate the fully-qualified name of symbols appearing inside of
a syntax quote.
Callers may optionally specify a map of custom data readers that will
be used to resolve values in reader macros. Data reader tags specified
by callers must be namespaced symbols; non-namespaced symbols are
reserved by the reader. Data reader functions must be functions taking
one argument and returning a value.
The caller is responsible for closing the input stream. | Below is the the instruction that describes the task:
### Input:
Read the contents of a stream as a Lisp expression.
Callers may optionally specify a namespace resolver, which will be used
to adjudicate the fully-qualified name of symbols appearing inside of
a syntax quote.
Callers may optionally specify a map of custom data readers that will
be used to resolve values in reader macros. Data reader tags specified
by callers must be namespaced symbols; non-namespaced symbols are
reserved by the reader. Data reader functions must be functions taking
one argument and returning a value.
The caller is responsible for closing the input stream.
### Response:
def read(
stream,
resolver: Resolver = None,
data_readers: DataReaders = None,
eof: Any = EOF,
is_eof_error: bool = False,
) -> Iterable[ReaderForm]:
"""Read the contents of a stream as a Lisp expression.
Callers may optionally specify a namespace resolver, which will be used
to adjudicate the fully-qualified name of symbols appearing inside of
a syntax quote.
Callers may optionally specify a map of custom data readers that will
be used to resolve values in reader macros. Data reader tags specified
by callers must be namespaced symbols; non-namespaced symbols are
reserved by the reader. Data reader functions must be functions taking
one argument and returning a value.
The caller is responsible for closing the input stream."""
reader = StreamReader(stream)
ctx = ReaderContext(reader, resolver=resolver, data_readers=data_readers, eof=eof)
while True:
expr = _read_next(ctx)
if expr is ctx.eof:
if is_eof_error:
raise EOFError
return
if expr is COMMENT or isinstance(expr, Comment):
continue
yield expr |
def importDirectory(self, login, tableName, importDir, failureDir, setTime):
"""
Parameters:
- login
- tableName
- importDir
- failureDir
- setTime
"""
self.send_importDirectory(login, tableName, importDir, failureDir, setTime)
self.recv_importDirectory() | Parameters:
- login
- tableName
- importDir
- failureDir
- setTime | Below is the the instruction that describes the task:
### Input:
Parameters:
- login
- tableName
- importDir
- failureDir
- setTime
### Response:
def importDirectory(self, login, tableName, importDir, failureDir, setTime):
"""
Parameters:
- login
- tableName
- importDir
- failureDir
- setTime
"""
self.send_importDirectory(login, tableName, importDir, failureDir, setTime)
self.recv_importDirectory() |
def load_config_from_json(self):
"""load config from existing json connector files."""
c = self.config
self.log.debug("loading config from JSON")
# load from engine config
fname = os.path.join(self.profile_dir.security_dir, self.engine_json_file)
self.log.info("loading connection info from %s", fname)
with open(fname) as f:
cfg = json.loads(f.read())
key = cfg['exec_key']
# json gives unicode, Session.key wants bytes
c.Session.key = key.encode('ascii')
xport,addr = cfg['url'].split('://')
c.HubFactory.engine_transport = xport
ip,ports = addr.split(':')
c.HubFactory.engine_ip = ip
c.HubFactory.regport = int(ports)
self.location = cfg['location']
if not self.engine_ssh_server:
self.engine_ssh_server = cfg['ssh']
# load client config
fname = os.path.join(self.profile_dir.security_dir, self.client_json_file)
self.log.info("loading connection info from %s", fname)
with open(fname) as f:
cfg = json.loads(f.read())
assert key == cfg['exec_key'], "exec_key mismatch between engine and client keys"
xport,addr = cfg['url'].split('://')
c.HubFactory.client_transport = xport
ip,ports = addr.split(':')
c.HubFactory.client_ip = ip
if not self.ssh_server:
self.ssh_server = cfg['ssh']
assert int(ports) == c.HubFactory.regport, "regport mismatch" | load config from existing json connector files. | Below is the the instruction that describes the task:
### Input:
load config from existing json connector files.
### Response:
def load_config_from_json(self):
"""load config from existing json connector files."""
c = self.config
self.log.debug("loading config from JSON")
# load from engine config
fname = os.path.join(self.profile_dir.security_dir, self.engine_json_file)
self.log.info("loading connection info from %s", fname)
with open(fname) as f:
cfg = json.loads(f.read())
key = cfg['exec_key']
# json gives unicode, Session.key wants bytes
c.Session.key = key.encode('ascii')
xport,addr = cfg['url'].split('://')
c.HubFactory.engine_transport = xport
ip,ports = addr.split(':')
c.HubFactory.engine_ip = ip
c.HubFactory.regport = int(ports)
self.location = cfg['location']
if not self.engine_ssh_server:
self.engine_ssh_server = cfg['ssh']
# load client config
fname = os.path.join(self.profile_dir.security_dir, self.client_json_file)
self.log.info("loading connection info from %s", fname)
with open(fname) as f:
cfg = json.loads(f.read())
assert key == cfg['exec_key'], "exec_key mismatch between engine and client keys"
xport,addr = cfg['url'].split('://')
c.HubFactory.client_transport = xport
ip,ports = addr.split(':')
c.HubFactory.client_ip = ip
if not self.ssh_server:
self.ssh_server = cfg['ssh']
assert int(ports) == c.HubFactory.regport, "regport mismatch" |
def service(self):
"""
Decrease the countdowns, and remove any expired locks. Should be called once every <decrease_every> seconds.
"""
with self.lock:
# Decrement / remove all attempts
for key in list(self.attempts.keys()):
log.debug('Decrementing count for %s' % key)
if key in self.attempts:
if self.attempts[key] <= 1:
del self.attempts[key]
else:
self.attempts[key] -= 1
# Remove expired locks
now = datetime.datetime.utcnow()
for key in list(self.locks.keys()):
if key in self.locks and self.locks[key] < now:
log.info('Expiring login lock for %s' % key)
del self.locks[key] | Decrease the countdowns, and remove any expired locks. Should be called once every <decrease_every> seconds. | Below is the the instruction that describes the task:
### Input:
Decrease the countdowns, and remove any expired locks. Should be called once every <decrease_every> seconds.
### Response:
def service(self):
"""
Decrease the countdowns, and remove any expired locks. Should be called once every <decrease_every> seconds.
"""
with self.lock:
# Decrement / remove all attempts
for key in list(self.attempts.keys()):
log.debug('Decrementing count for %s' % key)
if key in self.attempts:
if self.attempts[key] <= 1:
del self.attempts[key]
else:
self.attempts[key] -= 1
# Remove expired locks
now = datetime.datetime.utcnow()
for key in list(self.locks.keys()):
if key in self.locks and self.locks[key] < now:
log.info('Expiring login lock for %s' % key)
del self.locks[key] |
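The surrounding class is not shown, so the following is a self-contained sketch of the state this method maintains; the ``LoginThrottle`` name, the ``record_failure`` helper and the thresholds are invented for illustration.
import datetime
import threading

class LoginThrottle:
    # Minimal illustration of the bookkeeping the service() method above works on.
    def __init__(self):
        self.lock = threading.Lock()
        self.attempts = {}   # key -> countdown of recent failed attempts
        self.locks = {}      # key -> UTC datetime when the lockout expires

    def record_failure(self, key, lock_after=3, lock_seconds=60):
        with self.lock:
            self.attempts[key] = self.attempts.get(key, 0) + 1
            if self.attempts[key] >= lock_after:
                self.locks[key] = (datetime.datetime.utcnow()
                                   + datetime.timedelta(seconds=lock_seconds))

    def service(self):
        # Same idea as above: decay attempt counts, drop expired locks.
        with self.lock:
            for key in list(self.attempts):
                if self.attempts[key] <= 1:
                    del self.attempts[key]
                else:
                    self.attempts[key] -= 1
            now = datetime.datetime.utcnow()
            for key in list(self.locks):
                if self.locks[key] < now:
                    del self.locks[key]

throttle = LoginThrottle()
for _ in range(3):
    throttle.record_failure("alice")
print("alice" in throttle.locks)   # True until the lock expires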
def parse_args(self, argv):
"""
parse arguments/options
:param argv: argument list to parse, usually ``sys.argv[1:]``
:type argv: list
:returns: parsed arguments
:rtype: :py:class:`argparse.Namespace`
"""
desc = 'Report on AWS service limits and usage via boto3, optionally ' \
'warn about any services with usage nearing or exceeding their' \
' limits. For further help, see ' \
'<http://awslimitchecker.readthedocs.org/>'
# ###### IMPORTANT license notice ##########
# Pursuant to Sections 5(b) and 13 of the GNU Affero General Public
# License, version 3, this notice MUST NOT be removed, and MUST be
# displayed to ALL USERS of this software, even if they interact with
# it remotely over a network.
#
# See the "Development" section of the awslimitchecker documentation
# (docs/source/development.rst or
# <http://awslimitchecker.readthedocs.org/en/latest/development.html> )
# for further information.
# ###### IMPORTANT license notice ##########
epilog = 'awslimitchecker is AGPLv3-licensed Free Software. Anyone ' \
'using this program, even remotely over a network, is ' \
'entitled to a copy of the source code. Use `--version` for ' \
'information on the source code location.'
p = argparse.ArgumentParser(description=desc, epilog=epilog)
p.add_argument('-S', '--service', action='store', nargs='*',
help='perform action for only the specified service name'
'; see -s|--list-services for valid names')
p.add_argument('--skip-service', action='append', default=[],
dest='skip_service',
help='avoid performing actions for the specified service'
' name; see -s|--list-services for valid names')
p.add_argument('--skip-check', action='append', default=[],
dest='skip_check',
help='avoid performing actions for the specified check'
' name')
p.add_argument('-s', '--list-services', action='store_true',
default=False,
help='print a list of all AWS service types that '
'awslimitchecker knows how to check')
p.add_argument('-l', '--list-limits', action='store_true',
default=False,
help='print all AWS effective limits in "service_name/'
'limit_name" format')
p.add_argument('--list-defaults', action='store_true', default=False,
help='print all AWS default limits in "service_name/'
'limit_name" format')
p.add_argument('-L', '--limit', action=StoreKeyValuePair,
help='override a single AWS limit, specified in '
'"service_name/limit_name=value" format; can be '
'specified multiple times.')
p.add_argument('-u', '--show-usage', action='store_true',
default=False,
help='find and print the current usage of all AWS '
'services with known limits')
p.add_argument('--iam-policy', action='store_true',
default=False,
help='output a JSON serialized IAM Policy '
'listing the required permissions for '
'awslimitchecker to run correctly.')
p.add_argument('-W', '--warning-threshold', action='store',
type=int, default=80,
help='default warning threshold (percentage of '
'limit); default: 80')
p.add_argument('-C', '--critical-threshold', action='store',
type=int, default=99,
help='default critical threshold (percentage of '
'limit); default: 99')
p.add_argument('-P', '--profile', action='store', dest='profile_name',
type=str, default=None,
help='Name of profile in the AWS cross-sdk credentials '
'file to use credentials from; similar to the '
'corresponding awscli option')
p.add_argument('-A', '--sts-account-id', action='store',
type=str, default=None,
help='for use with STS, the Account ID of the '
'destination account (account to assume a role in)')
p.add_argument('-R', '--sts-account-role', action='store',
type=str, default=None,
help='for use with STS, the name of the IAM role to '
'assume')
p.add_argument('-E', '--external-id', action='store', type=str,
default=None, help='External ID to use when assuming '
'a role via STS')
p.add_argument('-M', '--mfa-serial-number', action='store', type=str,
default=None, help='MFA Serial Number to use when '
'assuming a role via STS')
p.add_argument('-T', '--mfa-token', action='store', type=str,
default=None, help='MFA Token to use when assuming '
'a role via STS')
p.add_argument('-r', '--region', action='store',
type=str, default=None,
help='AWS region name to connect to; required for STS')
p.add_argument('--skip-ta', action='store_true', default=False,
help='do not attempt to pull *any* information on limits'
' from Trusted Advisor')
g = p.add_mutually_exclusive_group()
g.add_argument('--ta-refresh-wait', dest='ta_refresh_wait',
action='store_true', default=False,
help='If applicable, refresh all Trusted Advisor '
'limit-related checks, and wait for the refresh to'
' complete before continuing.')
g.add_argument('--ta-refresh-trigger', dest='ta_refresh_trigger',
action='store_true', default=False,
help='If applicable, trigger refreshes for all Trusted '
'Advisor limit-related checks, but do not wait for '
'them to finish refreshing; trigger the refresh '
'and continue on (useful to ensure checks are '
'refreshed before the next scheduled run).')
g.add_argument('--ta-refresh-older', dest='ta_refresh_older',
action='store', type=int, default=None,
help='If applicable, trigger refreshes for all Trusted '
'Advisor limit-related checks with results more '
'than this number of seconds old. Wait for the '
'refresh to complete before continuing.')
p.add_argument('--ta-refresh-timeout', dest='ta_refresh_timeout',
type=int, action='store', default=None,
help='If waiting for TA checks to refresh, wait up to '
'this number of seconds before continuing on '
'anyway.')
p.add_argument('--no-color', action='store_true', default=False,
help='do not colorize output')
p.add_argument('--no-check-version', action='store_false', default=True,
dest='check_version',
help='do not check latest version at startup')
p.add_argument('-v', '--verbose', dest='verbose', action='count',
default=0,
help='verbose output. specify twice for debug-level '
'output.')
p.add_argument('-V', '--version', dest='version', action='store_true',
default=False,
help='print version number and exit.')
args = p.parse_args(argv)
args.ta_refresh_mode = None
if args.ta_refresh_wait:
args.ta_refresh_mode = 'wait'
elif args.ta_refresh_trigger:
args.ta_refresh_mode = 'trigger'
elif args.ta_refresh_older is not None:
args.ta_refresh_mode = args.ta_refresh_older
return args | parse arguments/options
:param argv: argument list to parse, usually ``sys.argv[1:]``
:type argv: list
:returns: parsed arguments
:rtype: :py:class:`argparse.Namespace` | Below is the the instruction that describes the task:
### Input:
parse arguments/options
:param argv: argument list to parse, usually ``sys.argv[1:]``
:type argv: list
:returns: parsed arguments
:rtype: :py:class:`argparse.Namespace`
### Response:
def parse_args(self, argv):
"""
parse arguments/options
:param argv: argument list to parse, usually ``sys.argv[1:]``
:type argv: list
:returns: parsed arguments
:rtype: :py:class:`argparse.Namespace`
"""
desc = 'Report on AWS service limits and usage via boto3, optionally ' \
'warn about any services with usage nearing or exceeding their' \
' limits. For further help, see ' \
'<http://awslimitchecker.readthedocs.org/>'
# ###### IMPORTANT license notice ##########
# Pursuant to Sections 5(b) and 13 of the GNU Affero General Public
# License, version 3, this notice MUST NOT be removed, and MUST be
# displayed to ALL USERS of this software, even if they interact with
# it remotely over a network.
#
# See the "Development" section of the awslimitchecker documentation
# (docs/source/development.rst or
# <http://awslimitchecker.readthedocs.org/en/latest/development.html> )
# for further information.
# ###### IMPORTANT license notice ##########
epilog = 'awslimitchecker is AGPLv3-licensed Free Software. Anyone ' \
'using this program, even remotely over a network, is ' \
'entitled to a copy of the source code. Use `--version` for ' \
'information on the source code location.'
p = argparse.ArgumentParser(description=desc, epilog=epilog)
p.add_argument('-S', '--service', action='store', nargs='*',
help='perform action for only the specified service name'
'; see -s|--list-services for valid names')
p.add_argument('--skip-service', action='append', default=[],
dest='skip_service',
help='avoid performing actions for the specified service'
' name; see -s|--list-services for valid names')
p.add_argument('--skip-check', action='append', default=[],
dest='skip_check',
help='avoid performing actions for the specified check'
' name')
p.add_argument('-s', '--list-services', action='store_true',
default=False,
help='print a list of all AWS service types that '
'awslimitchecker knows how to check')
p.add_argument('-l', '--list-limits', action='store_true',
default=False,
help='print all AWS effective limits in "service_name/'
'limit_name" format')
p.add_argument('--list-defaults', action='store_true', default=False,
help='print all AWS default limits in "service_name/'
'limit_name" format')
p.add_argument('-L', '--limit', action=StoreKeyValuePair,
help='override a single AWS limit, specified in '
'"service_name/limit_name=value" format; can be '
'specified multiple times.')
p.add_argument('-u', '--show-usage', action='store_true',
default=False,
help='find and print the current usage of all AWS '
'services with known limits')
p.add_argument('--iam-policy', action='store_true',
default=False,
help='output a JSON serialized IAM Policy '
'listing the required permissions for '
'awslimitchecker to run correctly.')
p.add_argument('-W', '--warning-threshold', action='store',
type=int, default=80,
help='default warning threshold (percentage of '
'limit); default: 80')
p.add_argument('-C', '--critical-threshold', action='store',
type=int, default=99,
help='default critical threshold (percentage of '
'limit); default: 99')
p.add_argument('-P', '--profile', action='store', dest='profile_name',
type=str, default=None,
help='Name of profile in the AWS cross-sdk credentials '
'file to use credentials from; similar to the '
'corresponding awscli option')
p.add_argument('-A', '--sts-account-id', action='store',
type=str, default=None,
help='for use with STS, the Account ID of the '
'destination account (account to assume a role in)')
p.add_argument('-R', '--sts-account-role', action='store',
type=str, default=None,
help='for use with STS, the name of the IAM role to '
'assume')
p.add_argument('-E', '--external-id', action='store', type=str,
default=None, help='External ID to use when assuming '
'a role via STS')
p.add_argument('-M', '--mfa-serial-number', action='store', type=str,
default=None, help='MFA Serial Number to use when '
'assuming a role via STS')
p.add_argument('-T', '--mfa-token', action='store', type=str,
default=None, help='MFA Token to use when assuming '
'a role via STS')
p.add_argument('-r', '--region', action='store',
type=str, default=None,
help='AWS region name to connect to; required for STS')
p.add_argument('--skip-ta', action='store_true', default=False,
help='do not attempt to pull *any* information on limits'
' from Trusted Advisor')
g = p.add_mutually_exclusive_group()
g.add_argument('--ta-refresh-wait', dest='ta_refresh_wait',
action='store_true', default=False,
help='If applicable, refresh all Trusted Advisor '
'limit-related checks, and wait for the refresh to'
' complete before continuing.')
g.add_argument('--ta-refresh-trigger', dest='ta_refresh_trigger',
action='store_true', default=False,
help='If applicable, trigger refreshes for all Trusted '
'Advisor limit-related checks, but do not wait for '
'them to finish refreshing; trigger the refresh '
'and continue on (useful to ensure checks are '
'refreshed before the next scheduled run).')
g.add_argument('--ta-refresh-older', dest='ta_refresh_older',
action='store', type=int, default=None,
help='If applicable, trigger refreshes for all Trusted '
'Advisor limit-related checks with results more '
'than this number of seconds old. Wait for the '
'refresh to complete before continuing.')
p.add_argument('--ta-refresh-timeout', dest='ta_refresh_timeout',
type=int, action='store', default=None,
help='If waiting for TA checks to refresh, wait up to '
'this number of seconds before continuing on '
'anyway.')
p.add_argument('--no-color', action='store_true', default=False,
help='do not colorize output')
p.add_argument('--no-check-version', action='store_false', default=True,
dest='check_version',
help='do not check latest version at startup')
p.add_argument('-v', '--verbose', dest='verbose', action='count',
default=0,
help='verbose output. specify twice for debug-level '
'output.')
p.add_argument('-V', '--version', dest='version', action='store_true',
default=False,
help='print version number and exit.')
args = p.parse_args(argv)
args.ta_refresh_mode = None
if args.ta_refresh_wait:
args.ta_refresh_mode = 'wait'
elif args.ta_refresh_trigger:
args.ta_refresh_mode = 'trigger'
elif args.ta_refresh_older is not None:
args.ta_refresh_mode = args.ta_refresh_older
return args |
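A sketch of how the more unusual pieces of this parser behave; the ``StoreKeyValuePair`` action is reconstructed here for illustration and may differ from the real one in validation details.
import argparse

class StoreKeyValuePair(argparse.Action):
    # Reconstructed for illustration: accumulate "key=value" pairs into a dict.
    def __call__(self, parser, namespace, values, option_string=None):
        current = getattr(namespace, self.dest, None) or {}
        key, _, value = values.partition('=')
        current[key] = value
        setattr(namespace, self.dest, current)

p = argparse.ArgumentParser()
p.add_argument('-L', '--limit', action=StoreKeyValuePair)
g = p.add_mutually_exclusive_group()
g.add_argument('--ta-refresh-wait', action='store_true', default=False)
g.add_argument('--ta-refresh-older', type=int, default=None)

args = p.parse_args(['-L', 'VPC/VPCs=10', '--ta-refresh-older', '3600'])
print(args.limit)              # {'VPC/VPCs': '10'}
print(args.ta_refresh_older)   # 3600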
def add_directory(self, path, ignore=None):
"""Add ``*.py`` files under the directory ``path`` to the archive.
"""
for root, dirs, files in os.walk(path):
arc_prefix = os.path.relpath(root, os.path.dirname(path))
# py3 remove pyc cache dirs.
if '__pycache__' in dirs:
dirs.remove('__pycache__')
for f in files:
dest_path = os.path.join(arc_prefix, f)
# ignore specific files
if ignore and ignore(dest_path):
continue
if f.endswith('.pyc') or f.endswith('.c'):
continue
f_path = os.path.join(root, f)
self.add_file(f_path, dest_path) | Add ``*.py`` files under the directory ``path`` to the archive. | Below is the the instruction that describes the task:
### Input:
Add ``*.py`` files under the directory ``path`` to the archive.
### Response:
def add_directory(self, path, ignore=None):
"""Add ``*.py`` files under the directory ``path`` to the archive.
"""
for root, dirs, files in os.walk(path):
arc_prefix = os.path.relpath(root, os.path.dirname(path))
# py3 remove pyc cache dirs.
if '__pycache__' in dirs:
dirs.remove('__pycache__')
for f in files:
dest_path = os.path.join(arc_prefix, f)
# ignore specific files
if ignore and ignore(dest_path):
continue
if f.endswith('.pyc') or f.endswith('.c'):
continue
f_path = os.path.join(root, f)
self.add_file(f_path, dest_path) |
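``self.add_file`` belongs to the archive object and is not shown, so this standalone generator reproduces just the traversal and filtering logic for illustration.
import os

def list_py_files(path, ignore=None):
    # Yields (source_path, archive_name) pairs while skipping __pycache__
    # directories, .pyc/.c files and any path rejected by the ignore callable.
    for root, dirs, files in os.walk(path):
        arc_prefix = os.path.relpath(root, os.path.dirname(path))
        if '__pycache__' in dirs:
            dirs.remove('__pycache__')
        for f in files:
            dest_path = os.path.join(arc_prefix, f)
            if ignore and ignore(dest_path):
                continue
            if f.endswith(('.pyc', '.c')):
                continue
            yield os.path.join(root, f), dest_path

for src, dest in list_py_files('.', ignore=lambda p: 'tests' in p):
    print(src, '->', dest)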
def insert_record(self,
table: str,
fields: Sequence[str],
values: Sequence[Any],
update_on_duplicate_key: bool = False) -> int:
"""Inserts a record into database, table "table", using the list of
fieldnames and the list of values. Returns the new PK (or None)."""
self.ensure_db_open()
if len(fields) != len(values):
raise AssertionError("Field/value mismatch")
if update_on_duplicate_key:
sql = get_sql_insert_or_update(table, fields, self.get_delims())
else:
sql = get_sql_insert(table, fields, self.get_delims())
sql = self.localize_sql(sql)
log.debug("About to insert_record with SQL template: " + sql)
try:
cursor = self.db.cursor()
debug_sql(sql, values)
cursor.execute(sql, values)
# ... binds the placeholders (?, %s) to values in the process
new_pk = get_pk_of_last_insert(cursor)
log.debug("Record inserted.")
return new_pk
except: # nopep8
log.exception("insert_record: Failed to insert record.")
raise | Inserts a record into database, table "table", using the list of
fieldnames and the list of values. Returns the new PK (or None). | Below is the the instruction that describes the task:
### Input:
Inserts a record into database, table "table", using the list of
fieldnames and the list of values. Returns the new PK (or None).
### Response:
def insert_record(self,
table: str,
fields: Sequence[str],
values: Sequence[Any],
update_on_duplicate_key: bool = False) -> int:
"""Inserts a record into database, table "table", using the list of
fieldnames and the list of values. Returns the new PK (or None)."""
self.ensure_db_open()
if len(fields) != len(values):
raise AssertionError("Field/value mismatch")
if update_on_duplicate_key:
sql = get_sql_insert_or_update(table, fields, self.get_delims())
else:
sql = get_sql_insert(table, fields, self.get_delims())
sql = self.localize_sql(sql)
log.debug("About to insert_record with SQL template: " + sql)
try:
cursor = self.db.cursor()
debug_sql(sql, values)
cursor.execute(sql, values)
# ... binds the placeholders (?, %s) to values in the process
new_pk = get_pk_of_last_insert(cursor)
log.debug("Record inserted.")
return new_pk
except: # nopep8
log.exception("insert_record: Failed to insert record.")
raise |
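The SQL helpers and delimiter handling are not shown, so here is a simplified standalone analogue using sqlite3 that illustrates the parameterized-insert pattern and the returned primary key (no ``ON DUPLICATE KEY`` support).
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE patient (id INTEGER PRIMARY KEY, name TEXT, age INTEGER)")

def insert_record(conn, table, fields, values):
    # Identifiers are interpolated (the original wraps them in delimiters),
    # while the values themselves are bound to placeholders.
    if len(fields) != len(values):
        raise AssertionError("Field/value mismatch")
    sql = "INSERT INTO {} ({}) VALUES ({})".format(
        table, ", ".join(fields), ", ".join("?" for _ in values))
    cur = conn.execute(sql, values)
    return cur.lastrowid

new_pk = insert_record(conn, "patient", ["name", "age"], ["Alice", 42])
print(new_pk)  # 1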
def prog(self):
"""Program name."""
if not self._prog:
self._prog = self._parser.prog
return self._prog | Program name. | Below is the the instruction that describes the task:
### Input:
Program name.
### Response:
def prog(self):
"""Program name."""
if not self._prog:
self._prog = self._parser.prog
return self._prog |
def from_commandline(cmdline, classname=None):
"""
Creates an OptionHandler based on the provided commandline string.
:param cmdline: the commandline string to use
:type cmdline: str
:param classname: the classname of the wrapper to return other than OptionHandler (in dot-notation)
:type classname: str
:return: the generated option handler instance
:rtype: object
"""
params = split_options(cmdline)
cls = params[0]
params = params[1:]
handler = OptionHandler(javabridge.static_call(
"Lweka/core/Utils;", "forName",
"(Ljava/lang/Class;Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/Object;",
javabridge.class_for_name("java.lang.Object"), cls, params))
if classname is None:
return handler
else:
c = get_class(classname)
return c(jobject=handler.jobject) | Creates an OptionHandler based on the provided commandline string.
:param cmdline: the commandline string to use
:type cmdline: str
:param classname: the classname of the wrapper to return other than OptionHandler (in dot-notation)
:type classname: str
:return: the generated option handler instance
:rtype: object | Below is the the instruction that describes the task:
### Input:
Creates an OptionHandler based on the provided commandline string.
:param cmdline: the commandline string to use
:type cmdline: str
:param classname: the classname of the wrapper to return other than OptionHandler (in dot-notation)
:type classname: str
:return: the generated option handler instance
:rtype: object
### Response:
def from_commandline(cmdline, classname=None):
"""
Creates an OptionHandler based on the provided commandline string.
:param cmdline: the commandline string to use
:type cmdline: str
:param classname: the classname of the wrapper to return other than OptionHandler (in dot-notation)
:type classname: str
:return: the generated option handler instance
:rtype: object
"""
params = split_options(cmdline)
cls = params[0]
params = params[1:]
handler = OptionHandler(javabridge.static_call(
"Lweka/core/Utils;", "forName",
"(Ljava/lang/Class;Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/Object;",
javabridge.class_for_name("java.lang.Object"), cls, params))
if classname is None:
return handler
else:
c = get_class(classname)
return c(jobject=handler.jobject) |
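Running the wrapper itself needs a JVM, but the commandline splitting it relies on can be illustrated standalone; ``split_options`` delegates to Weka's own option parser, so quoting edge cases may differ from the ``shlex`` approximation below.
import shlex

cmdline = "weka.classifiers.trees.J48 -C 0.25 -M 2"
params = shlex.split(cmdline)
classname, options = params[0], params[1:]
print(classname)  # weka.classifiers.trees.J48
print(options)    # ['-C', '0.25', '-M', '2']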
def auto_unsubscribe(self, sid, limit=1):
"""
Sends an UNSUB command to the server. Unsubscribe is one of the basic building
blocks in order to be able to define request/response semantics via pub/sub
by announcing the server limited interest a priori.
"""
if self.is_draining:
raise ErrConnectionDraining
yield self._unsubscribe(sid, limit) | Sends an UNSUB command to the server. Unsubscribe is one of the basic building
blocks in order to be able to define request/response semantics via pub/sub
by announcing the server limited interest a priori. | Below is the the instruction that describes the task:
### Input:
Sends an UNSUB command to the server. Unsubscribe is one of the basic building
blocks in order to be able to define request/response semantics via pub/sub
by announcing the server limited interest a priori.
### Response:
def auto_unsubscribe(self, sid, limit=1):
"""
Sends an UNSUB command to the server. Unsubscribe is one of the basic building
blocks in order to be able to define request/response semantics via pub/sub
by announcing the server limited interest a priori.
"""
if self.is_draining:
raise ErrConnectionDraining
yield self._unsubscribe(sid, limit) |
def generate_random_string(
length, using_digits=False, using_ascii_letters=False, using_punctuation=False
):
"""
Example:
opting out for 50 symbol-long, [a-z][A-Z][0-9] string
would yield log_2((26+26+50)^50) ~= 334 bit strength.
"""
if not using_sysrandom:
return None
symbols = []
if using_digits:
symbols += string.digits
if using_ascii_letters:
symbols += string.ascii_letters
if using_punctuation:
all_punctuation = set(string.punctuation)
# These symbols can cause issues in environment variables
unsuitable = {"'", '"', "\\", "$"}
suitable = all_punctuation.difference(unsuitable)
symbols += "".join(suitable)
return "".join([random.choice(symbols) for _ in range(length)]) | Example:
opting out for 50 symbol-long, [a-z][A-Z][0-9] string
would yield log_2((26+26+50)^50) ~= 334 bit strength. | Below is the the instruction that describes the task:
### Input:
Example:
opting out for 50 symbol-long, [a-z][A-Z][0-9] string
would yield log_2((26+26+50)^50) ~= 334 bit strength.
### Response:
def generate_random_string(
length, using_digits=False, using_ascii_letters=False, using_punctuation=False
):
"""
Example:
opting out for 50 symbol-long, [a-z][A-Z][0-9] string
would yield log_2((26+26+50)^50) ~= 334 bit strength.
"""
if not using_sysrandom:
return None
symbols = []
if using_digits:
symbols += string.digits
if using_ascii_letters:
symbols += string.ascii_letters
if using_punctuation:
all_punctuation = set(string.punctuation)
# These symbols can cause issues in environment variables
unsuitable = {"'", '"', "\\", "$"}
suitable = all_punctuation.difference(unsuitable)
symbols += "".join(suitable)
return "".join([random.choice(symbols) for _ in range(length)]) |
def url(self):
"""
:return:
None or a unicode string of the distribution point's URL
"""
if self._url is False:
self._url = None
name = self['distribution_point']
if name.name != 'full_name':
raise ValueError(unwrap(
'''
CRL distribution points that are relative to the issuer are
not supported
'''
))
for general_name in name.chosen:
if general_name.name == 'uniform_resource_identifier':
url = general_name.native
if url.lower().startswith(('http://', 'https://', 'ldap://', 'ldaps://')):
self._url = url
break
return self._url | :return:
None or a unicode string of the distribution point's URL | Below is the the instruction that describes the task:
### Input:
:return:
None or a unicode string of the distribution point's URL
### Response:
def url(self):
"""
:return:
None or a unicode string of the distribution point's URL
"""
if self._url is False:
self._url = None
name = self['distribution_point']
if name.name != 'full_name':
raise ValueError(unwrap(
'''
CRL distribution points that are relative to the issuer are
not supported
'''
))
for general_name in name.chosen:
if general_name.name == 'uniform_resource_identifier':
url = general_name.native
if url.lower().startswith(('http://', 'https://', 'ldap://', 'ldaps://')):
self._url = url
break
return self._url |
def in_fill(self, x, y):
"""Tests whether the given point is inside the area
that would be affected by a :meth:`fill` operation
given the current path and filling parameters.
Surface dimensions and clipping are not taken into account.
See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
"""
return bool(cairo.cairo_in_fill(self._pointer, x, y)) | Tests whether the given point is inside the area
that would be affected by a :meth:`fill` operation
given the current path and filling parameters.
Surface dimensions and clipping are not taken into account.
See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean. | Below is the the instruction that describes the task:
### Input:
Tests whether the given point is inside the area
that would be affected by a :meth:`fill` operation
given the current path and filling parameters.
Surface dimensions and clipping are not taken into account.
See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
### Response:
def in_fill(self, x, y):
"""Tests whether the given point is inside the area
that would be affected by a :meth:`fill` operation
given the current path and filling parameters.
Surface dimensions and clipping are not taken into account.
See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
"""
return bool(cairo.cairo_in_fill(self._pointer, x, y)) |
def create_input_peptides_files(
peptides,
max_peptides_per_file=None,
group_by_length=False):
"""
Creates one or more files containing one peptide per line,
returns names of files.
"""
if group_by_length:
peptide_lengths = {len(p) for p in peptides}
peptide_groups = {l: [] for l in peptide_lengths}
for p in peptides:
peptide_groups[len(p)].append(p)
else:
peptide_groups = {"": peptides}
file_names = []
for key, group in peptide_groups.items():
n_peptides = len(group)
if not max_peptides_per_file:
max_peptides_per_file = n_peptides
input_file = None
for i, p in enumerate(group):
if i % max_peptides_per_file == 0:
if input_file is not None:
file_names.append(input_file.name)
input_file.close()
input_file = make_writable_tempfile(
prefix_number=i // max_peptides_per_file,
prefix_name=key,
suffix=".txt")
input_file.write("%s\n" % p)
if input_file is not None:
file_names.append(input_file.name)
input_file.close()
return file_names | Creates one or more files containing one peptide per line,
returns names of files. | Below is the the instruction that describes the task:
### Input:
Creates one or more files containing one peptide per line,
returns names of files.
### Response:
def create_input_peptides_files(
peptides,
max_peptides_per_file=None,
group_by_length=False):
"""
Creates one or more files containing one peptide per line,
returns names of files.
"""
if group_by_length:
peptide_lengths = {len(p) for p in peptides}
peptide_groups = {l: [] for l in peptide_lengths}
for p in peptides:
peptide_groups[len(p)].append(p)
else:
peptide_groups = {"": peptides}
file_names = []
for key, group in peptide_groups.items():
n_peptides = len(group)
if not max_peptides_per_file:
max_peptides_per_file = n_peptides
input_file = None
for i, p in enumerate(group):
if i % max_peptides_per_file == 0:
if input_file is not None:
file_names.append(input_file.name)
input_file.close()
input_file = make_writable_tempfile(
prefix_number=i // max_peptides_per_file,
prefix_name=key,
suffix=".txt")
input_file.write("%s\n" % p)
if input_file is not None:
file_names.append(input_file.name)
input_file.close()
return file_names |
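``make_writable_tempfile`` is not shown, so this sketch uses ``tempfile.NamedTemporaryFile`` to illustrate the same chunking behaviour; the peptide strings are arbitrary examples.
import tempfile

def write_peptide_chunks(peptides, max_per_file=3):
    # Start a new file every max_per_file peptides, one peptide per line.
    file_names, handle = [], None
    for i, p in enumerate(peptides):
        if i % max_per_file == 0:
            if handle is not None:
                file_names.append(handle.name)
                handle.close()
            handle = tempfile.NamedTemporaryFile(
                mode="w", suffix=".txt",
                prefix="%d-" % (i // max_per_file), delete=False)
        handle.write("%s\n" % p)
    if handle is not None:
        file_names.append(handle.name)
        handle.close()
    return file_names

print(write_peptide_chunks(["SIINFEKL", "GILGFVFTL", "NLVPMVATV", "GLCTLVAML"]))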
def syslog_configured(name,
syslog_configs,
firewall=True,
reset_service=True,
reset_syslog_config=False,
reset_configs=None):
'''
Ensures the specified syslog configuration parameters. By default,
this state will reset the syslog service after any new or changed
parameters are set successfully.
name
Name of the state.
syslog_configs
Name of parameter to set (corresponds to the command line switch for
esxcli without the double dashes (--))
Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``,
``default-rotate``, ``default-size``, and ``default-timeout``.
Each syslog_config option also needs a configuration value to set.
For example, ``loghost`` requires URLs or IP addresses to use for
logging. Multiple log servers can be specified by listing them,
comma-separated, but without spaces before or after commas
(reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html)
firewall
Enable the firewall rule set for syslog. Defaults to ``True``.
reset_service
After a successful parameter set, reset the service. Defaults to ``True``.
reset_syslog_config
        Resets the syslog service to its default settings. Defaults to ``False``.
If set to ``True``, default settings defined by the list of syslog configs
in ``reset_configs`` will be reset before running any other syslog settings.
reset_configs
A comma-delimited list of parameters to reset. Only runs if
``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set
to ``True``, but no syslog configs are listed in ``reset_configs``, then
``reset_configs`` will be set to ``all`` by default.
See ``syslog_configs`` parameter above for a list of valid options.
Example:
.. code-block:: yaml
configure-host-syslog:
esxi.syslog_configured:
- syslog_configs:
loghost: ssl://localhost:5432,tcp://10.1.0.1:1514
default-timeout: 120
- firewall: True
- reset_service: True
- reset_syslog_config: True
- reset_configs: loghost,default-timeout
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
esxi_cmd = 'esxi.cmd'
host = __pillar__['proxy']['host']
if reset_syslog_config:
if not reset_configs:
reset_configs = 'all'
# Only run the command if not using test=True
if not __opts__['test']:
reset = __salt__[esxi_cmd]('reset_syslog_config',
syslog_config=reset_configs).get(host)
for key, val in six.iteritems(reset):
if isinstance(val, bool):
continue
if not val.get('success'):
msg = val.get('message')
if not msg:
msg = 'There was an error resetting a syslog config \'{0}\'.' \
'Please check debug logs.'.format(val)
ret['comment'] = 'Error: {0}'.format(msg)
return ret
ret['changes'].update({'reset_syslog_config':
{'old': '',
'new': reset_configs}})
current_firewall = __salt__[esxi_cmd]('get_firewall_status').get(host)
error = current_firewall.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
current_firewall = current_firewall.get('rulesets').get('syslog')
if current_firewall != firewall:
# Only run the command if not using test=True
if not __opts__['test']:
enabled = __salt__[esxi_cmd]('enable_firewall_ruleset',
ruleset_enable=firewall,
ruleset_name='syslog').get(host)
if enabled.get('retcode') != 0:
err = enabled.get('stderr')
out = enabled.get('stdout')
ret['comment'] = 'Error: {0}'.format(err if err else out)
return ret
ret['changes'].update({'firewall':
{'old': current_firewall,
'new': firewall}})
current_syslog_config = __salt__[esxi_cmd]('get_syslog_config').get(host)
for key, val in six.iteritems(syslog_configs):
# The output of get_syslog_config has different keys than the keys
# Used to set syslog_config values. We need to look them up first.
try:
lookup_key = _lookup_syslog_config(key)
except KeyError:
ret['comment'] = '\'{0}\' is not a valid config variable.'.format(key)
return ret
current_val = current_syslog_config[lookup_key]
if six.text_type(current_val) != six.text_type(val):
# Only run the command if not using test=True
if not __opts__['test']:
response = __salt__[esxi_cmd]('set_syslog_config',
syslog_config=key,
config_value=val,
firewall=firewall,
reset_service=reset_service).get(host)
success = response.get(key).get('success')
if not success:
msg = response.get(key).get('message')
if not msg:
msg = 'There was an error setting syslog config \'{0}\'. ' \
'Please check debug logs.'.format(key)
ret['comment'] = msg
return ret
if not ret['changes'].get('syslog_config'):
ret['changes'].update({'syslog_config': {}})
ret['changes']['syslog_config'].update({key:
{'old': current_val,
'new': val}})
ret['result'] = True
if ret['changes'] == {}:
ret['comment'] = 'Syslog is already in the desired state.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Syslog state will change.'
return ret | Ensures the specified syslog configuration parameters. By default,
this state will reset the syslog service after any new or changed
parameters are set successfully.
name
Name of the state.
syslog_configs
Name of parameter to set (corresponds to the command line switch for
esxcli without the double dashes (--))
Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``,
``default-rotate``, ``default-size``, and ``default-timeout``.
Each syslog_config option also needs a configuration value to set.
For example, ``loghost`` requires URLs or IP addresses to use for
logging. Multiple log servers can be specified by listing them,
comma-separated, but without spaces before or after commas
(reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html)
firewall
Enable the firewall rule set for syslog. Defaults to ``True``.
reset_service
After a successful parameter set, reset the service. Defaults to ``True``.
reset_syslog_config
        Resets the syslog service to its default settings. Defaults to ``False``.
If set to ``True``, default settings defined by the list of syslog configs
in ``reset_configs`` will be reset before running any other syslog settings.
reset_configs
A comma-delimited list of parameters to reset. Only runs if
``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set
to ``True``, but no syslog configs are listed in ``reset_configs``, then
``reset_configs`` will be set to ``all`` by default.
See ``syslog_configs`` parameter above for a list of valid options.
Example:
.. code-block:: yaml
configure-host-syslog:
esxi.syslog_configured:
- syslog_configs:
loghost: ssl://localhost:5432,tcp://10.1.0.1:1514
default-timeout: 120
- firewall: True
- reset_service: True
- reset_syslog_config: True
- reset_configs: loghost,default-timeout | Below is the the instruction that describes the task:
### Input:
Ensures the specified syslog configuration parameters. By default,
this state will reset the syslog service after any new or changed
parameters are set successfully.
name
Name of the state.
syslog_configs
Name of parameter to set (corresponds to the command line switch for
esxcli without the double dashes (--))
Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``,
``default-rotate``, ``default-size``, and ``default-timeout``.
Each syslog_config option also needs a configuration value to set.
For example, ``loghost`` requires URLs or IP addresses to use for
logging. Multiple log servers can be specified by listing them,
comma-separated, but without spaces before or after commas
(reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html)
firewall
Enable the firewall rule set for syslog. Defaults to ``True``.
reset_service
After a successful parameter set, reset the service. Defaults to ``True``.
reset_syslog_config
Resets the syslog service to its default settings. Defaults to ``False``.
If set to ``True``, default settings defined by the list of syslog configs
in ``reset_configs`` will be reset before running any other syslog settings.
reset_configs
A comma-delimited list of parameters to reset. Only runs if
``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set
to ``True``, but no syslog configs are listed in ``reset_configs``, then
``reset_configs`` will be set to ``all`` by default.
See ``syslog_configs`` parameter above for a list of valid options.
Example:
.. code-block:: yaml
configure-host-syslog:
esxi.syslog_configured:
- syslog_configs:
loghost: ssl://localhost:5432,tcp://10.1.0.1:1514
default-timeout: 120
- firewall: True
- reset_service: True
- reset_syslog_config: True
- reset_configs: loghost,default-timeout
### Response:
def syslog_configured(name,
syslog_configs,
firewall=True,
reset_service=True,
reset_syslog_config=False,
reset_configs=None):
'''
Ensures the specified syslog configuration parameters are set. By default,
this state will reset the syslog service after any new or changed
parameters are set successfully.
name
Name of the state.
syslog_configs
Name of parameter to set (corresponds to the command line switch for
esxcli without the double dashes (--))
Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``,
``default-rotate``, ``default-size``, and ``default-timeout``.
Each syslog_config option also needs a configuration value to set.
For example, ``loghost`` requires URLs or IP addresses to use for
logging. Multiple log servers can be specified by listing them,
comma-separated, but without spaces before or after commas
(reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html)
firewall
Enable the firewall rule set for syslog. Defaults to ``True``.
reset_service
After a successful parameter set, reset the service. Defaults to ``True``.
reset_syslog_config
Resets the syslog service to its default settings. Defaults to ``False``.
If set to ``True``, default settings defined by the list of syslog configs
in ``reset_configs`` will be reset before running any other syslog settings.
reset_configs
A comma-delimited list of parameters to reset. Only runs if
``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set
to ``True``, but no syslog configs are listed in ``reset_configs``, then
``reset_configs`` will be set to ``all`` by default.
See ``syslog_configs`` parameter above for a list of valid options.
Example:
.. code-block:: yaml
configure-host-syslog:
esxi.syslog_configured:
- syslog_configs:
loghost: ssl://localhost:5432,tcp://10.1.0.1:1514
default-timeout: 120
- firewall: True
- reset_service: True
- reset_syslog_config: True
- reset_configs: loghost,default-timeout
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
esxi_cmd = 'esxi.cmd'
host = __pillar__['proxy']['host']
if reset_syslog_config:
if not reset_configs:
reset_configs = 'all'
# Only run the command if not using test=True
if not __opts__['test']:
reset = __salt__[esxi_cmd]('reset_syslog_config',
syslog_config=reset_configs).get(host)
for key, val in six.iteritems(reset):
if isinstance(val, bool):
continue
if not val.get('success'):
msg = val.get('message')
if not msg:
msg = 'There was an error resetting a syslog config \'{0}\'.' \
'Please check debug logs.'.format(val)
ret['comment'] = 'Error: {0}'.format(msg)
return ret
ret['changes'].update({'reset_syslog_config':
{'old': '',
'new': reset_configs}})
current_firewall = __salt__[esxi_cmd]('get_firewall_status').get(host)
error = current_firewall.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
current_firewall = current_firewall.get('rulesets').get('syslog')
if current_firewall != firewall:
# Only run the command if not using test=True
if not __opts__['test']:
enabled = __salt__[esxi_cmd]('enable_firewall_ruleset',
ruleset_enable=firewall,
ruleset_name='syslog').get(host)
if enabled.get('retcode') != 0:
err = enabled.get('stderr')
out = enabled.get('stdout')
ret['comment'] = 'Error: {0}'.format(err if err else out)
return ret
ret['changes'].update({'firewall':
{'old': current_firewall,
'new': firewall}})
current_syslog_config = __salt__[esxi_cmd]('get_syslog_config').get(host)
for key, val in six.iteritems(syslog_configs):
# The output of get_syslog_config has different keys than the keys
# Used to set syslog_config values. We need to look them up first.
try:
lookup_key = _lookup_syslog_config(key)
except KeyError:
ret['comment'] = '\'{0}\' is not a valid config variable.'.format(key)
return ret
current_val = current_syslog_config[lookup_key]
if six.text_type(current_val) != six.text_type(val):
# Only run the command if not using test=True
if not __opts__['test']:
response = __salt__[esxi_cmd]('set_syslog_config',
syslog_config=key,
config_value=val,
firewall=firewall,
reset_service=reset_service).get(host)
success = response.get(key).get('success')
if not success:
msg = response.get(key).get('message')
if not msg:
msg = 'There was an error setting syslog config \'{0}\'. ' \
'Please check debug logs.'.format(key)
ret['comment'] = msg
return ret
if not ret['changes'].get('syslog_config'):
ret['changes'].update({'syslog_config': {}})
ret['changes']['syslog_config'].update({key:
{'old': current_val,
'new': val}})
ret['result'] = True
if ret['changes'] == {}:
ret['comment'] = 'Syslog is already in the desired state.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Syslog state will change.'
return ret |
def __get_doi(pub):
"""
Get DOI from this ONE publication entry.
:param dict pub: Single publication entry
:return:
"""
doi = ""
# Doi location: d["pub"][idx]["identifier"][0]["id"]
try:
doi = pub["DOI"][0]["id"]
doi = clean_doi(doi)
except KeyError:
logger_lpd_noaa.info("get_dois: KeyError: missing a doi key")
except Exception:
logger_lpd_noaa.info("get_dois: Exception: something went wrong")
# if we received a doi that's a list, we want to concat into a single string
if isinstance(doi, list):
if len(doi) == 1:
doi = doi[0]
else:
", ".join(doi)
return doi | Get DOI from this ONE publication entry.
:param dict pub: Single publication entry
:return: | Below is the instruction that describes the task:
### Input:
Get DOI from this ONE publication entry.
:param dict pub: Single publication entry
:return:
### Response:
def __get_doi(pub):
"""
Get DOI from this ONE publication entry.
:param dict pub: Single publication entry
:return:
"""
doi = ""
# Doi location: d["pub"][idx]["identifier"][0]["id"]
try:
doi = pub["DOI"][0]["id"]
doi = clean_doi(doi)
except KeyError:
logger_lpd_noaa.info("get_dois: KeyError: missing a doi key")
except Exception:
logger_lpd_noaa.info("get_dois: Exception: something went wrong")
# if we received a doi that's a list, we want to concat into a single string
if isinstance(doi, list):
if len(doi) == 1:
doi = doi[0]
else:
", ".join(doi)
return doi |
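A brief usage sketch; the inputs are hypothetical and it assumes clean_doi simply normalizes and returns the DOI string:
__get_doi({"DOI": [{"id": "10.1000/xyz123"}]})  # -> "10.1000/xyz123"
__get_doi({})  # -> "" (the KeyError is caught and logged)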
def georadiusbymember(self, key, member, radius, unit='m', *,
with_dist=False, with_hash=False, with_coord=False,
count=None, sort=None, encoding=_NOTSET):
"""Query a sorted set representing a geospatial index to fetch members
matching a given maximum distance from a member.
Return value follows Redis convention:
* if none of ``WITH*`` flags are set -- list of strings returned:
>>> await redis.georadiusbymember('Sicily', 'Palermo', 200, 'km')
[b"Palermo", b"Catania"]
* if any flag (or all) is set -- list of named tuples returned:
>>> await redis.georadiusbymember('Sicily', 'Palermo', 200, 'km',
... with_dist=True)
[GeoMember(name=b"Palermo", dist=190.4424, hash=None, coord=None),
GeoMember(name=b"Catania", dist=56.4413, hash=None, coord=None)]
:raises TypeError: radius is not float or int
:raises TypeError: count is not int
:raises ValueError: if unit not equal ``m``, ``km``, ``mi`` or ``ft``
:raises ValueError: if sort not equal ``ASC`` or ``DESC``
:rtype: list[str] or list[GeoMember]
"""
args = validate_georadius_options(
radius, unit, with_dist, with_hash, with_coord, count, sort
)
fut = self.execute(
b'GEORADIUSBYMEMBER', key, member, radius,
unit, *args, encoding=encoding)
if with_dist or with_hash or with_coord:
return wait_convert(fut, make_geomember,
with_dist=with_dist,
with_hash=with_hash,
with_coord=with_coord)
return fut | Query a sorted set representing a geospatial index to fetch members
matching a given maximum distance from a member.
Return value follows Redis convention:
* if none of ``WITH*`` flags are set -- list of strings returned:
>>> await redis.georadiusbymember('Sicily', 'Palermo', 200, 'km')
[b"Palermo", b"Catania"]
* if any flag (or all) is set -- list of named tuples returned:
>>> await redis.georadiusbymember('Sicily', 'Palermo', 200, 'km',
... with_dist=True)
[GeoMember(name=b"Palermo", dist=190.4424, hash=None, coord=None),
GeoMember(name=b"Catania", dist=56.4413, hash=None, coord=None)]
:raises TypeError: radius is not float or int
:raises TypeError: count is not int
:raises ValueError: if unit not equal ``m``, ``km``, ``mi`` or ``ft``
:raises ValueError: if sort not equal ``ASC`` or ``DESC``
:rtype: list[str] or list[GeoMember] | Below is the instruction that describes the task:
### Input:
Query a sorted set representing a geospatial index to fetch members
matching a given maximum distance from a member.
Return value follows Redis convention:
* if none of ``WITH*`` flags are set -- list of strings returned:
>>> await redis.georadiusbymember('Sicily', 'Palermo', 200, 'km')
[b"Palermo", b"Catania"]
* if any flag (or all) is set -- list of named tuples returned:
>>> await redis.georadiusbymember('Sicily', 'Palermo', 200, 'km',
... with_dist=True)
[GeoMember(name=b"Palermo", dist=190.4424, hash=None, coord=None),
GeoMember(name=b"Catania", dist=56.4413, hash=None, coord=None)]
:raises TypeError: radius is not float or int
:raises TypeError: count is not int
:raises ValueError: if unit not equal ``m``, ``km``, ``mi`` or ``ft``
:raises ValueError: if sort not equal ``ASC`` or ``DESC``
:rtype: list[str] or list[GeoMember]
### Response:
def georadiusbymember(self, key, member, radius, unit='m', *,
with_dist=False, with_hash=False, with_coord=False,
count=None, sort=None, encoding=_NOTSET):
"""Query a sorted set representing a geospatial index to fetch members
matching a given maximum distance from a member.
Return value follows Redis convention:
* if none of ``WITH*`` flags are set -- list of strings returned:
>>> await redis.georadiusbymember('Sicily', 'Palermo', 200, 'km')
[b"Palermo", b"Catania"]
* if any flag (or all) is set -- list of named tuples returned:
>>> await redis.georadiusbymember('Sicily', 'Palermo', 200, 'km',
... with_dist=True)
[GeoMember(name=b"Palermo", dist=190.4424, hash=None, coord=None),
GeoMember(name=b"Catania", dist=56.4413, hash=None, coord=None)]
:raises TypeError: radius is not float or int
:raises TypeError: count is not int
:raises ValueError: if unit not equal ``m``, ``km``, ``mi`` or ``ft``
:raises ValueError: if sort not equal ``ASC`` or ``DESC``
:rtype: list[str] or list[GeoMember]
"""
args = validate_georadius_options(
radius, unit, with_dist, with_hash, with_coord, count, sort
)
fut = self.execute(
b'GEORADIUSBYMEMBER', key, member, radius,
unit, *args, encoding=encoding)
if with_dist or with_hash or with_coord:
return wait_convert(fut, make_geomember,
with_dist=with_dist,
with_hash=with_hash,
with_coord=with_coord)
return fut |
def _ordered_load(stream, Loader=yaml.Loader,
object_pairs_hook=dict):
'''Loads the contents of the YAML stream into :py:class:`collections.OrderedDict`'s
See: https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
'''
class OrderedLoader(Loader): pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader) | Loads the contents of the YAML stream into :py:class:`collections.OrderedDict`'s
See: https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts | Below is the instruction that describes the task:
### Input:
Loads the contents of the YAML stream into :py:class:`collections.OrderedDict`'s
See: https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
### Response:
def _ordered_load(stream, Loader=yaml.Loader,
object_pairs_hook=dict):
'''Loads the contents of the YAML stream into :py:class:`collections.OrderedDict`'s
See: https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
'''
class OrderedLoader(Loader): pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader) |
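A short usage sketch, assuming PyYAML is installed; SafeLoader and OrderedDict are ordinary choices for the two optional arguments:
import io
from collections import OrderedDict
import yaml
doc = io.StringIO("b: 1\na: 2\n")
data = _ordered_load(doc, yaml.SafeLoader, OrderedDict)
# data == OrderedDict([('b', 1), ('a', 2)]) -- mapping keys keep their document order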
def _maintain_parent(self, request, response):
"""
Maintain the parent ID in the querystring for response_add and
response_change.
"""
location = response._headers.get("location")
parent = request.GET.get("parent")
if parent and location and "?" not in location[1]:
url = "%s?parent=%s" % (location[1], parent)
return HttpResponseRedirect(url)
return response | Maintain the parent ID in the querystring for response_add and
response_change. | Below is the instruction that describes the task:
### Input:
Maintain the parent ID in the querystring for response_add and
response_change.
### Response:
def _maintain_parent(self, request, response):
"""
Maintain the parent ID in the querystring for response_add and
response_change.
"""
location = response._headers.get("location")
parent = request.GET.get("parent")
if parent and location and "?" not in location[1]:
url = "%s?parent=%s" % (location[1], parent)
return HttpResponseRedirect(url)
return response |
def _make_query(self, ID: str, methodname: str, *args: Any, **kwargs: Any):
"""将调用请求的ID,方法名,参数包装为请求数据.
Parameters:
ID (str): - 任务ID
methodname (str): - 要调用的方法名
args (Any): - 要调用的方法的位置参数
kwargs (Any): - 要调用的方法的关键字参数
Return:
(Dict[str, Any]) : - 请求的python字典形式
"""
query = {
"MPRPC": self.VERSION,
"ID": ID,
"METHOD": methodname,
"RETURN": True,
"ARGS": args,
"KWARGS": kwargs
}
print(query)
return query | Wrap the call request's ID, method name, and arguments into request data.
Parameters:
ID (str): - task ID
methodname (str): - name of the method to call
args (Any): - positional arguments for the method to call
kwargs (Any): - keyword arguments for the method to call
Return:
(Dict[str, Any]) : - the request as a Python dict | Below is the instruction that describes the task:
### Input:
Wrap the call request's ID, method name, and arguments into request data.
Parameters:
ID (str): - task ID
methodname (str): - name of the method to call
args (Any): - positional arguments for the method to call
kwargs (Any): - keyword arguments for the method to call
Return:
(Dict[str, Any]) : - the request as a Python dict
### Response:
def _make_query(self, ID: str, methodname: str, *args: Any, **kwargs: Any):
"""将调用请求的ID,方法名,参数包装为请求数据.
Parameters:
ID (str): - 任务ID
methodname (str): - 要调用的方法名
args (Any): - 要调用的方法的位置参数
kwargs (Any): - 要调用的方法的关键字参数
Return:
(Dict[str, Any]) : - 请求的python字典形式
"""
query = {
"MPRPC": self.VERSION,
"ID": ID,
"METHOD": methodname,
"RETURN": True,
"ARGS": args,
"KWARGS": kwargs
}
print(query)
return query |
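A hedged sketch of the payload this method builds; client is a hypothetical instance of the owning class, and note that ARGS is captured as a tuple because of *args:
# client._make_query("task-42", "add", 1, 2, flag=True) returns roughly:
# {"MPRPC": client.VERSION, "ID": "task-42", "METHOD": "add",
#  "RETURN": True, "ARGS": (1, 2), "KWARGS": {"flag": True}}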
def pause(self, payload):
"""Start the daemon and all processes or only specific processes."""
# Pause specific processes, if `keys` is given in the payload
if payload.get('keys'):
succeeded = []
failed = []
for key in payload.get('keys'):
success = self.process_handler.pause_process(key)
if success:
succeeded.append(str(key))
else:
failed.append(str(key))
message = ''
if len(succeeded) > 0:
message += 'Paused processes: {}.'.format(', '.join(succeeded))
status = 'success'
if len(failed) > 0:
message += '\nNo running process for keys: {}'.format(', '.join(failed))
status = 'error'
answer = {'message': message.strip(), 'status': status}
# Pause all processes and the daemon
else:
if payload.get('wait'):
self.paused = True
answer = {'message': 'Pausing daemon, but waiting for processes to finish.',
'status': 'success'}
else:
self.process_handler.pause_all()
if not self.paused:
self.paused = True
answer = {'message': 'Daemon and all processes paused.',
'status': 'success'}
else:
answer = {'message': 'Daemon already paused, pausing all processes anyway.',
'status': 'success'}
return answer | Pause the daemon and all processes or only specific processes. | Below is the instruction that describes the task:
### Input:
Pause the daemon and all processes or only specific processes.
### Response:
def pause(self, payload):
"""Start the daemon and all processes or only specific processes."""
# Pause specific processes, if `keys` is given in the payload
if payload.get('keys'):
succeeded = []
failed = []
for key in payload.get('keys'):
success = self.process_handler.pause_process(key)
if success:
succeeded.append(str(key))
else:
failed.append(str(key))
message = ''
if len(succeeded) > 0:
message += 'Paused processes: {}.'.format(', '.join(succeeded))
status = 'success'
if len(failed) > 0:
message += '\nNo running process for keys: {}'.format(', '.join(failed))
status = 'error'
answer = {'message': message.strip(), 'status': status}
# Pause all processes and the daemon
else:
if payload.get('wait'):
self.paused = True
answer = {'message': 'Pausing daemon, but waiting for processes to finish.',
'status': 'success'}
else:
self.process_handler.pause_all()
if not self.paused:
self.paused = True
answer = {'message': 'Daemon and all processes paused.',
'status': 'success'}
else:
answer = {'message': 'Daemon already paused, pausing all processes anyway.',
'status': 'success'}
return answer |
def get_sum_w2(self, ix, iy=0, iz=0):
"""
Obtain the true number of entries in the bin weighted by w^2
"""
if self.GetSumw2N() == 0:
raise RuntimeError(
"Attempting to access Sumw2 in histogram "
"where weights were not stored")
xl = self.nbins(axis=0, overflow=True)
yl = self.nbins(axis=1, overflow=True)
idx = xl * yl * iz + xl * iy + ix
if not 0 <= idx < self.GetSumw2N():
raise IndexError("bin index out of range")
return self.GetSumw2().At(idx) | Obtain the true number of entries in the bin weighted by w^2 | Below is the instruction that describes the task:
### Input:
Obtain the true number of entries in the bin weighted by w^2
### Response:
def get_sum_w2(self, ix, iy=0, iz=0):
"""
Obtain the true number of entries in the bin weighted by w^2
"""
if self.GetSumw2N() == 0:
raise RuntimeError(
"Attempting to access Sumw2 in histogram "
"where weights were not stored")
xl = self.nbins(axis=0, overflow=True)
yl = self.nbins(axis=1, overflow=True)
idx = xl * yl * iz + xl * iy + ix
if not 0 <= idx < self.GetSumw2N():
raise IndexError("bin index out of range")
return self.GetSumw2().At(idx) |
def human_to_bytes(size):
'''
Given a human-readable byte string (e.g. 2G, 30M),
return the number of bytes. Will return 0 if the argument has
unexpected form.
.. versionadded:: 2018.3.0
'''
sbytes = size[:-1]
unit = size[-1]
if sbytes.isdigit():
sbytes = int(sbytes)
if unit == 'P':
sbytes *= 1125899906842624
elif unit == 'T':
sbytes *= 1099511627776
elif unit == 'G':
sbytes *= 1073741824
elif unit == 'M':
sbytes *= 1048576
else:
sbytes = 0
else:
sbytes = 0
return sbytes | Given a human-readable byte string (e.g. 2G, 30M),
return the number of bytes. Will return 0 if the argument has
unexpected form.
.. versionadded:: 2018.3.0 | Below is the instruction that describes the task:
### Input:
Given a human-readable byte string (e.g. 2G, 30M),
return the number of bytes. Will return 0 if the argument has
unexpected form.
.. versionadded:: 2018.3.0
### Response:
def human_to_bytes(size):
'''
Given a human-readable byte string (e.g. 2G, 30M),
return the number of bytes. Will return 0 if the argument has
unexpected form.
.. versionadded:: 2018.3.0
'''
sbytes = size[:-1]
unit = size[-1]
if sbytes.isdigit():
sbytes = int(sbytes)
if unit == 'P':
sbytes *= 1125899906842624
elif unit == 'T':
sbytes *= 1099511627776
elif unit == 'G':
sbytes *= 1073741824
elif unit == 'M':
sbytes *= 1048576
else:
sbytes = 0
else:
sbytes = 0
return sbytes |
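A few worked values that follow directly from the multipliers above (assuming the function is imported from this module):
human_to_bytes('2G')    # 2 * 1073741824 == 2147483648
human_to_bytes('512M')  # 512 * 1048576 == 536870912
human_to_bytes('10x')   # 0 -- unrecognized unit
human_to_bytes('abcG')  # 0 -- non-numeric size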
def pop(queue, quantity=1, is_runner=False):
'''
Pop one or more or all items from the queue and return them.
'''
cmd = 'SELECT name FROM {0}'.format(queue)
if quantity != 'all':
try:
quantity = int(quantity)
except ValueError as exc:
error_txt = ('Quantity must be an integer or "all".\n'
'Error: "{0}".'.format(exc))
raise SaltInvocationError(error_txt)
cmd = ''.join([cmd, ' LIMIT {0}'.format(quantity)])
log.debug('SQL Query: %s', cmd)
con = _conn(queue)
items = []
with con:
cur = con.cursor()
result = cur.execute(cmd).fetchall()
if result:
items = [item[0] for item in result]
itemlist = '","'.join(items)
itemlist = _quote_escape(itemlist)
del_cmd = '''DELETE FROM {0} WHERE name IN ("{1}")'''.format(
queue, itemlist)
log.debug('SQL Query: %s', del_cmd)
cur.execute(del_cmd)
con.commit()
if is_runner:
items = [salt.utils.json.loads(item[0].replace("'", '"')) for item in result]
log.info(items)
return items | Pop one or more or all items from the queue and return them. | Below is the instruction that describes the task:
### Input:
Pop one or more or all items from the queue and return them.
### Response:
def pop(queue, quantity=1, is_runner=False):
'''
Pop one or more or all items from the queue and return them.
'''
cmd = 'SELECT name FROM {0}'.format(queue)
if quantity != 'all':
try:
quantity = int(quantity)
except ValueError as exc:
error_txt = ('Quantity must be an integer or "all".\n'
'Error: "{0}".'.format(exc))
raise SaltInvocationError(error_txt)
cmd = ''.join([cmd, ' LIMIT {0}'.format(quantity)])
log.debug('SQL Query: %s', cmd)
con = _conn(queue)
items = []
with con:
cur = con.cursor()
result = cur.execute(cmd).fetchall()
if result:
items = [item[0] for item in result]
itemlist = '","'.join(items)
itemlist = _quote_escape(itemlist)
del_cmd = '''DELETE FROM {0} WHERE name IN ("{1}")'''.format(
queue, itemlist)
log.debug('SQL Query: %s', del_cmd)
cur.execute(del_cmd)
con.commit()
if is_runner:
items = [salt.utils.json.loads(item[0].replace("'", '"')) for item in result]
log.info(items)
return items |
def validateField(self, field) :
"""Validatie a field"""
if field not in self.validators and not self.collection._validation['allow_foreign_fields'] :
raise SchemaViolation(self.collection.__class__, field)
if field in self.store:
if isinstance(self.store[field], DocumentStore) :
return self[field].validate()
if field in self.patchStore :
return self.validators[field].validate(self.patchStore[field])
else :
try :
return self.validators[field].validate(self.store[field])
except ValidationError as e:
raise ValidationError( "'%s' -> %s" % ( field, str(e)) )
except AttributeError:
if isinstance(self.validators[field], dict) and not isinstance(self.store[field], dict) :
raise ValueError("Validator expected a sub document for field '%s', got '%s' instead" % (field, self.store[field]) )
else :
raise
return True | Validate a field | Below is the instruction that describes the task:
### Input:
Validate a field
### Response:
def validateField(self, field) :
"""Validatie a field"""
if field not in self.validators and not self.collection._validation['allow_foreign_fields'] :
raise SchemaViolation(self.collection.__class__, field)
if field in self.store:
if isinstance(self.store[field], DocumentStore) :
return self[field].validate()
if field in self.patchStore :
return self.validators[field].validate(self.patchStore[field])
else :
try :
return self.validators[field].validate(self.store[field])
except ValidationError as e:
raise ValidationError( "'%s' -> %s" % ( field, str(e)) )
except AttributeError:
if isinstance(self.validators[field], dict) and not isinstance(self.store[field], dict) :
raise ValueError("Validator expected a sub document for field '%s', got '%s' instead" % (field, self.store[field]) )
else :
raise
return True |
def get_condition(self):
"""
Determines the condition to be used in the condition part of the join sql.
:return: The condition for the join clause
:rtype: str or None
"""
if self.condition:
return self.condition
if type(self.right_table) is ModelTable and type(self.left_table) is ModelTable:
# loop through fields to find the field for this model
# check if this join type is for a related field
for field in self.get_all_related_objects(self.right_table):
related_model = field.model
if hasattr(field, 'related_model'):
related_model = field.related_model
if related_model == self.left_table.model:
table_join_field = field.field.column
# self.table_join_name = field.get_accessor_name()
condition = '{0}.{1} = {2}.{3}'.format(
self.right_table.get_identifier(),
self.right_table.model._meta.pk.name,
self.left_table.get_identifier(),
table_join_field,
)
return condition
# check if this join type is for a foreign key
for field in self.right_table.model._meta.fields:
if (
field.get_internal_type() == 'OneToOneField' or
field.get_internal_type() == 'ForeignKey'
):
if field.remote_field.model == self.left_table.model:
table_join_field = field.column
# self.table_join_name = field.name
condition = '{0}.{1} = {2}.{3}'.format(
self.right_table.get_identifier(),
table_join_field,
self.left_table.get_identifier(),
self.left_table.model._meta.pk.name
)
return condition
return None | Determines the condition to be used in the condition part of the join sql.
:return: The condition for the join clause
:rtype: str or None | Below is the instruction that describes the task:
### Input:
Determines the condition to be used in the condition part of the join sql.
:return: The condition for the join clause
:rtype: str or None
### Response:
def get_condition(self):
"""
Determines the condition to be used in the condition part of the join sql.
:return: The condition for the join clause
:rtype: str or None
"""
if self.condition:
return self.condition
if type(self.right_table) is ModelTable and type(self.left_table) is ModelTable:
# loop through fields to find the field for this model
# check if this join type is for a related field
for field in self.get_all_related_objects(self.right_table):
related_model = field.model
if hasattr(field, 'related_model'):
related_model = field.related_model
if related_model == self.left_table.model:
table_join_field = field.field.column
# self.table_join_name = field.get_accessor_name()
condition = '{0}.{1} = {2}.{3}'.format(
self.right_table.get_identifier(),
self.right_table.model._meta.pk.name,
self.left_table.get_identifier(),
table_join_field,
)
return condition
# check if this join type is for a foreign key
for field in self.right_table.model._meta.fields:
if (
field.get_internal_type() == 'OneToOneField' or
field.get_internal_type() == 'ForeignKey'
):
if field.remote_field.model == self.left_table.model:
table_join_field = field.column
# self.table_join_name = field.name
condition = '{0}.{1} = {2}.{3}'.format(
self.right_table.get_identifier(),
table_join_field,
self.left_table.get_identifier(),
self.left_table.model._meta.pk.name
)
return condition
return None |
def plot_theta(self, colorbar=True, cb_orientation='vertical',
cb_label='$g_\\theta$, m s$^{-2}$', ax=None, show=True,
fname=None, **kwargs):
"""
Plot the theta component of the gravity field.
Usage
-----
x.plot_theta([tick_interval, xlabel, ylabel, ax, colorbar,
cb_orientation, cb_label, show, fname, **kwargs])
Parameters
----------
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
colorbar : bool, optional, default = True
If True, plot a colorbar.
cb_orientation : str, optional, default = 'vertical'
Orientation of the colorbar: either 'vertical' or 'horizontal'.
cb_label : str, optional, default = '$g_\\theta$, m s$^{-2}$'
Text label for the colorbar.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
kwargs : optional
Keyword arguments that will be sent to the SHGrid.plot()
and plt.imshow() methods.
"""
if ax is None:
fig, axes = self.theta.plot(colorbar=colorbar,
cb_orientation=cb_orientation,
cb_label=cb_label, show=False,
**kwargs)
if show:
fig.show()
if fname is not None:
fig.savefig(fname)
return fig, axes
else:
self.theta.plot(colorbar=colorbar, cb_orientation=cb_orientation,
cb_label=cb_label, ax=ax, **kwargs) | Plot the theta component of the gravity field.
Usage
-----
x.plot_theta([tick_interval, xlabel, ylabel, ax, colorbar,
cb_orientation, cb_label, show, fname, **kwargs])
Parameters
----------
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
colorbar : bool, optional, default = True
If True, plot a colorbar.
cb_orientation : str, optional, default = 'vertical'
Orientation of the colorbar: either 'vertical' or 'horizontal'.
cb_label : str, optional, default = '$g_\\theta$, m s$^{-2}$'
Text label for the colorbar.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
kwargs : optional
Keyword arguments that will be sent to the SHGrid.plot()
and plt.imshow() methods. | Below is the instruction that describes the task:
### Input:
Plot the theta component of the gravity field.
Usage
-----
x.plot_theta([tick_interval, xlabel, ylabel, ax, colorbar,
cb_orientation, cb_label, show, fname, **kwargs])
Parameters
----------
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
colorbar : bool, optional, default = True
If True, plot a colorbar.
cb_orientation : str, optional, default = 'vertical'
Orientation of the colorbar: either 'vertical' or 'horizontal'.
cb_label : str, optional, default = '$g_\\theta$, m s$^{-2}$'
Text label for the colorbar.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
kwargs : optional
Keyword arguments that will be sent to the SHGrid.plot()
and plt.imshow() methods.
### Response:
def plot_theta(self, colorbar=True, cb_orientation='vertical',
cb_label='$g_\\theta$, m s$^{-2}$', ax=None, show=True,
fname=None, **kwargs):
"""
Plot the theta component of the gravity field.
Usage
-----
x.plot_theta([tick_interval, xlabel, ylabel, ax, colorbar,
cb_orientation, cb_label, show, fname, **kwargs])
Parameters
----------
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
colorbar : bool, optional, default = True
If True, plot a colorbar.
cb_orientation : str, optional, default = 'vertical'
Orientation of the colorbar: either 'vertical' or 'horizontal'.
cb_label : str, optional, default = '$g_\\theta$, m s$^{-2}$'
Text label for the colorbar.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
kwargs : optional
Keyword arguments that will be sent to the SHGrid.plot()
and plt.imshow() methods.
"""
if ax is None:
fig, axes = self.theta.plot(colorbar=colorbar,
cb_orientation=cb_orientation,
cb_label=cb_label, show=False,
**kwargs)
if show:
fig.show()
if fname is not None:
fig.savefig(fname)
return fig, axes
else:
self.theta.plot(colorbar=colorbar, cb_orientation=cb_orientation,
cb_label=cb_label, ax=ax, **kwargs) |
def constant(self, name, value):
"""Declare and set a project global constant.
Project global constants are normal variables but should
not be changed. They are applied to every child Jamfile."""
assert is_iterable_typed(name, basestring)
assert is_iterable_typed(value, basestring)
self.registry.current().add_constant(name[0], value) | Declare and set a project global constant.
Project global constants are normal variables but should
not be changed. They are applied to every child Jamfile. | Below is the instruction that describes the task:
### Input:
Declare and set a project global constant.
Project global constants are normal variables but should
not be changed. They are applied to every child Jamfile.
### Response:
def constant(self, name, value):
"""Declare and set a project global constant.
Project global constants are normal variables but should
not be changed. They are applied to every child Jamfile."""
assert is_iterable_typed(name, basestring)
assert is_iterable_typed(value, basestring)
self.registry.current().add_constant(name[0], value) |
def _saferound(value, decimal_places):
"""
Rounds a float value off to the desired precision
"""
try:
f = float(value)
except ValueError:
return ''
format = '%%.%df' % decimal_places
return format % f | Rounds a float value off to the desired precision | Below is the instruction that describes the task:
### Input:
Rounds a float value off to the desired precision
### Response:
def _saferound(value, decimal_places):
"""
Rounds a float value off to the desired precision
"""
try:
f = float(value)
except ValueError:
return ''
format = '%%.%df' % decimal_places
return format % f |
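A few illustrative calls with hypothetical values:
_saferound(3.14159, 2)  # '3.14'
_saferound('2.5', 1)    # '2.5'  -- numeric strings are accepted
_saferound('n/a', 2)    # ''     -- non-numeric input yields an empty string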
def _absolute(self, path):
""" Convert a filename to an absolute path """
path = FilePath(path)
if isabs(path):
return path
else:
# these are both Path objects, so joining with + is acceptable
return self.WorkingDir + path | Convert a filename to an absolute path | Below is the instruction that describes the task:
### Input:
Convert a filename to an absolute path
### Response:
def _absolute(self, path):
""" Convert a filename to an absolute path """
path = FilePath(path)
if isabs(path):
return path
else:
# these are both Path objects, so joining with + is acceptable
return self.WorkingDir + path |
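A hedged sketch of the intended behaviour, assuming FilePath acts like a string path and self.WorkingDir ends with a separator:
# with self.WorkingDir == FilePath('/tmp/job/'):
# self._absolute('reads.fasta')  -> '/tmp/job/reads.fasta'
# self._absolute('/data/ref.fa') -> '/data/ref.fa'  (already absolute, returned unchanged)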
def default(self, statement: Statement) -> Optional[bool]:
"""Executed when the command given isn't a recognized command implemented by a do_* method.
:param statement: Statement object with parsed input
"""
if self.default_to_shell:
if 'shell' not in self.exclude_from_history:
self.history.append(statement)
return self.do_shell(statement.command_and_args)
else:
err_msg = self.default_error.format(statement.command)
self.decolorized_write(sys.stderr, "{}\n".format(err_msg)) | Executed when the command given isn't a recognized command implemented by a do_* method.
:param statement: Statement object with parsed input | Below is the instruction that describes the task:
### Input:
Executed when the command given isn't a recognized command implemented by a do_* method.
:param statement: Statement object with parsed input
### Response:
def default(self, statement: Statement) -> Optional[bool]:
"""Executed when the command given isn't a recognized command implemented by a do_* method.
:param statement: Statement object with parsed input
"""
if self.default_to_shell:
if 'shell' not in self.exclude_from_history:
self.history.append(statement)
return self.do_shell(statement.command_and_args)
else:
err_msg = self.default_error.format(statement.command)
self.decolorized_write(sys.stderr, "{}\n".format(err_msg)) |
def get_terminals_as_list(self):
"""
Iterator that returns all the terminal objects
@rtype: L{Cterminal}
@return: terminal objects as list
"""
terminalList = []
for t_node in self.__get_t_nodes():
terminalList.append(Cterminal(t_node))
return terminalList | Iterator that returns all the terminal objects
@rtype: L{Cterminal}
@return: terminal objects as list | Below is the instruction that describes the task:
### Input:
Iterator that returns all the terminal objects
@rtype: L{Cterminal}
@return: terminal objects as list
### Response:
def get_terminals_as_list(self):
"""
Iterator that returns all the terminal objects
@rtype: L{Cterminal}
@return: terminal objects as list
"""
terminalList = []
for t_node in self.__get_t_nodes():
terminalList.append(Cterminal(t_node))
return terminalList |
def main(program=None,
version=None,
doc_template=None,
commands=None,
argv=None,
exit_at_end=True):
"""Top-level driver for creating subcommand-based programs.
Args:
program: The name of your program.
version: The version string for your program.
doc_template: The top-level docstring template for your program. If
`None`, a standard default version is applied.
commands: A `Subcommands` instance.
argv: The command-line arguments to parse. If `None`, this defaults to
`sys.argv[1:]`
exit_at_end: Whether to call `sys.exit()` at the end of the function.
There are two ways to use this function. First, you can pass `program`,
`version`, and `doc_template`, in which case `docopt_subcommands` will use
these arguments along with the subcommands registered with `command()` to
define your program.
The second way to use this function is to pass in a `Subcommands` object
via the `commands` argument. In this case the `program`, `version`, and
`doc_template` arguments are ignored, and the `Subcommands` instance takes
precedence.
In both cases the `argv` argument can be used to specify the arguments to
be parsed.
"""
if commands is None:
if program is None:
raise ValueError(
'`program` required if subcommand object not provided')
if version is None:
raise ValueError(
'`version` required if subcommand object not provided')
commands = Subcommands(program,
version,
doc_template=doc_template)
for name, handler in _commands:
commands.add_command(handler, name)
if argv is None:
argv = sys.argv[1:]
result = commands(argv)
if exit_at_end:
sys.exit(result)
else:
return result | Top-level driver for creating subcommand-based programs.
Args:
program: The name of your program.
version: The version string for your program.
doc_template: The top-level docstring template for your program. If
`None`, a standard default version is applied.
commands: A `Subcommands` instance.
argv: The command-line arguments to parse. If `None`, this defaults to
`sys.argv[1:]`
exit_at_end: Whether to call `sys.exit()` at the end of the function.
There are two ways to use this function. First, you can pass `program`,
`version`, and `doc_template`, in which case `docopt_subcommands` will use
these arguments along with the subcommands registered with `command()` to
define your program.
The second way to use this function is to pass in a `Subcommands` object
via the `commands` argument. In this case the `program`, `version`, and
`doc_template` arguments are ignored, and the `Subcommands` instance takes
precedence.
In both cases the `argv` argument can be used to specify the arguments to
be parsed. | Below is the instruction that describes the task:
### Input:
Top-level driver for creating subcommand-based programs.
Args:
program: The name of your program.
version: The version string for your program.
doc_template: The top-level docstring template for your program. If
`None`, a standard default version is applied.
commands: A `Subcommands` instance.
argv: The command-line arguments to parse. If `None`, this defaults to
`sys.argv[1:]`
exit_at_end: Whether to call `sys.exit()` at the end of the function.
There are two ways to use this function. First, you can pass `program`,
`version`, and `doc_template`, in which case `docopt_subcommands` will use
these arguments along with the subcommands registered with `command()` to
define your program.
The second way to use this function is to pass in a `Subcommands` object
via the `commands` argument. In this case the `program`, `version`, and
`doc_template` arguments are ignored, and the `Subcommands` instance takes
precedence.
In both cases the `argv` argument can be used to specify the arguments to
be parsed.
### Response:
def main(program=None,
version=None,
doc_template=None,
commands=None,
argv=None,
exit_at_end=True):
"""Top-level driver for creating subcommand-based programs.
Args:
program: The name of your program.
version: The version string for your program.
doc_template: The top-level docstring template for your program. If
`None`, a standard default version is applied.
commands: A `Subcommands` instance.
argv: The command-line arguments to parse. If `None`, this defaults to
`sys.argv[1:]`
exit_at_end: Whether to call `sys.exit()` at the end of the function.
There are two ways to use this function. First, you can pass `program`,
`version`, and `doc_template`, in which case `docopt_subcommands` will use
these arguments along with the subcommands registered with `command()` to
define your program.
The second way to use this function is to pass in a `Subcommands` object
via the `commands` argument. In this case the `program`, `version`, and
`doc_template` arguments are ignored, and the `Subcommands` instance takes
precedence.
In both cases the `argv` argument can be used to specify the arguments to
be parsed.
"""
if commands is None:
if program is None:
raise ValueError(
'`program` required if subcommand object not provided')
if version is None:
raise ValueError(
'`version` required if subcommand object not provided')
commands = Subcommands(program,
version,
doc_template=doc_template)
for name, handler in _commands:
commands.add_command(handler, name)
if argv is None:
argv = sys.argv[1:]
result = commands(argv)
if exit_at_end:
sys.exit(result)
else:
return result |
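A rough end-to-end sketch of the first calling style; the decorator form and the handler docstring convention are assumptions inferred from the command() registration mentioned above, not a verbatim API reference:
import docopt_subcommands as dsc

@dsc.command()
def hello(args):
    """usage: {program} hello <name>

    Greet someone by name.
    """
    print(args)

dsc.main(program='greeter', version='greeter 1.0')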
def _updateSequenceInfo(self, r):
"""Keep track of sequence and make sure time goes forward
Check if the current record is the beginning of a new sequence
A new sequence starts in 2 cases:
1. The sequence id changed (if there is a sequence id field)
2. The reset field is 1 (if there is a reset field)
Note that if there is no sequenceId field or resetId field then the entire
dataset is technically one big sequence. The function will not return True
for the first record in this case. This is Ok because it is important to
detect new sequences only when there are multiple sequences in the file.
"""
# Get current sequence id (if any)
newSequence = False
sequenceId = (r[self._sequenceIdIdx]
if self._sequenceIdIdx is not None else None)
if sequenceId != self._currSequence:
# verify that the new sequence didn't show up before
if sequenceId in self._sequences:
raise Exception('Broken sequence: %s, record: %s' % \
(sequenceId, r))
# add the finished sequence to the set of sequence
self._sequences.add(self._currSequence)
self._currSequence = sequenceId
# Verify that the reset is consistent (if there is one)
if self._resetIdx:
assert r[self._resetIdx] == 1
newSequence = True
else:
# Check the reset
reset = False
if self._resetIdx:
reset = r[self._resetIdx]
if reset == 1:
newSequence = True
# If it's still the same old sequence make sure the time flows forward
if not newSequence:
if self._timeStampIdx and self._currTime is not None:
t = r[self._timeStampIdx]
if t < self._currTime:
raise Exception('No time travel. Early timestamp for record: %s' % r)
if self._timeStampIdx:
self._currTime = r[self._timeStampIdx] | Keep track of sequence and make sure time goes forward
Check if the current record is the beginning of a new sequence
A new sequence starts in 2 cases:
1. The sequence id changed (if there is a sequence id field)
2. The reset field is 1 (if there is a reset field)
Note that if there is no sequenceId field or resetId field then the entire
dataset is technically one big sequence. The function will not return True
for the first record in this case. This is Ok because it is important to
detect new sequences only when there are multiple sequences in the file. | Below is the instruction that describes the task:
### Input:
Keep track of sequence and make sure time goes forward
Check if the current record is the beginning of a new sequence
A new sequence starts in 2 cases:
1. The sequence id changed (if there is a sequence id field)
2. The reset field is 1 (if there is a reset field)
Note that if there is no sequenceId field or resetId field then the entire
dataset is technically one big sequence. The function will not return True
for the first record in this case. This is Ok because it is important to
detect new sequences only when there are multiple sequences in the file.
### Response:
def _updateSequenceInfo(self, r):
"""Keep track of sequence and make sure time goes forward
Check if the current record is the beginning of a new sequence
A new sequence starts in 2 cases:
1. The sequence id changed (if there is a sequence id field)
2. The reset field is 1 (if there is a reset field)
Note that if there is no sequenceId field or resetId field then the entire
dataset is technically one big sequence. The function will not return True
for the first record in this case. This is Ok because it is important to
detect new sequences only when there are multiple sequences in the file.
"""
# Get current sequence id (if any)
newSequence = False
sequenceId = (r[self._sequenceIdIdx]
if self._sequenceIdIdx is not None else None)
if sequenceId != self._currSequence:
# verify that the new sequence didn't show up before
if sequenceId in self._sequences:
raise Exception('Broken sequence: %s, record: %s' % \
(sequenceId, r))
# add the finished sequence to the set of sequence
self._sequences.add(self._currSequence)
self._currSequence = sequenceId
# Verify that the reset is consistent (if there is one)
if self._resetIdx:
assert r[self._resetIdx] == 1
newSequence = True
else:
# Check the reset
reset = False
if self._resetIdx:
reset = r[self._resetIdx]
if reset == 1:
newSequence = True
# If it's still the same old sequence make sure the time flows forward
if not newSequence:
if self._timeStampIdx and self._currTime is not None:
t = r[self._timeStampIdx]
if t < self._currTime:
raise Exception('No time travel. Early timestamp for record: %s' % r)
if self._timeStampIdx:
self._currTime = r[self._timeStampIdx] |
def fast_group_adder(wires_to_add, reducer=wallace_reducer, final_adder=kogge_stone):
"""
A generalization of the carry save adder, this is designed to add many numbers
together in a manner that is both area and time efficient. Uses a tree reducer
to achieve this performance
:param [WireVector] wires_to_add: an array of wirevectors to add
:param reducer: the tree reducer to use
:param final_adder: The two value adder to use at the end
:return: a wirevector with the result of the addition
The length of the result is:
max(len(w) for w in wires_to_add) + ceil(len(wires_to_add))
"""
import math
longest_wire_len = max(len(w) for w in wires_to_add)
result_bitwidth = longest_wire_len + int(math.ceil(math.log(len(wires_to_add), 2)))
bits = [[] for i in range(longest_wire_len)]
for wire in wires_to_add:
for bit_loc, bit in enumerate(wire):
bits[bit_loc].append(bit)
return reducer(bits, result_bitwidth, final_adder) | A generalization of the carry save adder, this is designed to add many numbers
together in a manner that is both area and time efficient. Uses a tree reducer
to achieve this performance
:param [WireVector] wires_to_add: an array of wirevectors to add
:param reducer: the tree reducer to use
:param final_adder: The two value adder to use at the end
:return: a wirevector with the result of the addition
The length of the result is:
max(len(w) for w in wires_to_add) + ceil(len(wires_to_add)) | Below is the instruction that describes the task:
### Input:
A generalization of the carry save adder, this is designed to add many numbers
together in a manner that is both area and time efficient. Uses a tree reducer
to achieve this performance
:param [WireVector] wires_to_add: an array of wirevectors to add
:param reducer: the tree reducer to use
:param final_adder: The two value adder to use at the end
:return: a wirevector with the result of the addition
The length of the result is:
max(len(w) for w in wires_to_add) + ceil(len(wires_to_add))
### Response:
def fast_group_adder(wires_to_add, reducer=wallace_reducer, final_adder=kogge_stone):
"""
A generalization of the carry save adder, this is designed to add many numbers
together in a both area and time efficient manner. Uses a tree reducer
to achieve this performance
:param [WireVector] wires_to_add: an array of wirevectors to add
:param reducer: the tree reducer to use
:param final_adder: The two value adder to use at the end
:return: a wirevector with the result of the addition
The length of the result is:
max(len(w) for w in wires_to_add) + ceil(len(wires_to_add))
"""
import math
longest_wire_len = max(len(w) for w in wires_to_add)
result_bitwidth = longest_wire_len + int(math.ceil(math.log(len(wires_to_add), 2)))
bits = [[] for i in range(longest_wire_len)]
for wire in wires_to_add:
for bit_loc, bit in enumerate(wire):
bits[bit_loc].append(bit)
return reducer(bits, result_bitwidth, final_adder) |
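A quick check of the result-width formula above: adding five 8-bit wires gives max(len(w)) = 8 and ceil(log2(5)) = 3, so the returned wirevector is 11 bits wide -- enough to hold the worst case 5 * 255 = 1275 < 2**11.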
def create_ondemand_instances(ec2, image_id, spec, num_instances=1):
"""
Requests the RunInstances EC2 API call but accounts for the race between recently created
instance profiles, IAM roles and an instance creation that refers to them.
:rtype: list[Instance]
"""
instance_type = spec['instance_type']
log.info('Creating %s instance(s) ... ', instance_type)
for attempt in retry_ec2(retry_for=a_long_time,
retry_while=inconsistencies_detected):
with attempt:
return ec2.run_instances(image_id,
min_count=num_instances,
max_count=num_instances,
**spec).instances | Requests the RunInstances EC2 API call but accounts for the race between recently created
instance profiles, IAM roles and an instance creation that refers to them.
:rtype: list[Instance] | Below is the instruction that describes the task:
### Input:
Requests the RunInstances EC2 API call but accounts for the race between recently created
instance profiles, IAM roles and an instance creation that refers to them.
:rtype: list[Instance]
### Response:
def create_ondemand_instances(ec2, image_id, spec, num_instances=1):
"""
Requests the RunInstances EC2 API call but accounts for the race between recently created
instance profiles, IAM roles and an instance creation that refers to them.
:rtype: list[Instance]
"""
instance_type = spec['instance_type']
log.info('Creating %s instance(s) ... ', instance_type)
for attempt in retry_ec2(retry_for=a_long_time,
retry_while=inconsistencies_detected):
with attempt:
return ec2.run_instances(image_id,
min_count=num_instances,
max_count=num_instances,
**spec).instances |
def describe_vpc(record):
"""Attempts to describe vpc ids."""
account_id = record['account']
vpc_name = cloudwatch.filter_request_parameters('vpcName', record)
vpc_id = cloudwatch.filter_request_parameters('vpcId', record)
try:
if vpc_id and vpc_name: # pylint: disable=R1705
return describe_vpcs(
account_number=account_id,
assume_role=HISTORICAL_ROLE,
region=CURRENT_REGION,
Filters=[
{
'Name': 'vpc-id',
'Values': [vpc_id]
}
]
)
elif vpc_id:
return describe_vpcs(
account_number=account_id,
assume_role=HISTORICAL_ROLE,
region=CURRENT_REGION,
VpcIds=[vpc_id]
)
else:
raise Exception('[X] Describe requires VpcId.')
except ClientError as exc:
if exc.response['Error']['Code'] == 'InvalidVpc.NotFound':
return []
raise exc | Attempts to describe vpc ids. | Below is the instruction that describes the task:
### Input:
Attempts to describe vpc ids.
### Response:
def describe_vpc(record):
"""Attempts to describe vpc ids."""
account_id = record['account']
vpc_name = cloudwatch.filter_request_parameters('vpcName', record)
vpc_id = cloudwatch.filter_request_parameters('vpcId', record)
try:
if vpc_id and vpc_name: # pylint: disable=R1705
return describe_vpcs(
account_number=account_id,
assume_role=HISTORICAL_ROLE,
region=CURRENT_REGION,
Filters=[
{
'Name': 'vpc-id',
'Values': [vpc_id]
}
]
)
elif vpc_id:
return describe_vpcs(
account_number=account_id,
assume_role=HISTORICAL_ROLE,
region=CURRENT_REGION,
VpcIds=[vpc_id]
)
else:
raise Exception('[X] Describe requires VpcId.')
except ClientError as exc:
if exc.response['Error']['Code'] == 'InvalidVpc.NotFound':
return []
raise exc |
def managed(name, port, services=None, user=None, password=None, bypass_domains=None, network_service='Ethernet'):
'''
Manages proxy settings for this mininon
name
The proxy server to use
port
The port used by the proxy server
services
A list of the services that should use the given proxy settings, valid services include http, https and ftp.
If no service is given all of the valid services will be used.
user
The username to use for the proxy server if required
password
The password to use for the proxy server if required
bypass_domains
An array of the domains that should bypass the proxy
network_service
The network service to apply the changes to, this only necessary on
macOS
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
valid_services = ['http', 'https', 'ftp']
if services is None:
services = valid_services
# Darwin
if __grains__['os'] in ['MacOS', 'Darwin']:
ret['changes'] = {'new': []}
for service in services:
current_settings = __salt__['proxy.get_{0}_proxy'.format(service)]()
if current_settings.get('server') == name and current_settings.get('port') == six.text_type(port):
ret['comment'] += '{0} proxy settings already set.\n'.format(service)
elif __salt__['proxy.set_{0}_proxy'.format(service)](name, port, user, password, network_service):
ret['comment'] += '{0} proxy settings updated correctly\n'.format(service)
ret['changes']['new'].append({'service': service, 'server': name, 'port': port, 'user': user})
else:
ret['result'] = False
ret['comment'] += 'Failed to set {0} proxy settings.\n'.format(service)
if bypass_domains is not None:
current_domains = __salt__['proxy.get_proxy_bypass']()
if len(set(current_domains).intersection(bypass_domains)) == len(bypass_domains):
ret['comment'] += 'Proxy bypass domains are already set correctly.\n'
elif __salt__['proxy.set_proxy_bypass'](bypass_domains, network_service):
ret['comment'] += 'Proxy bypass domains updated correctly\n'
ret['changes']['new'].append({'bypass_domains': list(set(bypass_domains).difference(current_domains))})
else:
ret['result'] = False
ret['comment'] += 'Failed to set bypass proxy domains.\n'
if not ret['changes']['new']:
del ret['changes']['new']
return ret
# Windows - Needs its own branch as all settings need to be set at the same time
if __grains__['os'] in ['Windows']:
changes_needed = False
current_settings = __salt__['proxy.get_proxy_win']()
current_domains = __salt__['proxy.get_proxy_bypass']()
if current_settings.get('enabled', False) is True:
for service in services:
# We need to update one of our proxy servers
if service not in current_settings:
changes_needed = True
break
if current_settings[service]['server'] != name or current_settings[service]['port'] != six.text_type(port):
changes_needed = True
break
else:
# Proxy settings aren't enabled
changes_needed = True
# We need to update our bypass domains
if len(set(current_domains).intersection(bypass_domains)) != len(bypass_domains):
changes_needed = True
if changes_needed:
if __salt__['proxy.set_proxy_win'](name, port, services, bypass_domains):
ret['comment'] = 'Proxy settings updated correctly'
else:
ret['result'] = False
ret['comment'] = 'Failed to set {0} proxy settings.'
else:
ret['comment'] = 'Proxy settings already correct.'
return ret | Manages proxy settings for this minion
name
The proxy server to use
port
The port used by the proxy server
services
A list of the services that should use the given proxy settings, valid services include http, https and ftp.
If no service is given all of the valid services will be used.
user
The username to use for the proxy server if required
password
The password to use for the proxy server if required
bypass_domains
An array of the domains that should bypass the proxy
network_service
        The network service to apply the changes to, this is only necessary on
macOS | Below is the the instruction that describes the task:
### Input:
    Manages proxy settings for this minion
name
The proxy server to use
port
The port used by the proxy server
services
A list of the services that should use the given proxy settings, valid services include http, https and ftp.
If no service is given all of the valid services will be used.
user
The username to use for the proxy server if required
password
The password to use for the proxy server if required
bypass_domains
An array of the domains that should bypass the proxy
network_service
        The network service to apply the changes to, this is only necessary on
macOS
### Response:
def managed(name, port, services=None, user=None, password=None, bypass_domains=None, network_service='Ethernet'):
'''
    Manages proxy settings for this minion
name
The proxy server to use
port
The port used by the proxy server
services
A list of the services that should use the given proxy settings, valid services include http, https and ftp.
If no service is given all of the valid services will be used.
user
The username to use for the proxy server if required
password
The password to use for the proxy server if required
bypass_domains
An array of the domains that should bypass the proxy
network_service
        The network service to apply the changes to, this is only necessary on
macOS
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
valid_services = ['http', 'https', 'ftp']
if services is None:
services = valid_services
# Darwin
if __grains__['os'] in ['MacOS', 'Darwin']:
ret['changes'] = {'new': []}
for service in services:
current_settings = __salt__['proxy.get_{0}_proxy'.format(service)]()
if current_settings.get('server') == name and current_settings.get('port') == six.text_type(port):
ret['comment'] += '{0} proxy settings already set.\n'.format(service)
elif __salt__['proxy.set_{0}_proxy'.format(service)](name, port, user, password, network_service):
ret['comment'] += '{0} proxy settings updated correctly\n'.format(service)
ret['changes']['new'].append({'service': service, 'server': name, 'port': port, 'user': user})
else:
ret['result'] = False
                ret['comment'] += 'Failed to set {0} proxy settings.\n'.format(service)
if bypass_domains is not None:
current_domains = __salt__['proxy.get_proxy_bypass']()
if len(set(current_domains).intersection(bypass_domains)) == len(bypass_domains):
ret['comment'] += 'Proxy bypass domains are already set correctly.\n'
elif __salt__['proxy.set_proxy_bypass'](bypass_domains, network_service):
ret['comment'] += 'Proxy bypass domains updated correctly\n'
ret['changes']['new'].append({'bypass_domains': list(set(bypass_domains).difference(current_domains))})
else:
ret['result'] = False
ret['comment'] += 'Failed to set bypass proxy domains.\n'
if not ret['changes']['new']:
del ret['changes']['new']
return ret
# Windows - Needs its own branch as all settings need to be set at the same time
if __grains__['os'] in ['Windows']:
changes_needed = False
current_settings = __salt__['proxy.get_proxy_win']()
current_domains = __salt__['proxy.get_proxy_bypass']()
if current_settings.get('enabled', False) is True:
for service in services:
# We need to update one of our proxy servers
if service not in current_settings:
changes_needed = True
break
if current_settings[service]['server'] != name or current_settings[service]['port'] != six.text_type(port):
changes_needed = True
break
else:
# Proxy settings aren't enabled
changes_needed = True
# We need to update our bypass domains
if len(set(current_domains).intersection(bypass_domains)) != len(bypass_domains):
changes_needed = True
if changes_needed:
if __salt__['proxy.set_proxy_win'](name, port, services, bypass_domains):
ret['comment'] = 'Proxy settings updated correctly'
else:
ret['result'] = False
                ret['comment'] = 'Failed to set proxy settings.'
else:
ret['comment'] = 'Proxy settings already correct.'
return ret |
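A minimal standalone sketch of the bypass-domain comparison that managed() relies on in both branches; the domain lists and variable names below are invented for illustration and are not taken from the Salt code above.

# Illustrative values only -- not taken from any real proxy configuration.
current_domains = ['localhost', '127.0.0.1', 'internal.example.com']
bypass_domains = ['localhost', 'intranet.example.com']

# The state treats the settings as already correct only when every requested
# domain is present in the current configuration.
already_correct = len(set(current_domains).intersection(bypass_domains)) == len(bypass_domains)

# Only the genuinely new domains would be reported in the changes dictionary.
new_domains = sorted(set(bypass_domains).difference(current_domains))

print(already_correct)  # False
print(new_domains)      # ['intranet.example.com']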
def validate(self, cmd, messages=None):
"""Returns True if the given Command is valid, False otherwise.
Validation error messages are appended to an optional messages
array.
"""
valid = True
args = [ arg for arg in cmd.args if arg is not None ]
if self.nargs != len(args):
valid = False
if messages is not None:
msg = 'Expected %d arguments, but received %d.'
messages.append(msg % (self.nargs, len(args)))
for defn, value in zip(self.args, cmd.args):
if value is None:
valid = False
if messages is not None:
messages.append('Argument "%s" is missing.' % defn.name)
elif defn.validate(value, messages) is False:
valid = False
if len(cmd._unrecognized) > 0:
valid = False
if messages is not None:
for name in cmd.unrecognized:
messages.append('Argument "%s" is unrecognized.' % name)
return valid | Returns True if the given Command is valid, False otherwise.
Validation error messages are appended to an optional messages
array. | Below is the the instruction that describes the task:
### Input:
Returns True if the given Command is valid, False otherwise.
Validation error messages are appended to an optional messages
array.
### Response:
def validate(self, cmd, messages=None):
"""Returns True if the given Command is valid, False otherwise.
Validation error messages are appended to an optional messages
array.
"""
valid = True
args = [ arg for arg in cmd.args if arg is not None ]
if self.nargs != len(args):
valid = False
if messages is not None:
msg = 'Expected %d arguments, but received %d.'
messages.append(msg % (self.nargs, len(args)))
for defn, value in zip(self.args, cmd.args):
if value is None:
valid = False
if messages is not None:
messages.append('Argument "%s" is missing.' % defn.name)
elif defn.validate(value, messages) is False:
valid = False
if len(cmd._unrecognized) > 0:
valid = False
if messages is not None:
for name in cmd.unrecognized:
messages.append('Argument "%s" is unrecognized.' % name)
return valid |
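A self-contained sketch of the per-argument validation loop used by validate(); the FakeArgDefn class and the argument names are hypothetical stand-ins, since the real argument-definition class is not shown in this row.

class FakeArgDefn(object):
    """Hypothetical argument definition used only for this example."""
    def __init__(self, name):
        self.name = name

    def validate(self, value, messages=None):
        ok = isinstance(value, int)
        if not ok and messages is not None:
            messages.append('Argument "%s" must be an integer.' % self.name)
        return ok

defns = [FakeArgDefn('duration'), FakeArgDefn('retries')]
values = [10, None]  # the second argument is missing

messages = []
valid = True
for defn, value in zip(defns, values):
    if value is None:
        valid = False
        messages.append('Argument "%s" is missing.' % defn.name)
    elif defn.validate(value, messages) is False:
        valid = False

print(valid)     # False
print(messages)  # ['Argument "retries" is missing.']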
def close(self):
"""Flush the file and close it.
A closed file cannot be written any more. Calling
:meth:`close` more than once is allowed.
"""
if not self._closed:
self.__flush()
object.__setattr__(self, "_closed", True) | Flush the file and close it.
A closed file cannot be written any more. Calling
:meth:`close` more than once is allowed. | Below is the the instruction that describes the task:
### Input:
Flush the file and close it.
A closed file cannot be written any more. Calling
:meth:`close` more than once is allowed.
### Response:
def close(self):
"""Flush the file and close it.
A closed file cannot be written any more. Calling
:meth:`close` more than once is allowed.
"""
if not self._closed:
self.__flush()
object.__setattr__(self, "_closed", True) |
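A simplified sketch of the same idempotent-close pattern; the buffer and flush details are made up, and plain attribute assignment is used instead of object.__setattr__, which the original presumably needs because its class customises attribute setting.

class TinyWriter(object):
    def __init__(self):
        self._closed = False
        self._buffer = []

    def write(self, data):
        if self._closed:
            raise ValueError('I/O operation on closed file')
        self._buffer.append(data)

    def _flush(self):
        # Stand-in for the real flush: just emit whatever was buffered.
        print(''.join(self._buffer))

    def close(self):
        # The guard makes repeated close() calls harmless, as documented above.
        if not self._closed:
            self._flush()
            self._closed = True

w = TinyWriter()
w.write('hello')
w.close()
w.close()  # second call is a no-op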
def _converged(self, X):
"""Covergence if || likehood - last_likelihood || < tolerance"""
if len(self.responsibilities) < 2:
return False
diff = np.linalg.norm(self.responsibilities[-1] - self.responsibilities[-2])
        return diff <= self.tolerance | Convergence if || likelihood - last_likelihood || < tolerance | Below is the the instruction that describes the task:
### Input:
Convergence if || likelihood - last_likelihood || < tolerance
### Response:
def _converged(self, X):
"""Covergence if || likehood - last_likelihood || < tolerance"""
if len(self.responsibilities) < 2:
return False
diff = np.linalg.norm(self.responsibilities[-1] - self.responsibilities[-2])
return diff <= self.tolerance |
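A standalone sketch of the convergence test above: the last two responsibility matrices are compared by Frobenius norm against the tolerance. The matrices and tolerance are invented for illustration.

import numpy as np

tolerance = 1e-3
responsibilities = [
    np.array([[0.60, 0.40], [0.30, 0.70]]),
    np.array([[0.6004, 0.3996], [0.2998, 0.7002]]),
]

if len(responsibilities) < 2:
    converged = False
else:
    # np.linalg.norm on a matrix defaults to the Frobenius norm.
    diff = np.linalg.norm(responsibilities[-1] - responsibilities[-2])
    converged = diff <= tolerance

print(converged)  # True, since the two iterations barely differ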
def _getFromTime(self, atDate=None):
"""
Time that the event starts (in the local time zone).
"""
return getLocalTime(self.date, self.time_from, self.tz) | Time that the event starts (in the local time zone). | Below is the the instruction that describes the task:
### Input:
Time that the event starts (in the local time zone).
### Response:
def _getFromTime(self, atDate=None):
"""
Time that the event starts (in the local time zone).
"""
return getLocalTime(self.date, self.time_from, self.tz) |
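getLocalTime() is not defined in this row; a plausible reading is that it attaches the event's timezone to its date and start time, roughly as sketched below with invented values (requires Python 3.9+ for zoneinfo).

from datetime import date, time, datetime
from zoneinfo import ZoneInfo

event_date = date(2024, 6, 1)    # stand-in for self.date
time_from = time(19, 30)         # stand-in for self.time_from
tz = ZoneInfo('Europe/London')   # stand-in for self.tz

starts_at = datetime.combine(event_date, time_from, tzinfo=tz)
print(starts_at.isoformat())  # 2024-06-01T19:30:00+01:00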
def insert_cols(self, col, no_cols=1):
"""Adds no_cols columns before col, appends if col > maxcols
and marks grid as changed
"""
# Mark content as changed
post_command_event(self.main_window, self.ContentChangedMsg)
tab = self.grid.current_table
self.code_array.insert(col, no_cols, axis=1, tab=tab) | Adds no_cols columns before col, appends if col > maxcols
and marks grid as changed | Below is the the instruction that describes the task:
### Input:
Adds no_cols columns before col, appends if col > maxcols
and marks grid as changed
### Response:
def insert_cols(self, col, no_cols=1):
"""Adds no_cols columns before col, appends if col > maxcols
and marks grid as changed
"""
# Mark content as changed
post_command_event(self.main_window, self.ContentChangedMsg)
tab = self.grid.current_table
self.code_array.insert(col, no_cols, axis=1, tab=tab) |
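The grid's code_array is not available here; this sketch only shows the kind of axis-1 insertion that insert_cols() delegates to, using a plain NumPy array with invented contents.

import numpy as np

grid = np.array([[1, 2],
                 [3, 4]])

col, no_cols = 1, 1
# Insert `no_cols` zero-filled columns before index `col`.
widened = np.insert(grid, [col] * no_cols, 0, axis=1)
print(widened)
# [[1 0 2]
#  [3 0 4]]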