code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def _add(self, ctx, table_name, record_id, column_values):
"""
:type column_values: list of (column, value_json)
"""
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
for column, value in column_values:
ctx.add_column(ovsrec_row, column, value)
ctx.invalidate_cache() | :type column_values: list of (column, value_json) | Below is the instruction that describes the task:
### Input:
:type column_values: list of (column, value_json)
### Response:
def _add(self, ctx, table_name, record_id, column_values):
"""
:type column_values: list of (column, value_json)
"""
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
for column, value in column_values:
ctx.add_column(ovsrec_row, column, value)
ctx.invalidate_cache() |
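As a rough illustration of the expected argument shape, a hypothetical internal call might look like the following; the table name, record id, and column/value pairs are invented for the example and are not taken from the module.

column_values = [('tag', 100), ('external_ids', {'purpose': 'test'})]  # (column, value_json) pairs
self._add(ctx, 'Port', 'port0', column_values)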
async def search_participant(self, name, force_update=False):
""" search a participant by (display) name
|methcoro|
Args:
name: display name of the participant
force_update (default=False): True to force an update to the Challonge API
Returns:
Participant: None if not found
Raises:
APIException
"""
if force_update or self.participants is None:
await self.get_participants()
if self.participants is not None:
for p in self.participants:
if p.name == name:
return p
return None | search a participant by (display) name
|methcoro|
Args:
name: display name of the participant
force_update (default=False): True to force an update to the Challonge API
Returns:
Participant: None if not found
Raises:
APIException | Below is the instruction that describes the task:
### Input:
search a participant by (display) name
|methcoro|
Args:
name: display name of the participant
force_update (default=False): True to force an update to the Challonge API
Returns:
Participant: None if not found
Raises:
APIException
### Response:
async def search_participant(self, name, force_update=False):
""" search a participant by (display) name
|methcoro|
Args:
name: display name of the participant
force_update (default=False): True to force an update to the Challonge API
Returns:
Participant: None if not found
Raises:
APIException
"""
if force_update or self.participants is None:
await self.get_participants()
if self.participants is not None:
for p in self.participants:
if p.name == name:
return p
return None |
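A minimal usage sketch, assuming `tournament` is an instance of the class this coroutine belongs to (for example a tournament object from the achallonge library); the participant name is illustrative.

import asyncio

async def find_alice(tournament):
    # force_update=True refreshes the cached participant list first
    participant = await tournament.search_participant('Alice', force_update=True)
    print(participant.name if participant else 'Alice is not registered')

# asyncio.get_event_loop().run_until_complete(find_alice(tournament))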
def to_bytes(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
"""Make sure that a string is a byte string
:arg obj: An object to make sure is a byte string. In most cases this
will be either a text string or a byte string. However, with
``nonstring='simplerepr'``, this can be used as a traceback-free
version of ``str(obj)``.
:kwarg encoding: The encoding to use to transform from a text string to
a byte string. Defaults to using 'utf-8'.
:kwarg errors: The error handler to use if the text string is not
encodable using the specified encoding. Any valid `codecs error
handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
may be specified. There are three additional error strategies
specifically aimed at helping people to port code. The first two are:
:surrogate_or_strict: Will use ``surrogateescape`` if it is a valid
handler, otherwise it will use ``strict``
:surrogate_or_replace: Will use ``surrogateescape`` if it is a valid
handler, otherwise it will use ``replace``.
Because ``surrogateescape`` was added in Python3 this usually means that
Python3 will use ``surrogateescape`` and Python2 will use the fallback
error handler. Note that the code checks for ``surrogateescape`` when the
module is imported. If you have a backport of ``surrogateescape`` for
Python2, be sure to register the error handler prior to importing this
module.
The last error handler is:
:surrogate_then_replace: Will use ``surrogateescape`` if it is a valid
handler. If encoding with ``surrogateescape`` would traceback,
surrogates are first replaced with a replacement character
and then the string is encoded using ``replace`` (which replaces
the rest of the nonencodable bytes). If ``surrogateescape`` is
not present it will simply use ``replace``. (Added in Ansible 2.3)
This strategy is designed to never traceback when it attempts
to encode a string.
The default until Ansible-2.2 was ``surrogate_or_replace``
From Ansible-2.3 onwards, the default is ``surrogate_then_replace``.
:kwarg nonstring: The strategy to use if a nonstring is specified in
``obj``. Default is 'simplerepr'. Valid values are:
:simplerepr: The default. This takes the ``str`` of the object and
then returns the bytes version of that string.
:empty: Return an empty byte string
:passthru: Return the object passed in
:strict: Raise a :exc:`TypeError`
:returns: Typically this returns a byte string. If a nonstring object is
passed in this may be a different type depending on the strategy
specified by nonstring. This will never return a text string.
.. note:: If passed a byte string, this function does not check that the
string is valid in the specified encoding. If it's important that the
byte string is in the specified encoding do::
encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')
.. version_changed:: 2.3
Added the ``surrogate_then_replace`` error handler and made it the default error handler.
"""
if isinstance(obj, binary_type):
return obj
# We're given a text string
# If it has surrogates, we know because it will decode
original_errors = errors
if errors in _COMPOSED_ERROR_HANDLERS:
if HAS_SURROGATEESCAPE:
errors = 'surrogateescape'
elif errors == 'surrogate_or_strict':
errors = 'strict'
else:
errors = 'replace'
if isinstance(obj, text_type):
try:
# Try this first as it's the fastest
return obj.encode(encoding, errors)
except UnicodeEncodeError:
if original_errors in (None, 'surrogate_then_replace'):
# Slow but works
return_string = obj.encode('utf-8', 'surrogateescape')
return_string = return_string.decode('utf-8', 'replace')
return return_string.encode(encoding, 'replace')
raise
# Note: We do these last even though we have to call to_bytes again on the
# value because we're optimizing the common case
if nonstring == 'simplerepr':
try:
value = str(obj)
except UnicodeError:
try:
value = repr(obj)
except UnicodeError:
# Giving up
return to_bytes('')
elif nonstring == 'passthru':
return obj
elif nonstring == 'empty':
# python2.4 doesn't have b''
return to_bytes('')
elif nonstring == 'strict':
raise TypeError('obj must be a string type')
else:
raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)
return to_bytes(value, encoding, errors) | Make sure that a string is a byte string
:arg obj: An object to make sure is a byte string. In most cases this
will be either a text string or a byte string. However, with
``nonstring='simplerepr'``, this can be used as a traceback-free
version of ``str(obj)``.
:kwarg encoding: The encoding to use to transform from a text string to
a byte string. Defaults to using 'utf-8'.
:kwarg errors: The error handler to use if the text string is not
encodable using the specified encoding. Any valid `codecs error
handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
may be specified. There are three additional error strategies
specifically aimed at helping people to port code. The first two are:
:surrogate_or_strict: Will use ``surrogateescape`` if it is a valid
handler, otherwise it will use ``strict``
:surrogate_or_replace: Will use ``surrogateescape`` if it is a valid
handler, otherwise it will use ``replace``.
Because ``surrogateescape`` was added in Python3 this usually means that
Python3 will use ``surrogateescape`` and Python2 will use the fallback
error handler. Note that the code checks for ``surrogateescape`` when the
module is imported. If you have a backport of ``surrogateescape`` for
Python2, be sure to register the error handler prior to importing this
module.
The last error handler is:
:surrogate_then_replace: Will use ``surrogateescape`` if it is a valid
handler. If encoding with ``surrogateescape`` would traceback,
surrogates are first replaced with a replacement character
and then the string is encoded using ``replace`` (which replaces
the rest of the nonencodable bytes). If ``surrogateescape`` is
not present it will simply use ``replace``. (Added in Ansible 2.3)
This strategy is designed to never traceback when it attempts
to encode a string.
The default until Ansible-2.2 was ``surrogate_or_replace``
From Ansible-2.3 onwards, the default is ``surrogate_then_replace``.
:kwarg nonstring: The strategy to use if a nonstring is specified in
``obj``. Default is 'simplerepr'. Valid values are:
:simplerepr: The default. This takes the ``str`` of the object and
then returns the bytes version of that string.
:empty: Return an empty byte string
:passthru: Return the object passed in
:strict: Raise a :exc:`TypeError`
:returns: Typically this returns a byte string. If a nonstring object is
passed in this may be a different type depending on the strategy
specified by nonstring. This will never return a text string.
.. note:: If passed a byte string, this function does not check that the
string is valid in the specified encoding. If it's important that the
byte string is in the specified encoding do::
encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')
.. version_changed:: 2.3
Added the ``surrogate_then_replace`` error handler and made it the default error handler. | Below is the instruction that describes the task:
### Input:
Make sure that a string is a byte string
:arg obj: An object to make sure is a byte string. In most cases this
will be either a text string or a byte string. However, with
``nonstring='simplerepr'``, this can be used as a traceback-free
version of ``str(obj)``.
:kwarg encoding: The encoding to use to transform from a text string to
a byte string. Defaults to using 'utf-8'.
:kwarg errors: The error handler to use if the text string is not
encodable using the specified encoding. Any valid `codecs error
handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
may be specified. There are three additional error strategies
specifically aimed at helping people to port code. The first two are:
:surrogate_or_strict: Will use ``surrogateescape`` if it is a valid
handler, otherwise it will use ``strict``
:surrogate_or_replace: Will use ``surrogateescape`` if it is a valid
handler, otherwise it will use ``replace``.
Because ``surrogateescape`` was added in Python3 this usually means that
Python3 will use ``surrogateescape`` and Python2 will use the fallback
error handler. Note that the code checks for ``surrogateescape`` when the
module is imported. If you have a backport of ``surrogateescape`` for
Python2, be sure to register the error handler prior to importing this
module.
The last error handler is:
:surrogate_then_replace: Will use ``surrogateescape`` if it is a valid
handler. If encoding with ``surrogateescape`` would traceback,
surrogates are first replaced with a replacement character
and then the string is encoded using ``replace`` (which replaces
the rest of the nonencodable bytes). If ``surrogateescape`` is
not present it will simply use ``replace``. (Added in Ansible 2.3)
This strategy is designed to never traceback when it attempts
to encode a string.
The default until Ansible-2.2 was ``surrogate_or_replace``
From Ansible-2.3 onwards, the default is ``surrogate_then_replace``.
:kwarg nonstring: The strategy to use if a nonstring is specified in
``obj``. Default is 'simplerepr'. Valid values are:
:simplerepr: The default. This takes the ``str`` of the object and
then returns the bytes version of that string.
:empty: Return an empty byte string
:passthru: Return the object passed in
:strict: Raise a :exc:`TypeError`
:returns: Typically this returns a byte string. If a nonstring object is
passed in this may be a different type depending on the strategy
specified by nonstring. This will never return a text string.
.. note:: If passed a byte string, this function does not check that the
string is valid in the specified encoding. If it's important that the
byte string is in the specified encoding do::
encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')
.. version_changed:: 2.3
Added the ``surrogate_then_replace`` error handler and made it the default error handler.
### Response:
def to_bytes(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
"""Make sure that a string is a byte string
:arg obj: An object to make sure is a byte string. In most cases this
will be either a text string or a byte string. However, with
``nonstring='simplerepr'``, this can be used as a traceback-free
version of ``str(obj)``.
:kwarg encoding: The encoding to use to transform from a text string to
a byte string. Defaults to using 'utf-8'.
:kwarg errors: The error handler to use if the text string is not
encodable using the specified encoding. Any valid `codecs error
handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
may be specified. There are three additional error strategies
specifically aimed at helping people to port code. The first two are:
:surrogate_or_strict: Will use ``surrogateescape`` if it is a valid
handler, otherwise it will use ``strict``
:surrogate_or_replace: Will use ``surrogateescape`` if it is a valid
handler, otherwise it will use ``replace``.
Because ``surrogateescape`` was added in Python3 this usually means that
Python3 will use ``surrogateescape`` and Python2 will use the fallback
error handler. Note that the code checks for ``surrogateescape`` when the
module is imported. If you have a backport of ``surrogateescape`` for
Python2, be sure to register the error handler prior to importing this
module.
The last error handler is:
:surrogate_then_replace: Will use ``surrogateescape`` if it is a valid
handler. If encoding with ``surrogateescape`` would traceback,
surrogates are first replaced with a replacement character
and then the string is encoded using ``replace`` (which replaces
the rest of the nonencodable bytes). If ``surrogateescape`` is
not present it will simply use ``replace``. (Added in Ansible 2.3)
This strategy is designed to never traceback when it attempts
to encode a string.
The default until Ansible-2.2 was ``surrogate_or_replace``
From Ansible-2.3 onwards, the default is ``surrogate_then_replace``.
:kwarg nonstring: The strategy to use if a nonstring is specified in
``obj``. Default is 'simplerepr'. Valid values are:
:simplerepr: The default. This takes the ``str`` of the object and
then returns the bytes version of that string.
:empty: Return an empty byte string
:passthru: Return the object passed in
:strict: Raise a :exc:`TypeError`
:returns: Typically this returns a byte string. If a nonstring object is
passed in this may be a different type depending on the strategy
specified by nonstring. This will never return a text string.
.. note:: If passed a byte string, this function does not check that the
string is valid in the specified encoding. If it's important that the
byte string is in the specified encoding do::
encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')
.. version_changed:: 2.3
Added the ``surrogate_then_replace`` error handler and made it the default error handler.
"""
if isinstance(obj, binary_type):
return obj
# We're given a text string
# If it has surrogates, we know because it will decode
original_errors = errors
if errors in _COMPOSED_ERROR_HANDLERS:
if HAS_SURROGATEESCAPE:
errors = 'surrogateescape'
elif errors == 'surrogate_or_strict':
errors = 'strict'
else:
errors = 'replace'
if isinstance(obj, text_type):
try:
# Try this first as it's the fastest
return obj.encode(encoding, errors)
except UnicodeEncodeError:
if original_errors in (None, 'surrogate_then_replace'):
# Slow but works
return_string = obj.encode('utf-8', 'surrogateescape')
return_string = return_string.decode('utf-8', 'replace')
return return_string.encode(encoding, 'replace')
raise
# Note: We do these last even though we have to call to_bytes again on the
# value because we're optimizing the common case
if nonstring == 'simplerepr':
try:
value = str(obj)
except UnicodeError:
try:
value = repr(obj)
except UnicodeError:
# Giving up
return to_bytes('')
elif nonstring == 'passthru':
return obj
elif nonstring == 'empty':
# python2.4 doesn't have b''
return to_bytes('')
elif nonstring == 'strict':
raise TypeError('obj must be a string type')
else:
raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)
return to_bytes(value, encoding, errors) |
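A short sketch of the behaviour described above, assuming `None` is included in `_COMPOSED_ERROR_HANDLERS` (as in Ansible, where this helper lives in `ansible.module_utils._text`) and that `surrogateescape` is available.

to_bytes(u'caf\xe9')                    # b'caf\xc3\xa9' with the utf-8 default
to_bytes(u'caf\xe9', encoding='ascii')  # falls back to 'replace': b'caf?'
to_bytes(3.14)                          # nonstring='simplerepr' gives b'3.14'
to_bytes(object(), nonstring='strict')  # raises TypeError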
def _parse_contract_headers(self, table):
"""
Parse the years on the contract.
The years are listed as the headers on the contract. The first header
contains 'Team' which specifies the player's current team and should
not be included in the years.
Parameters
----------
table : PyQuery object
A PyQuery object containing the contract table.
Returns
-------
list
Returns a list where each element is a string denoting the season,
such as '2017-18'.
"""
years = [i.text() for i in table('th').items()]
years.remove('Team')
return years | Parse the years on the contract.
The years are listed as the headers on the contract. The first header
contains 'Team' which specifies the player's current team and should
not be included in the years.
Parameters
----------
table : PyQuery object
A PyQuery object containing the contract table.
Returns
-------
list
Returns a list where each element is a string denoting the season,
such as '2017-18'. | Below is the instruction that describes the task:
### Input:
Parse the years on the contract.
The years are listed as the headers on the contract. The first header
contains 'Team' which specifies the player's current team and should
not be included in the years.
Parameters
----------
table : PyQuery object
A PyQuery object containing the contract table.
Returns
-------
list
Returns a list where each element is a string denoting the season,
such as '2017-18'.
### Response:
def _parse_contract_headers(self, table):
"""
Parse the years on the contract.
The years are listed as the headers on the contract. The first header
contains 'Team' which specifies the player's current team and should
not be included in the years.
Parameters
----------
table : PyQuery object
A PyQuery object containing the contract table.
Returns
-------
list
Returns a list where each element is a string denoting the season,
such as '2017-18'.
"""
years = [i.text() for i in table('th').items()]
years.remove('Team')
return years |
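A self-contained illustration of the header parsing, using a tiny hand-built table in place of a scraped contract page.

from pyquery import PyQuery as pq

table = pq('<table><tr><th>Team</th><th>2017-18</th><th>2018-19</th></tr></table>')
years = [i.text() for i in table('th').items()]
years.remove('Team')
print(years)  # ['2017-18', '2018-19']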
def table_delete(self, table_name):
"""Issues a request to delete a table.
Args:
table_name: the name of the table as a tuple of components.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,
raw_response=True) | Issues a request to delete a table.
Args:
table_name: the name of the table as a tuple of components.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | Below is the instruction that describes the task:
### Input:
Issues a request to delete a table.
Args:
table_name: the name of the table as a tuple of components.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
### Response:
def table_delete(self, table_name):
"""Issues a request to delete a table.
Args:
table_name: the name of the table as a tuple of components.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,
raw_response=True) |
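A hypothetical call, assuming `api` is an already-configured instance of this Api class; table names are passed as a tuple of components.

response = api.table_delete(('my-project', 'my_dataset', 'old_table'))  # issues DELETE against the tables path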
def enabled(self):
""" Check whether we're enabled (or if parent is). """
# Cache into ._enabled
if self._enabled is None:
if self.parent is not None and self.parent.enabled():
self._enabled = True
else:
# Default to Enabled if not otherwise disabled
self._enabled = getattr(self.options, self.dest, True)
return self._enabled | Check whether we're enabled (or if parent is). | Below is the instruction that describes the task:
### Input:
Check whether we're enabled (or if parent is).
### Response:
def enabled(self):
""" Check whether we're enabled (or if parent is). """
# Cache into ._enabled
if self._enabled is None:
if self.parent is not None and self.parent.enabled():
self._enabled = True
else:
# Default to Enabled if not otherwise disabled
self._enabled = getattr(self.options, self.dest, True)
return self._enabled |
def add(path=None, force=False, quiet=False):
"""Add that path to git's staging area (default current dir)
so that it will be included in next commit
"""
option = '-f' if force else ''
return run('add %s %s' % (option, path or '.'), quiet=quiet) | Add that path to git's staging area (default current dir)
so that it will be included in next commit | Below is the instruction that describes the task:
### Input:
Add that path to git's staging area (default current dir)
so that it will be included in next commit
### Response:
def add(path=None, force=False, quiet=False):
"""Add that path to git's staging area (default current dir)
so that it will be included in next commit
"""
option = '-f' if force else ''
return run('add %s %s' % (option, path or '.'), quiet=quiet) |
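Typical calls, assuming `run` dispatches to the git CLI as elsewhere in this module.

add()                                 # stage the current directory: git add .
add('build/output.bin', force=True)   # force-add an ignored file: git add -f build/output.bin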
def write_worksheets(workbook, data_list, result_info_key, identifier_keys):
"""Writes rest of the worksheets to workbook.
Args:
workbook: workbook to write into
data_list: Analytics API data as a list of dicts
result_info_key: the key in api_data dicts that contains the data results
identifier_keys: the list of keys used as requested identifiers
(address, zipcode, block_id, etc)
"""
# we can use the first item to figure out the worksheet keys
worksheet_keys = get_worksheet_keys(data_list[0], result_info_key)
for key in worksheet_keys:
title = key.split('/')[1]
title = utilities.convert_snake_to_title_case(title)
title = KEY_TO_WORKSHEET_MAP.get(title, title)
if key == 'property/nod':
# the property/nod endpoint needs to be split into two worksheets
create_property_nod_worksheets(workbook, data_list, result_info_key, identifier_keys)
else:
# all other endpoints are written to a single worksheet
# Maximum 31 characters allowed in sheet title
worksheet = workbook.create_sheet(title=title[:31])
processed_data = process_data(key, data_list, result_info_key, identifier_keys)
write_data(worksheet, processed_data)
# remove the first, unused empty sheet
workbook.remove_sheet(workbook.active) | Writes rest of the worksheets to workbook.
Args:
workbook: workbook to write into
data_list: Analytics API data as a list of dicts
result_info_key: the key in api_data dicts that contains the data results
identifier_keys: the list of keys used as requested identifiers
(address, zipcode, block_id, etc) | Below is the instruction that describes the task:
### Input:
Writes rest of the worksheets to workbook.
Args:
workbook: workbook to write into
data_list: Analytics API data as a list of dicts
result_info_key: the key in api_data dicts that contains the data results
identifier_keys: the list of keys used as requested identifiers
(address, zipcode, block_id, etc)
### Response:
def write_worksheets(workbook, data_list, result_info_key, identifier_keys):
"""Writes rest of the worksheets to workbook.
Args:
workbook: workbook to write into
data_list: Analytics API data as a list of dicts
result_info_key: the key in api_data dicts that contains the data results
identifier_keys: the list of keys used as requested identifiers
(address, zipcode, block_id, etc)
"""
# we can use the first item to figure out the worksheet keys
worksheet_keys = get_worksheet_keys(data_list[0], result_info_key)
for key in worksheet_keys:
title = key.split('/')[1]
title = utilities.convert_snake_to_title_case(title)
title = KEY_TO_WORKSHEET_MAP.get(title, title)
if key == 'property/nod':
# the property/nod endpoint needs to be split into two worksheets
create_property_nod_worksheets(workbook, data_list, result_info_key, identifier_keys)
else:
# all other endpoints are written to a single worksheet
# Maximum 31 characters allowed in sheet title
worksheet = workbook.create_sheet(title=title[:31])
processed_data = process_data(key, data_list, result_info_key, identifier_keys)
write_data(worksheet, processed_data)
# remove the first, unused empty sheet
workbook.remove_sheet(workbook.active) |
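A hedged driver sketch; the shape of `api_data` and the key names are assumptions about the Analytics API client used with this module, not confirmed by the source.

import openpyxl

workbook = openpyxl.Workbook()
write_worksheets(workbook, api_data,                  # api_data: list of per-identifier result dicts (assumed)
                 result_info_key='property/value',    # illustrative key name
                 identifier_keys=['address', 'zipcode'])
workbook.save('analytics_export.xlsx')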
def do_statement(source, start):
"""returns none if not found other functions that begin with 'do_' raise
also this do_ type function passes white space"""
start = pass_white(source, start)
# start is the first position after initial start that is not a white space or \n
if not start < len(source): #if finished parsing return None
return None, start
if any(startswith_keyword(source[start:], e) for e in {'case', 'default'}):
return None, start
rest = source[start:]
for key, meth in KEYWORD_METHODS.iteritems(
): # check for statements that are uniquely defined by their keywords
if rest.startswith(key):
# has to start with this keyword and the next letter after keyword must be either EOF or not in IDENTIFIER_PART
if len(key) == len(rest) or rest[len(key)] not in IDENTIFIER_PART:
return meth(source, start)
if rest[0] == '{': #Block
return do_block(source, start)
# Now only label and expression left
cand = parse_identifier(source, start, False)
if cand is not None: # it can mean that its a label
label, cand_start = cand
cand_start = pass_white(source, cand_start)
if source[cand_start] == ':':
return do_label(source, start)
return do_expression(source, start) | returns none if not found other functions that begin with 'do_' raise
also this do_ type function passes white space | Below is the instruction that describes the task:
### Input:
returns none if not found other functions that begin with 'do_' raise
also this do_ type function passes white space
### Response:
def do_statement(source, start):
"""returns none if not found other functions that begin with 'do_' raise
also this do_ type function passes white space"""
start = pass_white(source, start)
# start is the first position after initial start that is not a white space or \n
if not start < len(source): #if finished parsing return None
return None, start
if any(startswith_keyword(source[start:], e) for e in {'case', 'default'}):
return None, start
rest = source[start:]
for key, meth in KEYWORD_METHODS.iteritems(
): # check for statements that are uniquely defined by their keywords
if rest.startswith(key):
# has to start with this keyword and the next letter after keyword must be either EOF or not in IDENTIFIER_PART
if len(key) == len(rest) or rest[len(key)] not in IDENTIFIER_PART:
return meth(source, start)
if rest[0] == '{': #Block
return do_block(source, start)
# Now only label and expression left
cand = parse_identifier(source, start, False)
if cand is not None: # it can mean that its a label
label, cand_start = cand
cand_start = pass_white(source, cand_start)
if source[cand_start] == ':':
return do_label(source, start)
return do_expression(source, start) |
def _connectIfNecessarySingle(self, node):
"""
Connect to a node if necessary.
:param node: node to connect to
:type node: Node
"""
if node in self._connections and self._connections[node].state != CONNECTION_STATE.DISCONNECTED:
return True
if not self._shouldConnect(node):
return False
assert node in self._connections # Since we "should connect" to this node, there should always be a connection object already in place.
if node in self._lastConnectAttempt and time.time() - self._lastConnectAttempt[node] < self._syncObj.conf.connectionRetryTime:
return False
self._lastConnectAttempt[node] = time.time()
return self._connections[node].connect(node.ip, node.port) | Connect to a node if necessary.
:param node: node to connect to
:type node: Node | Below is the instruction that describes the task:
### Input:
Connect to a node if necessary.
:param node: node to connect to
:type node: Node
### Response:
def _connectIfNecessarySingle(self, node):
"""
Connect to a node if necessary.
:param node: node to connect to
:type node: Node
"""
if node in self._connections and self._connections[node].state != CONNECTION_STATE.DISCONNECTED:
return True
if not self._shouldConnect(node):
return False
assert node in self._connections # Since we "should connect" to this node, there should always be a connection object already in place.
if node in self._lastConnectAttempt and time.time() - self._lastConnectAttempt[node] < self._syncObj.conf.connectionRetryTime:
return False
self._lastConnectAttempt[node] = time.time()
return self._connections[node].connect(node.ip, node.port) |
def send(self, obj_id):
"""
Send email to the assigned lists
:param obj_id: int
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/send'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response) | Send email to the assigned lists
:param obj_id: int
:return: dict|str | Below is the instruction that describes the task:
### Input:
Send email to the assigned lists
:param obj_id: int
:return: dict|str
### Response:
def send(self, obj_id):
"""
Send email to the assigned lists
:param obj_id: int
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/send'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response) |
def seek(self, position, modifier=0):
"""move the cursor on the file descriptor to a different location
:param position:
an integer offset from the location indicated by the modifier
:type position: int
:param modifier:
an indicator of how to find the seek location.
- ``os.SEEK_SET`` means start from the beginning of the file
- ``os.SEEK_CUR`` means start wherever the cursor already is
- ``os.SEEK_END`` means start from the end of the file
the default is ``os.SEEK_SET``
"""
os.lseek(self._fileno, position, modifier)
# clear out the buffer
buf = self._rbuf
buf.seek(0)
buf.truncate() | move the cursor on the file descriptor to a different location
:param position:
an integer offset from the location indicated by the modifier
:type position: int
:param modifier:
an indicator of how to find the seek location.
- ``os.SEEK_SET`` means start from the beginning of the file
- ``os.SEEK_CUR`` means start wherever the cursor already is
- ``os.SEEK_END`` means start from the end of the file
the default is ``os.SEEK_SET`` | Below is the instruction that describes the task:
### Input:
move the cursor on the file descriptor to a different location
:param position:
an integer offset from the location indicated by the modifier
:type position: int
:param modifier:
an indicator of how to find the seek location.
- ``os.SEEK_SET`` means start from the beginning of the file
- ``os.SEEK_CUR`` means start wherever the cursor already is
- ``os.SEEK_END`` means start from the end of the file
the default is ``os.SEEK_SET``
### Response:
def seek(self, position, modifier=0):
"""move the cursor on the file descriptor to a different location
:param position:
an integer offset from the location indicated by the modifier
:type position: int
:param modifier:
an indicator of how to find the seek location.
- ``os.SEEK_SET`` means start from the beginning of the file
- ``os.SEEK_CUR`` means start wherever the cursor already is
- ``os.SEEK_END`` means start from the end of the file
the default is ``os.SEEK_SET``
"""
os.lseek(self._fileno, position, modifier)
# clear out the buffer
buf = self._rbuf
buf.seek(0)
buf.truncate() |
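A small illustration of the three modifier values, assuming `f` is an instance of the descriptor-backed file class this method belongs to.

import os

f.seek(0, os.SEEK_SET)     # rewind to the start of the file
f.seek(4, os.SEEK_CUR)     # skip 4 bytes forward from the current position
f.seek(-10, os.SEEK_END)   # place the cursor 10 bytes before the end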
def superseeded_by(self, other_service):
"""Return True if input service has login id and this has not."""
if not other_service or \
other_service.__class__ != self.__class__ or \
other_service.protocol != self.protocol or \
other_service.port != self.port:
return False
# If this service does not have a login id but the other one does, then
# we should return True here
return not self.device_credentials and other_service.device_credentials | Return True if input service has login id and this has not. | Below is the instruction that describes the task:
### Input:
Return True if input service has login id and this has not.
### Response:
def superseeded_by(self, other_service):
"""Return True if input service has login id and this has not."""
if not other_service or \
other_service.__class__ != self.__class__ or \
other_service.protocol != self.protocol or \
other_service.port != self.port:
return False
# If this service does not have a login id but the other one does, then
# we should return True here
return not self.device_credentials and other_service.device_credentials |
def lines_of_content(content, width):
"""
计算内容在特定输出宽度下实际显示的行数
calculate the actual rows with specific terminal width
"""
result = 0
if isinstance(content, list):
for line in content:
_line = preprocess(line)
result += ceil(line_width(_line) / width)
elif isinstance(content, dict):
for k, v in content.items():
# 加2是算上行内冒号和空格的宽度
# adding 2 for the colon and space ": "
_k, _v = map(preprocess, (k, v))
result += ceil((line_width(_k) + line_width(_v) + 2) / width)
return int(result) | 计算内容在特定输出宽度下实际显示的行数
calculate the actual rows with specific terminal width | Below is the instruction that describes the task:
### Input:
计算内容在特定输出宽度下实际显示的行数
calculate the actual rows with specific terminal width
### Response:
def lines_of_content(content, width):
"""
计算内容在特定输出宽度下实际显示的行数
calculate the actual rows with specific terminal width
"""
result = 0
if isinstance(content, list):
for line in content:
_line = preprocess(line)
result += ceil(line_width(_line) / width)
elif isinstance(content, dict):
for k, v in content.items():
# 加2是算上行内冒号和空格的宽度
# adding 2 for the colon and space ": "
_k, _v = map(preprocess, (k, v))
result += ceil((line_width(_k) + line_width(_v) + 2) / width)
return int(result) |
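A quick check of the arithmetic, assuming plain ASCII input so that `line_width` behaves like `len`.

lines_of_content(['short', 'a' * 100], width=80)  # ceil(5/80) + ceil(100/80) = 1 + 2 = 3
lines_of_content({'name': 'value'}, width=80)     # ceil((4 + 5 + 2) / 80) = 1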
def enable_yum_priority_obsoletes(path="/etc/yum/pluginconf.d/priorities.conf"):
"""Configure Yum priorities to include obsoletes"""
config = configparser.ConfigParser()
config.read(path)
config.set('main', 'check_obsoletes', '1')
with open(path, 'w') as fout:
config.write(fout) | Configure Yum priorities to include obsoletes | Below is the instruction that describes the task:
### Input:
Configure Yum priorities to include obsoletes
### Response:
def enable_yum_priority_obsoletes(path="/etc/yum/pluginconf.d/priorities.conf"):
"""Configure Yum priorities to include obsoletes"""
config = configparser.ConfigParser()
config.read(path)
config.set('main', 'check_obsoletes', '1')
with open(path, 'w') as fout:
config.write(fout) |
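Usage is a single call; the alternate path below is only an example of pointing the helper at a writable test copy, and the target file is assumed to already contain a [main] section.

enable_yum_priority_obsoletes()                        # default system path
enable_yum_priority_obsoletes('/tmp/priorities.conf')  # writable test copy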
def on_change(self, attr, old, new):
"""
Process change events adding timeout to process multiple concerted
value change at once rather than firing off multiple plot updates.
"""
self._queue.append((attr, old, new))
if not self._active and self.plot.document:
self.plot.document.add_timeout_callback(self.process_on_change, 50)
self._active = True | Process change events adding timeout to process multiple concerted
value change at once rather than firing off multiple plot updates. | Below is the instruction that describes the task:
### Input:
Process change events adding timeout to process multiple concerted
value change at once rather than firing off multiple plot updates.
### Response:
def on_change(self, attr, old, new):
"""
Process change events adding timeout to process multiple concerted
value change at once rather than firing off multiple plot updates.
"""
self._queue.append((attr, old, new))
if not self._active and self.plot.document:
self.plot.document.add_timeout_callback(self.process_on_change, 50)
self._active = True |
def create_pre_execute(task_params, parameter_map):
"""
Builds the code block for the GPTool Execute method before the job is
submitted based on the input task_params.
:param task_params: A list of task parameters from the task info structure.
:return: A string representing the code block to the GPTool Execute method.
"""
gp_params = [_PRE_EXECUTE_INIT_TEMPLATE]
for task_param in task_params:
if task_param['direction'].upper() == 'OUTPUT':
continue
# Convert DataType
data_type = task_param['type'].upper()
if 'dimensions' in task_param:
data_type += 'ARRAY'
if data_type in parameter_map:
gp_params.append(parameter_map[data_type].pre_execute().substitute(task_param))
gp_params.append(_PRE_EXECUTE_CLEANUP_TEMPLATE)
return ''.join(gp_params) | Builds the code block for the GPTool Execute method before the job is
submitted based on the input task_params.
:param task_params: A list of task parameters from the task info structure.
:return: A string representing the code block to the GPTool Execute method. | Below is the instruction that describes the task:
### Input:
Builds the code block for the GPTool Execute method before the job is
submitted based on the input task_params.
:param task_params: A list of task parameters from the task info structure.
:return: A string representing the code block to the GPTool Execute method.
### Response:
def create_pre_execute(task_params, parameter_map):
"""
Builds the code block for the GPTool Execute method before the job is
submitted based on the input task_params.
:param task_params: A list of task parameters from the task info structure.
:return: A string representing the code block to the GPTool Execute method.
"""
gp_params = [_PRE_EXECUTE_INIT_TEMPLATE]
for task_param in task_params:
if task_param['direction'].upper() == 'OUTPUT':
continue
# Convert DataType
data_type = task_param['type'].upper()
if 'dimensions' in task_param:
data_type += 'ARRAY'
if data_type in parameter_map:
gp_params.append(parameter_map[data_type].pre_execute().substitute(task_param))
gp_params.append(_PRE_EXECUTE_CLEANUP_TEMPLATE)
return ''.join(gp_params) |
def ExportModelOperationsMixin(model_name):
"""Returns a mixin for models to export counters for lifecycle operations.
Usage:
class User(ExportModelOperationsMixin('user'), Model):
...
"""
# Force create the labels for this model in the counters. This
# is not necessary but it avoids gaps in the aggregated data.
model_inserts.labels(model_name)
model_updates.labels(model_name)
model_deletes.labels(model_name)
class Mixin(object):
def _do_insert(self, *args, **kwargs):
model_inserts.labels(model_name).inc()
return super(Mixin, self)._do_insert(*args, **kwargs)
def _do_update(self, *args, **kwargs):
model_updates.labels(model_name).inc()
return super(Mixin, self)._do_update(*args, **kwargs)
def delete(self, *args, **kwargs):
model_deletes.labels(model_name).inc()
return super(Mixin, self).delete(*args, **kwargs)
return Mixin | Returns a mixin for models to export counters for lifecycle operations.
Usage:
class User(ExportModelOperationsMixin('user'), Model):
... | Below is the instruction that describes the task:
### Input:
Returns a mixin for models to export counters for lifecycle operations.
Usage:
class User(ExportModelOperationsMixin('user'), Model):
...
### Response:
def ExportModelOperationsMixin(model_name):
"""Returns a mixin for models to export counters for lifecycle operations.
Usage:
class User(ExportModelOperationsMixin('user'), Model):
...
"""
# Force create the labels for this model in the counters. This
# is not necessary but it avoids gaps in the aggregated data.
model_inserts.labels(model_name)
model_updates.labels(model_name)
model_deletes.labels(model_name)
class Mixin(object):
def _do_insert(self, *args, **kwargs):
model_inserts.labels(model_name).inc()
return super(Mixin, self)._do_insert(*args, **kwargs)
def _do_update(self, *args, **kwargs):
model_updates.labels(model_name).inc()
return super(Mixin, self)._do_update(*args, **kwargs)
def delete(self, *args, **kwargs):
model_deletes.labels(model_name).inc()
return super(Mixin, self).delete(*args, **kwargs)
return Mixin |
def lastz_to_blast(row):
"""
Convert the lastz tabular to the blast tabular, see headers above
Obsolete after LASTZ version 1.02.40
"""
atoms = row.strip().split("\t")
name1, name2, coverage, identity, nmismatch, ngap, \
start1, end1, strand1, start2, end2, strand2, score = atoms
identity = identity.replace("%", "")
hitlen = coverage.split("/")[1]
score = float(score)
same_strand = (strand1 == strand2)
if not same_strand:
start2, end2 = end2, start2
evalue = blastz_score_to_ncbi_expectation(score)
score = blastz_score_to_ncbi_bits(score)
evalue, score = "%.2g" % evalue, "%.1f" % score
return "\t".join((name1, name2, identity, hitlen, nmismatch, ngap, \
start1, end1, start2, end2, evalue, score)) | Convert the lastz tabular to the blast tabular, see headers above
Obsolete after LASTZ version 1.02.40 | Below is the instruction that describes the task:
### Input:
Convert the lastz tabular to the blast tabular, see headers above
Obsolete after LASTZ version 1.02.40
### Response:
def lastz_to_blast(row):
"""
Convert the lastz tabular to the blast tabular, see headers above
Obsolete after LASTZ version 1.02.40
"""
atoms = row.strip().split("\t")
name1, name2, coverage, identity, nmismatch, ngap, \
start1, end1, strand1, start2, end2, strand2, score = atoms
identity = identity.replace("%", "")
hitlen = coverage.split("/")[1]
score = float(score)
same_strand = (strand1 == strand2)
if not same_strand:
start2, end2 = end2, start2
evalue = blastz_score_to_ncbi_expectation(score)
score = blastz_score_to_ncbi_bits(score)
evalue, score = "%.2g" % evalue, "%.1f" % score
return "\t".join((name1, name2, identity, hitlen, nmismatch, ngap, \
start1, end1, start2, end2, evalue, score)) |
def shadows(self, data=None, t=None, dt=None, latitude=None,
init='empty', resolution='mid'):
'''
Initializes a ShadowManager object for this ``pyny.Space``
instance.
The 'empty' initialization accepts ``data`` and ``t`` and ``dt``
but the ShadowsManager will not start the calculations. It will
wait the user to manually insert the rest of the parameters.
Call ``ShadowsManager.run()`` to start the shadowing
computations.
The 'auto' initialization pre-sets all the required parameters
to run the computations\*. The available resolutions are:
* 'low'
* 'mid'
* 'high'
The 'auto' mode will use all the arguments different than
``None`` and the ``set_of_points`` of this ``pyny.Space`` if
any.
:param data: Data timeseries to project on the 3D model
(radiation, for example).
:type data: ndarray (shape=N), None
:param t: Time vector in absolute minutes or datetime objects
:type t: ndarray or list, None
:param dt: Interval time to generate t vector.
:type dt: int, None
:param latitude: Local latitude.
:type latitude: float (radians)
:param init: Initialization mode
:type init: str
:param init: Resolution for the time vector generation (if
``None``), for setting the sensible points and for the
Voronoi diagram.
:type init: str
:returns: ``ShadowsManager`` object
'''
from pyny3d.shadows import ShadowsManager
if init == 'auto':
# Resolution
if resolution == 'low':
factor = 20
elif resolution == 'mid':
factor = 40
elif resolution == 'high':
factor = 70
if dt is None: dt = 6e4/factor
if latitude is None: latitude = 0.65
# Autofill ShadowsManager Object
sm = ShadowsManager(self, data=data, t=t, dt=dt,
latitude=latitude)
if self.get_sets_of_points().shape[0] == 0:
max_bound = np.diff(self.get_domain(), axis=0).max()
sm.space.mesh(mesh_size=max_bound/factor, edge=True)
## General parameters
sm.arg_vor_size = 3.5/factor
sm.run()
return sm
elif init == 'empty':
return ShadowsManager(self, data=data, t=t, dt=dt,
latitude=latitude) | Initializes a ShadowManager object for this ``pyny.Space``
instance.
The 'empty' initialization accepts ``data`` and ``t`` and ``dt``
but the ShadowsManager will not start the calculations. It will
wait the user to manually insert the rest of the parameters.
Call ``ShadowsManager.run()`` to start the shadowing
computations.
The 'auto' initialization pre-sets all the required parameters
to run the computations\*. The available resolutions are:
* 'low'
* 'mid'
* 'high'
The 'auto' mode will use all the arguments different than
``None`` and the ``set_of_points`` of this ``pyny.Space`` if
any.
:param data: Data timeseries to project on the 3D model
(radiation, for example).
:type data: ndarray (shape=N), None
:param t: Time vector in absolute minutes or datetime objects
:type t: ndarray or list, None
:param dt: Interval time to generate t vector.
:type dt: int, None
:param latitude: Local latitude.
:type latitude: float (radians)
:param init: Initialization mode
:type init: str
:param init: Resolution for the time vector generation (if
``None``), for setting the sensible points and for the
Voronoi diagram.
:type init: str
:returns: ``ShadowsManager`` object | Below is the instruction that describes the task:
### Input:
Initializes a ShadowManager object for this ``pyny.Space``
instance.
The 'empty' initialization accepts ``data`` and ``t`` and ``dt``
but the ShadowsManager will not start the calculations. It will
wait the user to manually insert the rest of the parameters.
Call ``ShadowsManager.run()`` to start the shadowing
computations.
The 'auto' initialization pre-sets all the required parameters
to run the computations\*. The available resolutions are:
* 'low'
* 'mid'
* 'high'
The 'auto' mode will use all the arguments different than
``None`` and the ``set_of_points`` of this ``pyny.Space`` if
any.
:param data: Data timeseries to project on the 3D model
(radiation, for example).
:type data: ndarray (shape=N), None
:param t: Time vector in absolute minutes or datetime objects
:type t: ndarray or list, None
:param dt: Interval time to generate t vector.
:type dt: int, None
:param latitude: Local latitude.
:type latitude: float (radians)
:param init: Initialization mode
:type init: str
:param init: Resolution for the time vector generation (if
``None``), for setting the sensible points and for the
Voronoi diagram.
:type init: str
:returns: ``ShadowsManager`` object
### Response:
def shadows(self, data=None, t=None, dt=None, latitude=None,
init='empty', resolution='mid'):
'''
Initializes a ShadowManager object for this ``pyny.Space``
instance.
The 'empty' initialization accepts ``data`` and ``t`` and ``dt``
but the ShadowsManager will not start the calculations. It will
wait the user to manually insert the rest of the parameters.
Call ``ShadowsManager.run()`` to start the shadowing
computations.
The 'auto' initialization pre-sets all the required parameters
to run the computations\*. The available resolutions are:
* 'low'
* 'mid'
* 'high'
The 'auto' mode will use all the arguments different than
``None`` and the ``set_of_points`` of this ``pyny.Space`` if
any.
:param data: Data timeseries to project on the 3D model
(radiation, for example).
:type data: ndarray (shape=N), None
:param t: Time vector in absolute minutes or datetime objects
:type t: ndarray or list, None
:param dt: Interval time to generate t vector.
:type dt: int, None
:param latitude: Local latitude.
:type latitude: float (radians)
:param init: Initialization mode
:type init: str
:param init: Resolution for the time vector generation (if
``None``), for setting the sensible points and for the
Voronoi diagram.
:type init: str
:returns: ``ShadowsManager`` object
'''
from pyny3d.shadows import ShadowsManager
if init == 'auto':
# Resolution
if resolution == 'low':
factor = 20
elif resolution == 'mid':
factor = 40
elif resolution == 'high':
factor = 70
if dt is None: dt = 6e4/factor
if latitude is None: latitude = 0.65
# Autofill ShadowsManager Object
sm = ShadowsManager(self, data=data, t=t, dt=dt,
latitude=latitude)
if self.get_sets_of_points().shape[0] == 0:
max_bound = np.diff(self.get_domain(), axis=0).max()
sm.space.mesh(mesh_size=max_bound/factor, edge=True)
## General parameters
sm.arg_vor_size = 3.5/factor
sm.run()
return sm
elif init == 'empty':
return ShadowsManager(self, data=data, t=t, dt=dt,
latitude=latitude) |
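A hedged usage sketch for the 'auto' mode, assuming `space` is a populated pyny.Space and `irradiance` is a 1-D ndarray sampled every `dt` minutes.

sm = space.shadows(data=irradiance, dt=15, latitude=0.71,
                   init='auto', resolution='mid')
# With init='empty' the returned ShadowsManager is not run automatically:
# set the remaining parameters on it and call sm.run() yourself.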
def search_requests(self, query=None, params=None, callback=None, mine_ids=None):
"""Mine Archive.org search results.
:param query: The Archive.org search query to yield results for.
Refer to https://archive.org/advancedsearch.php#raw
for help formatting your query.
:type query: str
:param params: The URL parameters to send with each request sent
to the Archive.org Advancedsearch Api.
:type params: dict
"""
# If mining ids, devote half the workers to search and half to item mining.
if mine_ids:
self.max_tasks = self.max_tasks/2
# When mining id's, the only field we need returned is "identifier".
if mine_ids and params:
params = dict((k, v) for k, v in params.items() if 'fl' not in k)
params['fl[]'] = 'identifier'
# Make sure "identifier" is always returned in search results.
fields = [k for k in params if 'fl' in k]
if (len(fields) == 1) and (not any('identifier' == params[k] for k in params)):
# Make sure to not overwrite the existing fl[] key.
i = 0
while params.get('fl[{}]'.format(i)):
i += 1
params['fl[{}]'.format(i)] = 'identifier'
search_params = self.get_search_params(query, params)
url = make_url('/advancedsearch.php', self.protocol, self.hosts)
search_info = self.get_search_info(search_params)
total_results = search_info.get('response', {}).get('numFound', 0)
total_pages = (int(total_results/search_params['rows']) + 1)
for page in range(1, (total_pages + 1)):
params = deepcopy(search_params)
params['page'] = page
if not callback and mine_ids:
callback = self._handle_search_results
req = MineRequest('GET', url, self.access,
callback=callback,
max_retries=self.max_retries,
debug=self.debug,
params=params,
connector=self.connector)
yield req | Mine Archive.org search results.
:param query: The Archive.org search query to yield results for.
Refer to https://archive.org/advancedsearch.php#raw
for help formatting your query.
:type query: str
:param params: The URL parameters to send with each request sent
to the Archive.org Advancedsearch Api.
:type params: dict | Below is the instruction that describes the task:
### Input:
Mine Archive.org search results.
:param query: The Archive.org search query to yield results for.
Refer to https://archive.org/advancedsearch.php#raw
for help formatting your query.
:type query: str
:param params: The URL parameters to send with each request sent
to the Archive.org Advancedsearch Api.
:type params: dict
### Response:
def search_requests(self, query=None, params=None, callback=None, mine_ids=None):
"""Mine Archive.org search results.
:param query: The Archive.org search query to yield results for.
Refer to https://archive.org/advancedsearch.php#raw
for help formatting your query.
:type query: str
:param params: The URL parameters to send with each request sent
to the Archive.org Advancedsearch Api.
:type params: dict
"""
# If mining ids, devote half the workers to search and half to item mining.
if mine_ids:
self.max_tasks = self.max_tasks/2
# When mining id's, the only field we need returned is "identifier".
if mine_ids and params:
params = dict((k, v) for k, v in params.items() if 'fl' not in k)
params['fl[]'] = 'identifier'
# Make sure "identifier" is always returned in search results.
fields = [k for k in params if 'fl' in k]
if (len(fields) == 1) and (not any('identifier' == params[k] for k in params)):
# Make sure to not overwrite the existing fl[] key.
i = 0
while params.get('fl[{}]'.format(i)):
i += 1
params['fl[{}]'.format(i)] = 'identifier'
search_params = self.get_search_params(query, params)
url = make_url('/advancedsearch.php', self.protocol, self.hosts)
search_info = self.get_search_info(search_params)
total_results = search_info.get('response', {}).get('numFound', 0)
total_pages = (int(total_results/search_params['rows']) + 1)
for page in range(1, (total_pages + 1)):
params = deepcopy(search_params)
params['page'] = page
if not callback and mine_ids:
callback = self._handle_search_results
req = MineRequest('GET', url, self.access,
callback=callback,
max_retries=self.max_retries,
debug=self.debug,
params=params,
connector=self.connector)
yield req |
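An illustrative driver, assuming `miner` is an instance of the mining class this method belongs to; the generator yields one MineRequest per page of search results.

requests = miner.search_requests(query='collection:nasa',   # query string is illustrative
                                 params={'rows': 100},
                                 mine_ids=True)
for req in requests:
    pass  # hand each MineRequest to the miner's scheduler / event loop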
def convert_to_gt( text, layer_name=GT_WORDS ):
''' Converts all words in a morphologically analysed Text from FS format to
giellatekno (GT) format, and stores in a new layer named GT_WORDS.
If the keyword argument *layer_name=='words'* , overwrites the old 'words'
layer with the new layer containing GT format annotations.
Parameters
-----------
text : estnltk.text.Text
Morphologically annotated text that needs to be converted from FS format
to GT format;
layer_name : str
Name of the Text's layer in which GT format morphological annotations
are stored;
Defaults to GT_WORDS;
'''
assert WORDS in text, \
'(!) The input text should contain "'+str(WORDS)+'" layer.'
assert len(text[WORDS])==0 or (len(text[WORDS])>0 and ANALYSIS in text[WORDS][0]), \
'(!) Words in the input text should contain "'+str(ANALYSIS)+'" layer.'
new_words_layer = []
# 1) Perform the conversion
for word in text[WORDS]:
new_analysis = []
new_analysis.extend( convert_analysis( word[ANALYSIS] ) )
new_words_layer.append( {TEXT:word[TEXT], ANALYSIS:new_analysis, START:word[START], END:word[END]} )
# 2) Perform some context-specific disambiguation
_disambiguate_neg( new_words_layer )
_disambiguate_sid_ksid( new_words_layer, text, scope=CLAUSES )
_disambiguate_sid_ksid( new_words_layer, text, scope=SENTENCES )
_make_postfixes_2( new_words_layer )
# 3) Attach the layer
if layer_name != WORDS:
# Simply attach the new layer
text[layer_name] = new_words_layer
else:
# Perform word-by-word replacements
# (because simple attaching won't work here)
for wid, new_word in enumerate( new_words_layer ):
text[WORDS][wid] = new_word
return text | Converts all words in a morphologically analysed Text from FS format to
giellatekno (GT) format, and stores in a new layer named GT_WORDS.
If the keyword argument *layer_name=='words'* , overwrites the old 'words'
layer with the new layer containing GT format annotations.
Parameters
-----------
text : estnltk.text.Text
Morphologically annotated text that needs to be converted from FS format
to GT format;
layer_name : str
Name of the Text's layer in which GT format morphological annotations
are stored;
Defaults to GT_WORDS; | Below is the instruction that describes the task:
### Input:
Converts all words in a morphologically analysed Text from FS format to
giellatekno (GT) format, and stores in a new layer named GT_WORDS.
If the keyword argument *layer_name=='words'* , overwrites the old 'words'
layer with the new layer containing GT format annotations.
Parameters
-----------
text : estnltk.text.Text
Morphologically annotated text that needs to be converted from FS format
to GT format;
layer_name : str
Name of the Text's layer in which GT format morphological annotations
are stored;
Defaults to GT_WORDS;
### Response:
def convert_to_gt( text, layer_name=GT_WORDS ):
''' Converts all words in a morphologically analysed Text from FS format to
giellatekno (GT) format, and stores in a new layer named GT_WORDS.
If the keyword argument *layer_name=='words'* , overwrites the old 'words'
layer with the new layer containing GT format annotations.
Parameters
-----------
text : estnltk.text.Text
Morphologically annotated text that needs to be converted from FS format
to GT format;
layer_name : str
Name of the Text's layer in which GT format morphological annotations
are stored;
Defaults to GT_WORDS;
'''
assert WORDS in text, \
'(!) The input text should contain "'+str(WORDS)+'" layer.'
assert len(text[WORDS])==0 or (len(text[WORDS])>0 and ANALYSIS in text[WORDS][0]), \
'(!) Words in the input text should contain "'+str(ANALYSIS)+'" layer.'
new_words_layer = []
# 1) Perform the conversion
for word in text[WORDS]:
new_analysis = []
new_analysis.extend( convert_analysis( word[ANALYSIS] ) )
new_words_layer.append( {TEXT:word[TEXT], ANALYSIS:new_analysis, START:word[START], END:word[END]} )
# 2) Perform some context-specific disambiguation
_disambiguate_neg( new_words_layer )
_disambiguate_sid_ksid( new_words_layer, text, scope=CLAUSES )
_disambiguate_sid_ksid( new_words_layer, text, scope=SENTENCES )
_make_postfixes_2( new_words_layer )
# 3) Attach the layer
if layer_name != WORDS:
# Simply attach the new layer
text[layer_name] = new_words_layer
else:
# Perform word-by-word replacements
# (because simple attaching won't work here)
for wid, new_word in enumerate( new_words_layer ):
text[WORDS][wid] = new_word
return text |
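A hedged example, assuming an EstNLTK 1.x environment where Text carries Filosoft-style morphological analysis and GT_WORDS is the constant used above (importable from the same module as convert_to_gt).

from estnltk import Text

text = Text('Ta ei olnud seda teinud.')
text.tag_analysis()          # ensure the 'words' layer with 'analysis' is present
convert_to_gt(text)          # attaches the GT-format layer under GT_WORDS
gt_layer = text[GT_WORDS]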
def nmf_init(data, clusters, k, init='enhanced'):
"""
Generates initial M and W given a data set and an array of cluster labels.
There are 3 options for init:
enhanced - uses EIn-NMF from Gong 2013
basic - uses means for M, assigns W such that the chosen cluster for a given cell has value 0.75 and all others have 0.25/(k-1).
nmf - uses means for M, and assigns W using the NMF objective while holding M constant.
"""
init_m = np.zeros((data.shape[0], k))
if sparse.issparse(data):
for i in range(k):
if data[:,clusters==i].shape[1]==0:
point = np.random.randint(0, data.shape[1])
init_m[:,i] = data[:,point].toarray().flatten()
else:
init_m[:,i] = np.array(data[:,clusters==i].mean(1)).flatten()
else:
for i in range(k):
if data[:,clusters==i].shape[1]==0:
point = np.random.randint(0, data.shape[1])
init_m[:,i] = data[:,point].flatten()
else:
init_m[:,i] = data[:,clusters==i].mean(1)
init_w = np.zeros((k, data.shape[1]))
if init == 'enhanced':
distances = np.zeros((k, data.shape[1]))
for i in range(k):
for j in range(data.shape[1]):
distances[i,j] = np.sqrt(((data[:,j] - init_m[:,i])**2).sum())
for i in range(k):
for j in range(data.shape[1]):
init_w[i,j] = 1/((distances[:,j]/distances[i,j])**(-2)).sum()
elif init == 'basic':
init_w = initialize_from_assignments(clusters, k)
elif init == 'nmf':
# hold M constant (it becomes H of the transposed problem) and solve only for W
init_w_, _, n_iter = non_negative_factorization(data.T, n_components=k, init='custom', update_H=False, H=init_m.T)
init_w = init_w_.T
return init_m, init_w | Generates initial M and W given a data set and an array of cluster labels.
There are 3 options for init:
enhanced - uses EIn-NMF from Gong 2013
basic - uses means for M, assigns W such that the chosen cluster for a given cell has value 0.75 and all others have 0.25/(k-1).
nmf - uses means for M, and assigns W using the NMF objective while holding M constant. | Below is the the instruction that describes the task:
### Input:
Generates initial M and W given a data set and an array of cluster labels.
There are 3 options for init:
enhanced - uses EIn-NMF from Gong 2013
basic - uses means for M, assigns W such that the chosen cluster for a given cell has value 0.75 and all others have 0.25/(k-1).
nmf - uses means for M, and assigns W using the NMF objective while holding M constant.
### Response:
def nmf_init(data, clusters, k, init='enhanced'):
"""
Generates initial M and W given a data set and an array of cluster labels.
There are 3 options for init:
enhanced - uses EIn-NMF from Gong 2013
basic - uses means for M, assigns W such that the chosen cluster for a given cell has value 0.75 and all others have 0.25/(k-1).
nmf - uses means for M, and assigns W using the NMF objective while holding M constant.
"""
init_m = np.zeros((data.shape[0], k))
if sparse.issparse(data):
for i in range(k):
if data[:,clusters==i].shape[1]==0:
point = np.random.randint(0, data.shape[1])
init_m[:,i] = data[:,point].toarray().flatten()
else:
init_m[:,i] = np.array(data[:,clusters==i].mean(1)).flatten()
else:
for i in range(k):
if data[:,clusters==i].shape[1]==0:
point = np.random.randint(0, data.shape[1])
init_m[:,i] = data[:,point].flatten()
else:
init_m[:,i] = data[:,clusters==i].mean(1)
init_w = np.zeros((k, data.shape[1]))
if init == 'enhanced':
distances = np.zeros((k, data.shape[1]))
for i in range(k):
for j in range(data.shape[1]):
distances[i,j] = np.sqrt(((data[:,j] - init_m[:,i])**2).sum())
for i in range(k):
for j in range(data.shape[1]):
init_w[i,j] = 1/((distances[:,j]/distances[i,j])**(-2)).sum()
elif init == 'basic':
init_w = initialize_from_assignments(clusters, k)
elif init == 'nmf':
# hold M constant (it becomes H of the transposed problem) and solve only for W
init_w_, _, n_iter = non_negative_factorization(data.T, n_components=k, init='custom', update_H=False, H=init_m.T)
init_w = init_w_.T
return init_m, init_w |
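A self-contained sketch of calling nmf_init on synthetic data (the shapes and k=3 are illustrative assumptions; init='enhanced' is used because it depends only on the code shown in the record above):

import numpy as np

data = np.random.rand(50, 200)                   # genes x cells
clusters = np.random.randint(0, 3, size=200)     # one cluster label per cell
M, W = nmf_init(data, clusters, k=3, init='enhanced')
print(M.shape, W.shape)                          # (50, 3) cluster means, (3, 200) soft weights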
def parse_sidebar(self, manga_page):
"""Parses the DOM and returns manga attributes in the sidebar.
:type manga_page: :class:`bs4.BeautifulSoup`
:param manga_page: MAL manga page's DOM
:rtype: dict
:return: manga attributes
:raises: :class:`.InvalidMangaError`, :class:`.MalformedMangaPageError`
"""
# if MAL says the series doesn't exist, raise an InvalidMangaError.
error_tag = manga_page.find(u'div', {'class': 'badresult'})
if error_tag:
raise InvalidMangaError(self.id)
try:
title_tag = manga_page.find(u'div', {'id': 'contentWrapper'}).find(u'h1')
if not title_tag.find(u'div'):
# otherwise, raise a MalformedMangaPageError.
raise MalformedMangaPageError(self.id, manga_page, message="Could not find title div")
except:
if not self.session.suppress_parse_exceptions:
raise
# otherwise, begin parsing.
manga_info = super(Manga, self).parse_sidebar(manga_page)
info_panel_first = manga_page.find(u'div', {'id': 'content'}).find(u'table').find(u'td')
try:
volumes_tag = info_panel_first.find(text=u'Volumes:').parent.parent
utilities.extract_tags(volumes_tag.find_all(u'span', {'class': 'dark_text'}))
manga_info[u'volumes'] = int(volumes_tag.text.strip()) if volumes_tag.text.strip() != 'Unknown' else None
except:
if not self.session.suppress_parse_exceptions:
raise
try:
chapters_tag = info_panel_first.find(text=u'Chapters:').parent.parent
utilities.extract_tags(chapters_tag.find_all(u'span', {'class': 'dark_text'}))
manga_info[u'chapters'] = int(chapters_tag.text.strip()) if chapters_tag.text.strip() != 'Unknown' else None
except:
if not self.session.suppress_parse_exceptions:
raise
try:
published_tag = info_panel_first.find(text=u'Published:').parent.parent
utilities.extract_tags(published_tag.find_all(u'span', {'class': 'dark_text'}))
published_parts = published_tag.text.strip().split(u' to ')
if len(published_parts) == 1:
# this published once.
try:
published_date = utilities.parse_profile_date(published_parts[0])
except ValueError:
raise MalformedMangaPageError(self.id, published_parts[0], message="Could not parse single publish date")
manga_info[u'published'] = (published_date,)
else:
# two publishing dates.
try:
publish_start = utilities.parse_profile_date(published_parts[0])
except ValueError:
raise MalformedMangaPageError(self.id, published_parts[0], message="Could not parse first of two publish dates")
if published_parts[1] == u'?':
# this is still publishing.
publish_end = None
else:
try:
publish_end = utilities.parse_profile_date(published_parts[1])
except ValueError:
raise MalformedMangaPageError(self.id, published_parts[1], message="Could not parse second of two publish dates")
manga_info[u'published'] = (publish_start, publish_end)
except:
if not self.session.suppress_parse_exceptions:
raise
try:
authors_tag = info_panel_first.find(text=u'Authors:').parent.parent
utilities.extract_tags(authors_tag.find_all(u'span', {'class': 'dark_text'}))
manga_info[u'authors'] = {}
for author_link in authors_tag.find_all('a'):
link_parts = author_link.get('href').split('/')
# of the form /people/1867/Naoki_Urasawa
person = self.session.person(int(link_parts[2])).set({'name': author_link.text})
role = author_link.nextSibling.replace(' (', '').replace(')', '')
manga_info[u'authors'][person] = role
except:
if not self.session.suppress_parse_exceptions:
raise
try:
serialization_tag = info_panel_first.find(text=u'Serialization:').parent.parent
publication_link = serialization_tag.find('a')
manga_info[u'serialization'] = None
if publication_link:
link_parts = publication_link.get('href').split('mid=')
# of the form /manga.php?mid=1
manga_info[u'serialization'] = self.session.publication(int(link_parts[1])).set({'name': publication_link.text})
except:
if not self.session.suppress_parse_exceptions:
raise
return manga_info | Parses the DOM and returns manga attributes in the sidebar.
:type manga_page: :class:`bs4.BeautifulSoup`
:param manga_page: MAL manga page's DOM
:rtype: dict
:return: manga attributes
:raises: :class:`.InvalidMangaError`, :class:`.MalformedMangaPageError` | Below is the the instruction that describes the task:
### Input:
Parses the DOM and returns manga attributes in the sidebar.
:type manga_page: :class:`bs4.BeautifulSoup`
:param manga_page: MAL manga page's DOM
:rtype: dict
:return: manga attributes
:raises: :class:`.InvalidMangaError`, :class:`.MalformedMangaPageError`
### Response:
def parse_sidebar(self, manga_page):
"""Parses the DOM and returns manga attributes in the sidebar.
:type manga_page: :class:`bs4.BeautifulSoup`
:param manga_page: MAL manga page's DOM
:rtype: dict
:return: manga attributes
:raises: :class:`.InvalidMangaError`, :class:`.MalformedMangaPageError`
"""
# if MAL says the series doesn't exist, raise an InvalidMangaError.
error_tag = manga_page.find(u'div', {'class': 'badresult'})
if error_tag:
raise InvalidMangaError(self.id)
try:
title_tag = manga_page.find(u'div', {'id': 'contentWrapper'}).find(u'h1')
if not title_tag.find(u'div'):
# otherwise, raise a MalformedMangaPageError.
raise MalformedMangaPageError(self.id, manga_page, message="Could not find title div")
except:
if not self.session.suppress_parse_exceptions:
raise
# otherwise, begin parsing.
manga_info = super(Manga, self).parse_sidebar(manga_page)
info_panel_first = manga_page.find(u'div', {'id': 'content'}).find(u'table').find(u'td')
try:
volumes_tag = info_panel_first.find(text=u'Volumes:').parent.parent
utilities.extract_tags(volumes_tag.find_all(u'span', {'class': 'dark_text'}))
manga_info[u'volumes'] = int(volumes_tag.text.strip()) if volumes_tag.text.strip() != 'Unknown' else None
except:
if not self.session.suppress_parse_exceptions:
raise
try:
chapters_tag = info_panel_first.find(text=u'Chapters:').parent.parent
utilities.extract_tags(chapters_tag.find_all(u'span', {'class': 'dark_text'}))
manga_info[u'chapters'] = int(chapters_tag.text.strip()) if chapters_tag.text.strip() != 'Unknown' else None
except:
if not self.session.suppress_parse_exceptions:
raise
try:
published_tag = info_panel_first.find(text=u'Published:').parent.parent
utilities.extract_tags(published_tag.find_all(u'span', {'class': 'dark_text'}))
published_parts = published_tag.text.strip().split(u' to ')
if len(published_parts) == 1:
# this published once.
try:
published_date = utilities.parse_profile_date(published_parts[0])
except ValueError:
raise MalformedMangaPageError(self.id, published_parts[0], message="Could not parse single publish date")
manga_info[u'published'] = (published_date,)
else:
# two publishing dates.
try:
publish_start = utilities.parse_profile_date(published_parts[0])
except ValueError:
raise MalformedMangaPageError(self.id, published_parts[0], message="Could not parse first of two publish dates")
if published_parts[1] == u'?':
# this is still publishing.
publish_end = None
else:
try:
publish_end = utilities.parse_profile_date(published_parts[1])
except ValueError:
raise MalformedMangaPageError(self.id, published_parts[1], message="Could not parse second of two publish dates")
manga_info[u'published'] = (publish_start, publish_end)
except:
if not self.session.suppress_parse_exceptions:
raise
try:
authors_tag = info_panel_first.find(text=u'Authors:').parent.parent
utilities.extract_tags(authors_tag.find_all(u'span', {'class': 'dark_text'}))
manga_info[u'authors'] = {}
for author_link in authors_tag.find_all('a'):
link_parts = author_link.get('href').split('/')
# of the form /people/1867/Naoki_Urasawa
person = self.session.person(int(link_parts[2])).set({'name': author_link.text})
role = author_link.nextSibling.replace(' (', '').replace(')', '')
manga_info[u'authors'][person] = role
except:
if not self.session.suppress_parse_exceptions:
raise
try:
serialization_tag = info_panel_first.find(text=u'Serialization:').parent.parent
publication_link = serialization_tag.find('a')
manga_info[u'serialization'] = None
if publication_link:
link_parts = publication_link.get('href').split('mid=')
# of the form /manga.php?mid=1
manga_info[u'serialization'] = self.session.publication(int(link_parts[1])).set({'name': publication_link.text})
except:
if not self.session.suppress_parse_exceptions:
raise
return manga_info |
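A hedged sketch of how parse_sidebar might be exercised (assumptions: `session` is an authenticated python-mal Session, `1` is a valid manga id, and a saved copy of the manga page exists on disk; only bs4 is imported directly):

import bs4

manga = session.manga(1)                                   # hypothetical Manga instance
with open('manga_page.html', encoding='utf-8') as f:
    dom = bs4.BeautifulSoup(f.read(), 'html.parser')
info = manga.parse_sidebar(dom)
print(info.get('volumes'), info.get('chapters'), info.get('serialization'))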
def extent_to_array(extent, source_crs, dest_crs=None):
"""Convert the supplied extent to geographic and return as an array.
:param extent: Rectangle defining a spatial extent in any CRS.
:type extent: QgsRectangle
:param source_crs: Coordinate system used for input extent.
:type source_crs: QgsCoordinateReferenceSystem
:param dest_crs: Coordinate system used for output extent. Defaults to
EPSG:4326 if not specified.
:type dest_crs: QgsCoordinateReferenceSystem
:returns: a list in the form [xmin, ymin, xmax, ymax] where all
coordinates provided are in Geographic / EPSG:4326.
:rtype: list
"""
if dest_crs is None:
geo_crs = QgsCoordinateReferenceSystem()
geo_crs.createFromSrid(4326)
else:
geo_crs = dest_crs
transform = QgsCoordinateTransform(source_crs, geo_crs,
QgsProject.instance())
# Get the clip area in the layer's crs
transformed_extent = transform.transformBoundingBox(extent)
geo_extent = [
transformed_extent.xMinimum(),
transformed_extent.yMinimum(),
transformed_extent.xMaximum(),
transformed_extent.yMaximum()]
return geo_extent | Convert the supplied extent to geographic and return as an array.
:param extent: Rectangle defining a spatial extent in any CRS.
:type extent: QgsRectangle
:param source_crs: Coordinate system used for input extent.
:type source_crs: QgsCoordinateReferenceSystem
:param dest_crs: Coordinate system used for output extent. Defaults to
EPSG:4326 if not specified.
:type dest_crs: QgsCoordinateReferenceSystem
:returns: a list in the form [xmin, ymin, xmax, ymax] where all
coordinates provided are in Geographic / EPSG:4326.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Convert the supplied extent to geographic and return as an array.
:param extent: Rectangle defining a spatial extent in any CRS.
:type extent: QgsRectangle
:param source_crs: Coordinate system used for input extent.
:type source_crs: QgsCoordinateReferenceSystem
:param dest_crs: Coordinate system used for output extent. Defaults to
EPSG:4326 if not specified.
:type dest_crs: QgsCoordinateReferenceSystem
:returns: a list in the form [xmin, ymin, xmax, ymax] where all
coordinates provided are in Geographic / EPSG:4326.
:rtype: list
### Response:
def extent_to_array(extent, source_crs, dest_crs=None):
"""Convert the supplied extent to geographic and return as an array.
:param extent: Rectangle defining a spatial extent in any CRS.
:type extent: QgsRectangle
:param source_crs: Coordinate system used for input extent.
:type source_crs: QgsCoordinateReferenceSystem
:param dest_crs: Coordinate system used for output extent. Defaults to
EPSG:4326 if not specified.
:type dest_crs: QgsCoordinateReferenceSystem
:returns: a list in the form [xmin, ymin, xmax, ymax] where all
coordinates provided are in Geographic / EPSG:4326.
:rtype: list
"""
if dest_crs is None:
geo_crs = QgsCoordinateReferenceSystem()
geo_crs.createFromSrid(4326)
else:
geo_crs = dest_crs
transform = QgsCoordinateTransform(source_crs, geo_crs,
QgsProject.instance())
# Get the clip area in the layer's crs
transformed_extent = transform.transformBoundingBox(extent)
geo_extent = [
transformed_extent.xMinimum(),
transformed_extent.yMinimum(),
transformed_extent.xMaximum(),
transformed_extent.yMaximum()]
return geo_extent |
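A short sketch of the call inside a QGIS 3 Python environment (the coordinates are arbitrary; because the source CRS is already EPSG:4326 the transform is effectively a no-op):

from qgis.core import QgsRectangle, QgsCoordinateReferenceSystem

extent = QgsRectangle(106.5, -6.5, 107.5, -5.5)
source_crs = QgsCoordinateReferenceSystem('EPSG:4326')
print(extent_to_array(extent, source_crs))   # [106.5, -6.5, 107.5, -5.5]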
def __initialize_menu_bar(self):
"""
Initializes Component menu_bar.
"""
self.__file_menu = QMenu("&File", parent=self.__menu_bar)
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|&New",
shortcut=QKeySequence.New,
slot=self.__new_file_action__triggered))
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|&Load ...",
shortcut=QKeySequence.Open,
slot=self.__load_file_action__triggered))
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Source ...",
slot=self.__source_file_action__triggered))
self.__file_menu.addSeparator()
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Add Project ...",
slot=self.__add_project_action__triggered))
self.__file_menu.addSeparator()
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|&Save",
shortcut=QKeySequence.Save,
slot=self.__save_file_action__triggered))
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Save As ...",
shortcut=QKeySequence.SaveAs,
slot=self.__save_file_as_action__triggered))
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Save All",
slot=self.__save_all_files_action__triggered))
self.__file_menu.addSeparator()
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Revert",
slot=self.__revert_file_action__triggered))
self.__file_menu.addSeparator()
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Close ...",
shortcut=QKeySequence.Close,
slot=self.__close_file_action__triggered))
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Close All ...",
shortcut=Qt.SHIFT + Qt.ControlModifier + Qt.Key_W,
slot=self.__close_all_files_action__triggered))
self.__file_menu.addSeparator()
for action in self.__recent_files_actions:
self.__file_menu.addAction(action)
self.__set_recent_files_actions()
self.__menu_bar.addMenu(self.__file_menu)
self.__edit_menu = QMenu("&Edit", parent=self.__menu_bar)
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|&Undo",
shortcut=QKeySequence.Undo,
slot=self.__undo_action__triggered))
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|&Redo",
shortcut=QKeySequence.Redo,
slot=self.__redo_action__triggered))
self.__edit_menu.addSeparator()
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|Cu&t",
shortcut=QKeySequence.Cut,
slot=self.__cut_action__triggered))
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|&Copy",
shortcut=QKeySequence.Copy,
slot=self.__copy_action__triggered))
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|&Paste",
shortcut=QKeySequence.Paste,
slot=self.__paste_action__triggered))
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|Delete",
slot=self.__delete_action__triggered))
self.__edit_menu.addSeparator()
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|Select All",
shortcut=QKeySequence.SelectAll,
slot=self.__select_all_action__triggered))
self.__menu_bar.addMenu(self.__edit_menu)
self.__source_menu = QMenu("&Source", parent=self.__menu_bar)
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Delete Line(s)",
shortcut=Qt.ControlModifier + Qt.Key_D,
slot=self.__delete_lines_action__triggered))
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Duplicate Line(s)",
shortcut=Qt.SHIFT + Qt.ControlModifier + Qt.Key_D,
slot=self.__duplicate_lines_action__triggered))
self.__source_menu.addSeparator()
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Move Up",
shortcut=Qt.SHIFT + Qt.ControlModifier + Qt.ALT + Qt.Key_Up,
slot=self.__move_up_action__triggered))
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Move Down",
shortcut=Qt.SHIFT + Qt.ControlModifier + Qt.ALT + Qt.Key_Down,
slot=self.__move_down_action__triggered))
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Indent Selection",
shortcut=Qt.Key_Tab,
slot=self.__indent_selection_action__triggered))
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Unindent Selection",
shortcut=Qt.Key_Backtab,
slot=self.__unindent_selection_action__triggered))
self.__source_menu.addSeparator()
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Convert Indentation To Tabs",
slot=self.__convert_indentation_to_tabs_action__triggered))
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Convert Indentation To Spaces",
slot=self.__convert_indentation_to_spaces_action__triggered))
self.__source_menu.addSeparator()
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Remove Trailing WhiteSpaces",
slot=self.__remove_trailing_white_spaces_action__triggered))
self.__source_menu.addSeparator()
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Toggle Comments",
shortcut=Qt.ControlModifier + Qt.Key_Slash,
slot=self.__toggle_comments_action__triggered))
self.__menu_bar.addMenu(self.__source_menu)
self.__navigate_menu = QMenu("&Navigate", parent=self.__menu_bar)
self.__navigate_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Navigate|Goto Line ...",
shortcut=Qt.ControlModifier + Qt.Key_L,
slot=self.__go_to_line_action__triggered))
self.__navigate_menu.addSeparator()
self.__menu_bar.addMenu(self.__navigate_menu)
self.__search_menu = QMenu("&Search", parent=self.__menu_bar)
self.__search_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Search|Search And Replace ...",
shortcut=Qt.ControlModifier + Qt.Key_F,
slot=self.__search_and_replace_action__triggered))
self.__search_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Search|Search In Files ...",
shortcut=Qt.ALT + Qt.ControlModifier + Qt.Key_F,
slot=self.__search_in_files_action__triggered))
self.__search_menu.addSeparator()
self.__search_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Search|Search Next",
shortcut=Qt.ControlModifier + Qt.Key_K,
slot=self.__search_next_action__triggered))
self.__search_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Search|Search Previous",
shortcut=Qt.SHIFT + Qt.ControlModifier + Qt.Key_K,
slot=self.__search_previous_action__triggered))
self.__menu_bar.addMenu(self.__search_menu)
self.__command_menu = QMenu("&Command", parent=self.__menu_bar)
self.__command_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Command|&Evaluate Selection",
shortcut=Qt.ControlModifier + Qt.Key_Return,
slot=self.__evaluate_selection_action__triggered))
self.__command_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Command|Evaluate &Script",
shortcut=Qt.SHIFT + Qt.CTRL + Qt.Key_Return,
slot=self.__evaluate_script_action__triggered))
self.__menu_bar.addMenu(self.__command_menu)
self.__view_menu = QMenu("&View", parent=self.__menu_bar)
self.__view_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&View|Increase Font Size",
shortcut=Qt.ControlModifier + Qt.Key_Plus,
slot=self.__increase_font_size_action__triggered))
self.__view_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&View|Decrease Font Size",
shortcut=Qt.ControlModifier + Qt.Key_Minus,
slot=self.__decrease_font_size_action__triggered))
self.__view_menu.addSeparator()
self.__view_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&View|Toggle Word Wrap",
slot=self.__toggle_word_wrap_action__triggered))
self.__view_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&View|Toggle White Spaces",
slot=self.__toggle_white_spaces_action__triggered))
self.__view_menu.addSeparator()
self.__view_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&View|Loop Through Editors",
shortcut=Qt.AltModifier + Qt.SHIFT + Qt.Key_Tab,
slot=self.__loop_through_editors_action__triggered))
self.__menu_bar.addMenu(self.__view_menu) | Initializes Component menu_bar. | Below is the the instruction that describes the task:
### Input:
Initializes Component menu_bar.
### Response:
def __initialize_menu_bar(self):
"""
Initializes Component menu_bar.
"""
self.__file_menu = QMenu("&File", parent=self.__menu_bar)
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|&New",
shortcut=QKeySequence.New,
slot=self.__new_file_action__triggered))
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|&Load ...",
shortcut=QKeySequence.Open,
slot=self.__load_file_action__triggered))
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Source ...",
slot=self.__source_file_action__triggered))
self.__file_menu.addSeparator()
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Add Project ...",
slot=self.__add_project_action__triggered))
self.__file_menu.addSeparator()
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|&Save",
shortcut=QKeySequence.Save,
slot=self.__save_file_action__triggered))
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Save As ...",
shortcut=QKeySequence.SaveAs,
slot=self.__save_file_as_action__triggered))
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Save All",
slot=self.__save_all_files_action__triggered))
self.__file_menu.addSeparator()
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Revert",
slot=self.__revert_file_action__triggered))
self.__file_menu.addSeparator()
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Close ...",
shortcut=QKeySequence.Close,
slot=self.__close_file_action__triggered))
self.__file_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&File|Close All ...",
shortcut=Qt.SHIFT + Qt.ControlModifier + Qt.Key_W,
slot=self.__close_all_files_action__triggered))
self.__file_menu.addSeparator()
for action in self.__recent_files_actions:
self.__file_menu.addAction(action)
self.__set_recent_files_actions()
self.__menu_bar.addMenu(self.__file_menu)
self.__edit_menu = QMenu("&Edit", parent=self.__menu_bar)
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|&Undo",
shortcut=QKeySequence.Undo,
slot=self.__undo_action__triggered))
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|&Redo",
shortcut=QKeySequence.Redo,
slot=self.__redo_action__triggered))
self.__edit_menu.addSeparator()
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|Cu&t",
shortcut=QKeySequence.Cut,
slot=self.__cut_action__triggered))
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|&Copy",
shortcut=QKeySequence.Copy,
slot=self.__copy_action__triggered))
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|&Paste",
shortcut=QKeySequence.Paste,
slot=self.__paste_action__triggered))
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|Delete",
slot=self.__delete_action__triggered))
self.__edit_menu.addSeparator()
self.__edit_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Edit|Select All",
shortcut=QKeySequence.SelectAll,
slot=self.__select_all_action__triggered))
self.__menu_bar.addMenu(self.__edit_menu)
self.__source_menu = QMenu("&Source", parent=self.__menu_bar)
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Delete Line(s)",
shortcut=Qt.ControlModifier + Qt.Key_D,
slot=self.__delete_lines_action__triggered))
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Duplicate Line(s)",
shortcut=Qt.SHIFT + Qt.ControlModifier + Qt.Key_D,
slot=self.__duplicate_lines_action__triggered))
self.__source_menu.addSeparator()
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Move Up",
shortcut=Qt.SHIFT + Qt.ControlModifier + Qt.ALT + Qt.Key_Up,
slot=self.__move_up_action__triggered))
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Move Down",
shortcut=Qt.SHIFT + Qt.ControlModifier + Qt.ALT + Qt.Key_Down,
slot=self.__move_down_action__triggered))
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Indent Selection",
shortcut=Qt.Key_Tab,
slot=self.__indent_selection_action__triggered))
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Unindent Selection",
shortcut=Qt.Key_Backtab,
slot=self.__unindent_selection_action__triggered))
self.__source_menu.addSeparator()
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Convert Indentation To Tabs",
slot=self.__convert_indentation_to_tabs_action__triggered))
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Convert Indentation To Spaces",
slot=self.__convert_indentation_to_spaces_action__triggered))
self.__source_menu.addSeparator()
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Remove Trailing WhiteSpaces",
slot=self.__remove_trailing_white_spaces_action__triggered))
self.__source_menu.addSeparator()
self.__source_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Source|Toggle Comments",
shortcut=Qt.ControlModifier + Qt.Key_Slash,
slot=self.__toggle_comments_action__triggered))
self.__menu_bar.addMenu(self.__source_menu)
self.__navigate_menu = QMenu("&Navigate", parent=self.__menu_bar)
self.__navigate_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Navigate|Goto Line ...",
shortcut=Qt.ControlModifier + Qt.Key_L,
slot=self.__go_to_line_action__triggered))
self.__navigate_menu.addSeparator()
self.__menu_bar.addMenu(self.__navigate_menu)
self.__search_menu = QMenu("&Search", parent=self.__menu_bar)
self.__search_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Search|Search And Replace ...",
shortcut=Qt.ControlModifier + Qt.Key_F,
slot=self.__search_and_replace_action__triggered))
self.__search_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Search|Search In Files ...",
shortcut=Qt.ALT + Qt.ControlModifier + Qt.Key_F,
slot=self.__search_in_files_action__triggered))
self.__search_menu.addSeparator()
self.__search_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Search|Search Next",
shortcut=Qt.ControlModifier + Qt.Key_K,
slot=self.__search_next_action__triggered))
self.__search_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Search|Search Previous",
shortcut=Qt.SHIFT + Qt.ControlModifier + Qt.Key_K,
slot=self.__search_previous_action__triggered))
self.__menu_bar.addMenu(self.__search_menu)
self.__command_menu = QMenu("&Command", parent=self.__menu_bar)
self.__command_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Command|&Evaluate Selection",
shortcut=Qt.ControlModifier + Qt.Key_Return,
slot=self.__evaluate_selection_action__triggered))
self.__command_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&Command|Evaluate &Script",
shortcut=Qt.SHIFT + Qt.CTRL + Qt.Key_Return,
slot=self.__evaluate_script_action__triggered))
self.__menu_bar.addMenu(self.__command_menu)
self.__view_menu = QMenu("&View", parent=self.__menu_bar)
self.__view_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&View|Increase Font Size",
shortcut=Qt.ControlModifier + Qt.Key_Plus,
slot=self.__increase_font_size_action__triggered))
self.__view_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&View|Decrease Font Size",
shortcut=Qt.ControlModifier + Qt.Key_Minus,
slot=self.__decrease_font_size_action__triggered))
self.__view_menu.addSeparator()
self.__view_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&View|Toggle Word Wrap",
slot=self.__toggle_word_wrap_action__triggered))
self.__view_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&View|Toggle White Spaces",
slot=self.__toggle_white_spaces_action__triggered))
self.__view_menu.addSeparator()
self.__view_menu.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.script_editor|&View|Loop Through Editors",
shortcut=Qt.AltModifier + Qt.SHIFT + Qt.Key_Tab,
slot=self.__loop_through_editors_action__triggered))
self.__menu_bar.addMenu(self.__view_menu) |
def proc_state(self, state_data, state_id):
"""Processes a state into an `Assembly`.
Parameters
----------
state_data : dict
Contains information about the state, including all
the per line structural data.
state_id : str
ID given to `Assembly` that represents the state.
"""
assembly = Assembly(assembly_id=state_id)
for k, chain in sorted(state_data.items()):
assembly._molecules.append(self.proc_chain(chain, assembly))
return assembly | Processes a state into an `Assembly`.
Parameters
----------
state_data : dict
Contains information about the state, including all
the per line structural data.
state_id : str
ID given to `Assembly` that represents the state. | Below is the the instruction that describes the task:
### Input:
Processes a state into an `Assembly`.
Parameters
----------
state_data : dict
Contains information about the state, including all
the per line structural data.
state_id : str
ID given to `Assembly` that represents the state.
### Response:
def proc_state(self, state_data, state_id):
"""Processes a state into an `Assembly`.
Parameters
----------
state_data : dict
Contains information about the state, including all
the per line structural data.
state_id : str
ID given to `Assembly` that represents the state.
"""
assembly = Assembly(assembly_id=state_id)
for k, chain in sorted(state_data.items()):
assembly._molecules.append(self.proc_chain(chain, assembly))
return assembly |
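A hedged sketch of the call (both names are placeholders: `parser` stands for an instance of the surrounding parser class, and the per-chain records must be whatever its proc_chain method expects):

state_data = {'A': chain_a_records, 'B': chain_b_records}   # hypothetical per-chain structural data
assembly = parser.proc_state(state_data, state_id='model_1')
print(len(assembly._molecules))                             # one molecule appended per chain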
def set_element_attributes(elem_to_parse, **attrib_kwargs):
"""
Adds the specified key/value pairs to the element's attributes, and
returns the updated set of attributes.
If the element already contains any of the attributes specified in
attrib_kwargs, they are updated accordingly.
"""
element = get_element(elem_to_parse)
if element is None:
return element
if len(attrib_kwargs):
element.attrib.update(attrib_kwargs)
return element.attrib | Adds the specified key/value pairs to the element's attributes, and
returns the updated set of attributes.
If the element already contains any of the attributes specified in
attrib_kwargs, they are updated accordingly. | Below is the the instruction that describes the task:
### Input:
Adds the specified key/value pairs to the element's attributes, and
returns the updated set of attributes.
If the element already contains any of the attributes specified in
attrib_kwargs, they are updated accordingly.
### Response:
def set_element_attributes(elem_to_parse, **attrib_kwargs):
"""
Adds the specified key/value pairs to the element's attributes, and
returns the updated set of attributes.
If the element already contains any of the attributes specified in
attrib_kwargs, they are updated accordingly.
"""
element = get_element(elem_to_parse)
if element is None:
return element
if len(attrib_kwargs):
element.attrib.update(attrib_kwargs)
return element.attrib |
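A minimal sketch using the standard library's ElementTree (it assumes the module's get_element helper accepts an Element and returns it unchanged):

from xml.etree.ElementTree import fromstring

elem = fromstring('<record id="1"/>')
attrs = set_element_attributes(elem, status='published', lang='en')
print(attrs)   # {'id': '1', 'status': 'published', 'lang': 'en'}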
def list_conversations(self, filter=None, filter_mode=None, include=None, include_all_conversation_ids=None, interleave_submissions=None, scope=None):
"""
List conversations.
Returns the list of conversations for the current user, most recent ones first.
"""
path = {}
data = {}
params = {}
# OPTIONAL - scope
"""When set, only return conversations of the specified type. For example,
set to "unread" to return only conversations that haven't been read.
The default behavior is to return all non-archived conversations (i.e.
read and unread)."""
if scope is not None:
self._validate_enum(scope, ["unread", "starred", "archived"])
params["scope"] = scope
# OPTIONAL - filter
"""When set, only return conversations for the specified courses, groups
or users. The id should be prefixed with its type, e.g. "user_123" or
"course_456". Can be an array (by setting "filter[]") or single value
(by setting "filter")"""
if filter is not None:
params["filter"] = filter
# OPTIONAL - filter_mode
"""When filter[] contains multiple filters, combine them with this mode,
filtering conversations that have at least all of the contexts ("and")
or at least one of the contexts ("or")"""
if filter_mode is not None:
self._validate_enum(filter_mode, ["and", "or", "default or"])
params["filter_mode"] = filter_mode
# OPTIONAL - interleave_submissions
"""(Obsolete) Submissions are no
longer linked to conversations. This parameter is ignored."""
if interleave_submissions is not None:
params["interleave_submissions"] = interleave_submissions
# OPTIONAL - include_all_conversation_ids
"""Default is false. If true,
the top-level element of the response will be an object rather than
an array, and will have the keys "conversations" which will contain the
paged conversation data, and "conversation_ids" which will contain the
ids of all conversations under this scope/filter in the same order."""
if include_all_conversation_ids is not None:
params["include_all_conversation_ids"] = include_all_conversation_ids
# OPTIONAL - include
""""participant_avatars":: Optionally include an "avatar_url" key for each user participanting in the conversation"""
if include is not None:
self._validate_enum(include, ["participant_avatars"])
params["include"] = include
self.logger.debug("GET /api/v1/conversations with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/conversations".format(**path), data=data, params=params, all_pages=True) | List conversations.
Returns the list of conversations for the current user, most recent ones first. | Below is the the instruction that describes the task:
### Input:
List conversations.
Returns the list of conversations for the current user, most recent ones first.
### Response:
def list_conversations(self, filter=None, filter_mode=None, include=None, include_all_conversation_ids=None, interleave_submissions=None, scope=None):
"""
List conversations.
Returns the list of conversations for the current user, most recent ones first.
"""
path = {}
data = {}
params = {}
# OPTIONAL - scope
"""When set, only return conversations of the specified type. For example,
set to "unread" to return only conversations that haven't been read.
The default behavior is to return all non-archived conversations (i.e.
read and unread)."""
if scope is not None:
self._validate_enum(scope, ["unread", "starred", "archived"])
params["scope"] = scope
# OPTIONAL - filter
"""When set, only return conversations for the specified courses, groups
or users. The id should be prefixed with its type, e.g. "user_123" or
"course_456". Can be an array (by setting "filter[]") or single value
(by setting "filter")"""
if filter is not None:
params["filter"] = filter
# OPTIONAL - filter_mode
"""When filter[] contains multiple filters, combine them with this mode,
filtering conversations that at have at least all of the contexts ("and")
or at least one of the contexts ("or")"""
if filter_mode is not None:
self._validate_enum(filter_mode, ["and", "or", "default or"])
params["filter_mode"] = filter_mode
# OPTIONAL - interleave_submissions
"""(Obsolete) Submissions are no
longer linked to conversations. This parameter is ignored."""
if interleave_submissions is not None:
params["interleave_submissions"] = interleave_submissions
# OPTIONAL - include_all_conversation_ids
"""Default is false. If true,
the top-level element of the response will be an object rather than
an array, and will have the keys "conversations" which will contain the
paged conversation data, and "conversation_ids" which will contain the
ids of all conversations under this scope/filter in the same order."""
if include_all_conversation_ids is not None:
params["include_all_conversation_ids"] = include_all_conversation_ids
# OPTIONAL - include
""""participant_avatars":: Optionally include an "avatar_url" key for each user participanting in the conversation"""
if include is not None:
self._validate_enum(include, ["participant_avatars"])
params["include"] = include
self.logger.debug("GET /api/v1/conversations with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/conversations".format(**path), data=data, params=params, all_pages=True) |
def is_ancestor_name(
frame: astroid.node_classes.NodeNG, node: astroid.node_classes.NodeNG
) -> bool:
"""return True if `frame` is an astroid.Class node with `node` in the
subtree of its bases attribute
"""
try:
bases = frame.bases
except AttributeError:
return False
for base in bases:
if node in base.nodes_of_class(astroid.Name):
return True
return False | return True if `frame` is an astroid.Class node with `node` in the
subtree of its bases attribute | Below is the the instruction that describes the task:
### Input:
return True if `frame` is an astroid.Class node with `node` in the
subtree of its bases attribute
### Response:
def is_ancestor_name(
frame: astroid.node_classes.NodeNG, node: astroid.node_classes.NodeNG
) -> bool:
"""return True if `frame` is an astroid.Class node with `node` in the
subtree of its bases attribute
"""
try:
bases = frame.bases
except AttributeError:
return False
for base in bases:
if node in base.nodes_of_class(astroid.Name):
return True
return False |
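A small sketch with astroid (assuming a reasonably recent astroid release; nodes_of_class yields the node itself when it matches, so the Name node in the bases list is found):

import astroid

module = astroid.parse('''
class Base: pass
class Child(Base): pass
''')
child = module.body[1]                      # the ClassDef for Child
base_name = child.bases[0]                  # the Name node referring to Base
print(is_ancestor_name(child, base_name))   # True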
async def prover_create_proof(wallet_handle: int,
proof_req_json: str,
requested_credentials_json: str,
master_secret_name: str,
schemas_json: str,
credential_defs_json: str,
rev_states_json: str) -> str:
"""
Creates a proof according to the given proof request
Either a corresponding credential with optionally revealed attributes or self-attested attribute must be provided
for each requested attribute (see indy_prover_get_credentials_for_pool_req).
A proof request may request multiple credentials from different schemas and different issuers.
All required schemas, public keys and revocation registries must be provided.
The proof request also contains nonce.
The proof contains either proof or self-attested attribute value for each requested attribute.
:param wallet_handle: wallet handler (created by open_wallet).
:param proof_req_json: proof request json
{
"name": string,
"version": string,
"nonce": string,
"requested_attributes": { // set of requested attributes
"<attr_referent>": <attr_info>, // see below
...,
},
"requested_predicates": { // set of requested predicates
"<predicate_referent>": <predicate_info>, // see below
...,
},
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified prover must prove non-revocation
// for date in this interval for each attribute
// (can be overridden on attribute level)
}
:param requested_credentials_json: either a credential or self-attested attribute for each requested attribute
{
"self_attested_attributes": {
"self_attested_attribute_referent": string
},
"requested_attributes": {
"requested_attribute_referent_1": {"cred_id": string, "timestamp": Optional<number>, revealed: <bool> }},
"requested_attribute_referent_2": {"cred_id": string, "timestamp": Optional<number>, revealed: <bool> }}
},
"requested_predicates": {
"requested_predicates_referent_1": {"cred_id": string, "timestamp": Optional<number> }},
}
}
:param master_secret_name: the id of the master secret stored in the wallet
:param schemas_json: all schemas json participating in the proof request
{
<schema1_id>: <schema1_json>,
<schema2_id>: <schema2_json>,
<schema3_id>: <schema3_json>,
}
:param credential_defs_json: all credential definitions json participating in the proof request
{
"cred_def1_id": <credential_def1_json>,
"cred_def2_id": <credential_def2_json>,
"cred_def3_id": <credential_def3_json>,
}
:param rev_states_json: all revocation states json participating in the proof request
{
"rev_reg_def1_id": {
"timestamp1": <rev_state1>,
"timestamp2": <rev_state2>,
},
"rev_reg_def2_id": {
"timestamp3": <rev_state3>
},
"rev_reg_def3_id": {
"timestamp4": <rev_state4>
},
}
where
wql query: indy-sdk/docs/design/011-wallet-query-language/README.md
attr_referent: Proof-request local identifier of requested attribute
attr_info: Describes requested attribute
{
"name": string, // attribute name, (case insensitive and ignore spaces)
"restrictions": Optional<[<wql query>]>,
// if specified, credential must satisfy to one of the given restriction.
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified prover must prove non-revocation
// for date in this interval this attribute
// (overrides proof level interval)
}
predicate_referent: Proof-request local identifier of requested attribute predicate
predicate_info: Describes requested attribute predicate
{
"name": attribute name, (case insensitive and ignore spaces)
"p_type": predicate type (Currently >= only)
"p_value": predicate value
"restrictions": Optional<[<wql query>]>,
// if specified, credential must satisfy to one of the given restriction.
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified prover must prove non-revocation
// for date in this interval this attribute
// (overrides proof level interval)
}
non_revoc_interval: Defines non-revocation interval
{
"from": Optional<int>, // timestamp of interval beginning
"to": Optional<int>, // timestamp of interval ending
}
:return: Proof json
For each requested attribute either a proof (with optionally revealed attribute value) or
self-attested attribute value is provided.
Each proof is associated with a credential and corresponding schema_id, cred_def_id, rev_reg_id and timestamp.
There is also aggregated proof part common for all credential proofs.
{
"requested_proof": {
"revealed_attrs": {
"requested_attr1_id": {sub_proof_index: number, raw: string, encoded: string},
"requested_attr4_id": {sub_proof_index: number: string, encoded: string},
},
"unrevealed_attrs": {
"requested_attr3_id": {sub_proof_index: number}
},
"self_attested_attrs": {
"requested_attr2_id": self_attested_value,
},
"requested_predicates": {
"requested_predicate_1_referent": {sub_proof_index: int},
"requested_predicate_2_referent": {sub_proof_index: int},
}
}
"proof": {
"proofs": [ <credential_proof>, <credential_proof>, <credential_proof> ],
"aggregated_proof": <aggregated_proof>
}
"identifiers": [{schema_id, cred_def_id, Optional<rev_reg_id>, Optional<timestamp>}]
}
"""
logger = logging.getLogger(__name__)
logger.debug("prover_create_proof: >>> wallet_handle: %r, proof_req_json: %r, requested_credentials_json: %r, "
"schemas_json: %r, master_secret_name: %r, credential_defs_json: %r, rev_infos_json: %r",
wallet_handle,
proof_req_json,
requested_credentials_json,
schemas_json,
master_secret_name,
credential_defs_json,
rev_states_json)
if not hasattr(prover_create_proof, "cb"):
logger.debug("prover_create_proof: Creating callback")
prover_create_proof.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_proof_req_json = c_char_p(proof_req_json.encode('utf-8'))
c_requested_credentials_json = c_char_p(requested_credentials_json.encode('utf-8'))
c_schemas_json = c_char_p(schemas_json.encode('utf-8'))
c_master_secret_name = c_char_p(master_secret_name.encode('utf-8'))
c_credential_defs_json = c_char_p(credential_defs_json.encode('utf-8'))
c_rev_infos_json = c_char_p(rev_states_json.encode('utf-8'))
proof_json = await do_call('indy_prover_create_proof',
c_wallet_handle,
c_proof_req_json,
c_requested_credentials_json,
c_master_secret_name,
c_schemas_json,
c_credential_defs_json,
c_rev_infos_json,
prover_create_proof.cb)
res = proof_json.decode()
logger.debug("prover_create_proof: <<< res: %r", res)
return res | Creates a proof according to the given proof request
Either a corresponding credential with optionally revealed attributes or self-attested attribute must be provided
for each requested attribute (see indy_prover_get_credentials_for_pool_req).
A proof request may request multiple credentials from different schemas and different issuers.
All required schemas, public keys and revocation registries must be provided.
The proof request also contains nonce.
The proof contains either proof or self-attested attribute value for each requested attribute.
:param wallet_handle: wallet handler (created by open_wallet).
:param proof_req_json: proof request json
{
"name": string,
"version": string,
"nonce": string,
"requested_attributes": { // set of requested attributes
"<attr_referent>": <attr_info>, // see below
...,
},
"requested_predicates": { // set of requested predicates
"<predicate_referent>": <predicate_info>, // see below
...,
},
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified prover must prove non-revocation
// for date in this interval for each attribute
// (can be overridden on attribute level)
}
:param requested_credentials_json: either a credential or self-attested attribute for each requested attribute
{
"self_attested_attributes": {
"self_attested_attribute_referent": string
},
"requested_attributes": {
"requested_attribute_referent_1": {"cred_id": string, "timestamp": Optional<number>, revealed: <bool> }},
"requested_attribute_referent_2": {"cred_id": string, "timestamp": Optional<number>, revealed: <bool> }}
},
"requested_predicates": {
"requested_predicates_referent_1": {"cred_id": string, "timestamp": Optional<number> }},
}
}
:param master_secret_name: the id of the master secret stored in the wallet
:param schemas_json: all schemas json participating in the proof request
{
<schema1_id>: <schema1_json>,
<schema2_id>: <schema2_json>,
<schema3_id>: <schema3_json>,
}
:param credential_defs_json: all credential definitions json participating in the proof request
{
"cred_def1_id": <credential_def1_json>,
"cred_def2_id": <credential_def2_json>,
"cred_def3_id": <credential_def3_json>,
}
:param rev_states_json: all revocation states json participating in the proof request
{
"rev_reg_def1_id": {
"timestamp1": <rev_state1>,
"timestamp2": <rev_state2>,
},
"rev_reg_def2_id": {
"timestamp3": <rev_state3>
},
"rev_reg_def3_id": {
"timestamp4": <rev_state4>
},
}
where
wql query: indy-sdk/docs/design/011-wallet-query-language/README.md
attr_referent: Proof-request local identifier of requested attribute
attr_info: Describes requested attribute
{
"name": string, // attribute name, (case insensitive and ignore spaces)
"restrictions": Optional<[<wql query>]>,
// if specified, credential must satisfy to one of the given restriction.
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified prover must prove non-revocation
// for date in this interval this attribute
// (overrides proof level interval)
}
predicate_referent: Proof-request local identifier of requested attribute predicate
predicate_info: Describes requested attribute predicate
{
"name": attribute name, (case insensitive and ignore spaces)
"p_type": predicate type (Currently >= only)
"p_value": predicate value
"restrictions": Optional<[<wql query>]>,
// if specified, credential must satisfy to one of the given restriction.
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified prover must proof non-revocation
// for date in this interval this attribute
// (overrides proof level interval)
}
non_revoc_interval: Defines non-revocation interval
{
"from": Optional<int>, // timestamp of interval beginning
"to": Optional<int>, // timestamp of interval ending
}
:return: Proof json
For each requested attribute either a proof (with optionally revealed attribute value) or
self-attested attribute value is provided.
Each proof is associated with a credential and corresponding schema_id, cred_def_id, rev_reg_id and timestamp.
There is also aggregated proof part common for all credential proofs.
{
"requested_proof": {
"revealed_attrs": {
"requested_attr1_id": {sub_proof_index: number, raw: string, encoded: string},
"requested_attr4_id": {sub_proof_index: number: string, encoded: string},
},
"unrevealed_attrs": {
"requested_attr3_id": {sub_proof_index: number}
},
"self_attested_attrs": {
"requested_attr2_id": self_attested_value,
},
"requested_predicates": {
"requested_predicate_1_referent": {sub_proof_index: int},
"requested_predicate_2_referent": {sub_proof_index: int},
}
}
"proof": {
"proofs": [ <credential_proof>, <credential_proof>, <credential_proof> ],
"aggregated_proof": <aggregated_proof>
}
"identifiers": [{schema_id, cred_def_id, Optional<rev_reg_id>, Optional<timestamp>}]
} | Below is the the instruction that describes the task:
### Input:
Creates a proof according to the given proof request
Either a corresponding credential with optionally revealed attributes or self-attested attribute must be provided
for each requested attribute (see indy_prover_get_credentials_for_pool_req).
A proof request may request multiple credentials from different schemas and different issuers.
All required schemas, public keys and revocation registries must be provided.
The proof request also contains nonce.
The proof contains either proof or self-attested attribute value for each requested attribute.
:param wallet_handle: wallet handler (created by open_wallet).
:param proof_req_json: proof request json
{
"name": string,
"version": string,
"nonce": string,
"requested_attributes": { // set of requested attributes
"<attr_referent>": <attr_info>, // see below
...,
},
"requested_predicates": { // set of requested predicates
"<predicate_referent>": <predicate_info>, // see below
...,
},
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified prover must prove non-revocation
// for date in this interval for each attribute
// (can be overridden on attribute level)
}
:param requested_credentials_json: either a credential or self-attested attribute for each requested attribute
{
"self_attested_attributes": {
"self_attested_attribute_referent": string
},
"requested_attributes": {
"requested_attribute_referent_1": {"cred_id": string, "timestamp": Optional<number>, revealed: <bool> }},
"requested_attribute_referent_2": {"cred_id": string, "timestamp": Optional<number>, revealed: <bool> }}
},
"requested_predicates": {
"requested_predicates_referent_1": {"cred_id": string, "timestamp": Optional<number> }},
}
}
:param master_secret_name: the id of the master secret stored in the wallet
:param schemas_json: all schemas json participating in the proof request
{
<schema1_id>: <schema1_json>,
<schema2_id>: <schema2_json>,
<schema3_id>: <schema3_json>,
}
:param credential_defs_json: all credential definitions json participating in the proof request
{
"cred_def1_id": <credential_def1_json>,
"cred_def2_id": <credential_def2_json>,
"cred_def3_id": <credential_def3_json>,
}
:param rev_states_json: all revocation states json participating in the proof request
{
"rev_reg_def1_id": {
"timestamp1": <rev_state1>,
"timestamp2": <rev_state2>,
},
"rev_reg_def2_id": {
"timestamp3": <rev_state3>
},
"rev_reg_def3_id": {
"timestamp4": <rev_state4>
},
}
where
wql query: indy-sdk/docs/design/011-wallet-query-language/README.md
attr_referent: Proof-request local identifier of requested attribute
attr_info: Describes requested attribute
{
"name": string, // attribute name, (case insensitive and ignore spaces)
"restrictions": Optional<[<wql query>]>,
// if specified, the credential must satisfy one of the given restrictions.
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified, the prover must prove non-revocation
// for a date in this interval for this attribute
// (overrides the proof-level interval)
}
predicate_referent: Proof-request local identifier of requested attribute predicate
predicate_info: Describes requested attribute predicate
{
"name": attribute name, (case insensitive and ignore spaces)
"p_type": predicate type (Currently >= only)
"p_value": predicate value
"restrictions": Optional<[<wql query>]>,
// if specified, the credential must satisfy one of the given restrictions.
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified, the prover must prove non-revocation
// for a date in this interval for this attribute
// (overrides the proof-level interval)
}
non_revoc_interval: Defines non-revocation interval
{
"from": Optional<int>, // timestamp of interval beginning
"to": Optional<int>, // timestamp of interval ending
}
:return: Proof json
For each requested attribute either a proof (with optionally revealed attribute value) or
self-attested attribute value is provided.
Each proof is associated with a credential and corresponding schema_id, cred_def_id, rev_reg_id and timestamp.
There is also aggregated proof part common for all credential proofs.
{
"requested_proof": {
"revealed_attrs": {
"requested_attr1_id": {sub_proof_index: number, raw: string, encoded: string},
"requested_attr4_id": {sub_proof_index: number: string, encoded: string},
},
"unrevealed_attrs": {
"requested_attr3_id": {sub_proof_index: number}
},
"self_attested_attrs": {
"requested_attr2_id": self_attested_value,
},
"requested_predicates": {
"requested_predicate_1_referent": {sub_proof_index: int},
"requested_predicate_2_referent": {sub_proof_index: int},
}
}
"proof": {
"proofs": [ <credential_proof>, <credential_proof>, <credential_proof> ],
"aggregated_proof": <aggregated_proof>
}
"identifiers": [{schema_id, cred_def_id, Optional<rev_reg_id>, Optional<timestamp>}]
}
### Response:
async def prover_create_proof(wallet_handle: int,
proof_req_json: str,
requested_credentials_json: str,
master_secret_name: str,
schemas_json: str,
credential_defs_json: str,
rev_states_json: str) -> str:
"""
Creates a proof according to the given proof request
Either a corresponding credential with optionally revealed attributes or self-attested attribute must be provided
for each requested attribute (see indy_prover_get_credentials_for_pool_req).
A proof request may request multiple credentials from different schemas and different issuers.
All required schemas, public keys and revocation registries must be provided.
The proof request also contains nonce.
The proof contains either proof or self-attested attribute value for each requested attribute.
:param wallet_handle: wallet handler (created by open_wallet).
:param proof_req_json: proof request json
{
"name": string,
"version": string,
"nonce": string,
"requested_attributes": { // set of requested attributes
"<attr_referent>": <attr_info>, // see below
...,
},
"requested_predicates": { // set of requested predicates
"<predicate_referent>": <predicate_info>, // see below
...,
},
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified, the prover must prove non-revocation
// for a date in this interval for each attribute
// (can be overridden at the attribute level)
}
:param requested_credentials_json: either a credential or self-attested attribute for each requested attribute
{
"self_attested_attributes": {
"self_attested_attribute_referent": string
},
"requested_attributes": {
"requested_attribute_referent_1": {"cred_id": string, "timestamp": Optional<number>, revealed: <bool> }},
"requested_attribute_referent_2": {"cred_id": string, "timestamp": Optional<number>, revealed: <bool> }}
},
"requested_predicates": {
"requested_predicates_referent_1": {"cred_id": string, "timestamp": Optional<number> }},
}
}
:param master_secret_name: the id of the master secret stored in the wallet
:param schemas_json: all schemas json participating in the proof request
{
<schema1_id>: <schema1_json>,
<schema2_id>: <schema2_json>,
<schema3_id>: <schema3_json>,
}
:param credential_defs_json: all credential definitions json participating in the proof request
{
"cred_def1_id": <credential_def1_json>,
"cred_def2_id": <credential_def2_json>,
"cred_def3_id": <credential_def3_json>,
}
:param rev_states_json: all revocation states json participating in the proof request
{
"rev_reg_def1_id": {
"timestamp1": <rev_state1>,
"timestamp2": <rev_state2>,
},
"rev_reg_def2_id": {
"timestamp3": <rev_state3>
},
"rev_reg_def3_id": {
"timestamp4": <rev_state4>
},
}
where
wql query: indy-sdk/docs/design/011-wallet-query-language/README.md
attr_referent: Proof-request local identifier of requested attribute
attr_info: Describes requested attribute
{
"name": string, // attribute name, (case insensitive and ignore spaces)
"restrictions": Optional<[<wql query>]>,
// if specified, the credential must satisfy one of the given restrictions.
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified, the prover must prove non-revocation
// for a date in this interval for this attribute
// (overrides the proof-level interval)
}
predicate_referent: Proof-request local identifier of requested attribute predicate
predicate_info: Describes requested attribute predicate
{
"name": attribute name, (case insensitive and ignore spaces)
"p_type": predicate type (Currently >= only)
"p_value": predicate value
"restrictions": Optional<[<wql query>]>,
// if specified, the credential must satisfy one of the given restrictions.
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified, the prover must prove non-revocation
// for a date in this interval for this attribute
// (overrides the proof-level interval)
}
non_revoc_interval: Defines non-revocation interval
{
"from": Optional<int>, // timestamp of interval beginning
"to": Optional<int>, // timestamp of interval ending
}
:return: Proof json
For each requested attribute either a proof (with optionally revealed attribute value) or
self-attested attribute value is provided.
Each proof is associated with a credential and corresponding schema_id, cred_def_id, rev_reg_id and timestamp.
There is also aggregated proof part common for all credential proofs.
{
"requested_proof": {
"revealed_attrs": {
"requested_attr1_id": {sub_proof_index: number, raw: string, encoded: string},
"requested_attr4_id": {sub_proof_index: number: string, encoded: string},
},
"unrevealed_attrs": {
"requested_attr3_id": {sub_proof_index: number}
},
"self_attested_attrs": {
"requested_attr2_id": self_attested_value,
},
"requested_predicates": {
"requested_predicate_1_referent": {sub_proof_index: int},
"requested_predicate_2_referent": {sub_proof_index: int},
}
}
"proof": {
"proofs": [ <credential_proof>, <credential_proof>, <credential_proof> ],
"aggregated_proof": <aggregated_proof>
}
"identifiers": [{schema_id, cred_def_id, Optional<rev_reg_id>, Optional<timestamp>}]
}
"""
logger = logging.getLogger(__name__)
logger.debug("prover_create_proof: >>> wallet_handle: %r, proof_req_json: %r, requested_credentials_json: %r, "
"schemas_json: %r, master_secret_name: %r, credential_defs_json: %r, rev_infos_json: %r",
wallet_handle,
proof_req_json,
requested_credentials_json,
schemas_json,
master_secret_name,
credential_defs_json,
rev_states_json)
if not hasattr(prover_create_proof, "cb"):
logger.debug("prover_create_proof: Creating callback")
prover_create_proof.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_proof_req_json = c_char_p(proof_req_json.encode('utf-8'))
c_requested_credentials_json = c_char_p(requested_credentials_json.encode('utf-8'))
c_schemas_json = c_char_p(schemas_json.encode('utf-8'))
c_master_secret_name = c_char_p(master_secret_name.encode('utf-8'))
c_credential_defs_json = c_char_p(credential_defs_json.encode('utf-8'))
c_rev_infos_json = c_char_p(rev_states_json.encode('utf-8'))
proof_json = await do_call('indy_prover_create_proof',
c_wallet_handle,
c_proof_req_json,
c_requested_credentials_json,
c_master_secret_name,
c_schemas_json,
c_credential_defs_json,
c_rev_infos_json,
prover_create_proof.cb)
res = proof_json.decode()
logger.debug("prover_create_proof: <<< res: %r", res)
return res |
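A minimal usage sketch for the coroutine above. The wallet handle, master secret id and every JSON payload below are placeholders the caller must already have prepared (they are not taken from the original source); the only point illustrated is that each structure is serialized to a JSON string before the call and the returned proof comes back as a JSON string.
import asyncio
import json

async def build_proof(wallet_handle, proof_req, requested_creds, schemas, cred_defs, rev_states):
    # Serialize each structure; the wrapper expects JSON strings, not dicts.
    proof_json = await prover_create_proof(wallet_handle,
                                           json.dumps(proof_req),
                                           json.dumps(requested_creds),
                                           "master_secret_id",  # assumed id of a previously created master secret
                                           json.dumps(schemas),
                                           json.dumps(cred_defs),
                                           json.dumps(rev_states))
    return json.loads(proof_json)

# asyncio.get_event_loop().run_until_complete(build_proof(handle, ...))  # handle from open_wallet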
def messages(path, thread, fmt, nocolor, timezones, utc, noprogress, resolve, directory):
"""
Conversion of Facebook chat history.
"""
with colorize_output(nocolor):
try:
chat_history = _process_history(
path=path, thread=thread, timezones=timezones,
utc=utc, noprogress=noprogress, resolve=resolve)
except ProcessingFailure:
return
if directory:
set_all_color(enabled=False)
write(fmt, chat_history, directory or sys.stdout) | Conversion of Facebook chat history. | Below is the instruction that describes the task:
### Input:
Conversion of Facebook chat history.
### Response:
def messages(path, thread, fmt, nocolor, timezones, utc, noprogress, resolve, directory):
"""
Conversion of Facebook chat history.
"""
with colorize_output(nocolor):
try:
chat_history = _process_history(
path=path, thread=thread, timezones=timezones,
utc=utc, noprogress=noprogress, resolve=resolve)
except ProcessingFailure:
return
if directory:
set_all_color(enabled=False)
write(fmt, chat_history, directory or sys.stdout) |
def read_string(buff, byteorder='big'):
"""Read a string from a file-like object."""
length = read_numeric(USHORT, buff, byteorder)
return buff.read(length).decode('utf-8') | Read a string from a file-like object. | Below is the instruction that describes the task:
### Input:
Read a string from a file-like object.
### Response:
def read_string(buff, byteorder='big'):
"""Read a string from a file-like object."""
length = read_numeric(USHORT, buff, byteorder)
return buff.read(length).decode('utf-8') |
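For illustration, a self-contained round trip with struct and io.BytesIO. It assumes USHORT/read_numeric correspond to an unsigned 16-bit length prefix in the given byte order, which is the usual layout for this kind of reader; the writer below is a hypothetical counterpart, not part of the original module.
import io
import struct

def write_string_sketch(buff, value, byteorder='big'):
    # Hypothetical counterpart: unsigned 16-bit length prefix, then UTF-8 payload.
    data = value.encode('utf-8')
    fmt = '>H' if byteorder == 'big' else '<H'
    buff.write(struct.pack(fmt, len(data)))
    buff.write(data)

buff = io.BytesIO()
write_string_sketch(buff, 'hello')
buff.seek(0)
length = struct.unpack('>H', buff.read(2))[0]  # what read_numeric(USHORT, buff) would yield
print(buff.read(length).decode('utf-8'))       # -> hello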
def all_faces_with_verts(self, v_indices, as_boolean=False):
'''
returns all of the faces that contain at least one of the vertices in v_indices
'''
import numpy as np
included_vertices = np.zeros(self.v.shape[0], dtype=bool)
included_vertices[np.array(v_indices, dtype=np.uint32)] = True
faces_with_verts = included_vertices[self.f].all(axis=1)
if as_boolean:
return faces_with_verts
return np.nonzero(faces_with_verts)[0] | returns all of the faces that contain at least one of the vertices in v_indices | Below is the instruction that describes the task:
### Input:
returns all of the faces that contain at least one of the vertices in v_indices
### Response:
def all_faces_with_verts(self, v_indices, as_boolean=False):
'''
returns all of the faces that contain at least one of the vertices in v_indices
'''
import numpy as np
included_vertices = np.zeros(self.v.shape[0], dtype=bool)
included_vertices[np.array(v_indices, dtype=np.uint32)] = True
faces_with_verts = included_vertices[self.f].all(axis=1)
if as_boolean:
return faces_with_verts
return np.nonzero(faces_with_verts)[0] |
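A small usage sketch on a toy mesh. ToyMesh is a hypothetical stand-in exposing only the v (vertices) and f (faces) arrays the method reads; the vertex indices are chosen so that exactly one face is matched.
import numpy as np

class ToyMesh(object):
    # Minimal stand-in with the attributes the method uses.
    def __init__(self, v, f):
        self.v = np.asarray(v, dtype=float)
        self.f = np.asarray(f, dtype=np.uint32)

mesh = ToyMesh(v=np.zeros((6, 3)), f=[[0, 1, 2], [3, 4, 5]])
print(all_faces_with_verts(mesh, [0, 1, 2]))                   # -> [0]
print(all_faces_with_verts(mesh, [0, 1, 2], as_boolean=True))  # -> [ True False]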
def interpolate2dStructuredPointSpreadIDW(grid, mask, kernel=15, power=2,
maxIter=1e5, copy=True):
'''
same as interpolate2dStructuredIDW but using the point spread method
this is faster if there are bigger connected masked areas and the border
length is smaller
replace all values in [grid] indicated by [mask]
with the inverse distance weighted interpolation of all values within
px+-kernel
[power] -> distance weighting factor: 1/distance**[power]
[copy] -> False: a bit faster, but modifies 'grid' and 'mask'
'''
assert grid.shape == mask.shape, 'grid and mask shape are different'
border = np.zeros(shape=mask.shape, dtype=np.bool)
if copy:
# copy mask as well because it will be modified later:
mask = mask.copy()
grid = grid.copy()
return _calc(grid, mask, border, kernel, power, maxIter) | same as interpolate2dStructuredIDW but using the point spread method
this is faster if there are bigger connected masked areas and the border
length is smaller
replace all values in [grid] indicated by [mask]
with the inverse distance weighted interpolation of all values within
px+-kernel
[power] -> distance weighting factor: 1/distance**[power]
[copy] -> False: a bit faster, but modifies 'grid' and 'mask' | Below is the instruction that describes the task:
### Input:
same as interpolate2dStructuredIDW but using the point spread method
this is faster if there are bigger connected masked areas and the border
length is smaller
replace all values in [grid] indicated by [mask]
with the inverse distance weighted interpolation of all values within
px+-kernel
[power] -> distance weighting factor: 1/distance**[power]
[copy] -> False: a bit faster, but modifies 'grid' and 'mask'
### Response:
def interpolate2dStructuredPointSpreadIDW(grid, mask, kernel=15, power=2,
maxIter=1e5, copy=True):
'''
same as interpolate2dStructuredIDW but using the point spread method
this is faster if there are bigger connected masked areas and the border
length is smaller
replace all values in [grid] indicated by [mask]
with the inverse distance weighted interpolation of all values within
px+-kernel
[power] -> distance weighting factor: 1/distance**[power]
[copy] -> False: a bit faster, but modifies 'grid' and 'mask'
'''
assert grid.shape == mask.shape, 'grid and mask shape are different'
border = np.zeros(shape=mask.shape, dtype=np.bool)
if copy:
# copy mask as well because it will be modified later:
mask = mask.copy()
grid = grid.copy()
return _calc(grid, mask, border, kernel, power, maxIter) |
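The actual fill is delegated to _calc (not shown here); the weighting it applies is plain inverse-distance weighting. A toy, self-contained illustration of that formula for a single masked pixel, independent of _calc:
import numpy as np

grid = np.array([[1., 2., 3.],
                 [4., np.nan, 6.],
                 [7., 8., 9.]])
yy, xx = np.indices(grid.shape)
valid = ~np.isnan(grid)
dist = np.hypot(yy - 1, xx - 1)[valid]                 # distances to the masked centre pixel
weights = 1.0 / dist**2                                # power = 2
print(np.sum(weights * grid[valid]) / weights.sum())   # IDW estimate -> 5.0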
def set_renamed_input_fields(self, renamed_input_fields):
"""This method expects a scalar string or a list of input_fields
to """
if not (isinstance(renamed_input_fields, basestring) or
isinstance(renamed_input_fields, ListType)):
raise ValueError("renamed_input_fields must be a string or a list")
self.renamed_input_fields = renamed_input_fields
return self | This method expects a scalar string or a list of input_fields
to | Below is the instruction that describes the task:
### Input:
This method expects a scalar string or a list of input_fields
to
### Response:
def set_renamed_input_fields(self, renamed_input_fields):
"""This method expects a scalar string or a list of input_fields
to """
if not (isinstance(renamed_input_fields, basestring) or
isinstance(renamed_input_fields, ListType)):
raise ValueError("renamed_input_fields must be a string or a list")
self.renamed_input_fields = renamed_input_fields
return self |
async def list(source):
"""Generate a single list from an asynchronous sequence."""
result = []
async with streamcontext(source) as streamer:
async for item in streamer:
result.append(item)
yield result | Generate a single list from an asynchronous sequence. | Below is the instruction that describes the task:
### Input:
Generate a single list from an asynchronous sequence.
### Response:
async def list(source):
"""Generate a single list from an asynchronous sequence."""
result = []
async with streamcontext(source) as streamer:
async for item in streamer:
result.append(item)
yield result |
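A runnable illustration of the same pattern with plain asyncio. The sketch below drops streamcontext (which belongs to the surrounding streaming framework) and keeps only the aggregation logic, so it is a simplified stand-in rather than the original operator.
import asyncio

async def numbers():
    for i in range(3):
        yield i

async def list_sketch(source):
    # Simplified stand-in: collect every item, then emit the single list.
    result = []
    async for item in source:
        result.append(item)
    yield result

async def main():
    async for collected in list_sketch(numbers()):
        print(collected)  # -> [0, 1, 2]

asyncio.run(main())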
def modified_data_decorator(function):
"""
Decorator to initialise the modified_data if necessary. To be used in list functions
to modify the list
"""
@wraps(function)
def func(self, *args, **kwargs):
"""Decorator function"""
if not self.get_read_only() or not self.is_locked():
self.initialise_modified_data()
return function(self, *args, **kwargs)
return lambda: None
return func | Decorator to initialise the modified_data if necessary. To be used in list functions
to modify the list | Below is the instruction that describes the task:
### Input:
Decorator to initialise the modified_data if necessary. To be used in list functions
to modify the list
### Response:
def modified_data_decorator(function):
"""
Decorator to initialise the modified_data if necessary. To be used in list functions
to modify the list
"""
@wraps(function)
def func(self, *args, **kwargs):
"""Decorator function"""
if not self.get_read_only() or not self.is_locked():
self.initialise_modified_data()
return function(self, *args, **kwargs)
return lambda: None
return func |
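A usage sketch with a toy host class, assuming the decorator above (and its functools.wraps import) is in scope. The class is hypothetical; it only provides the hooks the decorator calls (get_read_only, is_locked, initialise_modified_data), which in the real code live on the decorated object.
class ToyList(object):
    def __init__(self):
        self.original_data = [1, 2, 3]
        self.modified_data = None
    def get_read_only(self):
        return False
    def is_locked(self):
        return False
    def initialise_modified_data(self):
        if self.modified_data is None:
            self.modified_data = list(self.original_data)
    @modified_data_decorator
    def append(self, value):
        self.modified_data.append(value)
        return self.modified_data

items = ToyList()
print(items.append(4))  # -> [1, 2, 3, 4]; the decorator initialised modified_data first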
def custom_properties(self, props):
"""
Sets the custom properties file to use.
:param props: the props file
:type props: str
"""
fprops = javabridge.make_instance("java/io/File", "(Ljava/lang/String;)V", props)
javabridge.call(self.jobject, "setCustomPropsFile", "(Ljava/io/File;)V", fprops) | Sets the custom properties file to use.
:param props: the props file
:type props: str | Below is the instruction that describes the task:
### Input:
Sets the custom properties file to use.
:param props: the props file
:type props: str
### Response:
def custom_properties(self, props):
"""
Sets the custom properties file to use.
:param props: the props file
:type props: str
"""
fprops = javabridge.make_instance("java/io/File", "(Ljava/lang/String;)V", props)
javabridge.call(self.jobject, "setCustomPropsFile", "(Ljava/io/File;)V", fprops) |
def interp_value(mass, age, feh, icol,
grid, mass_col, ages, fehs, grid_Ns):
# return_box):
"""mass, age, feh are *single values* at which values are desired
icol is the column index of desired value
grid is nfeh x nage x max(nmass) x ncols array
mass_col is the column index of mass
ages is grid of ages
fehs is grid of fehs
grid_Ns keeps track of nmass in each slice (beyond this are nans)
"""
Nage = len(ages)
Nfeh = len(fehs)
ifeh = searchsorted(fehs, Nfeh, feh)
iage = searchsorted(ages, Nage, age)
pts = np.zeros((8,3))
vals = np.zeros(8)
i_f = ifeh - 1
i_a = iage - 1
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[0, 0] = grid[i_f, i_a, imass, mass_col]
pts[0, 1] = ages[i_a]
pts[0, 2] = fehs[i_f]
vals[0] = grid[i_f, i_a, imass, icol]
pts[1, 0] = grid[i_f, i_a, imass-1, mass_col]
pts[1, 1] = ages[i_a]
pts[1, 2] = fehs[i_f]
vals[1] = grid[i_f, i_a, imass-1, icol]
i_f = ifeh - 1
i_a = iage
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[2, 0] = grid[i_f, i_a, imass, mass_col]
pts[2, 1] = ages[i_a]
pts[2, 2] = fehs[i_f]
vals[2] = grid[i_f, i_a, imass, icol]
pts[3, 0] = grid[i_f, i_a, imass-1, mass_col]
pts[3, 1] = ages[i_a]
pts[3, 2] = fehs[i_f]
vals[3] = grid[i_f, i_a, imass-1, icol]
i_f = ifeh
i_a = iage - 1
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[4, 0] = grid[i_f, i_a, imass, mass_col]
pts[4, 1] = ages[i_a]
pts[4, 2] = fehs[i_f]
vals[4] = grid[i_f, i_a, imass, icol]
pts[5, 0] = grid[i_f, i_a, imass-1, mass_col]
pts[5, 1] = ages[i_a]
pts[5, 2] = fehs[i_f]
vals[5] = grid[i_f, i_a, imass-1, icol]
i_f = ifeh
i_a = iage
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[6, 0] = grid[i_f, i_a, imass, mass_col]
pts[6, 1] = ages[i_a]
pts[6, 2] = fehs[i_f]
vals[6] = grid[i_f, i_a, imass, icol]
pts[7, 0] = grid[i_f, i_a, imass-1, mass_col]
pts[7, 1] = ages[i_a]
pts[7, 2] = fehs[i_f]
vals[7] = grid[i_f, i_a, imass-1, icol]
# if return_box:
# return pts, vals
# else:
return interp_box(mass, age, feh, pts, vals) | mass, age, feh are *single values* at which values are desired
icol is the column index of desired value
grid is nfeh x nage x max(nmass) x ncols array
mass_col is the column index of mass
ages is grid of ages
fehs is grid of fehs
grid_Ns keeps track of nmass in each slice (beyond this are nans) | Below is the instruction that describes the task:
### Input:
mass, age, feh are *single values* at which values are desired
icol is the column index of desired value
grid is nfeh x nage x max(nmass) x ncols array
mass_col is the column index of mass
ages is grid of ages
fehs is grid of fehs
grid_Ns keeps track of nmass in each slice (beyond this are nans)
### Response:
def interp_value(mass, age, feh, icol,
grid, mass_col, ages, fehs, grid_Ns):
# return_box):
"""mass, age, feh are *single values* at which values are desired
icol is the column index of desired value
grid is nfeh x nage x max(nmass) x ncols array
mass_col is the column index of mass
ages is grid of ages
fehs is grid of fehs
grid_Ns keeps track of nmass in each slice (beyond this are nans)
"""
Nage = len(ages)
Nfeh = len(fehs)
ifeh = searchsorted(fehs, Nfeh, feh)
iage = searchsorted(ages, Nage, age)
pts = np.zeros((8,3))
vals = np.zeros(8)
i_f = ifeh - 1
i_a = iage - 1
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[0, 0] = grid[i_f, i_a, imass, mass_col]
pts[0, 1] = ages[i_a]
pts[0, 2] = fehs[i_f]
vals[0] = grid[i_f, i_a, imass, icol]
pts[1, 0] = grid[i_f, i_a, imass-1, mass_col]
pts[1, 1] = ages[i_a]
pts[1, 2] = fehs[i_f]
vals[1] = grid[i_f, i_a, imass-1, icol]
i_f = ifeh - 1
i_a = iage
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[2, 0] = grid[i_f, i_a, imass, mass_col]
pts[2, 1] = ages[i_a]
pts[2, 2] = fehs[i_f]
vals[2] = grid[i_f, i_a, imass, icol]
pts[3, 0] = grid[i_f, i_a, imass-1, mass_col]
pts[3, 1] = ages[i_a]
pts[3, 2] = fehs[i_f]
vals[3] = grid[i_f, i_a, imass-1, icol]
i_f = ifeh
i_a = iage - 1
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[4, 0] = grid[i_f, i_a, imass, mass_col]
pts[4, 1] = ages[i_a]
pts[4, 2] = fehs[i_f]
vals[4] = grid[i_f, i_a, imass, icol]
pts[5, 0] = grid[i_f, i_a, imass-1, mass_col]
pts[5, 1] = ages[i_a]
pts[5, 2] = fehs[i_f]
vals[5] = grid[i_f, i_a, imass-1, icol]
i_f = ifeh
i_a = iage
Nmass = grid_Ns[i_f, i_a]
imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
pts[6, 0] = grid[i_f, i_a, imass, mass_col]
pts[6, 1] = ages[i_a]
pts[6, 2] = fehs[i_f]
vals[6] = grid[i_f, i_a, imass, icol]
pts[7, 0] = grid[i_f, i_a, imass-1, mass_col]
pts[7, 1] = ages[i_a]
pts[7, 2] = fehs[i_f]
vals[7] = grid[i_f, i_a, imass-1, icol]
# if return_box:
# return pts, vals
# else:
return interp_box(mass, age, feh, pts, vals) |
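interp_value only gathers the eight grid corners that bracket (mass, age, feh) and hands them to interp_box. The original interp_box is not shown; the sketch below is an assumed corner-based interpolation (inverse-distance weighting over the eight corners), given purely to make the hand-off concrete rather than to reproduce the original implementation.
import numpy as np

def interp_box_sketch(x, y, z, box, values):
    # box: (8, 3) corner coordinates; values: the 8 corner values.
    d = np.sqrt((box[:, 0] - x)**2 + (box[:, 1] - y)**2 + (box[:, 2] - z)**2)
    if np.any(d == 0):
        return values[np.argmin(d)]           # exact corner hit
    w = 1.0 / d
    return np.sum(w * values) / np.sum(w)     # weighted average of the corners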
def rendered(self):
"""The rendered wire format for all conditions that have been rendered. Rendered conditions are never
cleared. A new :class:`~bloop.conditions.ConditionRenderer` should be used for each operation."""
expressions = {k: v for (k, v) in self.expressions.items() if v is not None}
if self.refs.attr_names:
expressions["ExpressionAttributeNames"] = self.refs.attr_names
if self.refs.attr_values:
expressions["ExpressionAttributeValues"] = self.refs.attr_values
return expressions | The rendered wire format for all conditions that have been rendered. Rendered conditions are never
cleared. A new :class:`~bloop.conditions.ConditionRenderer` should be used for each operation. | Below is the instruction that describes the task:
### Input:
The rendered wire format for all conditions that have been rendered. Rendered conditions are never
cleared. A new :class:`~bloop.conditions.ConditionRenderer` should be used for each operation.
### Response:
def rendered(self):
"""The rendered wire format for all conditions that have been rendered. Rendered conditions are never
cleared. A new :class:`~bloop.conditions.ConditionRenderer` should be used for each operation."""
expressions = {k: v for (k, v) in self.expressions.items() if v is not None}
if self.refs.attr_names:
expressions["ExpressionAttributeNames"] = self.refs.attr_names
if self.refs.attr_values:
expressions["ExpressionAttributeValues"] = self.refs.attr_values
return expressions |
def wvcal_spectrum(sp, fxpeaks, poly_degree_wfit, wv_master,
wv_ini_search=None, wv_end_search=None,
wvmin_useful=None, wvmax_useful=None,
geometry=None, debugplot=0):
"""Execute wavelength calibration of a spectrum using fixed line peaks.
Parameters
----------
sp : 1d numpy array
Spectrum to be wavelength calibrated.
fxpeaks : 1d numpy array
Refined location of peaks in array index scale, i.e, from 0
to naxis1 - 1. The wavelength calibration is performed using
these line locations.
poly_degree_wfit : int
Degree for wavelength calibration polynomial.
wv_master : 1d numpy array
Array with arc line wavelengths.
wv_ini_search : float or None
Minimum expected wavelength in spectrum.
wv_end_search : float or None
Maximum expected wavelength in spectrum.
wvmin_useful : float or None
If not None, this value is used to clip detected lines below it.
wvmax_useful : float or None
If not None, this value is used to clip detected lines above it.
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the Qt backend geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
solution_wv : instance of SolutionArcCalibration
Wavelength calibration solution.
"""
# check there are enough lines for fit
if len(fxpeaks) <= poly_degree_wfit:
print(">>> Warning: not enough lines to fit spectrum")
return None
# spectrum dimension
naxis1 = sp.shape[0]
wv_master_range = wv_master[-1] - wv_master[0]
delta_wv_master_range = 0.20 * wv_master_range
if wv_ini_search is None:
wv_ini_search = wv_master[0] - delta_wv_master_range
if wv_end_search is None:
wv_end_search = wv_master[-1] + delta_wv_master_range
# use channels (pixels from 1 to naxis1)
xchannel = fxpeaks + 1.0
# wavelength calibration
list_of_wvfeatures = arccalibration(
wv_master=wv_master,
xpos_arc=xchannel,
naxis1_arc=naxis1,
crpix1=1.0,
wv_ini_search=wv_ini_search,
wv_end_search=wv_end_search,
wvmin_useful=wvmin_useful,
wvmax_useful=wvmax_useful,
error_xpos_arc=3,
times_sigma_r=3.0,
frac_triplets_for_sum=0.50,
times_sigma_theil_sen=10.0,
poly_degree_wfit=poly_degree_wfit,
times_sigma_polfilt=10.0,
times_sigma_cook=10.0,
times_sigma_inclusion=10.0,
geometry=geometry,
debugplot=debugplot
)
title = "Wavelength calibration"
solution_wv = fit_list_of_wvfeatures(
list_of_wvfeatures=list_of_wvfeatures,
naxis1_arc=naxis1,
crpix1=1.0,
poly_degree_wfit=poly_degree_wfit,
weighted=False,
plot_title=title,
geometry=geometry,
debugplot=debugplot
)
if abs(debugplot) % 10 != 0:
# final plot with identified lines
xplot = np.arange(1, naxis1 + 1, dtype=float)
ax = ximplotxy(xplot, sp, title=title, show=False,
xlabel='pixel (from 1 to NAXIS1)',
ylabel='number of counts',
geometry=geometry)
ymin = sp.min()
ymax = sp.max()
dy = ymax-ymin
ymin -= dy/20.
ymax += dy/20.
ax.set_ylim([ymin, ymax])
# plot wavelength of each identified line
for feature in solution_wv.features:
xpos = feature.xpos
reference = feature.reference
ax.text(xpos, sp[int(xpos+0.5)-1],
str(reference), fontsize=8,
horizontalalignment='center')
# show plot
print('Plot with identified lines')
pause_debugplot(12, pltshow=True)
# return the wavelength calibration solution
return solution_wv | Execute wavelength calibration of a spectrum using fixed line peaks.
Parameters
----------
sp : 1d numpy array
Spectrum to be wavelength calibrated.
fxpeaks : 1d numpy array
Refined location of peaks in array index scale, i.e, from 0
to naxis1 - 1. The wavelength calibration is performed using
these line locations.
poly_degree_wfit : int
Degree for wavelength calibration polynomial.
wv_master : 1d numpy array
Array with arc line wavelengths.
wv_ini_search : float or None
Minimum expected wavelength in spectrum.
wv_end_search : float or None
Maximum expected wavelength in spectrum.
wvmin_useful : float or None
If not None, this value is used to clip detected lines below it.
wvmax_useful : float or None
If not None, this value is used to clip detected lines above it.
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the Qt backend geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
solution_wv : instance of SolutionArcCalibration
Wavelength calibration solution. | Below is the instruction that describes the task:
### Input:
Execute wavelength calibration of a spectrum using fixed line peaks.
Parameters
----------
sp : 1d numpy array
Spectrum to be wavelength calibrated.
fxpeaks : 1d numpy array
Refined location of peaks in array index scale, i.e, from 0
to naxis1 - 1. The wavelength calibration is performed using
these line locations.
poly_degree_wfit : int
Degree for wavelength calibration polynomial.
wv_master : 1d numpy array
Array with arc line wavelengths.
wv_ini_search : float or None
Minimum expected wavelength in spectrum.
wv_end_search : float or None
Maximum expected wavelength in spectrum.
wvmin_useful : float or None
If not None, this value is used to clip detected lines below it.
wvmax_useful : float or None
If not None, this value is used to clip detected lines above it.
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the Qt backend geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
solution_wv : instance of SolutionArcCalibration
Wavelength calibration solution.
### Response:
def wvcal_spectrum(sp, fxpeaks, poly_degree_wfit, wv_master,
wv_ini_search=None, wv_end_search=None,
wvmin_useful=None, wvmax_useful=None,
geometry=None, debugplot=0):
"""Execute wavelength calibration of a spectrum using fixed line peaks.
Parameters
----------
sp : 1d numpy array
Spectrum to be wavelength calibrated.
fxpeaks : 1d numpy array
Refined location of peaks in array index scale, i.e, from 0
to naxis1 - 1. The wavelength calibration is performed using
these line locations.
poly_degree_wfit : int
Degree for wavelength calibration polynomial.
wv_master : 1d numpy array
Array with arc line wavelengths.
wv_ini_search : float or None
Minimum expected wavelength in spectrum.
wv_end_search : float or None
Maximum expected wavelength in spectrum.
wvmin_useful : float or None
If not None, this value is used to clip detected lines below it.
wvmax_useful : float or None
If not None, this value is used to clip detected lines above it.
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the Qt backend geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
solution_wv : instance of SolutionArcCalibration
Wavelength calibration solution.
"""
# check there are enough lines for fit
if len(fxpeaks) <= poly_degree_wfit:
print(">>> Warning: not enough lines to fit spectrum")
return None
# spectrum dimension
naxis1 = sp.shape[0]
wv_master_range = wv_master[-1] - wv_master[0]
delta_wv_master_range = 0.20 * wv_master_range
if wv_ini_search is None:
wv_ini_search = wv_master[0] - delta_wv_master_range
if wv_end_search is None:
wv_end_search = wv_master[-1] + delta_wv_master_range
# use channels (pixels from 1 to naxis1)
xchannel = fxpeaks + 1.0
# wavelength calibration
list_of_wvfeatures = arccalibration(
wv_master=wv_master,
xpos_arc=xchannel,
naxis1_arc=naxis1,
crpix1=1.0,
wv_ini_search=wv_ini_search,
wv_end_search=wv_end_search,
wvmin_useful=wvmin_useful,
wvmax_useful=wvmax_useful,
error_xpos_arc=3,
times_sigma_r=3.0,
frac_triplets_for_sum=0.50,
times_sigma_theil_sen=10.0,
poly_degree_wfit=poly_degree_wfit,
times_sigma_polfilt=10.0,
times_sigma_cook=10.0,
times_sigma_inclusion=10.0,
geometry=geometry,
debugplot=debugplot
)
title = "Wavelength calibration"
solution_wv = fit_list_of_wvfeatures(
list_of_wvfeatures=list_of_wvfeatures,
naxis1_arc=naxis1,
crpix1=1.0,
poly_degree_wfit=poly_degree_wfit,
weighted=False,
plot_title=title,
geometry=geometry,
debugplot=debugplot
)
if abs(debugplot) % 10 != 0:
# final plot with identified lines
xplot = np.arange(1, naxis1 + 1, dtype=float)
ax = ximplotxy(xplot, sp, title=title, show=False,
xlabel='pixel (from 1 to NAXIS1)',
ylabel='number of counts',
geometry=geometry)
ymin = sp.min()
ymax = sp.max()
dy = ymax-ymin
ymin -= dy/20.
ymax += dy/20.
ax.set_ylim([ymin, ymax])
# plot wavelength of each identified line
for feature in solution_wv.features:
xpos = feature.xpos
reference = feature.reference
ax.text(xpos, sp[int(xpos+0.5)-1],
str(reference), fontsize=8,
horizontalalignment='center')
# show plot
print('Plot with identified lines')
pause_debugplot(12, pltshow=True)
# return the wavelength calibration solution
return solution_wv |
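A hedged usage sketch. The spectrum file, peak positions and master wavelengths below are placeholders, and the coeff attribute on the returned SolutionArcCalibration is assumed; the point is only the calling convention (0-based refined peak positions plus a sorted master line list).
import numpy as np

sp = np.loadtxt('arc_spectrum.txt')                        # placeholder 1-D arc spectrum
fxpeaks = np.array([102.3, 348.7, 512.1, 790.4, 1001.8])   # refined peak positions, 0-based pixels
wv_master = np.array([3650.15, 4046.56, 4358.33, 5460.74, 5769.60])
solution_wv = wvcal_spectrum(sp, fxpeaks, poly_degree_wfit=2,
                             wv_master=wv_master, debugplot=0)
if solution_wv is not None:
    print(solution_wv.coeff)   # calibration polynomial coefficients (attribute name assumed)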
def iter_size_changes(self, issue):
"""Yield an IssueSnapshot for each time the issue size changed
"""
# Find the first size change, if any
try:
size_changes = list(filter(lambda h: h.field == 'Story Points',
itertools.chain.from_iterable([c.items for c in issue.changelog.histories])))
except AttributeError:
return
# If we have no size changes and the issue has a current size then a size must have been specified at issue creation time.
# Return the size at creation time
try:
current_size = issue.fields.__dict__[self.fields['StoryPoints']]
except:
current_size = None
size = (size_changes[0].fromString) if len(size_changes) else current_size
# Issue was created
yield IssueSizeSnapshot(
change=None,
key=issue.key,
date=dateutil.parser.parse(issue.fields.created),
size=size
)
for change in issue.changelog.histories:
change_date = dateutil.parser.parse(change.created)
#sizes = list(filter(lambda i: i.field == 'Story Points', change.items))
#is_resolved = (sizes[-1].to is not None) if len(sizes) > 0 else is_resolved
for item in change.items:
if item.field == 'Story Points':
# StoryPoints value was changed
size = item.toString
yield IssueSizeSnapshot(
change=item.field,
key=issue.key,
date=change_date,
size=size
) | Yield an IssueSnapshot for each time the issue size changed | Below is the instruction that describes the task:
### Input:
Yield an IssueSnapshot for each time the issue size changed
### Response:
def iter_size_changes(self, issue):
"""Yield an IssueSnapshot for each time the issue size changed
"""
# Find the first size change, if any
try:
size_changes = list(filter(lambda h: h.field == 'Story Points',
itertools.chain.from_iterable([c.items for c in issue.changelog.histories])))
except AttributeError:
return
# If we have no size changes and the issue has a current size then a size must have been specified at issue creation time.
# Return the size at creation time
try:
current_size = issue.fields.__dict__[self.fields['StoryPoints']]
except:
current_size = None
size = (size_changes[0].fromString) if len(size_changes) else current_size
# Issue was created
yield IssueSizeSnapshot(
change=None,
key=issue.key,
date=dateutil.parser.parse(issue.fields.created),
size=size
)
for change in issue.changelog.histories:
change_date = dateutil.parser.parse(change.created)
#sizes = list(filter(lambda i: i.field == 'Story Points', change.items))
#is_resolved = (sizes[-1].to is not None) if len(sizes) > 0 else is_resolved
for item in change.items:
if item.field == 'Story Points':
# StoryPoints value was changed
size = item.toString
yield IssueSizeSnapshot(
change=item.field,
key=issue.key,
date=change_date,
size=size
) |
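A short consumption sketch. qm stands for whatever object defines iter_size_changes, and issue is a JIRA issue fetched with its changelog expanded; the printed values are illustrative only.
# issue = jira.issue('PROJ-17', expand='changelog')   # hypothetical fetch
for snapshot in qm.iter_size_changes(issue):
    print(snapshot.date.date(), snapshot.key, snapshot.size)
# 2019-01-04 PROJ-17 None   <- size at creation
# 2019-01-10 PROJ-17 5      <- Story Points changed to 5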
def data_to_elem_base(self):
"""Custom system base unconversion function"""
if not self.n or self._flags['sysbase'] is False:
return
self.R = mul(self.R, self.Sn) / self.system.mva
super(GovernorBase, self).data_to_elem_base() | Custom system base unconversion function | Below is the instruction that describes the task:
### Input:
Custom system base unconversion function
### Response:
def data_to_elem_base(self):
"""Custom system base unconversion function"""
if not self.n or self._flags['sysbase'] is False:
return
self.R = mul(self.R, self.Sn) / self.system.mva
super(GovernorBase, self).data_to_elem_base() |
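A numeric illustration of the R conversion above, with made-up values: a droop given on the system power base is rescaled to the machine base before the parent class handles the remaining parameters.
R_system_base = 0.05   # per unit on the 100 MVA system base
Sn = 200.0             # machine rating, MVA
system_mva = 100.0     # system base power, MVA
print(R_system_base * Sn / system_mva)   # -> 0.1 per unit on the machine base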
def provider_image(self):
"""Image path getter.
This method uses a pluggable image provider to retrieve an
image's path.
"""
if self._image is None:
if isinstance(self.configuration['disk']['image'], dict):
ProviderClass = lookup_provider_class(
self.configuration['disk']['image']['provider'])
self._image = ProviderClass(
self.configuration['disk']['image']).image
else:
# If image is not a dictionary, return it as is for backwards
# compatibility
self._image = self.configuration['disk']['image']
return self._image | Image path getter.
This method uses a pluggable image provider to retrieve an
image's path. | Below is the instruction that describes the task:
### Input:
Image path getter.
This method uses a pluggable image provider to retrieve an
image's path.
### Response:
def provider_image(self):
"""Image path getter.
This method uses a pluggable image provider to retrieve an
image's path.
"""
if self._image is None:
if isinstance(self.configuration['disk']['image'], dict):
ProviderClass = lookup_provider_class(
self.configuration['disk']['image']['provider'])
self._image = ProviderClass(
self.configuration['disk']['image']).image
else:
# If image is not a dictionary, return it as is for backwards
# compatibility
self._image = self.configuration['disk']['image']
return self._image |
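An illustrative sketch of the pluggable lookup this getter depends on. The registry, provider name and provider class below are hypothetical; only the call pattern (resolve the class from the 'provider' key, instantiate it with the image dict, read .image) mirrors the code above.
class PathImageProvider(object):
    # Hypothetical provider: a real one might download or decode the image here.
    def __init__(self, config):
        self.image = config['path']

_PROVIDERS = {'path': PathImageProvider}

def lookup_provider_class(name):
    return _PROVIDERS[name]

image_cfg = {'provider': 'path', 'path': '/var/lib/images/disk.qcow2'}
ProviderClass = lookup_provider_class(image_cfg['provider'])
print(ProviderClass(image_cfg).image)   # -> /var/lib/images/disk.qcow2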
def histograms(dat, keys=None, bins=25, logy=False, cmap=None, ncol=4):
"""
Plot histograms of all items in dat.
Parameters
----------
dat : dict
Data in {key: array} pairs.
keys : array-like
The keys in dat that you want to plot. If None,
all are plotted.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
cmap : dict
The colours that the different items should be. If None,
all are grey.
Returns
-------
fig, axes
"""
if keys is None:
keys = dat.keys()
ncol = int(ncol)
nrow = calc_nrow(len(keys), ncol)
fig, axs = plt.subplots(nrow, ncol, figsize=[ncol * 2, nrow * 2])
pn = 0
for k, ax in zip(keys, axs.flat):
tmp = nominal_values(dat[k])
x = tmp[~np.isnan(tmp)]
if cmap is not None:
c = cmap[k]
else:
c = (0, 0, 0, 0.5)
ax.hist(x, bins=bins, color=c)
if logy:
ax.set_yscale('log')
ylab = '$log_{10}(n)$'
else:
ylab = 'n'
ax.set_ylim(1, ax.get_ylim()[1])
if ax.is_first_col():
ax.set_ylabel(ylab)
ax.set_yticklabels([])
ax.text(.95, .95, k, ha='right', va='top', transform=ax.transAxes)
pn += 1
for ax in axs.flat[pn:]:
ax.set_visible(False)
fig.tight_layout()
return fig, axs | Plot histograms of all items in dat.
Parameters
----------
dat : dict
Data in {key: array} pairs.
keys : array-like
The keys in dat that you want to plot. If None,
all are plotted.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
cmap : dict
The colours that the different items should be. If None,
all are grey.
Returns
-------
fig, axes | Below is the instruction that describes the task:
### Input:
Plot histograms of all items in dat.
Parameters
----------
dat : dict
Data in {key: array} pairs.
keys : array-like
The keys in dat that you want to plot. If None,
all are plotted.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
cmap : dict
The colours that the different items should be. If None,
all are grey.
Returns
-------
fig, axes
### Response:
def histograms(dat, keys=None, bins=25, logy=False, cmap=None, ncol=4):
"""
Plot histograms of all items in dat.
Parameters
----------
dat : dict
Data in {key: array} pairs.
keys : array-like
The keys in dat that you want to plot. If None,
all are plotted.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
cmap : dict
The colours that the different items should be. If None,
all are grey.
Returns
-------
fig, axes
"""
if keys is None:
keys = dat.keys()
ncol = int(ncol)
nrow = calc_nrow(len(keys), ncol)
fig, axs = plt.subplots(nrow, ncol, figsize=[ncol * 2, nrow * 2])
pn = 0
for k, ax in zip(keys, axs.flat):
tmp = nominal_values(dat[k])
x = tmp[~np.isnan(tmp)]
if cmap is not None:
c = cmap[k]
else:
c = (0, 0, 0, 0.5)
ax.hist(x, bins=bins, color=c)
if logy:
ax.set_yscale('log')
ylab = '$log_{10}(n)$'
else:
ylab = 'n'
ax.set_ylim(1, ax.get_ylim()[1])
if ax.is_first_col():
ax.set_ylabel(ylab)
ax.set_yticklabels([])
ax.text(.95, .95, k, ha='right', va='top', transform=ax.transAxes)
pn += 1
for ax in axs.flat[pn:]:
ax.set_visible(False)
fig.tight_layout()
return fig, axs |
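A minimal usage sketch with synthetic data, assuming the function and its helpers (nominal_values, calc_nrow) are importable from their module; the element names and distributions are made up.
import numpy as np

dat = {k: np.random.normal(loc=i, scale=1.0, size=500)
       for i, k in enumerate(['Mg24', 'Al27', 'Ca43', 'Sr88', 'Ba138'])}
fig, axs = histograms(dat, bins=30, ncol=3)
fig.savefig('histograms.png')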
def html_factory(tag, **defaults):
'''Returns an :class:`Html` factory function for ``tag`` and a given
dictionary of ``defaults`` parameters. For example::
>>> input_factory = html_factory('input', type='text')
>>> html = input_factory(value='bla')
'''
def html_input(*children, **params):
p = defaults.copy()
p.update(params)
return Html(tag, *children, **p)
return html_input | Returns an :class:`Html` factory function for ``tag`` and a given
dictionary of ``defaults`` parameters. For example::
>>> input_factory = html_factory('input', type='text')
>>> html = input_factory(value='bla') | Below is the instruction that describes the task:
### Input:
Returns an :class:`Html` factory function for ``tag`` and a given
dictionary of ``defaults`` parameters. For example::
>>> input_factory = html_factory('input', type='text')
>>> html = input_factory(value='bla')
### Response:
def html_factory(tag, **defaults):
'''Returns an :class:`Html` factory function for ``tag`` and a given
dictionary of ``defaults`` parameters. For example::
>>> input_factory = html_factory('input', type='text')
>>> html = input_factory(value='bla')
'''
def html_input(*children, **params):
p = defaults.copy()
p.update(params)
return Html(tag, *children, **p)
return html_input |
def _process_cmap(cmap):
'''
Returns a kwarg dict suitable for a ColorScale
'''
option = {}
if isinstance(cmap, str):
option['scheme'] = cmap
elif isinstance(cmap, list):
option['colors'] = cmap
else:
raise ValueError('''`cmap` must be a string (name of a color scheme)
or a list of colors, but a value of {} was given
'''.format(cmap))
return option | Returns a kwarg dict suitable for a ColorScale | Below is the instruction that describes the task:
### Input:
Returns a kwarg dict suitable for a ColorScale
### Response:
def _process_cmap(cmap):
'''
Returns a kwarg dict suitable for a ColorScale
'''
option = {}
if isinstance(cmap, str):
option['scheme'] = cmap
elif isinstance(cmap, list):
option['colors'] = cmap
else:
raise ValueError('''`cmap` must be a string (name of a color scheme)
or a list of colors, but a value of {} was given
'''.format(cmap))
return option |
def write(self, data):
"""
Writes data to the TLS-wrapped socket
:param data:
A byte string to write to the socket
:raises:
socket.socket - when a non-TLS socket error occurs
oscrypto.errors.TLSError - when a TLS-related error occurs
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
"""
data_len = len(data)
while data_len:
if self._ssl is None:
self._raise_closed()
result = libssl.SSL_write(self._ssl, data, data_len)
self._raw_write()
if result <= 0:
error = libssl.SSL_get_error(self._ssl, result)
if error == LibsslConst.SSL_ERROR_WANT_READ:
if self._raw_read() != b'':
continue
raise_disconnection()
elif error == LibsslConst.SSL_ERROR_WANT_WRITE:
self._raw_write()
continue
elif error == LibsslConst.SSL_ERROR_ZERO_RETURN:
self._gracefully_closed = True
self._shutdown(False)
self._raise_closed()
else:
handle_openssl_error(0, TLSError)
data = data[result:]
data_len = len(data) | Writes data to the TLS-wrapped socket
:param data:
A byte string to write to the socket
:raises:
socket.socket - when a non-TLS socket error occurs
oscrypto.errors.TLSError - when a TLS-related error occurs
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library | Below is the instruction that describes the task:
### Input:
Writes data to the TLS-wrapped socket
:param data:
A byte string to write to the socket
:raises:
socket.socket - when a non-TLS socket error occurs
oscrypto.errors.TLSError - when a TLS-related error occurs
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
### Response:
def write(self, data):
"""
Writes data to the TLS-wrapped socket
:param data:
A byte string to write to the socket
:raises:
socket.socket - when a non-TLS socket error occurs
oscrypto.errors.TLSError - when a TLS-related error occurs
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
"""
data_len = len(data)
while data_len:
if self._ssl is None:
self._raise_closed()
result = libssl.SSL_write(self._ssl, data, data_len)
self._raw_write()
if result <= 0:
error = libssl.SSL_get_error(self._ssl, result)
if error == LibsslConst.SSL_ERROR_WANT_READ:
if self._raw_read() != b'':
continue
raise_disconnection()
elif error == LibsslConst.SSL_ERROR_WANT_WRITE:
self._raw_write()
continue
elif error == LibsslConst.SSL_ERROR_ZERO_RETURN:
self._gracefully_closed = True
self._shutdown(False)
self._raise_closed()
else:
handle_openssl_error(0, TLSError)
data = data[result:]
data_len = len(data) |
def _adjustFileAlignment(self, value, fileAlignment):
"""
Align a value to C{FileAligment}.
@type value: int
@param value: The value to align.
@type fileAlignment: int
@param fileAlignment: The value to be used to align the C{value} parameter.
@rtype: int
@return: The aligned value.
"""
if fileAlignment > consts.DEFAULT_FILE_ALIGNMENT:
if not utils.powerOfTwo(fileAlignment):
print "Warning: FileAlignment is greater than DEFAULT_FILE_ALIGNMENT (0x200) and is not power of two."
if fileAlignment < consts.DEFAULT_FILE_ALIGNMENT:
return value
if fileAlignment and value % fileAlignment:
return ((value // fileAlignment) + 1) * fileAlignment
return value | Align a value to C{FileAligment}.
@type value: int
@param value: The value to align.
@type fileAlignment: int
@param fileAlignment: The value to be used to align the C{value} parameter.
@rtype: int
@return: The aligned value. | Below is the instruction that describes the task:
### Input:
Align a value to C{FileAligment}.
@type value: int
@param value: The value to align.
@type fileAlignment: int
@param fileAlignment: The value to be used to align the C{value} parameter.
@rtype: int
@return: The aligned value.
### Response:
def _adjustFileAlignment(self, value, fileAlignment):
"""
Align a value to C{FileAligment}.
@type value: int
@param value: The value to align.
@type fileAlignment: int
@param fileAlignment: The value to be used to align the C{value} parameter.
@rtype: int
@return: The aligned value.
"""
if fileAlignment > consts.DEFAULT_FILE_ALIGNMENT:
if not utils.powerOfTwo(fileAlignment):
print "Warning: FileAlignment is greater than DEFAULT_FILE_ALIGNMENT (0x200) and is not power of two."
if fileAlignment < consts.DEFAULT_FILE_ALIGNMENT:
return value
if fileAlignment and value % fileAlignment:
return ((value // fileAlignment) + 1) * fileAlignment
return value |
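A worked example of the rounding above: 0x3A0 is not a multiple of the 0x200 file alignment, so it is pushed up to the next boundary, while an already-aligned value is returned unchanged.
value, fileAlignment = 0x3A0, 0x200
print(hex(((value // fileAlignment) + 1) * fileAlignment))  # -> 0x400
print(hex(0x400 % fileAlignment))                           # -> 0x0, so 0x400 would pass through untouched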
def _sign_threshold_signature_fulfillment(cls, input_, message, key_pairs):
"""Signs a ThresholdSha256.
Args:
input_ (:class:`~bigchaindb.common.transaction.
Input`) The Input to be signed.
message (str): The message to be signed
key_pairs (dict): The keys to sign the Transaction with.
"""
input_ = deepcopy(input_)
message = sha3_256(message.encode())
if input_.fulfills:
message.update('{}{}'.format(
input_.fulfills.txid, input_.fulfills.output).encode())
for owner_before in set(input_.owners_before):
# TODO: CC should throw a KeypairMismatchException, instead of
# our manual mapping here
# TODO FOR CC: Naming wise this is not so smart,
# `get_subcondition` in fact doesn't return a
# condition but a fulfillment
# TODO FOR CC: `get_subcondition` is singular. One would not
# expect to get a list back.
ccffill = input_.fulfillment
subffills = ccffill.get_subcondition_from_vk(
base58.b58decode(owner_before))
if not subffills:
raise KeypairMismatchException('Public key {} cannot be found '
'in the fulfillment'
.format(owner_before))
try:
private_key = key_pairs[owner_before]
except KeyError:
raise KeypairMismatchException('Public key {} is not a pair '
'to any of the private keys'
.format(owner_before))
# cryptoconditions makes no assumptions of the encoding of the
# message to sign or verify. It only accepts bytestrings
for subffill in subffills:
subffill.sign(
message.digest(), base58.b58decode(private_key.encode()))
return input_ | Signs a ThresholdSha256.
Args:
input_ (:class:`~bigchaindb.common.transaction.
Input`) The Input to be signed.
message (str): The message to be signed
key_pairs (dict): The keys to sign the Transaction with. | Below is the instruction that describes the task:
### Input:
Signs a ThresholdSha256.
Args:
input_ (:class:`~bigchaindb.common.transaction.
Input`) The Input to be signed.
message (str): The message to be signed
key_pairs (dict): The keys to sign the Transaction with.
### Response:
def _sign_threshold_signature_fulfillment(cls, input_, message, key_pairs):
"""Signs a ThresholdSha256.
Args:
input_ (:class:`~bigchaindb.common.transaction.
Input`) The Input to be signed.
message (str): The message to be signed
key_pairs (dict): The keys to sign the Transaction with.
"""
input_ = deepcopy(input_)
message = sha3_256(message.encode())
if input_.fulfills:
message.update('{}{}'.format(
input_.fulfills.txid, input_.fulfills.output).encode())
for owner_before in set(input_.owners_before):
# TODO: CC should throw a KeypairMismatchException, instead of
# our manual mapping here
# TODO FOR CC: Naming wise this is not so smart,
# `get_subcondition` in fact doesn't return a
# condition but a fulfillment
# TODO FOR CC: `get_subcondition` is singular. One would not
# expect to get a list back.
ccffill = input_.fulfillment
subffills = ccffill.get_subcondition_from_vk(
base58.b58decode(owner_before))
if not subffills:
raise KeypairMismatchException('Public key {} cannot be found '
'in the fulfillment'
.format(owner_before))
try:
private_key = key_pairs[owner_before]
except KeyError:
raise KeypairMismatchException('Public key {} is not a pair '
'to any of the private keys'
.format(owner_before))
# cryptoconditions makes no assumptions of the encoding of the
# message to sign or verify. It only accepts bytestrings
for subffill in subffills:
subffill.sign(
message.digest(), base58.b58decode(private_key.encode()))
return input_ |
def find_omega_min(omega, Nl, detuningsij, i_d, I_nd):
r"""This function returns a list of length Nl containing the mininmal frequency
that each laser excites.
"""
omega_min = []
omega_min_indices = []
for l in range(Nl):
omegas = sorted([(omega[i_d(p[0]+1)-1][i_d(p[1]+1)-1], p)
for p in detuningsij[l]])
omega_min += [omegas[0][0]]
omega_min_indices += [omegas[0][1]]
return omega_min, omega_min_indices | r"""This function returns a list of length Nl containing the minimal frequency
that each laser excites. | Below is the instruction that describes the task:
### Input:
r"""This function returns a list of length Nl containing the mininmal frequency
that each laser excites.
### Response:
def find_omega_min(omega, Nl, detuningsij, i_d, I_nd):
r"""This function returns a list of length Nl containing the mininmal frequency
that each laser excites.
"""
omega_min = []
omega_min_indices = []
for l in range(Nl):
omegas = sorted([(omega[i_d(p[0]+1)-1][i_d(p[1]+1)-1], p)
for p in detuningsij[l]])
omega_min += [omegas[0][0]]
omega_min_indices += [omegas[0][1]]
return omega_min, omega_min_indices |
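A toy illustration with made-up structures: two lasers, an identity fine-structure index map, and a symmetric frequency matrix. I_nd is unused in the body shown above, so None is passed for it.
omega = [[0.0, 1.5, 2.0],
         [1.5, 0.0, 0.7],
         [2.0, 0.7, 0.0]]
detuningsij = [[(0, 1), (0, 2)],   # laser 0 couples the (1,2) and (1,3) level pairs (1-based)
               [(1, 2)]]           # laser 1 couples the (2,3) pair
i_d = lambda i: i                  # identity index map
omega_min, omega_min_indices = find_omega_min(omega, 2, detuningsij, i_d, None)
print(omega_min)           # -> [1.5, 0.7]
print(omega_min_indices)   # -> [(0, 1), (1, 2)]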
def post(self, url, body=None):
"""Sends this `Resource` instance to the service with a
``POST`` request to the given URL. Takes an optional body"""
response = self.http_request(url, 'POST', body or self, {'Content-Type': 'application/xml; charset=utf-8'})
if response.status not in (200, 201, 204):
self.raise_http_error(response)
self._url = response.getheader('Location')
if response.status in (200, 201):
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
self.update_from_element(ElementTree.fromstring(response_xml)) | Sends this `Resource` instance to the service with a
``POST`` request to the given URL. Takes an optional body | Below is the instruction that describes the task:
### Input:
Sends this `Resource` instance to the service with a
``POST`` request to the given URL. Takes an optional body
### Response:
def post(self, url, body=None):
"""Sends this `Resource` instance to the service with a
``POST`` request to the given URL. Takes an optional body"""
response = self.http_request(url, 'POST', body or self, {'Content-Type': 'application/xml; charset=utf-8'})
if response.status not in (200, 201, 204):
self.raise_http_error(response)
self._url = response.getheader('Location')
if response.status in (200, 201):
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
self.update_from_element(ElementTree.fromstring(response_xml)) |
def references(self):
"""Return EIDs of references of an article.
Note: Requires the FULL view of the article.
"""
refs = self.items.find('bibrecord/tail/bibliography', ns)
if refs is not None:
eids = [r.find("ref-info/refd-itemidlist/itemid", ns).text for r
in refs.findall("reference", ns)]
return ["2-s2.0-" + eid for eid in eids]
else:
return None | Return EIDs of references of an article.
Note: Requires the FULL view of the article. | Below is the instruction that describes the task:
### Input:
Return EIDs of references of an article.
Note: Requires the FULL view of the article.
### Response:
def references(self):
"""Return EIDs of references of an article.
Note: Requires the FULL view of the article.
"""
refs = self.items.find('bibrecord/tail/bibliography', ns)
if refs is not None:
eids = [r.find("ref-info/refd-itemidlist/itemid", ns).text for r
in refs.findall("reference", ns)]
return ["2-s2.0-" + eid for eid in eids]
else:
return None |
def _run(self, peer):
"""Sends open message to peer and handles received messages.
Parameters:
- `peer`: the peer to which this protocol instance is connected to.
"""
# We know the peer we are connected to, we send open message.
self._peer = peer
self.connection_made()
# We wait for peer to send messages.
self._recv_loop() | Sends open message to peer and handles received messages.
Parameters:
- `peer`: the peer to which this protocol instance is connected to. | Below is the instruction that describes the task:
### Input:
Sends open message to peer and handles received messages.
Parameters:
- `peer`: the peer to which this protocol instance is connected to.
### Response:
def _run(self, peer):
"""Sends open message to peer and handles received messages.
Parameters:
- `peer`: the peer to which this protocol instance is connected to.
"""
# We know the peer we are connected to, we send open message.
self._peer = peer
self.connection_made()
# We wait for peer to send messages.
self._recv_loop() |
def get(self, pk=None, **kwargs):
"""Return one and exactly one notification template.
Note here configuration-related fields like
'notification_configuration' and 'channels' will not be
used even provided.
Lookups may be through a primary key, specified as a positional
argument, and/or through filters specified through keyword arguments.
If the number of results does not equal one, raise an exception.
=====API DOCS=====
Retrieve one and exactly one object.
:param pk: Primary key of the resource to be read. Tower CLI will only attempt to read *that* object
if ``pk`` is provided (not ``None``).
:type pk: int
:param `**kwargs`: Keyword arguments used to look up resource object to retrieve if ``pk`` is not provided.
:returns: loaded JSON of the retrieved resource object.
:rtype: dict
=====API DOCS=====
"""
self._separate(kwargs)
return super(Resource, self).get(pk=pk, **kwargs) | Return one and exactly one notification template.
Note here configuration-related fields like
'notification_configuration' and 'channels' will not be
used even if provided.
Lookups may be through a primary key, specified as a positional
argument, and/or through filters specified through keyword arguments.
If the number of results does not equal one, raise an exception.
=====API DOCS=====
Retrieve one and exactly one object.
:param pk: Primary key of the resource to be read. Tower CLI will only attempt to read *that* object
if ``pk`` is provided (not ``None``).
:type pk: int
:param `**kwargs`: Keyword arguments used to look up resource object to retrieve if ``pk`` is not provided.
:returns: loaded JSON of the retrieved resource object.
:rtype: dict
=====API DOCS===== | Below is the the instruction that describes the task:
### Input:
Return one and exactly one notification template.
Note here configuration-related fields like
'notification_configuration' and 'channels' will not be
used even if provided.
Lookups may be through a primary key, specified as a positional
argument, and/or through filters specified through keyword arguments.
If the number of results does not equal one, raise an exception.
=====API DOCS=====
Retrieve one and exactly one object.
:param pk: Primary key of the resource to be read. Tower CLI will only attempt to read *that* object
if ``pk`` is provided (not ``None``).
:type pk: int
:param `**kwargs`: Keyword arguments used to look up resource object to retrieve if ``pk`` is not provided.
:returns: loaded JSON of the retrieved resource object.
:rtype: dict
=====API DOCS=====
### Response:
def get(self, pk=None, **kwargs):
"""Return one and exactly one notification template.
Note here configuration-related fields like
'notification_configuration' and 'channels' will not be
used even if provided.
Lookups may be through a primary key, specified as a positional
argument, and/or through filters specified through keyword arguments.
If the number of results does not equal one, raise an exception.
=====API DOCS=====
Retrieve one and exactly one object.
:param pk: Primary key of the resource to be read. Tower CLI will only attempt to read *that* object
if ``pk`` is provided (not ``None``).
:type pk: int
:param `**kwargs`: Keyword arguments used to look up resource object to retrieve if ``pk`` is not provided.
:returns: loaded JSON of the retrieved resource object.
:rtype: dict
=====API DOCS=====
"""
self._separate(kwargs)
return super(Resource, self).get(pk=pk, **kwargs) |
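A hedged usage sketch of the method above; `resource` and the lookup values are illustrative, not names from the original source.
# Look up by primary key
template = resource.get(42)
# Or look up by a unique filter; raises if the match is not exactly one
template = resource.get(name="nightly-build-alerts")
print(template)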
def _joinOnAsPriv(self, model, onIndex, whatAs):
"""
Private method for handling joins.
"""
if self._join:
raise Exception("Already joined with a table!")
self._join = model
self._joinedField = whatAs
table = model.table
self._query = self._query.eq_join(onIndex, r.table(table))
return self | Private method for handling joins. | Below is the the instruction that describes the task:
### Input:
Private method for handling joins.
### Response:
def _joinOnAsPriv(self, model, onIndex, whatAs):
"""
Private method for handling joins.
"""
if self._join:
raise Exception("Already joined with a table!")
self._join = model
self._joinedField = whatAs
table = model.table
self._query = self._query.eq_join(onIndex, r.table(table))
return self |
def get_system_root_directory():
"""
Get system root directory (application installed root directory)
Returns
-------
string
A full path
"""
root = os.path.dirname(__file__)
root = os.path.dirname(root)
root = os.path.abspath(root)
return root | Get system root directory (application installed root directory)
Returns
-------
string
A full path | Below is the the instruction that describes the task:
### Input:
Get system root directory (application installed root directory)
Returns
-------
string
A full path
### Response:
def get_system_root_directory():
"""
Get system root directory (application installed root directory)
Returns
-------
string
A full path
"""
root = os.path.dirname(__file__)
root = os.path.dirname(root)
root = os.path.abspath(root)
return root |
def reset_all():
"""
Clear relevant globals to start fresh
:return:
"""
global _username
global _password
global _active_config
global _active_tests
global _machine_names
global _deployers
reset_deployers()
reset_collector()
_username = None
_password = None
_active_config = None
_active_tests = {}
_machine_names = defaultdict() | Clear relevant globals to start fresh
:return: | Below is the the instruction that describes the task:
### Input:
Clear relevant globals to start fresh
:return:
### Response:
def reset_all():
"""
Clear relevant globals to start fresh
:return:
"""
global _username
global _password
global _active_config
global _active_tests
global _machine_names
global _deployers
reset_deployers()
reset_collector()
_username = None
_password = None
_active_config = None
_active_tests = {}
_machine_names = defaultdict() |
def map_qubits(self: TSelf_PauliStringGateOperation,
qubit_map: Dict[raw_types.Qid, raw_types.Qid]
) -> TSelf_PauliStringGateOperation:
"""Return an equivalent operation on new qubits with its Pauli string
mapped to new qubits.
new_pauli_string = self.pauli_string.map_qubits(qubit_map)
""" | Return an equivalent operation on new qubits with its Pauli string
mapped to new qubits.
new_pauli_string = self.pauli_string.map_qubits(qubit_map) | Below is the the instruction that describes the task:
### Input:
Return an equivalent operation on new qubits with its Pauli string
mapped to new qubits.
new_pauli_string = self.pauli_string.map_qubits(qubit_map)
### Response:
def map_qubits(self: TSelf_PauliStringGateOperation,
qubit_map: Dict[raw_types.Qid, raw_types.Qid]
) -> TSelf_PauliStringGateOperation:
"""Return an equivalent operation on new qubits with its Pauli string
mapped to new qubits.
new_pauli_string = self.pauli_string.map_qubits(qubit_map)
""" |
def _compile_params(params, im1):
""" Compile the params dictionary:
* Combine parameters from different sources
* Perform checks to prevent non-compatible parameters
* Extend parameters that need a list with one element per dimension
"""
# Compile parameters
p = _get_fixed_params(im1) + get_advanced_params()
p = p + params
params = p.as_dict()
# Check parameter dimensions
if isinstance(im1, np.ndarray):
lt = (list, tuple)
for key in [ 'FinalGridSpacingInPhysicalUnits',
'FinalGridSpacingInVoxels' ]:
if key in params.keys() and not isinstance(params[key], lt):
params[key] = [params[key]] * im1.ndim
# Check parameter removal
if 'FinalGridSpacingInVoxels' in params:
if 'FinalGridSpacingInPhysicalUnits' in params:
params.pop('FinalGridSpacingInPhysicalUnits')
# Done
return params | Compile the params dictionary:
* Combine parameters from different sources
* Perform checks to prevent non-compatible parameters
* Extend parameters that need a list with one element per dimension | Below is the the instruction that describes the task:
### Input:
Compile the params dictionary:
* Combine parameters from different sources
* Perform checks to prevent non-compatible parameters
* Extend parameters that need a list with one element per dimension
### Response:
def _compile_params(params, im1):
""" Compile the params dictionary:
* Combine parameters from different sources
* Perform checks to prevent non-compatible parameters
* Extend parameters that need a list with one element per dimension
"""
# Compile parameters
p = _get_fixed_params(im1) + get_advanced_params()
p = p + params
params = p.as_dict()
# Check parameter dimensions
if isinstance(im1, np.ndarray):
lt = (list, tuple)
for key in [ 'FinalGridSpacingInPhysicalUnits',
'FinalGridSpacingInVoxels' ]:
if key in params.keys() and not isinstance(params[key], lt):
params[key] = [params[key]] * im1.ndim
# Check parameter removal
if 'FinalGridSpacingInVoxels' in params:
if 'FinalGridSpacingInPhysicalUnits' in params:
params.pop('FinalGridSpacingInPhysicalUnits')
# Done
return params |
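A standalone sketch (not library code) of the per-dimension expansion step described above: a scalar grid spacing is repeated once per image dimension.
import numpy as np

im1 = np.zeros((64, 64, 32))          # a 3-D image
params = {'FinalGridSpacingInPhysicalUnits': 16}
key = 'FinalGridSpacingInPhysicalUnits'
if key in params and not isinstance(params[key], (list, tuple)):
    params[key] = [params[key]] * im1.ndim
print(params[key])                    # -> [16, 16, 16]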
def _prepare_persistence_engine(self):
"""Load the specified persistence engine, or the default if none is
set.
"""
if self._persistence_engine:
return
persistence_engine = self._options.get('persistence_engine')
if persistence_engine:
self._persistence_engine = path_to_reference(persistence_engine)
return
from furious.config import get_default_persistence_engine
self._persistence_engine = get_default_persistence_engine() | Load the specified persistence engine, or the default if none is
set. | Below is the the instruction that describes the task:
### Input:
Load the specified persistence engine, or the default if none is
set.
### Response:
def _prepare_persistence_engine(self):
"""Load the specified persistence engine, or the default if none is
set.
"""
if self._persistence_engine:
return
persistence_engine = self._options.get('persistence_engine')
if persistence_engine:
self._persistence_engine = path_to_reference(persistence_engine)
return
from furious.config import get_default_persistence_engine
self._persistence_engine = get_default_persistence_engine() |
def dist(self, x1, x2):
"""Return the distance between ``x1`` and ``x2``.
Parameters
----------
x1, x2 : `LinearSpaceElement`
Elements whose distance to compute.
Returns
-------
dist : float
Distance between ``x1`` and ``x2``.
"""
if x1 not in self:
raise LinearSpaceTypeError('`x1` {!r} is not an element of '
'{!r}'.format(x1, self))
if x2 not in self:
raise LinearSpaceTypeError('`x2` {!r} is not an element of '
'{!r}'.format(x2, self))
return float(self._dist(x1, x2)) | Return the distance between ``x1`` and ``x2``.
Parameters
----------
x1, x2 : `LinearSpaceElement`
Elements whose distance to compute.
Returns
-------
dist : float
Distance between ``x1`` and ``x2``. | Below is the the instruction that describes the task:
### Input:
Return the distance between ``x1`` and ``x2``.
Parameters
----------
x1, x2 : `LinearSpaceElement`
Elements whose distance to compute.
Returns
-------
dist : float
Distance between ``x1`` and ``x2``.
### Response:
def dist(self, x1, x2):
"""Return the distance between ``x1`` and ``x2``.
Parameters
----------
x1, x2 : `LinearSpaceElement`
Elements whose distance to compute.
Returns
-------
dist : float
Distance between ``x1`` and ``x2``.
"""
if x1 not in self:
raise LinearSpaceTypeError('`x1` {!r} is not an element of '
'{!r}'.format(x1, self))
if x2 not in self:
raise LinearSpaceTypeError('`x2` {!r} is not an element of '
'{!r}'.format(x2, self))
return float(self._dist(x1, x2)) |
def rparents(self, level=-1, intermediate=True):
"""Create a recursive list of children.
Note that the :param:`intermediate` can be used to include every
parents to the returned list, not only the most nested ones.
Parameters:
level (int): The depth level to continue fetching parents from
(default is -1, to get parents to the utter depths)
intermediate (bool): Also include the intermediate parents
(default is True)
Returns:
:obj:`pronto.TermList`:
The recursive parents of the Term following the parameters
"""
try:
return self._rparents[(level, intermediate)]
except KeyError:
rparents = []
if self.parents and level:
if intermediate or level==1:
rparents.extend(self.parents)
for parent in self.parents:
rparents.extend(parent.rparents(level=level-1,
intermediate=intermediate))
rparents = TermList(unique_everseen(rparents))
self._rparents[(level, intermediate)] = rparents
return rparents | Create a recursive list of parents.
Note that the :param:`intermediate` can be used to include every
parent in the returned list, not only the most nested ones.
Parameters:
level (int): The depth level to continue fetching parents from
(default is -1, to get parents to the utter depths)
intermediate (bool): Also include the intermediate parents
(default is True)
Returns:
:obj:`pronto.TermList`:
The recursive parents of the Term following the parameters
### Input:
Create a recursive list of parents.
Note that the :param:`intermediate` can be used to include every
parent in the returned list, not only the most nested ones.
Parameters:
level (int): The depth level to continue fetching parents from
(default is -1, to get parents to the utter depths)
intermediate (bool): Also include the intermediate parents
(default is True)
Returns:
:obj:`pronto.TermList`:
The recursive parents of the Term following the parameters
### Response:
def rparents(self, level=-1, intermediate=True):
"""Create a recursive list of children.
Note that the :param:`intermediate` can be used to include every
parents to the returned list, not only the most nested ones.
Parameters:
level (int): The depth level to continue fetching parents from
(default is -1, to get parents to the utter depths)
intermediate (bool): Also include the intermediate parents
(default is True)
Returns:
:obj:`pronto.TermList`:
The recursive parents of the Term following the parameters
"""
try:
return self._rparents[(level, intermediate)]
except KeyError:
rparents = []
if self.parents and level:
if intermediate or level==1:
rparents.extend(self.parents)
for parent in self.parents:
rparents.extend(parent.rparents(level=level-1,
intermediate=intermediate))
rparents = TermList(unique_everseen(rparents))
self._rparents[(level, intermediate)] = rparents
return rparents |
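A hedged usage sketch of the method above; `term` is an illustrative name for an object exposing this `rparents` method (for example a pronto Term).
direct_parents = term.rparents(level=1)   # only the immediate parents
all_ancestors = term.rparents()           # default level=-1 walks to the top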
def poll(self):
"""
Perform a non-blocking scan of recv and send states on the server
and client connection sockets. Process new connection requests,
read incoming data, and send outgoing data. Sends and receives may
be partial.
"""
#print len(self.connections)
## Build a list of connections to test for receive data pending
recv_list = [self.server_fileno] # always add the server
for client in self.clients.values():
if client.active:
recv_list.append(client.fileno)
## Delete inactive connections from the dictionary
else:
#print "-- Lost connection to %s" % client.addrport()
#client.sock.close()
self.on_disconnect(client)
del self.clients[client.fileno]
## Build a list of connections that need to send data
send_list = []
for client in self.clients.values():
if client.send_pending:
send_list.append(client.fileno)
## Get active socket file descriptors from select.select()
try:
rlist, slist, elist = select.select(recv_list, send_list, [],
self.timeout)
except select.error, err:
## If we can't even use select(), game over man, game over
print >> sys.stderr, ("!! FATAL SELECT error '%d:%s'!"
% (err[0], err[1]))
sys.exit(1)
## Process socket file descriptors with data to receive
for sock_fileno in rlist:
## If it's coming from the server's socket then this is a new
## connection request.
if sock_fileno == self.server_fileno:
try:
sock, addr_tup = self.server_socket.accept()
except socket.error, err:
print >> sys.stderr, ("!! ACCEPT error '%d:%s'." %
(err[0], err[1]))
continue
## Check for maximum connections
if self.client_count() >= MAX_CONNECTIONS:
print '?? Refusing new connection; maximum in use.'
sock.close()
continue
new_client = TelnetClient(sock, addr_tup)
#print "++ Opened connection to %s" % new_client.addrport()
## Add the connection to our dictionary and call handler
self.clients[new_client.fileno] = new_client
self.on_connect(new_client)
else:
## Call the connection's receive method
try:
self.clients[sock_fileno].socket_recv()
except BogConnectionLost:
self.clients[sock_fileno].deactivate()
## Process sockets with data to send
for sock_fileno in slist:
## Call the connection's send method
self.clients[sock_fileno].socket_send() | Perform a non-blocking scan of recv and send states on the server
and client connection sockets. Process new connection requests,
read incoming data, and send outgoing data. Sends and receives may
be partial. | Below is the the instruction that describes the task:
### Input:
Perform a non-blocking scan of recv and send states on the server
and client connection sockets. Process new connection requests,
read incoming data, and send outgoing data. Sends and receives may
be partial.
### Response:
def poll(self):
"""
Perform a non-blocking scan of recv and send states on the server
and client connection sockets. Process new connection requests,
read incoming data, and send outgoing data. Sends and receives may
be partial.
"""
#print len(self.connections)
## Build a list of connections to test for receive data pending
recv_list = [self.server_fileno] # always add the server
for client in self.clients.values():
if client.active:
recv_list.append(client.fileno)
## Delete inactive connections from the dictionary
else:
#print "-- Lost connection to %s" % client.addrport()
#client.sock.close()
self.on_disconnect(client)
del self.clients[client.fileno]
## Build a list of connections that need to send data
send_list = []
for client in self.clients.values():
if client.send_pending:
send_list.append(client.fileno)
## Get active socket file descriptors from select.select()
try:
rlist, slist, elist = select.select(recv_list, send_list, [],
self.timeout)
except select.error, err:
## If we can't even use select(), game over man, game over
print >> sys.stderr, ("!! FATAL SELECT error '%d:%s'!"
% (err[0], err[1]))
sys.exit(1)
## Process socket file descriptors with data to receive
for sock_fileno in rlist:
## If it's coming from the server's socket then this is a new
## connection request.
if sock_fileno == self.server_fileno:
try:
sock, addr_tup = self.server_socket.accept()
except socket.error, err:
print >> sys.stderr, ("!! ACCEPT error '%d:%s'." %
(err[0], err[1]))
continue
## Check for maximum connections
if self.client_count() >= MAX_CONNECTIONS:
print '?? Refusing new connection; maximum in use.'
sock.close()
continue
new_client = TelnetClient(sock, addr_tup)
#print "++ Opened connection to %s" % new_client.addrport()
## Add the connection to our dictionary and call handler
self.clients[new_client.fileno] = new_client
self.on_connect(new_client)
else:
## Call the connection's receive method
try:
self.clients[sock_fileno].socket_recv()
except BogConnectionLost:
self.clients[sock_fileno].deactivate()
## Process sockets with data to send
for sock_fileno in slist:
## Call the connection's send method
self.clients[sock_fileno].socket_send() |
def sanitize_http_headers(client, event):
"""
Sanitizes http request/response headers
:param client: an ElasticAPM client
:param event: a transaction or error event
:return: The modified event
"""
# request headers
try:
headers = event["context"]["request"]["headers"]
event["context"]["request"]["headers"] = varmap(_sanitize, headers)
except (KeyError, TypeError):
pass
# response headers
try:
headers = event["context"]["response"]["headers"]
event["context"]["response"]["headers"] = varmap(_sanitize, headers)
except (KeyError, TypeError):
pass
return event | Sanitizes http request/response headers
:param client: an ElasticAPM client
:param event: a transaction or error event
:return: The modified event | Below is the the instruction that describes the task:
### Input:
Sanitizes http request/response headers
:param client: an ElasticAPM client
:param event: a transaction or error event
:return: The modified event
### Response:
def sanitize_http_headers(client, event):
"""
Sanitizes http request/response headers
:param client: an ElasticAPM client
:param event: a transaction or error event
:return: The modified event
"""
# request headers
try:
headers = event["context"]["request"]["headers"]
event["context"]["request"]["headers"] = varmap(_sanitize, headers)
except (KeyError, TypeError):
pass
# response headers
try:
headers = event["context"]["response"]["headers"]
event["context"]["response"]["headers"] = varmap(_sanitize, headers)
except (KeyError, TypeError):
pass
return event |
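A hedged usage sketch of the processor above; the event dict is illustrative, and since the function never dereferences its client argument, None is passed here.
event = {
    "context": {
        "request": {"headers": {"Authorization": "Bearer secret", "Accept": "application/json"}},
        "response": {"headers": {"Set-Cookie": "sessionid=abc123"}},
    }
}
cleaned = sanitize_http_headers(None, event)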
def CLASSDEF(self, node):
"""
Check names used in a class definition, including its decorators, base
classes, and the body of its definition. Additionally, add its name to
the current scope.
"""
for deco in node.decorator_list:
self.handleNode(deco, node)
for baseNode in node.bases:
self.handleNode(baseNode, node)
if not PY2:
for keywordNode in node.keywords:
self.handleNode(keywordNode, node)
self.pushScope(ClassScope)
# doctest does not process doctest within a doctest
# classes within classes are processed.
if (self.withDoctest and
not self._in_doctest() and
not isinstance(self.scope, FunctionScope)):
self.deferFunction(lambda: self.handleDoctests(node))
for stmt in node.body:
self.handleNode(stmt, node)
self.popScope()
self.addBinding(node, ClassDefinition(node.name, node)) | Check names used in a class definition, including its decorators, base
classes, and the body of its definition. Additionally, add its name to
the current scope. | Below is the the instruction that describes the task:
### Input:
Check names used in a class definition, including its decorators, base
classes, and the body of its definition. Additionally, add its name to
the current scope.
### Response:
def CLASSDEF(self, node):
"""
Check names used in a class definition, including its decorators, base
classes, and the body of its definition. Additionally, add its name to
the current scope.
"""
for deco in node.decorator_list:
self.handleNode(deco, node)
for baseNode in node.bases:
self.handleNode(baseNode, node)
if not PY2:
for keywordNode in node.keywords:
self.handleNode(keywordNode, node)
self.pushScope(ClassScope)
# doctest does not process doctest within a doctest
# classes within classes are processed.
if (self.withDoctest and
not self._in_doctest() and
not isinstance(self.scope, FunctionScope)):
self.deferFunction(lambda: self.handleDoctests(node))
for stmt in node.body:
self.handleNode(stmt, node)
self.popScope()
self.addBinding(node, ClassDefinition(node.name, node)) |
def _set_fabric_virtual_gateway(self, v, load=False):
"""
Setter method for fabric_virtual_gateway, mapped from YANG variable /router/fabric_virtual_gateway (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_fabric_virtual_gateway is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fabric_virtual_gateway() directly.
YANG Description: Fabric virtual gateway
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=fabric_virtual_gateway.fabric_virtual_gateway, is_container='container', presence=True, yang_name="fabric-virtual-gateway", rest_name="fabric-virtual-gateway", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Fabric virtual gateway', u'callpoint': u'AnycastGatewayGlobalConfig', u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'cli-mode-name': u'conf-router-fabric-virtual-gateway'}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fabric_virtual_gateway must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=fabric_virtual_gateway.fabric_virtual_gateway, is_container='container', presence=True, yang_name="fabric-virtual-gateway", rest_name="fabric-virtual-gateway", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Fabric virtual gateway', u'callpoint': u'AnycastGatewayGlobalConfig', u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'cli-mode-name': u'conf-router-fabric-virtual-gateway'}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='container', is_config=True)""",
})
self.__fabric_virtual_gateway = t
if hasattr(self, '_set'):
self._set() | Setter method for fabric_virtual_gateway, mapped from YANG variable /router/fabric_virtual_gateway (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_fabric_virtual_gateway is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fabric_virtual_gateway() directly.
YANG Description: Fabric virtual gateway | Below is the the instruction that describes the task:
### Input:
Setter method for fabric_virtual_gateway, mapped from YANG variable /router/fabric_virtual_gateway (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_fabric_virtual_gateway is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fabric_virtual_gateway() directly.
YANG Description: Fabric virtual gateway
### Response:
def _set_fabric_virtual_gateway(self, v, load=False):
"""
Setter method for fabric_virtual_gateway, mapped from YANG variable /router/fabric_virtual_gateway (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_fabric_virtual_gateway is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fabric_virtual_gateway() directly.
YANG Description: Fabric virtual gateway
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=fabric_virtual_gateway.fabric_virtual_gateway, is_container='container', presence=True, yang_name="fabric-virtual-gateway", rest_name="fabric-virtual-gateway", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Fabric virtual gateway', u'callpoint': u'AnycastGatewayGlobalConfig', u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'cli-mode-name': u'conf-router-fabric-virtual-gateway'}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fabric_virtual_gateway must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=fabric_virtual_gateway.fabric_virtual_gateway, is_container='container', presence=True, yang_name="fabric-virtual-gateway", rest_name="fabric-virtual-gateway", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Fabric virtual gateway', u'callpoint': u'AnycastGatewayGlobalConfig', u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'cli-mode-name': u'conf-router-fabric-virtual-gateway'}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='container', is_config=True)""",
})
self.__fabric_virtual_gateway = t
if hasattr(self, '_set'):
self._set() |
def analyze_traceback(tb, inspection_level=None, limit=None):
"""
Extract trace back information into a list of dictionaries.
:param tb: traceback
:return: list of dicts containing filepath, line, module, code, traceback level and source code for tracebacks
"""
info = []
tb_level = tb
extracted_tb = traceback.extract_tb(tb, limit=limit)
for ii, (filepath, line, module, code) in enumerate(extracted_tb):
func_source, func_lineno = inspect.getsourcelines(tb_level.tb_frame)
d = {"File": filepath,
"Error Line Number": line,
"Module": module,
"Error Line": code,
"Module Line Number": func_lineno,
"Custom Inspection": {},
"Source Code": ''}
if inspection_level is None or len(extracted_tb) - ii <= inspection_level:
# Perform advanced inspection on the last `inspection_level` tracebacks.
d['Source Code'] = ''.join(func_source)
d['Local Variables'] = get_local_references(tb_level)
d['Object Variables'] = get_object_references(tb_level, d['Source Code'])
tb_level = getattr(tb_level, 'tb_next', None)
info.append(d)
return info | Extract trace back information into a list of dictionaries.
:param tb: traceback
:return: list of dicts containing filepath, line, module, code, traceback level and source code for tracebacks | Below is the the instruction that describes the task:
### Input:
Extract trace back information into a list of dictionaries.
:param tb: traceback
:return: list of dicts containing filepath, line, module, code, traceback level and source code for tracebacks
### Response:
def analyze_traceback(tb, inspection_level=None, limit=None):
"""
Extract trace back information into a list of dictionaries.
:param tb: traceback
:return: list of dicts containing filepath, line, module, code, traceback level and source code for tracebacks
"""
info = []
tb_level = tb
extracted_tb = traceback.extract_tb(tb, limit=limit)
for ii, (filepath, line, module, code) in enumerate(extracted_tb):
func_source, func_lineno = inspect.getsourcelines(tb_level.tb_frame)
d = {"File": filepath,
"Error Line Number": line,
"Module": module,
"Error Line": code,
"Module Line Number": func_lineno,
"Custom Inspection": {},
"Source Code": ''}
if inspection_level is None or len(extracted_tb) - ii <= inspection_level:
# Perform advanced inspection on the last `inspection_level` tracebacks.
d['Source Code'] = ''.join(func_source)
d['Local Variables'] = get_local_references(tb_level)
d['Object Variables'] = get_object_references(tb_level, d['Source Code'])
tb_level = getattr(tb_level, 'tb_next', None)
info.append(d)
return info |
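A hedged usage sketch of the helper above, capturing a live traceback with the standard library; it assumes the surrounding module (which also defines get_local_references and get_object_references) is importable.
import sys

try:
    1 / 0
except ZeroDivisionError:
    tb = sys.exc_info()[2]
    frames = analyze_traceback(tb, inspection_level=1)
    print(frames[-1]["Error Line"])   # the line of code that raised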
def set_params(self, arg_params, aux_params):
"""Set parameter and aux values.
Parameters
----------
arg_params : list of NDArray
Source parameter arrays
aux_params : list of NDArray
Source aux arrays.
"""
for texec in self.execgrp.train_execs:
texec.copy_params_from(arg_params, aux_params) | Set parameter and aux values.
Parameters
----------
arg_params : list of NDArray
Source parameter arrays
aux_params : list of NDArray
Source aux arrays. | Below is the the instruction that describes the task:
### Input:
Set parameter and aux values.
Parameters
----------
arg_params : list of NDArray
Source parameter arrays
aux_params : list of NDArray
Source aux arrays.
### Response:
def set_params(self, arg_params, aux_params):
"""Set parameter and aux values.
Parameters
----------
arg_params : list of NDArray
Source parameter arrays
aux_params : list of NDArray
Source aux arrays.
"""
for texec in self.execgrp.train_execs:
texec.copy_params_from(arg_params, aux_params) |
def peukerdouglas(np, fel, streamSkeleton, workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run peuker-douglas function"""
fname = TauDEM.func_name('peukerdouglas')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': fel}, workingdir,
None,
{'-ss': streamSkeleton},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | Run peuker-douglas function | Below is the the instruction that describes the task:
### Input:
Run peuker-douglas function
### Response:
def peukerdouglas(np, fel, streamSkeleton, workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run peuker-douglas function"""
fname = TauDEM.func_name('peukerdouglas')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': fel}, workingdir,
None,
{'-ss': streamSkeleton},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) |
def _check_holiday_structure(self, times):
""" To check the structure of the HolidayClass
:param list times: years or months or days or number week
:rtype: None or Exception
:return: in the case of exception returns the exception
"""
if not isinstance(times, list):
raise TypeError("an list is required")
for time in times:
if not isinstance(time, tuple):
raise TypeError("a tuple is required")
if len(time) > 5:
raise TypeError("Target time takes at most 5 arguments"
" ('%d' given)" % len(time))
if len(time) < 5:
raise TypeError("Required argument '%s' (pos '%d')"
" not found" % (TIME_LABEL[len(time)], len(time)))
self._check_time_format(TIME_LABEL, time) | To check the structure of the HolidayClass
:param list times: years or months or days or number week
:rtype: None or Exception
:return: in the case of exception returns the exception | Below is the the instruction that describes the task:
### Input:
To check the structure of the HolidayClass
:param list times: years or months or days or number week
:rtype: None or Exception
:return: in the case of exception returns the exception
### Response:
def _check_holiday_structure(self, times):
""" To check the structure of the HolidayClass
:param list times: years or months or days or number week
:rtype: None or Exception
:return: in the case of exception returns the exception
"""
if not isinstance(times, list):
raise TypeError("an list is required")
for time in times:
if not isinstance(time, tuple):
raise TypeError("a tuple is required")
if len(time) > 5:
raise TypeError("Target time takes at most 5 arguments"
" ('%d' given)" % len(time))
if len(time) < 5:
raise TypeError("Required argument '%s' (pos '%d')"
" not found" % (TIME_LABEL[len(time)], len(time)))
self._check_time_format(TIME_LABEL, time) |
def uncache_zipdir(path):
"""Ensure that the importer caches dont have stale info for `path`"""
from zipimport import _zip_directory_cache as zdc
_uncache(path, zdc)
_uncache(path, sys.path_importer_cache) | Ensure that the importer caches don't have stale info for `path`
### Input:
Ensure that the importer caches don't have stale info for `path`
### Response:
def uncache_zipdir(path):
"""Ensure that the importer caches dont have stale info for `path`"""
from zipimport import _zip_directory_cache as zdc
_uncache(path, zdc)
_uncache(path, sys.path_importer_cache) |
def flatten_container(self, container):
"""
Accepts a kubernetes container and pulls out the nested values into the top level
"""
for names in ARG_MAP.values():
if names[TransformationTypes.KUBERNETES.value]['name'] and \
'.' in names[TransformationTypes.KUBERNETES.value]['name']:
kubernetes_dotted_name = names[TransformationTypes.KUBERNETES.value]['name']
parts = kubernetes_dotted_name.split('.')
result = lookup_nested_dict(container, *parts)
if result:
container[kubernetes_dotted_name] = result
return container | Accepts a kubernetes container and pulls out the nested values into the top level | Below is the the instruction that describes the task:
### Input:
Accepts a kubernetes container and pulls out the nested values into the top level
### Response:
def flatten_container(self, container):
"""
Accepts a kubernetes container and pulls out the nested values into the top level
"""
for names in ARG_MAP.values():
if names[TransformationTypes.KUBERNETES.value]['name'] and \
'.' in names[TransformationTypes.KUBERNETES.value]['name']:
kubernetes_dotted_name = names[TransformationTypes.KUBERNETES.value]['name']
parts = kubernetes_dotted_name.split('.')
result = lookup_nested_dict(container, *parts)
if result:
container[kubernetes_dotted_name] = result
return container |
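A standalone sketch (not the library's own helper) of the dotted-key flattening idea used above; lookup_nested_dict and the container are hypothetical stand-ins written for illustration.
def lookup_nested_dict(d, *keys):
    # Walk nested dicts; return None if any key is missing.
    for key in keys:
        if not isinstance(d, dict) or key not in d:
            return None
        d = d[key]
    return d

container = {"resources": {"limits": {"memory": "128Mi"}}}
dotted = "resources.limits.memory"
value = lookup_nested_dict(container, *dotted.split("."))
if value:
    container[dotted] = value
print(container[dotted])   # -> 128Mi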
def calc_time(self) -> None:
"""
Prints statistics about the total duration of recordings in the
corpus.
"""
def get_number_of_frames(feat_fns):
""" fns: A list of numpy files which contain a number of feature
frames. """
total = 0
for feat_fn in feat_fns:
num_frames = len(np.load(feat_fn))
total += num_frames
return total
def numframes_to_minutes(num_frames):
# TODO Assumes 10ms strides for the frames. This should generalize to
# different frame stride widths, as should feature preparation.
minutes = ((num_frames*10)/1000)/60
return minutes
total_frames = 0
train_fns = [train_fn[0] for train_fn in self.train_fns]
num_train_frames = get_number_of_frames(train_fns)
total_frames += num_train_frames
num_valid_frames = get_number_of_frames(self.corpus.get_valid_fns()[0])
total_frames += num_valid_frames
num_test_frames = get_number_of_frames(self.corpus.get_test_fns()[0])
total_frames += num_test_frames
print("Train duration: %0.3f" % numframes_to_minutes(num_train_frames))
print("Validation duration: %0.3f" % numframes_to_minutes(num_valid_frames))
print("Test duration: %0.3f" % numframes_to_minutes(num_test_frames))
print("Total duration: %0.3f" % numframes_to_minutes(total_frames)) | Prints statistics about the the total duration of recordings in the
corpus. | Below is the the instruction that describes the task:
### Input:
Prints statistics about the total duration of recordings in the
corpus.
### Response:
def calc_time(self) -> None:
"""
Prints statistics about the total duration of recordings in the
corpus.
"""
def get_number_of_frames(feat_fns):
""" fns: A list of numpy files which contain a number of feature
frames. """
total = 0
for feat_fn in feat_fns:
num_frames = len(np.load(feat_fn))
total += num_frames
return total
def numframes_to_minutes(num_frames):
# TODO Assumes 10ms strides for the frames. This should generalize to
# different frame stride widths, as should feature preparation.
minutes = ((num_frames*10)/1000)/60
return minutes
total_frames = 0
train_fns = [train_fn[0] for train_fn in self.train_fns]
num_train_frames = get_number_of_frames(train_fns)
total_frames += num_train_frames
num_valid_frames = get_number_of_frames(self.corpus.get_valid_fns()[0])
total_frames += num_valid_frames
num_test_frames = get_number_of_frames(self.corpus.get_test_fns()[0])
total_frames += num_test_frames
print("Train duration: %0.3f" % numframes_to_minutes(num_train_frames))
print("Validation duration: %0.3f" % numframes_to_minutes(num_valid_frames))
print("Test duration: %0.3f" % numframes_to_minutes(num_test_frames))
print("Total duration: %0.3f" % numframes_to_minutes(total_frames)) |
def get_suggested_saxis(magmoms):
"""
This method returns a suggested spin axis for a set of magmoms,
taking the largest magnetic moment as the reference. For calculations
with collinear spins, this would give a sensible saxis for a ncl
calculation.
:param magmoms: list of magmoms (Magmoms, scalars or vectors)
:return: np.ndarray of length 3
"""
# heuristic, will pick largest magmom as reference
# useful for creating collinear approximations of
# e.g. slightly canted magnetic structures
# for fully collinear structures, will return expected
# result
magmoms = [Magmom(magmom) for magmom in magmoms]
# filter only non-zero magmoms
magmoms = [magmom for magmom in magmoms if abs(magmom)]
magmoms.sort(reverse=True)
if len(magmoms) > 0:
return magmoms[0].get_00t_magmom_with_xyz_saxis().saxis
else:
return np.array([0, 0, 1], dtype="d") | This method returns a suggested spin axis for a set of magmoms,
taking the largest magnetic moment as the reference. For calculations
with collinear spins, this would give a sensible saxis for a ncl
calculation.
:param magmoms: list of magmoms (Magmoms, scalars or vectors)
:return: np.ndarray of length 3 | Below is the the instruction that describes the task:
### Input:
This method returns a suggested spin axis for a set of magmoms,
taking the largest magnetic moment as the reference. For calculations
with collinear spins, this would give a sensible saxis for a ncl
calculation.
:param magmoms: list of magmoms (Magmoms, scalars or vectors)
:return: np.ndarray of length 3
### Response:
def get_suggested_saxis(magmoms):
"""
This method returns a suggested spin axis for a set of magmoms,
taking the largest magnetic moment as the reference. For calculations
with collinear spins, this would give a sensible saxis for a ncl
calculation.
:param magmoms: list of magmoms (Magmoms, scalars or vectors)
:return: np.ndarray of length 3
"""
# heuristic, will pick largest magmom as reference
# useful for creating collinear approximations of
# e.g. slightly canted magnetic structures
# for fully collinear structures, will return expected
# result
magmoms = [Magmom(magmom) for magmom in magmoms]
# filter only non-zero magmoms
magmoms = [magmom for magmom in magmoms if abs(magmom)]
magmoms.sort(reverse=True)
if len(magmoms) > 0:
return magmoms[0].get_00t_magmom_with_xyz_saxis().saxis
else:
return np.array([0, 0, 1], dtype="d") |
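A hedged usage sketch of the function above, assuming pymatgen's Magmom and numpy are importable in the surrounding module; the moments are illustrative.
# Slightly canted moments: the largest one dominates the suggested axis.
magmoms = [[0.0, 0.2, 5.0], [0.0, -0.2, 5.0], [0.0, 0.0, 0.0]]
saxis = get_suggested_saxis(magmoms)
print(saxis)   # roughly the z direction for this example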
def _send_ping(self, dest_addr, payload):
"""
Send a single ICMPecho (ping) packet to the specified address.
The payload must be specified as a packed byte string. Note that its
length has to be divisible by 2 for this to work correctly.
"""
pkt_id = self._last_used_id
is_ipv6 = ':' in dest_addr
if is_ipv6:
self._ipv6_address_present = True
icmp_echo_request = _ICMPV6_ECHO_REQUEST
else:
icmp_echo_request = _ICMP_ECHO_REQUEST
# For checksum calculation we require a dummy header, with the checksum
# field set to zero. This header consists of:
# - ICMP type = 8 (v4) / 128 (v6) (unsigned byte)
# - ICMP code = 0 (unsigned byte)
# - checksum = 0 (unsigned short)
# - packet id (unsigned short)
# - sequence = 0 (unsigned short) This doesn't have to be 0.
dummy_header = bytearray(
struct.pack(_ICMP_HDR_PACK_FORMAT,
icmp_echo_request, 0, 0,
pkt_id, self.ident))
# Calculate the checksum over the combined dummy header and payload
checksum = self._checksum(dummy_header + payload)
# We can now create the real header, which contains the correct
# checksum. Need to make sure to convert checksum to network byte
# order.
real_header = bytearray(
struct.pack(_ICMP_HDR_PACK_FORMAT,
icmp_echo_request, 0, checksum,
pkt_id, self.ident))
# Full packet consists of header plus payload
full_pkt = real_header + payload
# The full address for a sendto operation consists of the IP address
# and a port. We don't really need a port for ICMP, so we just use 0
# for that.
full_dest_addr = (dest_addr, 0)
if is_ipv6:
socket.inet_pton(socket.AF_INET6, dest_addr)
try:
self._sock6.sendto(full_pkt, full_dest_addr)
except Exception:
# on systems without IPv6 connectivity, sendto will fail with
# 'No route to host'
pass
else:
self._sock.sendto(full_pkt, full_dest_addr) | Send a single ICMPecho (ping) packet to the specified address.
The payload must be specified as a packed byte string. Note that its
length has to be divisible by 2 for this to work correctly. | Below is the the instruction that describes the task:
### Input:
Send a single ICMPecho (ping) packet to the specified address.
The payload must be specified as a packed byte string. Note that its
length has to be divisible by 2 for this to work correctly.
### Response:
def _send_ping(self, dest_addr, payload):
"""
Send a single ICMPecho (ping) packet to the specified address.
The payload must be specified as a packed byte string. Note that its
length has to be divisible by 2 for this to work correctly.
"""
pkt_id = self._last_used_id
is_ipv6 = ':' in dest_addr
if is_ipv6:
self._ipv6_address_present = True
icmp_echo_request = _ICMPV6_ECHO_REQUEST
else:
icmp_echo_request = _ICMP_ECHO_REQUEST
# For checksum calculation we require a dummy header, with the checksum
# field set to zero. This header consists of:
# - ICMP type = 8 (v4) / 128 (v6) (unsigned byte)
# - ICMP code = 0 (unsigned byte)
# - checksum = 0 (unsigned short)
# - packet id (unsigned short)
# - sequence = 0 (unsigned short) This doesn't have to be 0.
dummy_header = bytearray(
struct.pack(_ICMP_HDR_PACK_FORMAT,
icmp_echo_request, 0, 0,
pkt_id, self.ident))
# Calculate the checksum over the combined dummy header and payload
checksum = self._checksum(dummy_header + payload)
# We can now create the real header, which contains the correct
# checksum. Need to make sure to convert checksum to network byte
# order.
real_header = bytearray(
struct.pack(_ICMP_HDR_PACK_FORMAT,
icmp_echo_request, 0, checksum,
pkt_id, self.ident))
# Full packet consists of header plus payload
full_pkt = real_header + payload
# The full address for a sendto operation consists of the IP address
# and a port. We don't really need a port for ICMP, so we just use 0
# for that.
full_dest_addr = (dest_addr, 0)
if is_ipv6:
socket.inet_pton(socket.AF_INET6, dest_addr)
try:
self._sock6.sendto(full_pkt, full_dest_addr)
except Exception:
# on systems without IPv6 connectivity, sendto will fail with
# 'No route to host'
pass
else:
self._sock.sendto(full_pkt, full_dest_addr) |
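A standalone sketch of the dummy-header layout described above; the "!BBHHH" pack format and the constant values are assumptions for illustration and may differ from the module's actual _ICMP_HDR_PACK_FORMAT.
import struct

ICMP_ECHO_REQUEST = 8            # assumed value of the v4 echo-request type
pkt_id, sequence = 0x1234, 0
# type, code, checksum (zeroed for the checksum pass), id, sequence
dummy_header = bytearray(struct.pack("!BBHHH", ICMP_ECHO_REQUEST, 0, 0, pkt_id, sequence))
print(len(dummy_header))         # -> 8 bytes of ICMP header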
async def get_version(self, timeout: int = 15) -> Optional[str]:
"""Execute FFmpeg process and parse the version information.
Return full FFmpeg version string. Such as 3.4.2-tessus
"""
command = ["-version"]
# open input for capture 1 frame
is_open = await self.open(cmd=command, input_source=None, output="")
# error after open?
if not is_open:
_LOGGER.warning("Error starting FFmpeg.")
return
# read output
try:
proc_func = functools.partial(self._proc.communicate, timeout=timeout)
output, _ = await self._loop.run_in_executor(None, proc_func)
result = re.search(r"ffmpeg version (\S*)", output.decode())
if result is not None:
return result.group(1)
except (subprocess.TimeoutExpired, ValueError):
_LOGGER.warning("Timeout reading stdout.")
self.kill()
return None | Execute FFmpeg process and parse the version information.
Return full FFmpeg version string. Such as 3.4.2-tessus | Below is the the instruction that describes the task:
### Input:
Execute FFmpeg process and parse the version information.
Return full FFmpeg version string. Such as 3.4.2-tessus
### Response:
async def get_version(self, timeout: int = 15) -> Optional[str]:
"""Execute FFmpeg process and parse the version information.
Return full FFmpeg version string. Such as 3.4.2-tessus
"""
command = ["-version"]
# open input for capture 1 frame
is_open = await self.open(cmd=command, input_source=None, output="")
# error after open?
if not is_open:
_LOGGER.warning("Error starting FFmpeg.")
return
# read output
try:
proc_func = functools.partial(self._proc.communicate, timeout=timeout)
output, _ = await self._loop.run_in_executor(None, proc_func)
result = re.search(r"ffmpeg version (\S*)", output.decode())
if result is not None:
return result.group(1)
except (subprocess.TimeoutExpired, ValueError):
_LOGGER.warning("Timeout reading stdout.")
self.kill()
return None |
def get_path():
"""
Shortcut for users whose theme is next to their conf.py.
"""
# Theme directory is defined as our parent directory
return os.path.abspath(os.path.dirname(os.path.dirname(__file__))) | Shortcut for users whose theme is next to their conf.py. | Below is the the instruction that describes the task:
### Input:
Shortcut for users whose theme is next to their conf.py.
### Response:
def get_path():
"""
Shortcut for users whose theme is next to their conf.py.
"""
# Theme directory is defined as our parent directory
return os.path.abspath(os.path.dirname(os.path.dirname(__file__))) |
def read_sis_ini(fh, byteorder, dtype, count, offsetsize):
"""Read OlympusSIS INI string and return as dict."""
inistr = fh.read(count)
inistr = bytes2str(stripnull(inistr))
try:
return olympusini_metadata(inistr)
except Exception as exc:
log.warning('olympusini_metadata: %s: %s', exc.__class__.__name__, exc)
return {} | Read OlympusSIS INI string and return as dict. | Below is the the instruction that describes the task:
### Input:
Read OlympusSIS INI string and return as dict.
### Response:
def read_sis_ini(fh, byteorder, dtype, count, offsetsize):
"""Read OlympusSIS INI string and return as dict."""
inistr = fh.read(count)
inistr = bytes2str(stripnull(inistr))
try:
return olympusini_metadata(inistr)
except Exception as exc:
log.warning('olympusini_metadata: %s: %s', exc.__class__.__name__, exc)
return {} |
def clear_product(self, standard, key):
"""
Clear product information
For details, please refer to
http://mp.weixin.qq.com/wiki/15/7fa787701295b884410b5163e13313af.html
:param standard: product code standard
:param key: product code content
:return: the returned JSON data packet
"""
data = {
'keystandard': standard,
'keystr': key,
}
return self._post('product/clear', data=data) | Clear product information
For details, please refer to
http://mp.weixin.qq.com/wiki/15/7fa787701295b884410b5163e13313af.html
:param standard: product code standard
:param key: product code content
:return: the returned JSON data packet
### Input:
Clear product information
For details, please refer to
http://mp.weixin.qq.com/wiki/15/7fa787701295b884410b5163e13313af.html
:param standard: product code standard
:param key: product code content
:return: the returned JSON data packet
### Response:
def clear_product(self, standard, key):
"""
Clear product information
For details, please refer to
http://mp.weixin.qq.com/wiki/15/7fa787701295b884410b5163e13313af.html
:param standard: product code standard
:param key: product code content
:return: the returned JSON data packet
"""
data = {
'keystandard': standard,
'keystr': key,
}
return self._post('product/clear', data=data) |
def add_listener(self, callback, event_type=None):
"""Add a callback handler for events going to this room.
Args:
callback (func(room, event)): Callback called when an event arrives.
event_type (str): The event_type to filter for.
Returns:
uuid.UUID: Unique id of the listener, can be used to identify the listener.
"""
listener_id = uuid4()
self.listeners.append(
{
'uid': listener_id,
'callback': callback,
'event_type': event_type
}
)
return listener_id | Add a callback handler for events going to this room.
Args:
callback (func(room, event)): Callback called when an event arrives.
event_type (str): The event_type to filter for.
Returns:
uuid.UUID: Unique id of the listener, can be used to identify the listener. | Below is the the instruction that describes the task:
### Input:
Add a callback handler for events going to this room.
Args:
callback (func(room, event)): Callback called when an event arrives.
event_type (str): The event_type to filter for.
Returns:
uuid.UUID: Unique id of the listener, can be used to identify the listener.
### Response:
def add_listener(self, callback, event_type=None):
"""Add a callback handler for events going to this room.
Args:
callback (func(room, event)): Callback called when an event arrives.
event_type (str): The event_type to filter for.
Returns:
uuid.UUID: Unique id of the listener, can be used to identify the listener.
"""
listener_id = uuid4()
self.listeners.append(
{
'uid': listener_id,
'callback': callback,
'event_type': event_type
}
)
return listener_id |
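A hedged usage sketch of the method above; `room` is an illustrative name for an instance of the surrounding class, and the callback and event type are examples only.
def on_message(room, event):
    print("new event in", room, ":", event)

listener_id = room.add_listener(on_message, event_type="m.room.message")
# listener_id can later be used to identify or remove this listener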
def H_acceptor_count(mol):
"""Hydrogen bond acceptor count """
mol.require("Valence")
return sum(1 for _, a in mol.atoms_iter() if a.H_acceptor) | Hydrogen bond acceptor count | Below is the the instruction that describes the task:
### Input:
Hydrogen bond acceptor count
### Response:
def H_acceptor_count(mol):
"""Hydrogen bond acceptor count """
mol.require("Valence")
return sum(1 for _, a in mol.atoms_iter() if a.H_acceptor) |
def listBlocksParents(self):
"""
API to list block parents of multiple blocks. To be called by blockparents url with post call.
:param block_names: list of block names [block_name1, block_name2, ...] (Required). Max length 1000.
:type block_names: list
"""
try :
body = request.body.read()
data = cjson.decode(body)
data = validateJSONInputNoCopy("block", data, read=True)
#Because CMSWEB has a 300-second response time, we have to limit the array size to make sure that
#the API can be finished in 300 seconds.
# YG Nov-05-2015
max_array_size = 1000
if ( 'block_names' in data.keys() and isinstance(data['block_names'], list) and len(data['block_names'])>max_array_size):
dbsExceptionHandler("dbsException-invalid-input",
"The Max list length supported in listBlocksParents is %s." %max_array_size, self.logger.exception)
return self.dbsBlock.listBlockParents(data["block_name"])
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except cjson.DecodeError as de:
sError = "DBSReaderModel/listBlockParents. %s\n. Exception trace: \n %s" \
% (de, traceback.format_exc())
msg = "DBSReaderModel/listBlockParents. %s" % de
dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, sError)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSReaderModel/listBlockParents. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | API to list block parents of multiple blocks. To be called by blockparents url with post call.
:param block_names: list of block names [block_name1, block_name2, ...] (Required). Max length 1000.
:type block_names: list | Below is the the instruction that describes the task:
### Input:
API to list block parents of multiple blocks. To be called by blockparents url with post call.
:param block_names: list of block names [block_name1, block_name2, ...] (Required). Max length 1000.
:type block_names: list
### Response:
def listBlocksParents(self):
"""
API to list block parents of multiple blocks. To be called by blockparents url with post call.
:param block_names: list of block names [block_name1, block_name2, ...] (Required). Max length 1000.
:type block_names: list
"""
try :
body = request.body.read()
data = cjson.decode(body)
data = validateJSONInputNoCopy("block", data, read=True)
#Because CMSWEB has a 300-second response time, we have to limit the array size to make sure that
#the API can be finished in 300 seconds.
# YG Nov-05-2015
max_array_size = 1000
if ( 'block_names' in data.keys() and isinstance(data['block_names'], list) and len(data['block_names'])>max_array_size):
dbsExceptionHandler("dbsException-invalid-input",
"The Max list length supported in listBlocksParents is %s." %max_array_size, self.logger.exception)
return self.dbsBlock.listBlockParents(data["block_name"])
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except cjson.DecodeError as de:
sError = "DBSReaderModel/listBlockParents. %s\n. Exception trace: \n %s" \
% (de, traceback.format_exc())
msg = "DBSReaderModel/listBlockParents. %s" % de
dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, sError)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSReaderModel/listBlockParents. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) |
def color(self):
"""
The |ColorFormat| instance that provides access to the color settings
for this line. Essentially a shortcut for ``line.fill.fore_color``.
As a side-effect, accessing this property causes the line fill type
to be set to ``MSO_FILL.SOLID``. If this sounds risky for your use
case, use ``line.fill.type`` to non-destructively discover the
existing fill type.
"""
if self.fill.type != MSO_FILL.SOLID:
self.fill.solid()
return self.fill.fore_color | The |ColorFormat| instance that provides access to the color settings
for this line. Essentially a shortcut for ``line.fill.fore_color``.
As a side-effect, accessing this property causes the line fill type
to be set to ``MSO_FILL.SOLID``. If this sounds risky for your use
case, use ``line.fill.type`` to non-destructively discover the
existing fill type. | Below is the the instruction that describes the task:
### Input:
The |ColorFormat| instance that provides access to the color settings
for this line. Essentially a shortcut for ``line.fill.fore_color``.
As a side-effect, accessing this property causes the line fill type
to be set to ``MSO_FILL.SOLID``. If this sounds risky for your use
case, use ``line.fill.type`` to non-destructively discover the
existing fill type.
### Response:
def color(self):
"""
The |ColorFormat| instance that provides access to the color settings
for this line. Essentially a shortcut for ``line.fill.fore_color``.
As a side-effect, accessing this property causes the line fill type
to be set to ``MSO_FILL.SOLID``. If this sounds risky for your use
case, use ``line.fill.type`` to non-destructively discover the
existing fill type.
"""
if self.fill.type != MSO_FILL.SOLID:
self.fill.solid()
return self.fill.fore_color |
def score(infile, outfile, classifier, xgb_autotune, apply_weights, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test):
"""
Conduct semi-supervised learning and error-rate estimation for MS1, MS2 and transition-level data.
"""
if outfile is None:
outfile = infile
else:
outfile = outfile
# Prepare XGBoost-specific parameters
xgb_hyperparams = {'autotune': xgb_autotune, 'autotune_num_rounds': 10, 'num_boost_round': 100, 'early_stopping_rounds': 10, 'test_size': 0.33}
xgb_params = {'eta': 0.3, 'gamma': 0, 'max_depth': 6, 'min_child_weight': 1, 'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1, 'lambda': 1, 'alpha': 0, 'scale_pos_weight': 1, 'silent': 1, 'objective': 'binary:logitraw', 'nthread': 1, 'eval_metric': 'auc'}
xgb_params_space = {'eta': hp.uniform('eta', 0.0, 0.3), 'gamma': hp.uniform('gamma', 0.0, 0.5), 'max_depth': hp.quniform('max_depth', 2, 8, 1), 'min_child_weight': hp.quniform('min_child_weight', 1, 5, 1), 'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1, 'lambda': hp.uniform('lambda', 0.0, 1.0), 'alpha': hp.uniform('alpha', 0.0, 1.0), 'scale_pos_weight': 1.0, 'silent': 1, 'objective': 'binary:logitraw', 'nthread': 1, 'eval_metric': 'auc'}
if not apply_weights:
PyProphetLearner(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test).run()
else:
PyProphetWeightApplier(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, apply_weights).run() | Conduct semi-supervised learning and error-rate estimation for MS1, MS2 and transition-level data. | Below is the the instruction that describes the task:
### Input:
Conduct semi-supervised learning and error-rate estimation for MS1, MS2 and transition-level data.
### Response:
def score(infile, outfile, classifier, xgb_autotune, apply_weights, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test):
"""
Conduct semi-supervised learning and error-rate estimation for MS1, MS2 and transition-level data.
"""
if outfile is None:
outfile = infile
else:
outfile = outfile
# Prepare XGBoost-specific parameters
xgb_hyperparams = {'autotune': xgb_autotune, 'autotune_num_rounds': 10, 'num_boost_round': 100, 'early_stopping_rounds': 10, 'test_size': 0.33}
xgb_params = {'eta': 0.3, 'gamma': 0, 'max_depth': 6, 'min_child_weight': 1, 'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1, 'lambda': 1, 'alpha': 0, 'scale_pos_weight': 1, 'silent': 1, 'objective': 'binary:logitraw', 'nthread': 1, 'eval_metric': 'auc'}
xgb_params_space = {'eta': hp.uniform('eta', 0.0, 0.3), 'gamma': hp.uniform('gamma', 0.0, 0.5), 'max_depth': hp.quniform('max_depth', 2, 8, 1), 'min_child_weight': hp.quniform('min_child_weight', 1, 5, 1), 'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1, 'lambda': hp.uniform('lambda', 0.0, 1.0), 'alpha': hp.uniform('alpha', 0.0, 1.0), 'scale_pos_weight': 1.0, 'silent': 1, 'objective': 'binary:logitraw', 'nthread': 1, 'eval_metric': 'auc'}
if not apply_weights:
PyProphetLearner(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test).run()
else:
PyProphetWeightApplier(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, apply_weights).run() |
def _create_w_objective(m, X, R):
"""
Creates an objective function and its derivative for W, given M and X (data)
Args:
m (array): genes x clusters
X (array): genes x cells
R (array): 1 x genes
"""
genes, clusters = m.shape
cells = X.shape[1]
R1 = R.reshape((genes, 1)).dot(np.ones((1, cells)))
def objective(w):
# convert w into a matrix first... because it's a vector for
# optimization purposes
w = w.reshape((m.shape[1], X.shape[1]))
d = m.dot(w)+eps
return np.sum((X + R1)*np.log(d + R1) - X*np.log(d))/genes
def deriv(w):
# derivative of objective wrt all elements of w
# for w_{ij}, the derivative is... m_j1+...+m_jn sum over genes minus
# x_ij
w2 = w.reshape((m.shape[1], X.shape[1]))
d = m.dot(w2)+eps
temp = X/d
temp2 = (X+R1)/(d+R1)
m1 = m.T.dot(temp2)
m2 = m.T.dot(temp)
deriv = m1 - m2
return deriv.flatten()/genes
return objective, deriv | Creates an objective function and its derivative for W, given M and X (data)
Args:
m (array): genes x clusters
X (array): genes x cells
R (array): 1 x genes | Below is the the instruction that describes the task:
### Input:
Creates an objective function and its derivative for W, given M and X (data)
Args:
m (array): genes x clusters
X (array): genes x cells
R (array): 1 x genes
### Response:
def _create_w_objective(m, X, R):
"""
Creates an objective function and its derivative for W, given M and X (data)
Args:
m (array): genes x clusters
X (array): genes x cells
R (array): 1 x genes
"""
genes, clusters = m.shape
cells = X.shape[1]
R1 = R.reshape((genes, 1)).dot(np.ones((1, cells)))
def objective(w):
# convert w into a matrix first... because it's a vector for
# optimization purposes
w = w.reshape((m.shape[1], X.shape[1]))
d = m.dot(w)+eps
return np.sum((X + R1)*np.log(d + R1) - X*np.log(d))/genes
def deriv(w):
# derivative of objective wrt all elements of w
# for w_{ij}, the derivative is... m_j1+...+m_jn sum over genes minus
# x_ij
w2 = w.reshape((m.shape[1], X.shape[1]))
d = m.dot(w2)+eps
temp = X/d
temp2 = (X+R1)/(d+R1)
m1 = m.T.dot(temp2)
m2 = m.T.dot(temp)
deriv = m1 - m2
return deriv.flatten()/genes
return objective, deriv |
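A minimal sketch showing how the returned (objective, deriv) pair can be handed to a gradient-based optimizer. The random data and the non-negativity bounds are assumptions for illustration only, and the function relies on a small module-level eps constant defined alongside it in its source module:

import numpy as np
from scipy.optimize import minimize

genes, clusters, cells = 50, 3, 20
m = np.random.rand(genes, clusters)            # genes x clusters
X = np.random.poisson(5.0, (genes, cells))     # genes x cells counts
R = np.random.rand(genes) + 1.0                # per-gene dispersion-like term
objective, deriv = _create_w_objective(m, X, R)
w0 = np.random.rand(clusters * cells)          # flattened initial guess
res = minimize(objective, w0, jac=deriv, method='L-BFGS-B',
               bounds=[(0.0, None)] * w0.size)
W = res.x.reshape((clusters, cells))           # recovered weight matrix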
def get_cache(self, decorated_function, *args, **kwargs):
""" :meth:`WCacheStorage.get_cache` method implementation
"""
self.__check(decorated_function, *args, **kwargs)
if decorated_function in self._storage:
for i in self._storage[decorated_function]:
if i['instance']() == args[0]:
result = i['result'].cache_entry(*args, **kwargs)
if self.__statistic is True:
if result.has_value is True:
self.__cache_hit += 1
else:
self.__cache_missed += 1
return result
if self.__statistic is True:
self.__cache_missed += 1
return WCacheStorage.CacheEntry() | :meth:`WCacheStorage.get_cache` method implementation | Below is the the instruction that describes the task:
### Input:
:meth:`WCacheStorage.get_cache` method implementation
### Response:
def get_cache(self, decorated_function, *args, **kwargs):
""" :meth:`WCacheStorage.get_cache` method implementation
"""
self.__check(decorated_function, *args, **kwargs)
if decorated_function in self._storage:
for i in self._storage[decorated_function]:
if i['instance']() == args[0]:
result = i['result'].cache_entry(*args, **kwargs)
if self.__statistic is True:
if result.has_value is True:
self.__cache_hit += 1
else:
self.__cache_missed += 1
return result
if self.__statistic is True:
self.__cache_missed += 1
return WCacheStorage.CacheEntry() |
def sync(self, graph_commons):
"""Synchronize local and remote representations."""
if self['id'] is None:
return
remote_graph = graph_commons.graphs(self['id'])
# TODO: less forceful, more elegant
self.edges = remote_graph.edges
self.nodes = remote_graph.nodes
self.node_types = remote_graph.node_types
self.edge_types = remote_graph.edge_types
self._edges = dict((edge['id'], edge) for edge in self.edges)
self._nodes = dict((node['id'], node) for node in self.nodes)
self._node_types = dict((t['id'], t) for t in self.node_types)
self._edge_types = dict((t['id'], t) for t in self.edge_types) | Synchronize local and remote representations. | Below is the the instruction that describes the task:
### Input:
Synchronize local and remote representations.
### Response:
def sync(self, graph_commons):
"""Synchronize local and remote representations."""
if self['id'] is None:
return
remote_graph = graph_commons.graphs(self['id'])
# TODO: less forceful, more elegant
self.edges = remote_graph.edges
self.nodes = remote_graph.nodes
self.node_types = remote_graph.node_types
self.edge_types = remote_graph.edge_types
self._edges = dict((edge['id'], edge) for edge in self.edges)
self._nodes = dict((node['id'], node) for node in self.nodes)
self._node_types = dict((t['id'], t) for t in self.node_types)
self._edge_types = dict((t['id'], t) for t in self.edge_types) |
def get_stanza(self, peer_jid):
"""
Return the last presence received for the given bare or full
`peer_jid`. If the last presence was unavailable, the return value is
:data:`None`, as if no presence was ever received.
If no presence was ever received for the given bare JID, :data:`None`
is returned.
"""
try:
return self._presences[peer_jid.bare()][peer_jid.resource]
except KeyError:
pass
try:
return self._presences[peer_jid.bare()][None]
except KeyError:
pass | Return the last presence received for the given bare or full
`peer_jid`. If the last presence was unavailable, the return value is
:data:`None`, as if no presence was ever received.
If no presence was ever received for the given bare JID, :data:`None`
is returned. | Below is the the instruction that describes the task:
### Input:
Return the last presence received for the given bare or full
`peer_jid`. If the last presence was unavailable, the return value is
:data:`None`, as if no presence was ever received.
If no presence was ever received for the given bare JID, :data:`None`
is returned.
### Response:
def get_stanza(self, peer_jid):
"""
Return the last presence received for the given bare or full
`peer_jid`. If the last presence was unavailable, the return value is
:data:`None`, as if no presence was ever received.
If no presence was ever received for the given bare JID, :data:`None`
is returned.
"""
try:
return self._presences[peer_jid.bare()][peer_jid.resource]
except KeyError:
pass
try:
return self._presences[peer_jid.bare()][None]
except KeyError:
pass |
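A short usage sketch, assuming this method belongs to aioxmpp's PresenceClient service; the client setup and JID are schematic and not taken from the source:

import aioxmpp

async def show_presence(client):
    # Summon the presence-tracking service and look up the last stanza for a full JID.
    presence = client.summon(aioxmpp.PresenceClient)
    peer = aioxmpp.JID.fromstr("[email protected]/laptop")
    stanza = presence.get_stanza(peer)
    if stanza is None:
        print("peer is unavailable or was never seen")
    else:
        print("last presence show value:", stanza.show)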
def wrap_iter_with_message_events(
request_or_response_iter,
span,
message_event_type
):
"""Wraps a request or response iterator to add message events to the span
for each proto message sent or received
"""
for message_id, message in enumerate(request_or_response_iter, start=1):
add_message_event(
proto_message=message,
span=span,
message_event_type=message_event_type,
message_id=message_id
)
yield message | Wraps a request or response iterator to add message events to the span
for each proto message sent or received | Below is the the instruction that describes the task:
### Input:
Wraps a request or response iterator to add message events to the span
for each proto message sent or received
### Response:
def wrap_iter_with_message_events(
request_or_response_iter,
span,
message_event_type
):
"""Wraps a request or response iterator to add message events to the span
for each proto message sent or received
"""
for message_id, message in enumerate(request_or_response_iter, start=1):
add_message_event(
proto_message=message,
span=span,
message_event_type=message_event_type,
message_id=message_id
)
yield message |
def idmap_get_max_id(connection, id_class):
"""
Given an ilwd:char ID class, return the highest ID from the table
for whose IDs that is the class.
Example:
>>> event_id = ilwd.ilwdchar("sngl_burst:event_id:0")
>>> print event_id
sngl_inspiral:event_id:0
>>> max_id = get_max_id(connection, type(event_id))
>>> print max_id
sngl_inspiral:event_id:1054
"""
cursor = connection.cursor()
cursor.execute("SELECT MAX(CAST(SUBSTR(%s, %d, 10) AS INTEGER)) FROM %s" % (id_class.column_name, id_class.index_offset + 1, id_class.table_name))
maxid = cursor.fetchone()[0]
cursor.close()
if maxid is None:
return None
return id_class(maxid) | Given an ilwd:char ID class, return the highest ID from the table
for whose IDs that is the class.
Example:
>>> event_id = ilwd.ilwdchar("sngl_burst:event_id:0")
>>> print event_id
sngl_inspiral:event_id:0
>>> max_id = get_max_id(connection, type(event_id))
>>> print max_id
sngl_inspiral:event_id:1054 | Below is the the instruction that describes the task:
### Input:
Given an ilwd:char ID class, return the highest ID from the table
for whose IDs that is the class.
Example:
>>> event_id = ilwd.ilwdchar("sngl_burst:event_id:0")
>>> print event_id
sngl_inspiral:event_id:0
>>> max_id = get_max_id(connection, type(event_id))
>>> print max_id
sngl_inspiral:event_id:1054
### Response:
def idmap_get_max_id(connection, id_class):
"""
Given an ilwd:char ID class, return the highest ID from the table
for whose IDs that is the class.
Example:
>>> event_id = ilwd.ilwdchar("sngl_burst:event_id:0")
>>> print event_id
sngl_inspiral:event_id:0
>>> max_id = get_max_id(connection, type(event_id))
>>> print max_id
sngl_inspiral:event_id:1054
"""
cursor = connection.cursor()
cursor.execute("SELECT MAX(CAST(SUBSTR(%s, %d, 10) AS INTEGER)) FROM %s" % (id_class.column_name, id_class.index_offset + 1, id_class.table_name))
maxid = cursor.fetchone()[0]
cursor.close()
if maxid is None:
return None
return id_class(maxid) |
def encode(self, name, as_map_key=False):
"""Returns the name the first time and the key after that"""
if name in self.key_to_value:
return self.key_to_value[name]
return self.encache(name) if is_cacheable(name, as_map_key) else name | Returns the name the first time and the key after that | Below is the the instruction that describes the task:
### Input:
Returns the name the first time and the key after that
### Response:
def encode(self, name, as_map_key=False):
"""Returns the name the first time and the key after that"""
if name in self.key_to_value:
return self.key_to_value[name]
return self.encache(name) if is_cacheable(name, as_map_key) else name |
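A behavioural sketch, assuming this encode method lives on transit-python's RollingCache (the import path and the exact cache-code format are assumptions): the first occurrence of a cacheable string is returned verbatim and cached, and later occurrences come back as a short cache key instead.

from transit.rolling_cache import RollingCache

cache = RollingCache()
first = cache.encode("~:frequently-used-keyword", as_map_key=True)   # returned as-is and cached
second = cache.encode("~:frequently-used-keyword", as_map_key=True)  # short cache key, e.g. "^0"
print(first, second)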
def import_tree_corpus(labels_path, parents_path, texts_path):
"""
Import dataset from the TreeLSTM data generation scripts.
Arguments:
----------
labels_path : str, where labels are stored (should be in
data/sst/labels.txt).
parents_path : str, where the parent relationships are stored
(should be in data/sst/parents.txt).
texts_path : str, where strings for each tree are stored
(should be in data/sst/sents.txt).
Returns:
--------
list<LabeledTree> : loaded example trees.
"""
with codecs.open(labels_path, "r", "UTF-8") as f:
label_lines = f.readlines()
with codecs.open(parents_path, "r", "UTF-8") as f:
parent_lines = f.readlines()
with codecs.open(texts_path, "r", "UTF-8") as f:
word_lines = f.readlines()
assert len(label_lines) == len(parent_lines)
assert len(label_lines) == len(word_lines)
trees = []
for labels, parents, words in zip(label_lines, parent_lines, word_lines):
labels = [int(l) + 2 for l in labels.strip().split(" ")]
parents = [int(l) for l in parents.strip().split(" ")]
words = words.strip().split(" ")
assert len(labels) == len(parents)
trees.append(read_tree(parents, labels, words))
return trees | Import dataset from the TreeLSTM data generation scripts.
Arguments:
----------
labels_path : str, where labels are stored (should be in
data/sst/labels.txt).
parents_path : str, where the parent relationships are stored
(should be in data/sst/parents.txt).
texts_path : str, where strings for each tree are stored
(should be in data/sst/sents.txt).
Returns:
--------
list<LabeledTree> : loaded example trees. | Below is the the instruction that describes the task:
### Input:
Import dataset from the TreeLSTM data generation scripts.
Arguments:
----------
labels_path : str, where labels are stored (should be in
data/sst/labels.txt).
parents_path : str, where the parent relationships are stored
(should be in data/sst/parents.txt).
texts_path : str, where strings for each tree are stored
(should be in data/sst/sents.txt).
Returns:
--------
list<LabeledTree> : loaded example trees.
### Response:
def import_tree_corpus(labels_path, parents_path, texts_path):
"""
Import dataset from the TreeLSTM data generation scripts.
Arguments:
----------
labels_path : str, where labels are stored (should be in
data/sst/labels.txt).
parents_path : str, where the parent relationships are stored
(should be in data/sst/parents.txt).
texts_path : str, where strings for each tree are stored
(should be in data/sst/sents.txt).
Returns:
--------
list<LabeledTree> : loaded example trees.
"""
with codecs.open(labels_path, "r", "UTF-8") as f:
label_lines = f.readlines()
with codecs.open(parents_path, "r", "UTF-8") as f:
parent_lines = f.readlines()
with codecs.open(texts_path, "r", "UTF-8") as f:
word_lines = f.readlines()
assert len(label_lines) == len(parent_lines)
assert len(label_lines) == len(word_lines)
trees = []
for labels, parents, words in zip(label_lines, parent_lines, word_lines):
labels = [int(l) + 2 for l in labels.strip().split(" ")]
parents = [int(l) for l in parents.strip().split(" ")]
words = words.strip().split(" ")
assert len(labels) == len(parents)
trees.append(read_tree(parents, labels, words))
return trees |
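A short loading sketch; the data/sst/ paths simply follow the hints in the docstring and are not verified here:

trees = import_tree_corpus(
    "data/sst/labels.txt",
    "data/sst/parents.txt",
    "data/sst/sents.txt",
)
print(len(trees), "labeled trees loaded")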
def offsets(self):
"""A generator producing a (path, offset) tuple for all tailed files."""
for path, tailedfile in self._tailedfiles.iteritems():
yield path, tailedfile._offset | A generator producing a (path, offset) tuple for all tailed files. | Below is the the instruction that describes the task:
### Input:
A generator producing a (path, offset) tuple for all tailed files.
### Response:
def offsets(self):
"""A generator producing a (path, offset) tuple for all tailed files."""
for path, tailedfile in self._tailedfiles.iteritems():
yield path, tailedfile._offset |
def transform(data, target_wd, target_ht, is_train, box):
"""Crop and normnalize an image nd array."""
if box is not None:
x, y, w, h = box
data = data[y:min(y+h, data.shape[0]), x:min(x+w, data.shape[1])]
# Resize to target_wd * target_ht.
data = mx.image.imresize(data, target_wd, target_ht)
# Normalize in the same way as the pre-trained model.
data = data.astype(np.float32) / 255.0
data = (data - mx.nd.array([0.485, 0.456, 0.406])) / mx.nd.array([0.229, 0.224, 0.225])
if is_train:
if random.random() < 0.5:
data = nd.flip(data, axis=1)
data, _ = mx.image.random_crop(data, (224, 224))
else:
data, _ = mx.image.center_crop(data, (224, 224))
# Transpose from (target_wd, target_ht, 3)
# to (3, target_wd, target_ht).
data = nd.transpose(data, (2, 0, 1))
# If image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = nd.tile(data, (3, 1, 1))
return data.reshape((1,) + data.shape) | Crop and normalize an image nd array. | Below is the the instruction that describes the task:
### Input:
Crop and normalize an image nd array.
### Response:
def transform(data, target_wd, target_ht, is_train, box):
"""Crop and normnalize an image nd array."""
if box is not None:
x, y, w, h = box
data = data[y:min(y+h, data.shape[0]), x:min(x+w, data.shape[1])]
# Resize to target_wd * target_ht.
data = mx.image.imresize(data, target_wd, target_ht)
# Normalize in the same way as the pre-trained model.
data = data.astype(np.float32) / 255.0
data = (data - mx.nd.array([0.485, 0.456, 0.406])) / mx.nd.array([0.229, 0.224, 0.225])
if is_train:
if random.random() < 0.5:
data = nd.flip(data, axis=1)
data, _ = mx.image.random_crop(data, (224, 224))
else:
data, _ = mx.image.center_crop(data, (224, 224))
# Transpose from (target_wd, target_ht, 3)
# to (3, target_wd, target_ht).
data = nd.transpose(data, (2, 0, 1))
# If image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = nd.tile(data, (3, 1, 1))
return data.reshape((1,) + data.shape) |
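A usage sketch; the image path and crop box are placeholders, while mx.image.imread supplies the HWC uint8 NDArray this function expects:

import mxnet as mx

img = mx.image.imread("example.jpg")        # HWC, uint8
box = (10, 10, 200, 200)                    # x, y, w, h, or None to skip cropping
batch = transform(img, 256, 256, is_train=True, box=box)
print(batch.shape)                          # (1, 3, 224, 224)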
def create_network(self):
"""Get an instance of vlan services facade."""
return Network(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) | Get an instance of vlan services facade. | Below is the the instruction that describes the task:
### Input:
Get an instance of vlan services facade.
### Response:
def create_network(self):
"""Get an instance of vlan services facade."""
return Network(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) |
def createDenseCNNModel(self):
"""
Create a standard network composed of two CNN / MaxPool layers followed by a
linear layer, using ReLU activation between the layers
"""
# Create denseCNN2 model
model = nn.Sequential(
nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels[0],
kernel_size=self.kernel_size[0], stride=self.stride[0],
padding=self.padding[0]),
nn.MaxPool2d(kernel_size=2),
nn.ReLU(),
nn.Conv2d(in_channels=self.out_channels[0], out_channels=self.out_channels[1],
kernel_size=self.kernel_size[1], stride=self.stride[1],
padding=self.padding[1]),
nn.MaxPool2d(kernel_size=2),
nn.ReLU(),
Flatten(),
nn.Linear(self.cnn_output_len[1], self.n),
nn.ReLU(),
nn.Linear(self.n, self.output_size),
nn.LogSoftmax(dim=1)
)
model.to(self.device)
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
return model | Create a standard network composed of two CNN / MaxPool layers followed by a
linear layer, using ReLU activation between the layers | Below is the the instruction that describes the task:
### Input:
Create a standard network composed of two CNN / MaxPool layers followed by a
linear layer, using ReLU activation between the layers
### Response:
def createDenseCNNModel(self):
"""
Create a standard network composed of two CNN / MaxPool layers followed by a
linear layer, using ReLU activation between the layers
"""
# Create denseCNN2 model
model = nn.Sequential(
nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels[0],
kernel_size=self.kernel_size[0], stride=self.stride[0],
padding=self.padding[0]),
nn.MaxPool2d(kernel_size=2),
nn.ReLU(),
nn.Conv2d(in_channels=self.out_channels[0], out_channels=self.out_channels[1],
kernel_size=self.kernel_size[1], stride=self.stride[1],
padding=self.padding[1]),
nn.MaxPool2d(kernel_size=2),
nn.ReLU(),
Flatten(),
nn.Linear(self.cnn_output_len[1], self.n),
nn.ReLU(),
nn.Linear(self.n, self.output_size),
nn.LogSoftmax(dim=1)
)
model.to(self.device)
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
return model |
def _iparam_instancename(instancename):
"""
Convert an instance name specified in an operation method into a CIM
object that can be passed to imethodcall().
"""
if isinstance(instancename, CIMInstanceName):
instancename = instancename.copy()
instancename.host = None
instancename.namespace = None
elif instancename is None:
pass
else:
raise TypeError(
_format("The 'InstanceName' argument of the WBEMConnection "
"operation has invalid type {0} (must be None, a "
"string, or a CIMInstanceName)",
type(instancename)))
return instancename | Convert an instance name specified in an operation method into a CIM
object that can be passed to imethodcall(). | Below is the the instruction that describes the task:
### Input:
Convert an instance name specified in an operation method into a CIM
object that can be passed to imethodcall().
### Response:
def _iparam_instancename(instancename):
"""
Convert an instance name specified in an operation method into a CIM
object that can be passed to imethodcall().
"""
if isinstance(instancename, CIMInstanceName):
instancename = instancename.copy()
instancename.host = None
instancename.namespace = None
elif instancename is None:
pass
else:
raise TypeError(
_format("The 'InstanceName' argument of the WBEMConnection "
"operation has invalid type {0} (must be None, a "
"string, or a CIMInstanceName)",
type(instancename)))
return instancename |
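A small sketch of the normalization this helper performs; the class name and keybindings are made up, while CIMInstanceName is the standard pywbem type:

from pywbem import CIMInstanceName

path = CIMInstanceName("CIM_Foo", keybindings={"Name": "foo1"},
                       namespace="root/cimv2", host="server.example.com")
local_path = _iparam_instancename(path)
print(local_path.host, local_path.namespace)   # None None -- stripped for the request
print(path.host)                               # the original object is left untouched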
def energy_based_strength_of_connection(A, theta=0.0, k=2):
"""Energy Strength Measure.
Compute a strength of connection matrix using an energy-based measure.
Parameters
----------
A : sparse-matrix
matrix from which to generate strength of connection information
theta : float
Threshold parameter in [0,1]
k : int
Number of relaxation steps used to generate strength information
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. The sparsity pattern
of S matches that of A. For BSR matrices, S is a reduced strength
of connection matrix that describes connections between supernodes.
Notes
-----
This method relaxes with weighted-Jacobi in order to approximate the
matrix inverse. A normalized change of energy is then used to define
point-wise strength of connection values. Specifically, let v be the
approximation to the i-th column of the inverse, then
(S_ij)^2 = <v_j, v_j>_A / <v, v>_A,
where v_j = v, such that entry j in v has been zeroed out. As is common,
larger values imply a stronger connection.
Current implementation is a very slow pure-python implementation for
experimental purposes, only.
See [2006BrBrMaMaMc]_ for more details.
References
----------
.. [2006BrBrMaMaMc] Brannick, Brezina, MacLachlan, Manteuffel, McCormick.
"An Energy-Based AMG Coarsening Strategy",
Numerical Linear Algebra with Applications,
vol. 13, pp. 133-148, 2006.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import energy_based_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = energy_based_strength_of_connection(A, 0.0)
"""
if (theta < 0):
raise ValueError('expected a positive theta')
if not sparse.isspmatrix(A):
raise ValueError('expected sparse matrix')
if (k < 0):
raise ValueError('expected positive number of steps')
if not isinstance(k, int):
raise ValueError('expected integer')
if sparse.isspmatrix_bsr(A):
bsr_flag = True
numPDEs = A.blocksize[0]
if A.blocksize[0] != A.blocksize[1]:
raise ValueError('expected square blocks in BSR matrix A')
else:
bsr_flag = False
# Convert A to csc and Atilde to csr
if sparse.isspmatrix_csr(A):
Atilde = A.copy()
A = A.tocsc()
else:
A = A.tocsc()
Atilde = A.copy()
Atilde = Atilde.tocsr()
# Calculate the weighted-Jacobi parameter
from pyamg.util.linalg import approximate_spectral_radius
D = A.diagonal()
Dinv = 1.0 / D
Dinv[D == 0] = 0.0
Dinv = sparse.csc_matrix((Dinv, (np.arange(A.shape[0]),
np.arange(A.shape[1]))), shape=A.shape)
DinvA = Dinv * A
omega = 1.0 / approximate_spectral_radius(DinvA)
del DinvA
# Approximate A-inverse with k steps of w-Jacobi and a zero initial guess
S = sparse.csc_matrix(A.shape, dtype=A.dtype) # empty matrix
Id = sparse.eye(A.shape[0], A.shape[1], format='csc')
for i in range(k + 1):
S = S + omega * (Dinv * (Id - A * S))
# Calculate the strength entries in S column-wise, but only strength
# values at the sparsity pattern of A
for i in range(Atilde.shape[0]):
v = np.mat(S[:, i].todense())
Av = np.mat(A * v)
denom = np.sqrt(np.conjugate(v).T * Av)
# replace entries in row i with strength values
for j in range(Atilde.indptr[i], Atilde.indptr[i + 1]):
col = Atilde.indices[j]
vj = v[col].copy()
v[col] = 0.0
# = (||v_j||_A - ||v||_A) / ||v||_A
val = np.sqrt(np.conjugate(v).T * A * v) / denom - 1.0
# Negative values generally imply a weak connection
if val > -0.01:
Atilde.data[j] = abs(val)
else:
Atilde.data[j] = 0.0
v[col] = vj
# Apply drop tolerance
Atilde = classical_strength_of_connection(Atilde, theta=theta)
Atilde.eliminate_zeros()
# Put ones on the diagonal
Atilde = Atilde + Id.tocsr()
Atilde.sort_indices()
# Amalgamate Atilde for the BSR case, using ones for all strong connections
if bsr_flag:
Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))
nblocks = Atilde.indices.shape[0]
uone = np.ones((nblocks,))
Atilde = sparse.csr_matrix((uone, Atilde.indices, Atilde.indptr),
shape=(
int(Atilde.shape[0] / numPDEs),
int(Atilde.shape[1] / numPDEs)))
# Scale C by the largest magnitude entry in each row
Atilde = scale_rows_by_largest_entry(Atilde)
return Atilde | Energy Strength Measure.
Compute a strength of connection matrix using an energy-based measure.
Parameters
----------
A : sparse-matrix
matrix from which to generate strength of connection information
theta : float
Threshold parameter in [0,1]
k : int
Number of relaxation steps used to generate strength information
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. The sparsity pattern
of S matches that of A. For BSR matrices, S is a reduced strength
of connection matrix that describes connections between supernodes.
Notes
-----
This method relaxes with weighted-Jacobi in order to approximate the
matrix inverse. A normalized change of energy is then used to define
point-wise strength of connection values. Specifically, let v be the
approximation to the i-th column of the inverse, then
(S_ij)^2 = <v_j, v_j>_A / <v, v>_A,
where v_j = v, such that entry j in v has been zeroed out. As is common,
larger values imply a stronger connection.
Current implementation is a very slow pure-python implementation for
experimental purposes, only.
See [2006BrBrMaMaMc]_ for more details.
References
----------
.. [2006BrBrMaMaMc] Brannick, Brezina, MacLachlan, Manteuffel, McCormick.
"An Energy-Based AMG Coarsening Strategy",
Numerical Linear Algebra with Applications,
vol. 13, pp. 133-148, 2006.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import energy_based_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = energy_based_strength_of_connection(A, 0.0) | Below is the the instruction that describes the task:
### Input:
Energy Strength Measure.
Compute a strength of connection matrix using an energy-based measure.
Parameters
----------
A : sparse-matrix
matrix from which to generate strength of connection information
theta : float
Threshold parameter in [0,1]
k : int
Number of relaxation steps used to generate strength information
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. The sparsity pattern
of S matches that of A. For BSR matrices, S is a reduced strength
of connection matrix that describes connections between supernodes.
Notes
-----
This method relaxes with weighted-Jacobi in order to approximate the
matrix inverse. A normalized change of energy is then used to define
point-wise strength of connection values. Specifically, let v be the
approximation to the i-th column of the inverse, then
(S_ij)^2 = <v_j, v_j>_A / <v, v>_A,
where v_j = v, such that entry j in v has been zeroed out. As is common,
larger values imply a stronger connection.
Current implementation is a very slow pure-python implementation for
experimental purposes, only.
See [2006BrBrMaMaMc]_ for more details.
References
----------
.. [2006BrBrMaMaMc] Brannick, Brezina, MacLachlan, Manteuffel, McCormick.
"An Energy-Based AMG Coarsening Strategy",
Numerical Linear Algebra with Applications,
vol. 13, pp. 133-148, 2006.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import energy_based_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = energy_based_strength_of_connection(A, 0.0)
### Response:
def energy_based_strength_of_connection(A, theta=0.0, k=2):
"""Energy Strength Measure.
Compute a strength of connection matrix using an energy-based measure.
Parameters
----------
A : sparse-matrix
matrix from which to generate strength of connection information
theta : float
Threshold parameter in [0,1]
k : int
Number of relaxation steps used to generate strength information
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. The sparsity pattern
of S matches that of A. For BSR matrices, S is a reduced strength
of connection matrix that describes connections between supernodes.
Notes
-----
This method relaxes with weighted-Jacobi in order to approximate the
matrix inverse. A normalized change of energy is then used to define
point-wise strength of connection values. Specifically, let v be the
approximation to the i-th column of the inverse, then
(S_ij)^2 = <v_j, v_j>_A / <v, v>_A,
where v_j = v, such that entry j in v has been zeroed out. As is common,
larger values imply a stronger connection.
Current implementation is a very slow pure-python implementation for
experimental purposes, only.
See [2006BrBrMaMaMc]_ for more details.
References
----------
.. [2006BrBrMaMaMc] Brannick, Brezina, MacLachlan, Manteuffel, McCormick.
"An Energy-Based AMG Coarsening Strategy",
Numerical Linear Algebra with Applications,
vol. 13, pp. 133-148, 2006.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import energy_based_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = energy_based_strength_of_connection(A, 0.0)
"""
if (theta < 0):
raise ValueError('expected a positive theta')
if not sparse.isspmatrix(A):
raise ValueError('expected sparse matrix')
if (k < 0):
raise ValueError('expected positive number of steps')
if not isinstance(k, int):
raise ValueError('expected integer')
if sparse.isspmatrix_bsr(A):
bsr_flag = True
numPDEs = A.blocksize[0]
if A.blocksize[0] != A.blocksize[1]:
raise ValueError('expected square blocks in BSR matrix A')
else:
bsr_flag = False
# Convert A to csc and Atilde to csr
if sparse.isspmatrix_csr(A):
Atilde = A.copy()
A = A.tocsc()
else:
A = A.tocsc()
Atilde = A.copy()
Atilde = Atilde.tocsr()
# Calculate the weighted-Jacobi parameter
from pyamg.util.linalg import approximate_spectral_radius
D = A.diagonal()
Dinv = 1.0 / D
Dinv[D == 0] = 0.0
Dinv = sparse.csc_matrix((Dinv, (np.arange(A.shape[0]),
np.arange(A.shape[1]))), shape=A.shape)
DinvA = Dinv * A
omega = 1.0 / approximate_spectral_radius(DinvA)
del DinvA
# Approximate A-inverse with k steps of w-Jacobi and a zero initial guess
S = sparse.csc_matrix(A.shape, dtype=A.dtype) # empty matrix
Id = sparse.eye(A.shape[0], A.shape[1], format='csc')
for i in range(k + 1):
S = S + omega * (Dinv * (Id - A * S))
# Calculate the strength entries in S column-wise, but only strength
# values at the sparsity pattern of A
for i in range(Atilde.shape[0]):
v = np.mat(S[:, i].todense())
Av = np.mat(A * v)
denom = np.sqrt(np.conjugate(v).T * Av)
# replace entries in row i with strength values
for j in range(Atilde.indptr[i], Atilde.indptr[i + 1]):
col = Atilde.indices[j]
vj = v[col].copy()
v[col] = 0.0
# = (||v_j||_A - ||v||_A) / ||v||_A
val = np.sqrt(np.conjugate(v).T * A * v) / denom - 1.0
# Negative values generally imply a weak connection
if val > -0.01:
Atilde.data[j] = abs(val)
else:
Atilde.data[j] = 0.0
v[col] = vj
# Apply drop tolerance
Atilde = classical_strength_of_connection(Atilde, theta=theta)
Atilde.eliminate_zeros()
# Put ones on the diagonal
Atilde = Atilde + Id.tocsr()
Atilde.sort_indices()
# Amalgamate Atilde for the BSR case, using ones for all strong connections
if bsr_flag:
Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))
nblocks = Atilde.indices.shape[0]
uone = np.ones((nblocks,))
Atilde = sparse.csr_matrix((uone, Atilde.indices, Atilde.indptr),
shape=(
int(Atilde.shape[0] / numPDEs),
int(Atilde.shape[1] / numPDEs)))
# Scale C by the largest magnitude entry in each row
Atilde = scale_rows_by_largest_entry(Atilde)
return Atilde |