def _get_struct_fillstyle(self, shape_number):
"""Get the values for the FILLSTYLE record."""
obj = _make_object("FillStyle")
obj.FillStyleType = style_type = unpack_ui8(self._src)
if style_type == 0x00:
if shape_number <= 2:
obj.Color = self._get_struct_rgb()
else:
obj.Color = self._get_struct_rgba()
if style_type in (0x10, 0x12, 0x13):
obj.GradientMatrix = self._get_struct_matrix()
if style_type in (0x10, 0x12):
obj.Gradient = self._get_struct_gradient(shape_number)
if style_type == 0x13:
obj.Gradient = self._get_struct_focalgradient(shape_number)
if style_type in (0x40, 0x41, 0x42, 0x43):
obj.BitmapId = unpack_ui16(self._src)
obj.BitmapMatrix = self._get_struct_matrix()
    return obj | Get the values for the FILLSTYLE record. | Below is the instruction that describes the task:
### Input:
Get the values for the FILLSTYLE record.
### Response:
def _get_struct_fillstyle(self, shape_number):
"""Get the values for the FILLSTYLE record."""
obj = _make_object("FillStyle")
obj.FillStyleType = style_type = unpack_ui8(self._src)
if style_type == 0x00:
if shape_number <= 2:
obj.Color = self._get_struct_rgb()
else:
obj.Color = self._get_struct_rgba()
if style_type in (0x10, 0x12, 0x13):
obj.GradientMatrix = self._get_struct_matrix()
if style_type in (0x10, 0x12):
obj.Gradient = self._get_struct_gradient(shape_number)
if style_type == 0x13:
obj.Gradient = self._get_struct_focalgradient(shape_number)
if style_type in (0x40, 0x41, 0x42, 0x43):
obj.BitmapId = unpack_ui16(self._src)
obj.BitmapMatrix = self._get_struct_matrix()
return obj |
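A minimal sketch of the byte readers this parser leans on. The unpack_ui8/unpack_ui16 bodies below are assumptions (little-endian struct reads, as the SWF format prescribes), not the project's actual helpers:

import io
import struct

def unpack_ui8(src):
    # One unsigned byte from a file-like object.
    return struct.unpack('<B', src.read(1))[0]

def unpack_ui16(src):
    # One little-endian unsigned 16-bit integer.
    return struct.unpack('<H', src.read(2))[0]

src = io.BytesIO(b'\x41\x10\x00')
unpack_ui8(src)    # 65, i.e. FillStyleType 0x41 (clipped bitmap fill in SWF)
unpack_ui16(src)   # 16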
def register_callback(self, callback):
""" Register a new callback.
Note:
The callback will be executed in the AlarmTask context and for
this reason it should not block, otherwise we can miss block
changes.
"""
if not callable(callback):
raise ValueError('callback is not a callable')
self.callbacks.append(callback) | Register a new callback.
Note:
The callback will be executed in the AlarmTask context and for
this reason it should not block, otherwise we can miss block
    changes. | Below is the instruction that describes the task:
### Input:
Register a new callback.
Note:
The callback will be executed in the AlarmTask context and for
this reason it should not block, otherwise we can miss block
changes.
### Response:
def register_callback(self, callback):
""" Register a new callback.
Note:
The callback will be executed in the AlarmTask context and for
this reason it should not block, otherwise we can miss block
changes.
"""
if not callable(callback):
raise ValueError('callback is not a callable')
self.callbacks.append(callback) |
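In practice the docstring's contract means the callback should only hand work off; alarm_task and the deque below are illustrative names, not from the source:

import collections

pending_blocks = collections.deque()

def on_new_block(block_number):
    # Must not block: this runs inside the AlarmTask context.
    pending_blocks.append(block_number)

alarm_task.register_callback(on_new_block)  # drain pending_blocks elsewhere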
def compute_consistency_score(returns_test, preds):
"""
Compute Bayesian consistency score.
Parameters
----------
returns_test : pd.Series
Observed cumulative returns.
preds : numpy.array
Multiple (simulated) cumulative returns.
Returns
-------
Consistency score
Score from 100 (returns_test perfectly on the median line of the
Bayesian cone spanned by preds) to 0 (returns_test completely
outside of Bayesian cone.)
"""
returns_test_cum = cum_returns(returns_test, starting_value=1.)
cum_preds = np.cumprod(preds + 1, 1)
q = [sp.stats.percentileofscore(cum_preds[:, i],
returns_test_cum.iloc[i],
kind='weak')
for i in range(len(returns_test_cum))]
# normalize to be from 100 (perfect median line) to 0 (completely outside
# of cone)
return 100 - np.abs(50 - np.mean(q)) / .5 | Compute Bayesian consistency score.
Parameters
----------
returns_test : pd.Series
Observed cumulative returns.
preds : numpy.array
Multiple (simulated) cumulative returns.
Returns
-------
Consistency score
Score from 100 (returns_test perfectly on the median line of the
Bayesian cone spanned by preds) to 0 (returns_test completely
    outside of Bayesian cone.) | Below is the instruction that describes the task:
### Input:
Compute Bayesian consistency score.
Parameters
----------
returns_test : pd.Series
Observed cumulative returns.
preds : numpy.array
Multiple (simulated) cumulative returns.
Returns
-------
Consistency score
Score from 100 (returns_test perfectly on the median line of the
Bayesian cone spanned by preds) to 0 (returns_test completely
outside of Bayesian cone.)
### Response:
def compute_consistency_score(returns_test, preds):
"""
Compute Bayesian consistency score.
Parameters
----------
returns_test : pd.Series
Observed cumulative returns.
preds : numpy.array
Multiple (simulated) cumulative returns.
Returns
-------
Consistency score
Score from 100 (returns_test perfectly on the median line of the
Bayesian cone spanned by preds) to 0 (returns_test completely
outside of Bayesian cone.)
"""
returns_test_cum = cum_returns(returns_test, starting_value=1.)
cum_preds = np.cumprod(preds + 1, 1)
q = [sp.stats.percentileofscore(cum_preds[:, i],
returns_test_cum.iloc[i],
kind='weak')
for i in range(len(returns_test_cum))]
# normalize to be from 100 (perfect median line) to 0 (completely outside
# of cone)
return 100 - np.abs(50 - np.mean(q)) / .5 |
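A self-contained check of the scoring arithmetic, with cum_returns inlined as a cumulative product (an assumption consistent with starting_value=1.):

import numpy as np
import pandas as pd
import scipy as sp
import scipy.stats

np.random.seed(0)
returns_test = pd.Series(np.random.normal(0.001, 0.01, 50))
preds = np.random.normal(0.001, 0.01, (500, 50))       # 500 simulated paths

returns_test_cum = (returns_test + 1.).cumprod()       # cum_returns with starting_value=1.
cum_preds = np.cumprod(preds + 1, 1)
q = [sp.stats.percentileofscore(cum_preds[:, i], returns_test_cum.iloc[i], kind='weak')
     for i in range(len(returns_test_cum))]
score = 100 - np.abs(50 - np.mean(q)) / .5             # near 100 here: the test path is drawn from the cone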
def add_edge(self, edge):
"Add edge to chart, and see if it extends or predicts another edge."
start, end, lhs, found, expects = edge
if edge not in self.chart[end]:
self.chart[end].append(edge)
if self.trace:
print '%10s: added %s' % (caller(2), edge)
if not expects:
self.extender(edge)
else:
            self.predictor(edge) | Add edge to chart, and see if it extends or predicts another edge. | Below is the instruction that describes the task:
### Input:
Add edge to chart, and see if it extends or predicts another edge.
### Response:
def add_edge(self, edge):
"Add edge to chart, and see if it extends or predicts another edge."
start, end, lhs, found, expects = edge
if edge not in self.chart[end]:
self.chart[end].append(edge)
if self.trace:
print '%10s: added %s' % (caller(2), edge)
if not expects:
self.extender(edge)
else:
self.predictor(edge) |
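The bare print statement marks this as Python 2 source; under Python 3 the trace line would read:

print('%10s: added %s' % (caller(2), edge))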
def _parse_fmt(fmt, color_key='colors', ls_key='linestyles',
marker_key='marker'):
'''Modified from matplotlib's _process_plot_format function.'''
try: # Is fmt just a colorspec?
color = mcolors.colorConverter.to_rgb(fmt)
except ValueError:
pass # No, not just a color.
else:
# Either a color or a numeric marker style
if fmt not in mlines.lineMarkers:
return {color_key:color}
result = dict()
# handle the multi char special cases and strip them from the string
if fmt.find('--') >= 0:
result[ls_key] = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.') >= 0:
result[ls_key] = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ') >= 0:
result[ls_key] = 'None'
fmt = fmt.replace(' ', '')
for c in list(fmt):
if c in mlines.lineStyles:
if ls_key in result:
raise ValueError('Illegal format string; two linestyle symbols')
result[ls_key] = c
elif c in mlines.lineMarkers:
if marker_key in result:
raise ValueError('Illegal format string; two marker symbols')
result[marker_key] = c
elif c in mcolors.colorConverter.colors:
if color_key in result:
raise ValueError('Illegal format string; two color symbols')
result[color_key] = c
else:
raise ValueError('Unrecognized character %c in format string' % c)
    return result | Modified from matplotlib's _process_plot_format function. | Below is the instruction that describes the task:
### Input:
Modified from matplotlib's _process_plot_format function.
### Response:
def _parse_fmt(fmt, color_key='colors', ls_key='linestyles',
marker_key='marker'):
'''Modified from matplotlib's _process_plot_format function.'''
try: # Is fmt just a colorspec?
color = mcolors.colorConverter.to_rgb(fmt)
except ValueError:
pass # No, not just a color.
else:
# Either a color or a numeric marker style
if fmt not in mlines.lineMarkers:
return {color_key:color}
result = dict()
# handle the multi char special cases and strip them from the string
if fmt.find('--') >= 0:
result[ls_key] = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.') >= 0:
result[ls_key] = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ') >= 0:
result[ls_key] = 'None'
fmt = fmt.replace(' ', '')
for c in list(fmt):
if c in mlines.lineStyles:
if ls_key in result:
raise ValueError('Illegal format string; two linestyle symbols')
result[ls_key] = c
elif c in mlines.lineMarkers:
if marker_key in result:
raise ValueError('Illegal format string; two marker symbols')
result[marker_key] = c
elif c in mcolors.colorConverter.colors:
if color_key in result:
raise ValueError('Illegal format string; two color symbols')
result[color_key] = c
else:
raise ValueError('Unrecognized character %c in format string' % c)
return result |
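Tracing the branches above on a few inputs (single characters stay as-is, while pure colorspecs come back from to_rgb as RGB tuples):

_parse_fmt('r--o')   # {'linestyles': '--', 'colors': 'r', 'marker': 'o'}
_parse_fmt('r-.')    # {'linestyles': '-.', 'colors': 'r'}
_parse_fmt('g')      # {'colors': (0.0, 0.5, 0.0)}, the early-return path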
def writes(nb, format, **kwargs):
"""Write a notebook to a string in a given format in the current nbformat version.
This function always writes the notebook in the current nbformat version.
Parameters
----------
nb : NotebookNode
The notebook to write.
format : (u'json', u'ipynb', u'py')
The format to write the notebook in.
Returns
-------
s : unicode
The notebook string.
"""
format = unicode(format)
if format == u'json' or format == u'ipynb':
return writes_json(nb, **kwargs)
elif format == u'py':
return writes_py(nb, **kwargs)
else:
raise NBFormatError('Unsupported format: %s' % format) | Write a notebook to a string in a given format in the current nbformat version.
This function always writes the notebook in the current nbformat version.
Parameters
----------
nb : NotebookNode
The notebook to write.
format : (u'json', u'ipynb', u'py')
The format to write the notebook in.
Returns
-------
s : unicode
    The notebook string. | Below is the instruction that describes the task:
### Input:
Write a notebook to a string in a given format in the current nbformat version.
This function always writes the notebook in the current nbformat version.
Parameters
----------
nb : NotebookNode
The notebook to write.
format : (u'json', u'ipynb', u'py')
The format to write the notebook in.
Returns
-------
s : unicode
The notebook string.
### Response:
def writes(nb, format, **kwargs):
"""Write a notebook to a string in a given format in the current nbformat version.
This function always writes the notebook in the current nbformat version.
Parameters
----------
nb : NotebookNode
The notebook to write.
format : (u'json', u'ipynb', u'py')
The format to write the notebook in.
Returns
-------
s : unicode
The notebook string.
"""
format = unicode(format)
if format == u'json' or format == u'ipynb':
return writes_json(nb, **kwargs)
elif format == u'py':
return writes_py(nb, **kwargs)
else:
raise NBFormatError('Unsupported format: %s' % format) |
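Usage is a one-liner per format (the unicode builtin dates this as Python 2 code); nb is a NotebookNode as in the docstring:

ipynb_str = writes(nb, u'ipynb')   # same serializer as u'json'
py_str = writes(nb, u'py')         # python-script form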
def migrate_v0_rules(self):
'''
Remove any v0 (i.e. pre-010) rules from storage and replace them with v1 rules.
Notes:
            v0 had two differences: user was a username. Replaced with iden of user as 'iden' field.
            Also 'iden' was stored as binary. Now it is stored as hex string.
'''
for iden, valu in self.core.slab.scanByFull(db=self.trigdb):
ruledict = s_msgpack.un(valu)
ver = ruledict.get('ver')
if ver != 0:
continue
user = ruledict.pop('user')
if user is None:
logger.warning('Username missing in stored trigger rule %r', iden)
continue
# In v0, stored user was username, in >0 user is useriden
user = self.core.auth.getUserByName(user).iden
if user is None:
logger.warning('Unrecognized username in stored trigger rule %r', iden)
continue
ruledict['ver'] = 1
ruledict['useriden'] = user
newiden = s_common.ehex(iden)
self.core.slab.pop(iden, db=self.trigdb)
self.core.slab.put(newiden.encode(), s_msgpack.en(ruledict), db=self.trigdb) | Remove any v0 (i.e. pre-010) rules from storage and replace them with v1 rules.
Notes:
            v0 had two differences: user was a username. Replaced with iden of user as 'iden' field.
            Also 'iden' was stored as binary. Now it is stored as hex string. | Below is the instruction that describes the task:
### Input:
Remove any v0 (i.e. pre-010) rules from storage and replace them with v1 rules.
Notes:
            v0 had two differences: user was a username. Replaced with iden of user as 'iden' field.
            Also 'iden' was stored as binary. Now it is stored as hex string.
### Response:
def migrate_v0_rules(self):
'''
Remove any v0 (i.e. pre-010) rules from storage and replace them with v1 rules.
Notes:
            v0 had two differences: user was a username. Replaced with iden of user as 'iden' field.
            Also 'iden' was stored as binary. Now it is stored as hex string.
'''
for iden, valu in self.core.slab.scanByFull(db=self.trigdb):
ruledict = s_msgpack.un(valu)
ver = ruledict.get('ver')
if ver != 0:
continue
user = ruledict.pop('user')
if user is None:
logger.warning('Username missing in stored trigger rule %r', iden)
continue
# In v0, stored user was username, in >0 user is useriden
user = self.core.auth.getUserByName(user).iden
if user is None:
logger.warning('Unrecognized username in stored trigger rule %r', iden)
continue
ruledict['ver'] = 1
ruledict['useriden'] = user
newiden = s_common.ehex(iden)
self.core.slab.pop(iden, db=self.trigdb)
self.core.slab.put(newiden.encode(), s_msgpack.en(ruledict), db=self.trigdb) |
def make_document(self, titlestring):
"""
This method may be used to create a new document for writing as xml
to the OPS subdirectory of the ePub structure.
"""
#root = etree.XML('''<?xml version="1.0"?>\
#<!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.1//EN' 'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'>\
#<html xml:lang="en-US" xmlns="http://www.w3.org/1999/xhtml" xmlns:ops="http://www.idpf.org/2007/ops">\
#</html>''')
root = etree.XML('''<?xml version="1.0"?>\
<!DOCTYPE html>\
<html xmlns="http://www.w3.org/1999/xhtml">\
</html>''')
document = etree.ElementTree(root)
html = document.getroot()
head = etree.SubElement(html, 'head')
etree.SubElement(html, 'body')
title = etree.SubElement(head, 'title')
title.text = titlestring
#The href for the css stylesheet is a standin, can be overwritten
etree.SubElement(head,
'link',
{'href': 'css/default.css',
'rel': 'stylesheet',
'type': 'text/css'})
return document | This method may be used to create a new document for writing as xml
    to the OPS subdirectory of the ePub structure. | Below is the instruction that describes the task:
### Input:
This method may be used to create a new document for writing as xml
to the OPS subdirectory of the ePub structure.
### Response:
def make_document(self, titlestring):
"""
This method may be used to create a new document for writing as xml
to the OPS subdirectory of the ePub structure.
"""
#root = etree.XML('''<?xml version="1.0"?>\
#<!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.1//EN' 'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'>\
#<html xml:lang="en-US" xmlns="http://www.w3.org/1999/xhtml" xmlns:ops="http://www.idpf.org/2007/ops">\
#</html>''')
root = etree.XML('''<?xml version="1.0"?>\
<!DOCTYPE html>\
<html xmlns="http://www.w3.org/1999/xhtml">\
</html>''')
document = etree.ElementTree(root)
html = document.getroot()
head = etree.SubElement(html, 'head')
etree.SubElement(html, 'body')
title = etree.SubElement(head, 'title')
title.text = titlestring
#The href for the css stylesheet is a standin, can be overwritten
etree.SubElement(head,
'link',
{'href': 'css/default.css',
'rel': 'stylesheet',
'type': 'text/css'})
return document |
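A sketch of serializing the returned tree, assuming etree here is lxml.etree (whose ElementTree/SubElement calls match the ones above); builder stands in for whatever object carries this method:

from lxml import etree

doc = builder.make_document('Chapter 1')
xml_bytes = etree.tostring(doc, pretty_print=True,
                           xml_declaration=True, encoding='utf-8')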
def delete(ctx, uri):
"""DELETE the specified URI
Example:
\b
$ wva get files/userfs/WEB/python
{'file_list': ['files/userfs/WEB/python/.ssh',
'files/userfs/WEB/python/README.md']}
$ wva delete files/userfs/WEB/python/README.md
''
$ wva get files/userfs/WEB/python
{'file_list': ['files/userfs/WEB/python/.ssh']}
"""
http_client = get_wva(ctx).get_http_client()
cli_pprint(http_client.delete(uri)) | DELETE the specified URI
Example:
\b
$ wva get files/userfs/WEB/python
{'file_list': ['files/userfs/WEB/python/.ssh',
'files/userfs/WEB/python/README.md']}
$ wva delete files/userfs/WEB/python/README.md
''
$ wva get files/userfs/WEB/python
    {'file_list': ['files/userfs/WEB/python/.ssh']} | Below is the instruction that describes the task:
### Input:
DELETE the specified URI
Example:
\b
$ wva get files/userfs/WEB/python
{'file_list': ['files/userfs/WEB/python/.ssh',
'files/userfs/WEB/python/README.md']}
$ wva delete files/userfs/WEB/python/README.md
''
$ wva get files/userfs/WEB/python
{'file_list': ['files/userfs/WEB/python/.ssh']}
### Response:
def delete(ctx, uri):
"""DELETE the specified URI
Example:
\b
$ wva get files/userfs/WEB/python
{'file_list': ['files/userfs/WEB/python/.ssh',
'files/userfs/WEB/python/README.md']}
$ wva delete files/userfs/WEB/python/README.md
''
$ wva get files/userfs/WEB/python
{'file_list': ['files/userfs/WEB/python/.ssh']}
"""
http_client = get_wva(ctx).get_http_client()
cli_pprint(http_client.delete(uri)) |
def unsign(wheelfile):
"""
Remove RECORD.jws from a wheel by truncating the zip file.
RECORD.jws must be at the end of the archive. The zip file must be an
ordinary archive, with the compressed files and the directory in the same
order, and without any non-zip content after the truncation point.
"""
import wheel.install
vzf = wheel.install.VerifyingZipFile(wheelfile, "a")
info = vzf.infolist()
if not (len(info) and info[-1].filename.endswith('/RECORD.jws')):
raise WheelError("RECORD.jws not found at end of archive.")
vzf.pop()
vzf.close() | Remove RECORD.jws from a wheel by truncating the zip file.
RECORD.jws must be at the end of the archive. The zip file must be an
ordinary archive, with the compressed files and the directory in the same
    order, and without any non-zip content after the truncation point. | Below is the instruction that describes the task:
### Input:
Remove RECORD.jws from a wheel by truncating the zip file.
RECORD.jws must be at the end of the archive. The zip file must be an
ordinary archive, with the compressed files and the directory in the same
order, and without any non-zip content after the truncation point.
### Response:
def unsign(wheelfile):
"""
Remove RECORD.jws from a wheel by truncating the zip file.
RECORD.jws must be at the end of the archive. The zip file must be an
ordinary archive, with the compressed files and the directory in the same
order, and without any non-zip content after the truncation point.
"""
import wheel.install
vzf = wheel.install.VerifyingZipFile(wheelfile, "a")
info = vzf.infolist()
if not (len(info) and info[-1].filename.endswith('/RECORD.jws')):
raise WheelError("RECORD.jws not found at end of archive.")
vzf.pop()
vzf.close() |
def serialize_data(data, compression=False, encryption=False, public_key=None):
"""Serializes normal Python datatypes into plaintext using json.
You may also choose to enable compression and encryption when serializing
data to send over the network. Enabling one or both of these options will
incur additional overhead.
Args:
data (dict): The data to convert into plain text using json.
compression (boolean): True or False value on whether or not to compress
the serialized data.
encryption (rsa.encryption): An encryption instance used to encrypt the
message if encryption is desired.
public_key (str): The public key to use to encrypt if encryption is
enabled.
Returns:
The string message serialized using json.
"""
message = json.dumps(data)
if compression:
message = zlib.compress(message)
message = binascii.b2a_base64(message)
if encryption and public_key:
message = encryption.encrypt(message, public_key)
encoded_message = str.encode(message)
return encoded_message | Serializes normal Python datatypes into plaintext using json.
You may also choose to enable compression and encryption when serializing
data to send over the network. Enabling one or both of these options will
incur additional overhead.
Args:
data (dict): The data to convert into plain text using json.
compression (boolean): True or False value on whether or not to compress
the serialized data.
encryption (rsa.encryption): An encryption instance used to encrypt the
message if encryption is desired.
public_key (str): The public key to use to encrypt if encryption is
enabled.
Returns:
    The string message serialized using json. | Below is the instruction that describes the task:
### Input:
Serializes normal Python datatypes into plaintext using json.
You may also choose to enable compression and encryption when serializing
data to send over the network. Enabling one or both of these options will
incur additional overhead.
Args:
data (dict): The data to convert into plain text using json.
compression (boolean): True or False value on whether or not to compress
the serialized data.
encryption (rsa.encryption): An encryption instance used to encrypt the
message if encryption is desired.
public_key (str): The public key to use to encrypt if encryption is
enabled.
Returns:
The string message serialized using json.
### Response:
def serialize_data(data, compression=False, encryption=False, public_key=None):
"""Serializes normal Python datatypes into plaintext using json.
You may also choose to enable compression and encryption when serializing
data to send over the network. Enabling one or both of these options will
incur additional overhead.
Args:
data (dict): The data to convert into plain text using json.
compression (boolean): True or False value on whether or not to compress
the serialized data.
encryption (rsa.encryption): An encryption instance used to encrypt the
message if encryption is desired.
public_key (str): The public key to use to encrypt if encryption is
enabled.
Returns:
The string message serialized using json.
"""
message = json.dumps(data)
if compression:
message = zlib.compress(message)
message = binascii.b2a_base64(message)
if encryption and public_key:
message = encryption.encrypt(message, public_key)
encoded_message = str.encode(message)
return encoded_message |
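As written this is Python 2 (zlib.compress and b2a_base64 receive a str, and str.encode re-encodes one). A Python 3 sketch of the same pipeline, with encryption omitted and everything moved through bytes explicitly:

import binascii
import json
import zlib

def serialize_data_py3(data, compression=False):
    message = json.dumps(data).encode('utf-8')   # bytes from here on
    if compression:
        message = binascii.b2a_base64(zlib.compress(message))
    return message

serialize_data_py3({'a': 1}, compression=True)   # base64 bytes, trailing b'\n'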
def get_paths(folder, ignore_endswith=ignore_endswith):
'''Return hologram file paths
Parameters
----------
folder: str or pathlib.Path
Path to search folder
ignore_endswith: list
List of filename ending strings indicating which
files should be ignored.
'''
folder = pathlib.Path(folder).resolve()
files = folder.rglob("*")
for ie in ignore_endswith:
files = [ff for ff in files if not ff.name.endswith(ie)]
return sorted(files) | Return hologram file paths
Parameters
----------
folder: str or pathlib.Path
Path to search folder
ignore_endswith: list
List of filename ending strings indicating which
    files should be ignored. | Below is the instruction that describes the task:
### Input:
Return hologram file paths
Parameters
----------
folder: str or pathlib.Path
Path to search folder
ignore_endswith: list
List of filename ending strings indicating which
files should be ignored.
### Response:
def get_paths(folder, ignore_endswith=ignore_endswith):
'''Return hologram file paths
Parameters
----------
folder: str or pathlib.Path
Path to search folder
ignore_endswith: list
List of filename ending strings indicating which
files should be ignored.
'''
folder = pathlib.Path(folder).resolve()
files = folder.rglob("*")
for ie in ignore_endswith:
files = [ff for ff in files if not ff.name.endswith(ie)]
return sorted(files) |
def diff_identifiers(a, b):
"""Return list of tuples where identifiers in datasets differ.
Tuple structure:
(identifier, present in a, present in b)
:param a: first :class:`dtoolcore.DataSet`
:param b: second :class:`dtoolcore.DataSet`
:returns: list of tuples where identifiers in datasets differ
"""
a_ids = set(a.identifiers)
b_ids = set(b.identifiers)
difference = []
for i in a_ids.difference(b_ids):
difference.append((i, True, False))
for i in b_ids.difference(a_ids):
difference.append((i, False, True))
return difference | Return list of tuples where identifiers in datasets differ.
Tuple structure:
(identifier, present in a, present in b)
:param a: first :class:`dtoolcore.DataSet`
:param b: second :class:`dtoolcore.DataSet`
    :returns: list of tuples where identifiers in datasets differ | Below is the instruction that describes the task:
### Input:
Return list of tuples where identifiers in datasets differ.
Tuple structure:
(identifier, present in a, present in b)
:param a: first :class:`dtoolcore.DataSet`
:param b: second :class:`dtoolcore.DataSet`
:returns: list of tuples where identifiers in datasets differ
### Response:
def diff_identifiers(a, b):
"""Return list of tuples where identifiers in datasets differ.
Tuple structure:
(identifier, present in a, present in b)
:param a: first :class:`dtoolcore.DataSet`
:param b: second :class:`dtoolcore.DataSet`
:returns: list of tuples where identifiers in datasets differ
"""
a_ids = set(a.identifiers)
b_ids = set(b.identifiers)
difference = []
for i in a_ids.difference(b_ids):
difference.append((i, True, False))
for i in b_ids.difference(a_ids):
difference.append((i, False, True))
return difference |
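A minimal stand-in for the two datasets makes the tuple layout concrete:

from collections import namedtuple

DS = namedtuple('DS', 'identifiers')
a = DS(identifiers=['x', 'y'])
b = DS(identifiers=['y', 'z'])
diff_identifiers(a, b)   # [('x', True, False), ('z', False, True)]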
def __create_grid(self):
"""!
@brief Creates CLIQUE grid that consists of CLIQUE blocks for clustering process.
"""
data_sizes, min_corner, max_corner = self.__get_data_size_derscription()
dimension = len(self.__data[0])
cell_sizes = [dimension_length / self.__amount_intervals for dimension_length in data_sizes]
self.__cells = [clique_block() for _ in range(pow(self.__amount_intervals, dimension))]
iterator = coordinate_iterator(dimension, self.__amount_intervals)
point_availability = [True] * len(self.__data)
self.__cell_map = {}
for index_cell in range(len(self.__cells)):
logical_location = iterator.get_coordinate()
iterator.increment()
self.__cells[index_cell].logical_location = logical_location[:]
cur_max_corner, cur_min_corner = self.__get_spatial_location(logical_location, min_corner, max_corner, cell_sizes)
self.__cells[index_cell].spatial_location = spatial_block(cur_max_corner, cur_min_corner)
self.__cells[index_cell].capture_points(self.__data, point_availability)
self.__cell_map[self.__location_to_key(logical_location)] = self.__cells[index_cell] | !
    @brief Creates CLIQUE grid that consists of CLIQUE blocks for clustering process. | Below is the instruction that describes the task:
### Input:
!
@brief Creates CLIQUE grid that consists of CLIQUE blocks for clustering process.
### Response:
def __create_grid(self):
"""!
@brief Creates CLIQUE grid that consists of CLIQUE blocks for clustering process.
"""
data_sizes, min_corner, max_corner = self.__get_data_size_derscription()
dimension = len(self.__data[0])
cell_sizes = [dimension_length / self.__amount_intervals for dimension_length in data_sizes]
self.__cells = [clique_block() for _ in range(pow(self.__amount_intervals, dimension))]
iterator = coordinate_iterator(dimension, self.__amount_intervals)
point_availability = [True] * len(self.__data)
self.__cell_map = {}
for index_cell in range(len(self.__cells)):
logical_location = iterator.get_coordinate()
iterator.increment()
self.__cells[index_cell].logical_location = logical_location[:]
cur_max_corner, cur_min_corner = self.__get_spatial_location(logical_location, min_corner, max_corner, cell_sizes)
self.__cells[index_cell].spatial_location = spatial_block(cur_max_corner, cur_min_corner)
self.__cells[index_cell].capture_points(self.__data, point_availability)
self.__cell_map[self.__location_to_key(logical_location)] = self.__cells[index_cell] |
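The grid therefore holds amount_intervals ** dimension blocks, one per integer grid coordinate. itertools.product enumerates the same logical locations (whether coordinate_iterator visits them in this exact order is an assumption):

import itertools

dimension, intervals = 2, 3
locations = list(itertools.product(range(intervals), repeat=dimension))
len(locations)   # 9 == intervals ** dimension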
def hex_to_xy(self, h):
"""Converts hexadecimal colors represented as a String to approximate CIE
1931 x and y coordinates.
"""
rgb = self.color.hex_to_rgb(h)
return self.rgb_to_xy(rgb[0], rgb[1], rgb[2]) | Converts hexadecimal colors represented as a String to approximate CIE
    1931 x and y coordinates. | Below is the instruction that describes the task:
### Input:
Converts hexadecimal colors represented as a String to approximate CIE
1931 x and y coordinates.
### Response:
def hex_to_xy(self, h):
"""Converts hexadecimal colors represented as a String to approximate CIE
1931 x and y coordinates.
"""
rgb = self.color.hex_to_rgb(h)
return self.rgb_to_xy(rgb[0], rgb[1], rgb[2]) |
def get_backend():
"""Get backend."""
backend = getattr(settings, 'SIMDITOR_IMAGE_BACKEND', None)
if backend == 'pillow':
from simditor.image import pillow_backend as backend
else:
from simditor.image import dummy_backend as backend
    return backend | Get backend. | Below is the instruction that describes the task:
### Input:
Get backend.
### Response:
def get_backend():
"""Get backend."""
backend = getattr(settings, 'SIMDITOR_IMAGE_BACKEND', None)
if backend == 'pillow':
from simditor.image import pillow_backend as backend
else:
from simditor.image import dummy_backend as backend
return backend |
def get(msg_or_dict, key, default=_SENTINEL):
"""Retrieve a key's value from a protobuf Message or dictionary.
Args:
        msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key to retrieve from the object.
default (Any): If the key is not present on the object, and a default
is set, returns that default instead. A type-appropriate falsy
default is generally recommended, as protobuf messages almost
always have default values for unset values and it is not always
possible to tell the difference between a falsy value and an
unset one. If no default is set then :class:`KeyError` will be
raised if the key is not present in the object.
Returns:
Any: The return value from the underlying Message or dict.
Raises:
KeyError: If the key is not found. Note that, for unset values,
messages and dictionaries may not have consistent behavior.
TypeError: If ``msg_or_dict`` is not a Message or Mapping.
"""
# We may need to get a nested key. Resolve this.
key, subkey = _resolve_subkeys(key)
# Attempt to get the value from the two types of objects we know about.
# If we get something else, complain.
if isinstance(msg_or_dict, message.Message):
answer = getattr(msg_or_dict, key, default)
elif isinstance(msg_or_dict, collections_abc.Mapping):
answer = msg_or_dict.get(key, default)
else:
raise TypeError(
"get() expected a dict or protobuf message, got {!r}.".format(
type(msg_or_dict)
)
)
# If the object we got back is our sentinel, raise KeyError; this is
# a "not found" case.
if answer is _SENTINEL:
raise KeyError(key)
# If a subkey exists, call this method recursively against the answer.
if subkey is not None and answer is not default:
return get(answer, subkey, default=default)
return answer | Retrieve a key's value from a protobuf Message or dictionary.
Args:
        msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key to retrieve from the object.
default (Any): If the key is not present on the object, and a default
is set, returns that default instead. A type-appropriate falsy
default is generally recommended, as protobuf messages almost
always have default values for unset values and it is not always
possible to tell the difference between a falsy value and an
unset one. If no default is set then :class:`KeyError` will be
raised if the key is not present in the object.
Returns:
Any: The return value from the underlying Message or dict.
Raises:
KeyError: If the key is not found. Note that, for unset values,
messages and dictionaries may not have consistent behavior.
        TypeError: If ``msg_or_dict`` is not a Message or Mapping. | Below is the instruction that describes the task:
### Input:
Retrieve a key's value from a protobuf Message or dictionary.
Args:
        msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key to retrieve from the object.
default (Any): If the key is not present on the object, and a default
is set, returns that default instead. A type-appropriate falsy
default is generally recommended, as protobuf messages almost
always have default values for unset values and it is not always
possible to tell the difference between a falsy value and an
unset one. If no default is set then :class:`KeyError` will be
raised if the key is not present in the object.
Returns:
Any: The return value from the underlying Message or dict.
Raises:
KeyError: If the key is not found. Note that, for unset values,
messages and dictionaries may not have consistent behavior.
TypeError: If ``msg_or_dict`` is not a Message or Mapping.
### Response:
def get(msg_or_dict, key, default=_SENTINEL):
"""Retrieve a key's value from a protobuf Message or dictionary.
Args:
        msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key to retrieve from the object.
default (Any): If the key is not present on the object, and a default
is set, returns that default instead. A type-appropriate falsy
default is generally recommended, as protobuf messages almost
always have default values for unset values and it is not always
possible to tell the difference between a falsy value and an
unset one. If no default is set then :class:`KeyError` will be
raised if the key is not present in the object.
Returns:
Any: The return value from the underlying Message or dict.
Raises:
KeyError: If the key is not found. Note that, for unset values,
messages and dictionaries may not have consistent behavior.
TypeError: If ``msg_or_dict`` is not a Message or Mapping.
"""
# We may need to get a nested key. Resolve this.
key, subkey = _resolve_subkeys(key)
# Attempt to get the value from the two types of objects we know about.
# If we get something else, complain.
if isinstance(msg_or_dict, message.Message):
answer = getattr(msg_or_dict, key, default)
elif isinstance(msg_or_dict, collections_abc.Mapping):
answer = msg_or_dict.get(key, default)
else:
raise TypeError(
"get() expected a dict or protobuf message, got {!r}.".format(
type(msg_or_dict)
)
)
# If the object we got back is our sentinel, raise KeyError; this is
# a "not found" case.
if answer is _SENTINEL:
raise KeyError(key)
# If a subkey exists, call this method recursively against the answer.
if subkey is not None and answer is not default:
return get(answer, subkey, default=default)
return answer |
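Plain mappings exercise the second branch, so the nested-key behavior is visible without protobuf at all (assuming _resolve_subkeys splits on the first dot):

get({'a': {'b': 1}}, 'a.b')            # 1, via one recursive step
get({'a': {}}, 'a.b', default=None)    # None
get({}, 'a')                           # raises KeyError('a')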
def declareLegacyItem(typeName, schemaVersion, attributes, dummyBases=()):
"""
Generate a dummy subclass of Item that will have the given attributes,
and the base Item methods, but no methods of its own. This is for use
with upgrading.
@param typeName: a string, the Axiom TypeName to have attributes for.
@param schemaVersion: an int, the (old) version of the schema this is a proxy
for.
@param attributes: a dict mapping {columnName: attr instance} describing
the schema of C{typeName} at C{schemaVersion}.
@param dummyBases: a sequence of 4-tuples of (baseTypeName,
baseSchemaVersion, baseAttributes, baseBases) representing the dummy bases
of this legacy class.
"""
if (typeName, schemaVersion) in _legacyTypes:
return _legacyTypes[typeName, schemaVersion]
if dummyBases:
realBases = [declareLegacyItem(*A) for A in dummyBases]
else:
realBases = (Item,)
attributes = attributes.copy()
attributes['__module__'] = 'item_dummy'
attributes['__legacy__'] = True
attributes['typeName'] = typeName
attributes['schemaVersion'] = schemaVersion
result = type(str('DummyItem<%s,%d>' % (typeName, schemaVersion)),
realBases,
attributes)
assert result is not None, 'wtf, %r' % (type,)
_legacyTypes[(typeName, schemaVersion)] = result
return result | Generate a dummy subclass of Item that will have the given attributes,
and the base Item methods, but no methods of its own. This is for use
with upgrading.
@param typeName: a string, the Axiom TypeName to have attributes for.
@param schemaVersion: an int, the (old) version of the schema this is a proxy
for.
@param attributes: a dict mapping {columnName: attr instance} describing
the schema of C{typeName} at C{schemaVersion}.
@param dummyBases: a sequence of 4-tuples of (baseTypeName,
baseSchemaVersion, baseAttributes, baseBases) representing the dummy bases
    of this legacy class. | Below is the instruction that describes the task:
### Input:
Generate a dummy subclass of Item that will have the given attributes,
and the base Item methods, but no methods of its own. This is for use
with upgrading.
@param typeName: a string, the Axiom TypeName to have attributes for.
@param schemaVersion: an int, the (old) version of the schema this is a proxy
for.
@param attributes: a dict mapping {columnName: attr instance} describing
the schema of C{typeName} at C{schemaVersion}.
@param dummyBases: a sequence of 4-tuples of (baseTypeName,
baseSchemaVersion, baseAttributes, baseBases) representing the dummy bases
of this legacy class.
### Response:
def declareLegacyItem(typeName, schemaVersion, attributes, dummyBases=()):
"""
Generate a dummy subclass of Item that will have the given attributes,
and the base Item methods, but no methods of its own. This is for use
with upgrading.
@param typeName: a string, the Axiom TypeName to have attributes for.
@param schemaVersion: an int, the (old) version of the schema this is a proxy
for.
@param attributes: a dict mapping {columnName: attr instance} describing
the schema of C{typeName} at C{schemaVersion}.
@param dummyBases: a sequence of 4-tuples of (baseTypeName,
baseSchemaVersion, baseAttributes, baseBases) representing the dummy bases
of this legacy class.
"""
if (typeName, schemaVersion) in _legacyTypes:
return _legacyTypes[typeName, schemaVersion]
if dummyBases:
realBases = [declareLegacyItem(*A) for A in dummyBases]
else:
realBases = (Item,)
attributes = attributes.copy()
attributes['__module__'] = 'item_dummy'
attributes['__legacy__'] = True
attributes['typeName'] = typeName
attributes['schemaVersion'] = schemaVersion
result = type(str('DummyItem<%s,%d>' % (typeName, schemaVersion)),
realBases,
attributes)
assert result is not None, 'wtf, %r' % (type,)
_legacyTypes[(typeName, schemaVersion)] = result
return result |
def set_coeffs(self, values, ls, ms):
"""
Set spherical harmonic coefficients in-place to specified values.
Usage
-----
x.set_coeffs(values, ls, ms)
Parameters
----------
values : float (list)
The value(s) of the spherical harmonic coefficient(s).
ls : int (list)
The degree(s) of the coefficient(s) that should be set.
ms : int (list)
The order(s) of the coefficient(s) that should be set. Positive
and negative values correspond to the cosine and sine
components, respectively.
Examples
--------
x.set_coeffs(10., 1, 1) # x.coeffs[0, 1, 1] = 10.
x.set_coeffs(5., 1, -1) # x.coeffs[1, 1, 1] = 5.
x.set_coeffs([1., 2], [1, 2], [0, -2]) # x.coeffs[0, 1, 0] = 1.
# x.coeffs[1, 2, 2] = 2.
"""
# Ensure that the type is correct
values = _np.array(values)
ls = _np.array(ls)
ms = _np.array(ms)
mneg_mask = (ms < 0).astype(_np.int)
self.coeffs[mneg_mask, ls, _np.abs(ms)] = values | Set spherical harmonic coefficients in-place to specified values.
Usage
-----
x.set_coeffs(values, ls, ms)
Parameters
----------
values : float (list)
The value(s) of the spherical harmonic coefficient(s).
ls : int (list)
The degree(s) of the coefficient(s) that should be set.
ms : int (list)
The order(s) of the coefficient(s) that should be set. Positive
and negative values correspond to the cosine and sine
components, respectively.
Examples
--------
x.set_coeffs(10., 1, 1) # x.coeffs[0, 1, 1] = 10.
x.set_coeffs(5., 1, -1) # x.coeffs[1, 1, 1] = 5.
x.set_coeffs([1., 2], [1, 2], [0, -2]) # x.coeffs[0, 1, 0] = 1.
                                           # x.coeffs[1, 2, 2] = 2. | Below is the instruction that describes the task:
### Input:
Set spherical harmonic coefficients in-place to specified values.
Usage
-----
x.set_coeffs(values, ls, ms)
Parameters
----------
values : float (list)
The value(s) of the spherical harmonic coefficient(s).
ls : int (list)
The degree(s) of the coefficient(s) that should be set.
ms : int (list)
The order(s) of the coefficient(s) that should be set. Positive
and negative values correspond to the cosine and sine
components, respectively.
Examples
--------
x.set_coeffs(10., 1, 1) # x.coeffs[0, 1, 1] = 10.
x.set_coeffs(5., 1, -1) # x.coeffs[1, 1, 1] = 5.
x.set_coeffs([1., 2], [1, 2], [0, -2]) # x.coeffs[0, 1, 0] = 1.
# x.coeffs[1, 2, 2] = 2.
### Response:
def set_coeffs(self, values, ls, ms):
"""
Set spherical harmonic coefficients in-place to specified values.
Usage
-----
x.set_coeffs(values, ls, ms)
Parameters
----------
values : float (list)
The value(s) of the spherical harmonic coefficient(s).
ls : int (list)
The degree(s) of the coefficient(s) that should be set.
ms : int (list)
The order(s) of the coefficient(s) that should be set. Positive
and negative values correspond to the cosine and sine
components, respectively.
Examples
--------
x.set_coeffs(10., 1, 1) # x.coeffs[0, 1, 1] = 10.
x.set_coeffs(5., 1, -1) # x.coeffs[1, 1, 1] = 5.
x.set_coeffs([1., 2], [1, 2], [0, -2]) # x.coeffs[0, 1, 0] = 1.
# x.coeffs[1, 2, 2] = 2.
"""
# Ensure that the type is correct
values = _np.array(values)
ls = _np.array(ls)
ms = _np.array(ms)
mneg_mask = (ms < 0).astype(_np.int)
self.coeffs[mneg_mask, ls, _np.abs(ms)] = values |
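One portability note: _np.int was removed in NumPy 1.24, so on current NumPy the mask line becomes:

mneg_mask = (ms < 0).astype(int)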
def iterate(self, iterable, element_timeout=None):
"""
Iterate over an iterable.
The iterator is executed in the host thread. The threads dynamically
grab the elements. The iterator elements must hence be picklable to
be transferred through the queue.
If there is only one thread, no special operations are performed.
Otherwise, effectively n-1 threads are used to process the iterable
elements, and the host thread is used to provide them.
        You can specify a timeout for the clients to adhere to.
"""
self._assert_active()
with self._queuelock:
# Get this loop id.
self._thread_loop_ids[self._thread_num] += 1
loop_id = self._thread_loop_ids[self._thread_num]
# Iterate.
return _IterableQueueIterator(
self._iter_queue, loop_id, self, iterable, element_timeout
) | Iterate over an iterable.
The iterator is executed in the host thread. The threads dynamically
grab the elements. The iterator elements must hence be picklable to
be transferred through the queue.
If there is only one thread, no special operations are performed.
Otherwise, effectively n-1 threads are used to process the iterable
elements, and the host thread is used to provide them.
    You can specify a timeout for the clients to adhere to. | Below is the instruction that describes the task:
### Input:
Iterate over an iterable.
The iterator is executed in the host thread. The threads dynamically
grab the elements. The iterator elements must hence be picklable to
be transferred through the queue.
If there is only one thread, no special operations are performed.
Otherwise, effectively n-1 threads are used to process the iterable
elements, and the host thread is used to provide them.
    You can specify a timeout for the clients to adhere to.
### Response:
def iterate(self, iterable, element_timeout=None):
"""
Iterate over an iterable.
The iterator is executed in the host thread. The threads dynamically
grab the elements. The iterator elements must hence be picklable to
be transferred through the queue.
If there is only one thread, no special operations are performed.
Otherwise, effectively n-1 threads are used to process the iterable
elements, and the host thread is used to provide them.
        You can specify a timeout for the clients to adhere to.
"""
self._assert_active()
with self._queuelock:
# Get this loop id.
self._thread_loop_ids[self._thread_num] += 1
loop_id = self._thread_loop_ids[self._thread_num]
# Iterate.
return _IterableQueueIterator(
self._iter_queue, loop_id, self, iterable, element_timeout
) |
def remove_network(self, action, n_name, **kwargs):
"""
Removes a network.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param n_name: Network name or id.
:type n_name: unicode | str
:param kwargs: Additional keyword arguments.
:type kwargs: dict
"""
c_kwargs = self.get_network_remove_kwargs(action, n_name, **kwargs)
res = action.client.remove_network(**c_kwargs)
del self._policy.network_names[action.client_name][n_name]
return res | Removes a network.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param n_name: Network name or id.
:type n_name: unicode | str
:param kwargs: Additional keyword arguments.
    :type kwargs: dict | Below is the instruction that describes the task:
### Input:
Removes a network.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param n_name: Network name or id.
:type n_name: unicode | str
:param kwargs: Additional keyword arguments.
:type kwargs: dict
### Response:
def remove_network(self, action, n_name, **kwargs):
"""
Removes a network.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param n_name: Network name or id.
:type n_name: unicode | str
:param kwargs: Additional keyword arguments.
:type kwargs: dict
"""
c_kwargs = self.get_network_remove_kwargs(action, n_name, **kwargs)
res = action.client.remove_network(**c_kwargs)
del self._policy.network_names[action.client_name][n_name]
return res |
def get_partition_hash(self):
"""
Returns partition hash calculated for serialized object.
Partition hash is used to determine partition of a Data and is calculated using
* PartitioningStrategy during serialization.
* If partition hash is not set then hash_code() is used.
:return: partition hash
"""
if self.has_partition_hash():
return unpack_from(FMT_BE_INT, self._buffer, PARTITION_HASH_OFFSET)[0]
return self.hash_code() | Returns partition hash calculated for serialized object.
Partition hash is used to determine partition of a Data and is calculated using
* PartitioningStrategy during serialization.
* If partition hash is not set then hash_code() is used.
    :return: partition hash | Below is the instruction that describes the task:
### Input:
Returns partition hash calculated for serialized object.
Partition hash is used to determine partition of a Data and is calculated using
* PartitioningStrategy during serialization.
* If partition hash is not set then hash_code() is used.
:return: partition hash
### Response:
def get_partition_hash(self):
"""
Returns partition hash calculated for serialized object.
Partition hash is used to determine partition of a Data and is calculated using
* PartitioningStrategy during serialization.
* If partition hash is not set then hash_code() is used.
:return: partition hash
"""
if self.has_partition_hash():
return unpack_from(FMT_BE_INT, self._buffer, PARTITION_HASH_OFFSET)[0]
return self.hash_code() |
def traverse_setter(obj, attribute, value):
"""
Traverses the object and sets the supplied attribute on the
object. Supports Dimensioned and DimensionedPlot types.
"""
obj.traverse(lambda x: setattr(x, attribute, value)) | Traverses the object and sets the supplied attribute on the
    object. Supports Dimensioned and DimensionedPlot types. | Below is the instruction that describes the task:
### Input:
Traverses the object and sets the supplied attribute on the
object. Supports Dimensioned and DimensionedPlot types.
### Response:
def traverse_setter(obj, attribute, value):
"""
Traverses the object and sets the supplied attribute on the
object. Supports Dimensioned and DimensionedPlot types.
"""
obj.traverse(lambda x: setattr(x, attribute, value)) |
def restart_agent(self, agent_id, **kwargs):
'''tells the host agent running in this agency to restart the agent.'''
host_medium = self.get_medium('host_agent')
agent = host_medium.get_agent()
d = host_medium.get_document(agent_id)
# This is done like this on purpose, we want to ensure that document
# exists before passing it to the agent (even though he would handle
# this himself).
d.addCallback(
lambda desc: agent.start_agent(desc.doc_id, **kwargs))
        return d | tells the host agent running in this agency to restart the agent. | Below is the instruction that describes the task:
### Input:
tells the host agent running in this agency to restart the agent.
### Response:
def restart_agent(self, agent_id, **kwargs):
'''tells the host agent running in this agency to restart the agent.'''
host_medium = self.get_medium('host_agent')
agent = host_medium.get_agent()
d = host_medium.get_document(agent_id)
# This is done like this on purpose, we want to ensure that document
# exists before passing it to the agent (even though he would handle
# this himself).
d.addCallback(
lambda desc: agent.start_agent(desc.doc_id, **kwargs))
return d |
def cut_psf(psf_data, psf_size):
"""
cut the psf properly
:param psf_data: image of PSF
:param psf_size: size of psf
:return: re-sized and re-normalized PSF
"""
kernel = image_util.cut_edges(psf_data, psf_size)
kernel = kernel_norm(kernel)
return kernel | cut the psf properly
:param psf_data: image of PSF
:param psf_size: size of psf
    :return: re-sized and re-normalized PSF | Below is the instruction that describes the task:
### Input:
cut the psf properly
:param psf_data: image of PSF
:param psf_size: size of psf
:return: re-sized and re-normalized PSF
### Response:
def cut_psf(psf_data, psf_size):
"""
cut the psf properly
:param psf_data: image of PSF
:param psf_size: size of psf
:return: re-sized and re-normalized PSF
"""
kernel = image_util.cut_edges(psf_data, psf_size)
kernel = kernel_norm(kernel)
return kernel |
def get_rows(self):
""" Returns the name of the rows of the extension"""
possible_dataframes = ['F', 'FY', 'M', 'S',
'D_cba', 'D_pba', 'D_imp', 'D_exp',
'D_cba_reg', 'D_pba_reg',
'D_imp_reg', 'D_exp_reg',
'D_cba_cap', 'D_pba_cap',
'D_imp_cap', 'D_exp_cap', ]
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
return getattr(self, df).index.get_values()
else:
logging.warn("No attributes available to get row names")
            return None | Returns the name of the rows of the extension | Below is the instruction that describes the task:
### Input:
Returns the name of the rows of the extension
### Response:
def get_rows(self):
""" Returns the name of the rows of the extension"""
possible_dataframes = ['F', 'FY', 'M', 'S',
'D_cba', 'D_pba', 'D_imp', 'D_exp',
'D_cba_reg', 'D_pba_reg',
'D_imp_reg', 'D_exp_reg',
'D_cba_cap', 'D_pba_cap',
'D_imp_cap', 'D_exp_cap', ]
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
return getattr(self, df).index.get_values()
else:
logging.warn("No attributes available to get row names")
return None |
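Two API notes that date this code: Index.get_values() was removed in pandas 1.0 (use .to_numpy() instead), and logging.warn is a deprecated alias for logging.warning. The hit branch on a current stack would read:

return getattr(self, df).index.to_numpy()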
def headers(self):
""" Returns a list of the last HTTP response headers.
Header keys are normalized to capitalized form, as in `User-Agent`.
"""
headers = self.conn.issue_command("Headers")
res = []
for header in headers.split("\r"):
key, value = header.split(": ", 1)
for line in value.split("\n"):
res.append((_normalize_header(key), line))
return res | Returns a list of the last HTTP response headers.
    Header keys are normalized to capitalized form, as in `User-Agent`. | Below is the instruction that describes the task:
### Input:
Returns a list of the last HTTP response headers.
Header keys are normalized to capitalized form, as in `User-Agent`.
### Response:
def headers(self):
""" Returns a list of the last HTTP response headers.
Header keys are normalized to capitalized form, as in `User-Agent`.
"""
headers = self.conn.issue_command("Headers")
res = []
for header in headers.split("\r"):
key, value = header.split(": ", 1)
for line in value.split("\n"):
res.append((_normalize_header(key), line))
return res |
def noise_plot(signal, noise, normalise=False, **kwargs):
"""
    Plot signal and noise Fourier transforms and the difference.
:type signal: `obspy.core.stream.Stream`
:param signal: Stream of "signal" window
:type noise: `obspy.core.stream.Stream`
:param noise: Stream of the "noise" window.
:type normalise: bool
:param normalise: Whether to normalise the data before plotting or not.
:return: `matplotlib.pyplot.Figure`
"""
import matplotlib.pyplot as plt
# Work out how many traces we can plot
n_traces = 0
for tr in signal:
try:
noise.select(id=tr.id)[0]
except IndexError: # pragma: no cover
continue
n_traces += 1
fig, axes = plt.subplots(n_traces, 2, sharex=True)
if len(signal) > 1:
axes = axes.ravel()
i = 0
lines = []
labels = []
for tr in signal:
try:
noise_tr = noise.select(id=tr.id)[0]
except IndexError: # pragma: no cover
continue
ax1 = axes[i]
ax2 = axes[i + 1]
fft_len = fftpack.next_fast_len(
max(noise_tr.stats.npts, tr.stats.npts))
if not normalise:
signal_fft = fftpack.rfft(tr.data, fft_len)
noise_fft = fftpack.rfft(noise_tr.data, fft_len)
else:
signal_fft = fftpack.rfft(tr.data / max(tr.data), fft_len)
noise_fft = fftpack.rfft(
noise_tr.data / max(noise_tr.data), fft_len)
frequencies = np.linspace(0, 1 / (2 * tr.stats.delta), fft_len // 2)
noise_line, = ax1.semilogy(
frequencies, 2.0 / fft_len * np.abs(noise_fft[0: fft_len // 2]),
'k', label="noise")
signal_line, = ax1.semilogy(
frequencies, 2.0 / fft_len * np.abs(signal_fft[0: fft_len // 2]),
'r', label="signal")
if "signal" not in labels:
labels.append("signal")
lines.append(signal_line)
if "noise" not in labels:
labels.append("noise")
lines.append(noise_line)
ax1.set_ylabel(tr.id, rotation=0, horizontalalignment='right')
ax2.plot(
frequencies,
(2.0 / fft_len * np.abs(signal_fft[0: fft_len // 2])) -
(2.0 / fft_len * np.abs(noise_fft[0: fft_len // 2])), 'k')
ax2.yaxis.tick_right()
ax2.set_ylim(bottom=0)
i += 2
axes[-1].set_xlabel("Frequency (Hz)")
axes[-2].set_xlabel("Frequency (Hz)")
axes[0].set_title("Spectra")
axes[1].set_title("Signal - noise")
plt.figlegend(lines, labels, 'upper left')
plt.tight_layout()
plt.subplots_adjust(hspace=0)
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
    return fig | Plot signal and noise Fourier transforms and the difference.
:type signal: `obspy.core.stream.Stream`
:param signal: Stream of "signal" window
:type noise: `obspy.core.stream.Stream`
:param noise: Stream of the "noise" window.
:type normalise: bool
:param normalise: Whether to normalise the data before plotting or not.
    :return: `matplotlib.pyplot.Figure` | Below is the instruction that describes the task:
### Input:
    Plot signal and noise Fourier transforms and the difference.
:type signal: `obspy.core.stream.Stream`
:param signal: Stream of "signal" window
:type noise: `obspy.core.stream.Stream`
:param noise: Stream of the "noise" window.
:type normalise: bool
:param normalise: Whether to normalise the data before plotting or not.
:return: `matplotlib.pyplot.Figure`
### Response:
def noise_plot(signal, noise, normalise=False, **kwargs):
"""
    Plot signal and noise Fourier transforms and the difference.
:type signal: `obspy.core.stream.Stream`
:param signal: Stream of "signal" window
:type noise: `obspy.core.stream.Stream`
:param noise: Stream of the "noise" window.
:type normalise: bool
:param normalise: Whether to normalise the data before plotting or not.
:return: `matplotlib.pyplot.Figure`
"""
import matplotlib.pyplot as plt
# Work out how many traces we can plot
n_traces = 0
for tr in signal:
try:
noise.select(id=tr.id)[0]
except IndexError: # pragma: no cover
continue
n_traces += 1
fig, axes = plt.subplots(n_traces, 2, sharex=True)
if len(signal) > 1:
axes = axes.ravel()
i = 0
lines = []
labels = []
for tr in signal:
try:
noise_tr = noise.select(id=tr.id)[0]
except IndexError: # pragma: no cover
continue
ax1 = axes[i]
ax2 = axes[i + 1]
fft_len = fftpack.next_fast_len(
max(noise_tr.stats.npts, tr.stats.npts))
if not normalise:
signal_fft = fftpack.rfft(tr.data, fft_len)
noise_fft = fftpack.rfft(noise_tr.data, fft_len)
else:
signal_fft = fftpack.rfft(tr.data / max(tr.data), fft_len)
noise_fft = fftpack.rfft(
noise_tr.data / max(noise_tr.data), fft_len)
frequencies = np.linspace(0, 1 / (2 * tr.stats.delta), fft_len // 2)
noise_line, = ax1.semilogy(
frequencies, 2.0 / fft_len * np.abs(noise_fft[0: fft_len // 2]),
'k', label="noise")
signal_line, = ax1.semilogy(
frequencies, 2.0 / fft_len * np.abs(signal_fft[0: fft_len // 2]),
'r', label="signal")
if "signal" not in labels:
labels.append("signal")
lines.append(signal_line)
if "noise" not in labels:
labels.append("noise")
lines.append(noise_line)
ax1.set_ylabel(tr.id, rotation=0, horizontalalignment='right')
ax2.plot(
frequencies,
(2.0 / fft_len * np.abs(signal_fft[0: fft_len // 2])) -
(2.0 / fft_len * np.abs(noise_fft[0: fft_len // 2])), 'k')
ax2.yaxis.tick_right()
ax2.set_ylim(bottom=0)
i += 2
axes[-1].set_xlabel("Frequency (Hz)")
axes[-2].set_xlabel("Frequency (Hz)")
axes[0].set_title("Spectra")
axes[1].set_title("Signal - noise")
plt.figlegend(lines, labels, 'upper left')
plt.tight_layout()
plt.subplots_adjust(hspace=0)
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig |
def getAnalogActionData(self, action, unActionDataSize, ulRestrictToDevice):
"""
Reads the state of an analog action given its handle. This will return VRInputError_WrongType if the type of
action is something other than analog
"""
fn = self.function_table.getAnalogActionData
pActionData = InputAnalogActionData_t()
result = fn(action, byref(pActionData), unActionDataSize, ulRestrictToDevice)
return result, pActionData | Reads the state of an analog action given its handle. This will return VRInputError_WrongType if the type of
    action is something other than analog | Below is the instruction that describes the task:
### Input:
Reads the state of an analog action given its handle. This will return VRInputError_WrongType if the type of
action is something other than analog
### Response:
def getAnalogActionData(self, action, unActionDataSize, ulRestrictToDevice):
"""
Reads the state of an analog action given its handle. This will return VRInputError_WrongType if the type of
action is something other than analog
"""
fn = self.function_table.getAnalogActionData
pActionData = InputAnalogActionData_t()
result = fn(action, byref(pActionData), unActionDataSize, ulRestrictToDevice)
return result, pActionData |
def traverse(obj, target:str, default=nodefault, executable:bool=False, separator:str='.', protect:bool=True):
"""Traverse down an object, using getattr or getitem.
If ``executable`` is ``True`` any executable function encountered will be called, with no arguments. Traversal will
continue on the result of that call. You can change the separator as desired, i.e. to a '/'.
By default attributes (but not array elements) prefixed with an underscore are taboo. They will not resolve,
raising a LookupError.
Certain allowances are made: if a 'path segment' is numerical, it's treated as an array index. If attribute
lookup fails, it will re-try on that object using array notation and continue from there. This makes lookup
very flexible.
"""
# TODO: Support numerical slicing, i.e. ``1:4``, or even just ``:-1`` and things.
assert check_argument_types()
value = obj
remainder = target
if not target:
return obj
while separator:
name, separator, remainder = remainder.partition(separator)
numeric = name.lstrip('-').isdigit()
try:
if numeric or (protect and name.startswith('_')):
raise AttributeError()
value = getattr(value, name)
if executable and callable(value):
value = value()
except AttributeError:
try:
value = value[int(name) if numeric else name]
except (KeyError, TypeError):
if default is nodefault:
raise LookupError("Could not resolve '" + target + "' on: " + repr(obj))
return default
return value | Traverse down an object, using getattr or getitem.
If ``executable`` is ``True`` any executable function encountered will be called, with no arguments. Traversal will
continue on the result of that call. You can change the separator as desired, i.e. to a '/'.
By default attributes (but not array elements) prefixed with an underscore are taboo. They will not resolve,
raising a LookupError.
Certain allowances are made: if a 'path segment' is numerical, it's treated as an array index. If attribute
lookup fails, it will re-try on that object using array notation and continue from there. This makes lookup
very flexible. | Below is the instruction that describes the task:
### Input:
Traverse down an object, using getattr or getitem.
If ``executable`` is ``True`` any executable function encountered will be called, with no arguments. Traversal will
continue on the result of that call. You can change the separator as desired, i.e. to a '/'.
By default attributes (but not array elements) prefixed with an underscore are taboo. They will not resolve,
raising a LookupError.
Certain allowances are made: if a 'path segment' is numerical, it's treated as an array index. If attribute
lookup fails, it will re-try on that object using array notation and continue from there. This makes lookup
very flexible.
### Response:
def traverse(obj, target:str, default=nodefault, executable:bool=False, separator:str='.', protect:bool=True):
"""Traverse down an object, using getattr or getitem.
If ``executable`` is ``True`` any executable function encountered will be called, with no arguments. Traversal will
continue on the result of that call. You can change the separator as desired, i.e. to a '/'.
By default attributes (but not array elements) prefixed with an underscore are taboo. They will not resolve,
raising a LookupError.
Certain allowances are made: if a 'path segment' is numerical, it's treated as an array index. If attribute
lookup fails, it will re-try on that object using array notation and continue from there. This makes lookup
very flexible.
"""
# TODO: Support numerical slicing, i.e. ``1:4``, or even just ``:-1`` and things.
assert check_argument_types()
value = obj
remainder = target
if not target:
return obj
while separator:
name, separator, remainder = remainder.partition(separator)
numeric = name.lstrip('-').isdigit()
try:
if numeric or (protect and name.startswith('_')):
raise AttributeError()
value = getattr(value, name)
if executable and callable(value):
value = value()
except AttributeError:
try:
value = value[int(name) if numeric else name]
except (KeyError, TypeError):
if default is nodefault:
raise LookupError("Could not resolve '" + target + "' on: " + repr(obj))
return default
return value |
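A brief usage sketch of traverse (the nested data is hypothetical; assumes the function and its nodefault sentinel are importable from the original project):

data = {'user': {'name': 'alice', 'tags': ['admin', 'ops']}}
traverse(data, 'user.name')           # -> 'alice'
traverse(data, 'user.tags.0')         # numeric segment falls back to index lookup -> 'admin'
traverse(data, 'user.missing', None)  # default is returned instead of raising LookupError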
def _set_dst_vtep_ip(self, v, load=False):
"""
Setter method for dst_vtep_ip, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/dst_vtep_ip (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_dst_vtep_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dst_vtep_ip() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip-host", parent=self, choice=(u'choice-dst-vtep-ip', u'case-dst-vtep-ip'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst vtep ip address: A.B.C.D', u'alt-name': u'dst-vtep-ip-host', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='inet:ipv4-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dst_vtep_ip must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip-host", parent=self, choice=(u'choice-dst-vtep-ip', u'case-dst-vtep-ip'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst vtep ip address: A.B.C.D', u'alt-name': u'dst-vtep-ip-host', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='inet:ipv4-address', is_config=True)""",
})
self.__dst_vtep_ip = t
if hasattr(self, '_set'):
self._set() | Setter method for dst_vtep_ip, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/dst_vtep_ip (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_dst_vtep_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dst_vtep_ip() directly. | Below is the instruction that describes the task:
### Input:
Setter method for dst_vtep_ip, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/dst_vtep_ip (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_dst_vtep_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dst_vtep_ip() directly.
### Response:
def _set_dst_vtep_ip(self, v, load=False):
"""
Setter method for dst_vtep_ip, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/dst_vtep_ip (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_dst_vtep_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dst_vtep_ip() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip-host", parent=self, choice=(u'choice-dst-vtep-ip', u'case-dst-vtep-ip'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst vtep ip address: A.B.C.D', u'alt-name': u'dst-vtep-ip-host', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='inet:ipv4-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dst_vtep_ip must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip-host", parent=self, choice=(u'choice-dst-vtep-ip', u'case-dst-vtep-ip'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst vtep ip address: A.B.C.D', u'alt-name': u'dst-vtep-ip-host', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='inet:ipv4-address', is_config=True)""",
})
self.__dst_vtep_ip = t
if hasattr(self, '_set'):
self._set() |
def fix_e502(self, result):
"""Remove extraneous escape of newline."""
(line_index, _, target) = get_index_offset_contents(result,
self.source)
self.source[line_index] = target.rstrip('\n\r \t\\') + '\n' | Remove extraneous escape of newline. | Below is the instruction that describes the task:
### Input:
Remove extraneous escape of newline.
### Response:
def fix_e502(self, result):
"""Remove extraneous escape of newline."""
(line_index, _, target) = get_index_offset_contents(result,
self.source)
self.source[line_index] = target.rstrip('\n\r \t\\') + '\n' |
def find_revision_number(self, revision=None):
"""Find the local revision number of the given revision."""
# Make sure the local repository exists.
self.create()
# Try to find the revision number of the specified revision.
revision = self.expand_branch_name(revision)
output = self.context.capture('git', 'rev-list', revision, '--count')
if not (output and output.isdigit()):
msg = "Failed to find local revision number! ('git rev-list --count' gave unexpected output)"
raise ValueError(msg)
return int(output) | Find the local revision number of the given revision. | Below is the instruction that describes the task:
### Input:
Find the local revision number of the given revision.
### Response:
def find_revision_number(self, revision=None):
"""Find the local revision number of the given revision."""
# Make sure the local repository exists.
self.create()
# Try to find the revision number of the specified revision.
revision = self.expand_branch_name(revision)
output = self.context.capture('git', 'rev-list', revision, '--count')
if not (output and output.isdigit()):
msg = "Failed to find local revision number! ('git rev-list --count' gave unexpected output)"
raise ValueError(msg)
return int(output) |
def task(self, _fn=None, queue=None, hard_timeout=None, unique=None,
lock=None, lock_key=None, retry=None, retry_on=None,
retry_method=None, schedule=None, batch=False,
max_queue_size=None):
"""
Function decorator that defines the behavior of the function when it is
used as a task. To use the default behavior, tasks don't need to be
decorated.
See README.rst for an explanation of the options.
"""
def _delay(func):
def _delay_inner(*args, **kwargs):
return self.delay(func, args=args, kwargs=kwargs)
return _delay_inner
# Periodic tasks are unique.
if schedule is not None:
unique = True
def _wrap(func):
if hard_timeout is not None:
func._task_hard_timeout = hard_timeout
if queue is not None:
func._task_queue = queue
if unique is not None:
func._task_unique = unique
if lock is not None:
func._task_lock = lock
if lock_key is not None:
func._task_lock_key = lock_key
if retry is not None:
func._task_retry = retry
if retry_on is not None:
func._task_retry_on = retry_on
if retry_method is not None:
func._task_retry_method = retry_method
if batch is not None:
func._task_batch = batch
if schedule is not None:
func._task_schedule = schedule
if max_queue_size is not None:
func._task_max_queue_size = max_queue_size
func.delay = _delay(func)
if schedule is not None:
serialized_func = serialize_func_name(func)
assert serialized_func not in self.periodic_task_funcs, \
"attempted duplicate registration of periodic task"
self.periodic_task_funcs[serialized_func] = func
return func
return _wrap if _fn is None else _wrap(_fn) | Function decorator that defines the behavior of the function when it is
used as a task. To use the default behavior, tasks don't need to be
decorated.
See README.rst for an explanation of the options. | Below is the instruction that describes the task:
### Input:
Function decorator that defines the behavior of the function when it is
used as a task. To use the default behavior, tasks don't need to be
decorated.
See README.rst for an explanation of the options.
### Response:
def task(self, _fn=None, queue=None, hard_timeout=None, unique=None,
lock=None, lock_key=None, retry=None, retry_on=None,
retry_method=None, schedule=None, batch=False,
max_queue_size=None):
"""
Function decorator that defines the behavior of the function when it is
used as a task. To use the default behavior, tasks don't need to be
decorated.
See README.rst for an explanation of the options.
"""
def _delay(func):
def _delay_inner(*args, **kwargs):
return self.delay(func, args=args, kwargs=kwargs)
return _delay_inner
# Periodic tasks are unique.
if schedule is not None:
unique = True
def _wrap(func):
if hard_timeout is not None:
func._task_hard_timeout = hard_timeout
if queue is not None:
func._task_queue = queue
if unique is not None:
func._task_unique = unique
if lock is not None:
func._task_lock = lock
if lock_key is not None:
func._task_lock_key = lock_key
if retry is not None:
func._task_retry = retry
if retry_on is not None:
func._task_retry_on = retry_on
if retry_method is not None:
func._task_retry_method = retry_method
if batch is not None:
func._task_batch = batch
if schedule is not None:
func._task_schedule = schedule
if max_queue_size is not None:
func._task_max_queue_size = max_queue_size
func.delay = _delay(func)
if schedule is not None:
serialized_func = serialize_func_name(func)
assert serialized_func not in self.periodic_task_funcs, \
"attempted duplicate registration of periodic task"
self.periodic_task_funcs[serialized_func] = func
return func
return _wrap if _fn is None else _wrap(_fn) |
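A sketch of how the decorator is typically applied, assuming tiger is an instance of the class that defines task() (the function name, queue, and address below are made up):

@tiger.task(queue='emails', retry=True, hard_timeout=60)
def send_report(address):
    print('sending report to', address)

send_report.delay('ops@example.com')  # enqueue the task instead of calling it inline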
def expire(self, time=None):
"""Remove expired items from the cache."""
if time is None:
time = self.__timer()
root = self.__root
curr = root.next
links = self.__links
cache_delitem = Cache.__delitem__
while curr is not root and curr.expire < time:
cache_delitem(self, curr.key)
del links[curr.key]
next = curr.next
curr.unlink()
curr = next | Remove expired items from the cache. | Below is the instruction that describes the task:
### Input:
Remove expired items from the cache.
### Response:
def expire(self, time=None):
"""Remove expired items from the cache."""
if time is None:
time = self.__timer()
root = self.__root
curr = root.next
links = self.__links
cache_delitem = Cache.__delitem__
while curr is not root and curr.expire < time:
cache_delitem(self, curr.key)
del links[curr.key]
next = curr.next
curr.unlink()
curr = next |
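expire() is normally driven by the cache itself; if this is cachetools' TTLCache (as the structure suggests), the public behaviour looks like this sketch:

import time
from cachetools import TTLCache

cache = TTLCache(maxsize=128, ttl=0.5)  # entries live for half a second
cache['token'] = 'abc123'
time.sleep(0.6)
cache.expire()           # drops the stale 'token' entry
print('token' in cache)  # False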
def load_yaml(path):
# type: (str) -> OrderedDict
"""Load YAML file into an ordered dictionary
Args:
path (str): Path to YAML file
Returns:
OrderedDict: Ordered dictionary containing loaded YAML file
"""
with open(path, 'rt') as f:
yamldict = yaml.load(f.read(), Loader=yamlloader.ordereddict.CSafeLoader)
if not yamldict:
raise (LoadError('YAML file: %s is empty!' % path))
return yamldict | Load YAML file into an ordered dictionary
Args:
path (str): Path to YAML file
Returns:
OrderedDict: Ordered dictionary containing loaded YAML file | Below is the instruction that describes the task:
### Input:
Load YAML file into an ordered dictionary
Args:
path (str): Path to YAML file
Returns:
OrderedDict: Ordered dictionary containing loaded YAML file
### Response:
def load_yaml(path):
# type: (str) -> OrderedDict
"""Load YAML file into an ordered dictionary
Args:
path (str): Path to YAML file
Returns:
OrderedDict: Ordered dictionary containing loaded YAML file
"""
with open(path, 'rt') as f:
yamldict = yaml.load(f.read(), Loader=yamlloader.ordereddict.CSafeLoader)
if not yamldict:
raise (LoadError('YAML file: %s is empty!' % path))
return yamldict |
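A quick usage sketch (the file name and contents are illustrative; assumes the module's yaml and yamlloader imports are in scope):

with open('config.yml', 'w') as f:
    f.write('name: demo\nversion: 1\n')

cfg = load_yaml('config.yml')
print(cfg['name'], cfg['version'])  # demo 1 -- insertion order of keys is preserved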
def update_privilege(self, obj, target):
'''Get privileges from metadata of the source in s3, and apply them to target'''
if 'privilege' in obj['Metadata']:
os.chmod(target, int(obj['Metadata']['privilege'], 8)) | Get privileges from metadata of the source in s3, and apply them to target | Below is the instruction that describes the task:
### Input:
Get privileges from metadata of the source in s3, and apply them to target
### Response:
def update_privilege(self, obj, target):
'''Get privileges from metadata of the source in s3, and apply them to target'''
if 'privilege' in obj['Metadata']:
os.chmod(target, int(obj['Metadata']['privilege'], 8)) |
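The one non-obvious step is the base-8 conversion: the mode is stored in S3 metadata as octal text, and int(value, 8) turns it into the integer os.chmod expects. A minimal sketch (the metadata value is invented):

privilege = '644'         # as stored in the S3 object's metadata
mode = int(privilege, 8)  # 0o644 == 420
# os.chmod('downloaded.file', mode)  # would apply rw-r--r-- to a real local path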
def find_children(self, tag=None, namespace=None):
"""Searches child nodes for objects with the desired tag/namespace.
Returns a list of extension elements within this object whose tag
and/or namespace match those passed in. To find all children in
a particular namespace, specify the namespace but not the tag name.
If you specify only the tag, the result list may contain extension
elements in multiple namespaces.
:param tag: str (optional) The desired tag
:param namespace: str (optional) The desired namespace
:return: A list of elements whose tag and/or namespace match the
parameters values
"""
results = []
if tag and namespace:
for element in self.children:
if element.tag == tag and element.namespace == namespace:
results.append(element)
elif tag and not namespace:
for element in self.children:
if element.tag == tag:
results.append(element)
elif namespace and not tag:
for element in self.children:
if element.namespace == namespace:
results.append(element)
else:
for element in self.children:
results.append(element)
return results | Searches child nodes for objects with the desired tag/namespace.
Returns a list of extension elements within this object whose tag
and/or namespace match those passed in. To find all children in
a particular namespace, specify the namespace but not the tag name.
If you specify only the tag, the result list may contain extension
elements in multiple namespaces.
:param tag: str (optional) The desired tag
:param namespace: str (optional) The desired namespace
:return: A list of elements whose tag and/or namespace match the
parameter values | Below is the instruction that describes the task:
### Input:
Searches child nodes for objects with the desired tag/namespace.
Returns a list of extension elements within this object whose tag
and/or namespace match those passed in. To find all children in
a particular namespace, specify the namespace but not the tag name.
If you specify only the tag, the result list may contain extension
elements in multiple namespaces.
:param tag: str (optional) The desired tag
:param namespace: str (optional) The desired namespace
:return: A list of elements whose tag and/or namespace match the
parameter values
### Response:
def find_children(self, tag=None, namespace=None):
"""Searches child nodes for objects with the desired tag/namespace.
Returns a list of extension elements within this object whose tag
and/or namespace match those passed in. To find all children in
a particular namespace, specify the namespace but not the tag name.
If you specify only the tag, the result list may contain extension
elements in multiple namespaces.
:param tag: str (optional) The desired tag
:param namespace: str (optional) The desired namespace
:return: A list of elements whose tag and/or namespace match the
parameter values
"""
results = []
if tag and namespace:
for element in self.children:
if element.tag == tag and element.namespace == namespace:
results.append(element)
elif tag and not namespace:
for element in self.children:
if element.tag == tag:
results.append(element)
elif namespace and not tag:
for element in self.children:
if element.namespace == namespace:
results.append(element)
else:
for element in self.children:
results.append(element)
return results |
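Because the method only reads self.children, a stand-in object is enough to sketch its behaviour when the snippet is treated as a free function (the element type below is invented for the demo):

from types import SimpleNamespace as Element  # stand-in for the real element class

root = Element(children=[
    Element(tag='link', namespace='http://www.w3.org/2005/Atom'),
    Element(tag='author', namespace='http://www.w3.org/2005/Atom'),
    Element(tag='totalResults', namespace='http://a9.com/-/spec/opensearch/1.1/'),
])
atom_only = find_children(root, namespace='http://www.w3.org/2005/Atom')
print([e.tag for e in atom_only])  # ['link', 'author']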
def samaccountname(self, base_dn, distinguished_name):
"""Retrieve the sAMAccountName for a specific DistinguishedName
:param str base_dn: The base DN to search within
:param list distinguished_name: The base DN to search within
:param list attributes: Object attributes to populate, defaults to all
:return: A populated ADUser object
:rtype: ADUser
"""
mappings = self.samaccountnames(base_dn, [distinguished_name])
try:
# Usually we will find a match, but perhaps not always
return mappings[distinguished_name]
except KeyError:
logging.info("%s - unable to retrieve object from AD by DistinguishedName",
distinguished_name) | Retrieve the sAMAccountName for a specific DistinguishedName
:param str base_dn: The base DN to search within
:param str distinguished_name: The DistinguishedName to resolve
:return: The sAMAccountName mapped to the DistinguishedName, or None if no match is found
:rtype: str | Below is the instruction that describes the task:
### Input:
Retrieve the sAMAccountName for a specific DistinguishedName
:param str base_dn: The base DN to search within
:param str distinguished_name: The DistinguishedName to resolve
:return: The sAMAccountName mapped to the DistinguishedName, or None if no match is found
:rtype: str
### Response:
def samaccountname(self, base_dn, distinguished_name):
"""Retrieve the sAMAccountName for a specific DistinguishedName
:param str base_dn: The base DN to search within
:param list distinguished_name: The base DN to search within
:param list attributes: Object attributes to populate, defaults to all
:return: A populated ADUser object
:rtype: ADUser
"""
mappings = self.samaccountnames(base_dn, [distinguished_name])
try:
# Usually we will find a match, but perhaps not always
return mappings[distinguished_name]
except KeyError:
logging.info("%s - unable to retrieve object from AD by DistinguishedName",
distinguished_name) |
def avail_locations(call=None):
'''
List all available locations
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
conn = get_conn(service='SoftLayer_Product_Package')
locations = conn.getLocations(id=50)
for location in locations:
ret[location['id']] = {
'id': location['id'],
'name': location['name'],
'location': location['longName'],
}
available = conn.getAvailableLocations(id=50)
for location in available:
if location.get('isAvailable', 0) == 0:
continue
ret[location['locationId']]['available'] = True
return ret | List all available locations | Below is the instruction that describes the task:
### Input:
List all available locations
### Response:
def avail_locations(call=None):
'''
List all available locations
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
conn = get_conn(service='SoftLayer_Product_Package')
locations = conn.getLocations(id=50)
for location in locations:
ret[location['id']] = {
'id': location['id'],
'name': location['name'],
'location': location['longName'],
}
available = conn.getAvailableLocations(id=50)
for location in available:
if location.get('isAvailable', 0) == 0:
continue
ret[location['locationId']]['available'] = True
return ret |
def validate_file(parser, arg):
"""Validates that `arg` is a valid file."""
if not os.path.isfile(arg):
parser.error("%s is not a file." % arg)
return arg | Validates that `arg` is a valid file. | Below is the instruction that describes the task:
### Input:
Validates that `arg` is a valid file.
### Response:
def validate_file(parser, arg):
"""Validates that `arg` is a valid file."""
if not os.path.isfile(arg):
parser.error("%s is not a file." % arg)
return arg |
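This helper is commonly wired into argparse as a type callback; a sketch, assuming validate_file is in scope (the file name is made up):

import argparse

parser = argparse.ArgumentParser(description='demo')
parser.add_argument('config', type=lambda p: validate_file(parser, p))
args = parser.parse_args(['settings.ini'])  # exits with an error unless settings.ini exists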
def ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y):
# type: (typing.Sequence[canmatrix.ArbitrationId], typing.Sequence[int], typing.Sequence[canmatrix.ArbitrationId], typing.Sequence[int]) -> typing.Iterable[typing.Tuple[canmatrix.ArbitrationId, canmatrix.ArbitrationId]]
"""Yield arbitration ids which has the same pgn."""
for id_a, pgn_a in zip(id_x, pgn_x):
for id_b, pgn_b in zip(id_y, pgn_y):
if pgn_a == pgn_b:
yield (id_a, id_b) | Yield arbitration ids which share the same pgn. | Below is the instruction that describes the task:
### Input:
Yield arbitration ids which share the same pgn.
### Response:
def ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y):
# type: (typing.Sequence[canmatrix.ArbitrationId], typing.Sequence[int], typing.Sequence[canmatrix.ArbitrationId], typing.Sequence[int]) -> typing.Iterable[typing.Tuple[canmatrix.ArbitrationId, canmatrix.ArbitrationId]]
"""Yield arbitration ids which has the same pgn."""
for id_a, pgn_a in zip(id_x, pgn_x):
for id_b, pgn_b in zip(id_y, pgn_y):
if pgn_a == pgn_b:
yield (id_a, id_b) |
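The pairing logic is independent of the arbitration-id type, so plain integers are enough to sketch it (values invented):

ids_a, pgns_a = [0x100, 0x200], [0xF004, 0xFECA]
ids_b, pgns_b = [0x300, 0x400], [0xFECA, 0xFEEE]
print(list(ids_sharing_same_pgn(ids_a, pgns_a, ids_b, pgns_b)))
# [(512, 768)] -- 0x200 and 0x300 both carry PGN 0xFECA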
def set_wheel_mode(self, ids):
""" Sets the specified motors to wheel mode. """
self.set_control_mode(dict(zip(ids, itertools.repeat('wheel')))) | Sets the specified motors to wheel mode. | Below is the instruction that describes the task:
### Input:
Sets the specified motors to wheel mode.
### Response:
def set_wheel_mode(self, ids):
""" Sets the specified motors to wheel mode. """
self.set_control_mode(dict(zip(ids, itertools.repeat('wheel')))) |
def server_bind(self):
"""Override of TCPServer.server_bind() that tracks bind-time assigned random ports."""
TCPServer.server_bind(self)
_, self.server_port = self.socket.getsockname()[:2] | Override of TCPServer.server_bind() that tracks bind-time assigned random ports. | Below is the instruction that describes the task:
### Input:
Override of TCPServer.server_bind() that tracks bind-time assigned random ports.
### Response:
def server_bind(self):
"""Override of TCPServer.server_bind() that tracks bind-time assigned random ports."""
TCPServer.server_bind(self)
_, self.server_port = self.socket.getsockname()[:2] |
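The same override, applied to a standalone socketserver.TCPServer, is a common way to capture an OS-assigned ephemeral port; a runnable sketch:

from socketserver import TCPServer, BaseRequestHandler

class EphemeralPortServer(TCPServer):
    def server_bind(self):
        TCPServer.server_bind(self)
        _, self.server_port = self.socket.getsockname()[:2]

server = EphemeralPortServer(('127.0.0.1', 0), BaseRequestHandler)  # port 0: let the OS pick
print(server.server_port)  # the real bound port, not 0
server.server_close()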
def find_repo_by_path(i):
"""
Input: {
path - path to repo
}
Output: {
return - return code = 0, if successful
16, if repo not found (may be warning)
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
}
"""
p=i['path']
if p!='': p=os.path.normpath(p)
found=False
if p==work['dir_default_repo']:
uoa=cfg['repo_name_default']
uid=cfg['repo_uid_default']
alias=uoa
found=True
elif p==work['dir_local_repo']:
uoa=cfg['repo_name_local']
uid=cfg['repo_uid_local']
alias=uoa
found=True
else:
r=reload_repo_cache({}) # Ignore errors
if r['return']>0: return r
for q in cache_repo_info:
qq=cache_repo_info[q]
if p==qq['dict'].get('path',''):
uoa=qq['data_uoa']
uid=qq['data_uid']
alias=uid
if not is_uid(uoa): alias=uoa
found=True
break
if not found:
return {'return':16, 'error': 'repository not found in this path'}
return {'return':0, 'repo_uoa': uoa, 'repo_uid': uid, 'repo_alias':alias} | Input: {
path - path to repo
}
Output: {
return - return code = 0, if successful
16, if repo not found (may be warning)
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
} | Below is the instruction that describes the task:
### Input:
Input: {
path - path to repo
}
Output: {
return - return code = 0, if successful
16, if repo not found (may be warning)
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
}
### Response:
def find_repo_by_path(i):
"""
Input: {
path - path to repo
}
Output: {
return - return code = 0, if successful
16, if repo not found (may be warning)
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
}
"""
p=i['path']
if p!='': p=os.path.normpath(p)
found=False
if p==work['dir_default_repo']:
uoa=cfg['repo_name_default']
uid=cfg['repo_uid_default']
alias=uoa
found=True
elif p==work['dir_local_repo']:
uoa=cfg['repo_name_local']
uid=cfg['repo_uid_local']
alias=uoa
found=True
else:
r=reload_repo_cache({}) # Ignore errors
if r['return']>0: return r
for q in cache_repo_info:
qq=cache_repo_info[q]
if p==qq['dict'].get('path',''):
uoa=qq['data_uoa']
uid=qq['data_uid']
alias=uid
if not is_uid(uoa): alias=uoa
found=True
break
if not found:
return {'return':16, 'error': 'repository not found in this path'}
return {'return':0, 'repo_uoa': uoa, 'repo_uid': uid, 'repo_alias':alias} |
def tokenize_annotated(doc, annotation):
"""Tokenize a document and add an annotation attribute to each token
"""
tokens = tokenize(doc, include_hrefs=False)
for tok in tokens:
tok.annotation = annotation
return tokens | Tokenize a document and add an annotation attribute to each token | Below is the instruction that describes the task:
### Input:
Tokenize a document and add an annotation attribute to each token
### Response:
def tokenize_annotated(doc, annotation):
"""Tokenize a document and add an annotation attribute to each token
"""
tokens = tokenize(doc, include_hrefs=False)
for tok in tokens:
tok.annotation = annotation
return tokens |
def create_cfg_segment(filename, filecontent, description, auth, url):
"""
Takes a str into var filecontent which represents the entire content of a configuration
segment, or partial configuration file. Takes a str into var description which represents the
description of the configuration segment
:param filename: str containing the name of the configuration segment.
:param filecontent: str containing the entire contents of the configuration segment
:param description: str containing the description of the configuration segment
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: If successful, Boolean of type True
:rtype: Boolean
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.icc import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> filecontent = 'sample file content'
>>> create_new_file = create_cfg_segment('CW7SNMP.cfg',
filecontent,
'My New Template',
auth.creds,
auth.url)
>>> template_id = get_template_id('CW7SNMP.cfg', auth.creds, auth.url)
>>> assert type(template_id) is str
>>>
"""
payload = {"confFileName": filename,
"confFileType": "2",
"cfgFileParent": "-1",
"confFileDesc": description,
"content": filecontent}
f_url = url + "/imcrs/icc/confFile"
response = requests.post(f_url, data=(json.dumps(payload)), auth=auth, headers=HEADERS)
try:
if response.status_code == 201:
print("Template successfully created")
return response.status_code
elif response.status_code != 201:
return response.status_code
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " create_cfg_segment: An Error has occured" | Takes a str into var filecontent which represents the entire content of a configuration
segment, or partial configuration file. Takes a str into var description which represents the
description of the configuration segment
:param filename: str containing the name of the configuration segment.
:param filecontent: str containing the entire contents of the configuration segment
:param description: str containing the description of the configuration segment
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: If successful, Boolean of type True
:rtype: Boolean
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.icc import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> filecontent = 'sample file content'
>>> create_new_file = create_cfg_segment('CW7SNMP.cfg',
filecontent,
'My New Template',
auth.creds,
auth.url)
>>> template_id = get_template_id('CW7SNMP.cfg', auth.creds, auth.url)
>>> assert type(template_id) is str
>>> | Below is the the instruction that describes the task:
### Input:
Takes a str into var filecontent which represents the entire content of a configuration
segment, or partial configuration file. Takes a str into var description which represents the
description of the configuration segment
:param filename: str containing the name of the configuration segment.
:param filecontent: str containing the entire contents of the configuration segment
:param description: str containing the description of the configuration segment
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: If successful, Boolean of type True
:rtype: Boolean
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.icc import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> filecontent = 'sample file content'
>>> create_new_file = create_cfg_segment('CW7SNMP.cfg',
filecontent,
'My New Template',
auth.creds,
auth.url)
>>> template_id = get_template_id('CW7SNMP.cfg', auth.creds, auth.url)
>>> assert type(template_id) is str
>>>
### Response:
def create_cfg_segment(filename, filecontent, description, auth, url):
"""
Takes a str into var filecontent which represents the entire content of a configuration
segment, or partial configuration file. Takes a str into var description which represents the
description of the configuration segment
:param filename: str containing the name of the configuration segment.
:param filecontent: str containing the entire contents of the configuration segment
:param description: str containing the description of the configuration segment
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: If successful, Boolean of type True
:rtype: Boolean
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.icc import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> filecontent = 'sample file content'
>>> create_new_file = create_cfg_segment('CW7SNMP.cfg',
filecontent,
'My New Template',
auth.creds,
auth.url)
>>> template_id = get_template_id('CW7SNMP.cfg', auth.creds, auth.url)
>>> assert type(template_id) is str
>>>
"""
payload = {"confFileName": filename,
"confFileType": "2",
"cfgFileParent": "-1",
"confFileDesc": description,
"content": filecontent}
f_url = url + "/imcrs/icc/confFile"
response = requests.post(f_url, data=(json.dumps(payload)), auth=auth, headers=HEADERS)
try:
if response.status_code == 201:
print("Template successfully created")
return response.status_code
elif response.status_code != 201:
return response.status_code
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " create_cfg_segment: An Error has occured" |
def create_new_label_by_content_id(self, content_id, label_names, callback=None):
"""
Adds a list of labels to the specified content.
:param content_id (string): A string containing the id of the labels content container.
:param label_names (list): A list of label dicts, each with "prefix" and "name" keys, to apply to the content.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the content/{id}/label endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
"""
assert isinstance(label_names, list)
assert all(isinstance(ln, dict) and set(ln.keys()) == {"prefix", "name"} for ln in label_names)
return self._service_post_request("rest/api/content/{id}/label".format(id=content_id),
data=json.dumps(label_names), headers={"Content-Type": "application/json"},
callback=callback) | Adds a list of labels to the specified content.
:param content_id (string): A string containing the id of the labels content container.
:param label_names (list): A list of label dicts, each with "prefix" and "name" keys, to apply to the content.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the content/{id}/label endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially. | Below is the instruction that describes the task:
### Input:
Adds a list of labels to the specified content.
:param content_id (string): A string containing the id of the labels content container.
:param label_names (list): A list of label dicts, each with "prefix" and "name" keys, to apply to the content.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the content/{id}/label endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
### Response:
def create_new_label_by_content_id(self, content_id, label_names, callback=None):
"""
Adds a list of labels to the specified content.
:param content_id (string): A string containing the id of the labels content container.
:param label_names (list): A list of label dicts, each with "prefix" and "name" keys, to apply to the content.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the content/{id}/label endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
"""
assert isinstance(label_names, list)
assert all(isinstance(ln, dict) and set(ln.keys()) == {"prefix", "name"} for ln in label_names)
return self._service_post_request("rest/api/content/{id}/label".format(id=content_id),
data=json.dumps(label_names), headers={"Content-Type": "application/json"},
callback=callback) |
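Given the assertion above, each label must be a dict with exactly the "prefix" and "name" keys; a call sketch (the client instance, content id, and label values are hypothetical):

labels = [{'prefix': 'global', 'name': 'release-notes'},
          {'prefix': 'global', 'name': 'reviewed'}]
client.create_new_label_by_content_id('123456', labels)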
def urlToIds(url):
"""
Resolve a ``join.skype.com`` URL and return various identifiers for the group conversation.
Args:
url (str): public join URL, or identifier from it
Returns:
dict: related conversation's identifiers -- keys: ``id``, ``long``, ``blob``
"""
urlId = url.split("/")[-1]
convUrl = "https://join.skype.com/api/v2/conversation/"
json = SkypeConnection.externalCall("POST", convUrl, json={"shortId": urlId, "type": "wl"}).json()
return {"id": json.get("Resource"),
"long": json.get("Id"),
"blob": json.get("ChatBlob")} | Resolve a ``join.skype.com`` URL and returns various identifiers for the group conversation.
Args:
url (str): public join URL, or identifier from it
Returns:
dict: related conversation's identifiers -- keys: ``id``, ``long``, ``blob`` | Below is the instruction that describes the task:
### Input:
Resolve a ``join.skype.com`` URL and return various identifiers for the group conversation.
Args:
url (str): public join URL, or identifier from it
Returns:
dict: related conversation's identifiers -- keys: ``id``, ``long``, ``blob``
### Response:
def urlToIds(url):
"""
Resolve a ``join.skype.com`` URL and return various identifiers for the group conversation.
Args:
url (str): public join URL, or identifier from it
Returns:
dict: related conversation's identifiers -- keys: ``id``, ``long``, ``blob``
"""
urlId = url.split("/")[-1]
convUrl = "https://join.skype.com/api/v2/conversation/"
json = SkypeConnection.externalCall("POST", convUrl, json={"shortId": urlId, "type": "wl"}).json()
return {"id": json.get("Resource"),
"long": json.get("Id"),
"blob": json.get("ChatBlob")} |
def copyCurrentLayout(self, sourceViewSUID, targetViewSUID, body, verbose=None):
"""
Copy one network view layout onto another, setting the node location and view scale to match. This makes visually comparing networks simple.
:param sourceViewSUID: Source network view SUID (or "current")
:param targetViewSUID: Target network view SUID (or "current")
:param body: Clone the specified network view layout onto another network view -- Not required, can be None
:param verbose: print more
:returns: 200: successful operation; 404: Network View does not exist
"""
response=api(url=self.___url+'apply/layouts/copycat/'+str(sourceViewSUID)+'/'+str(targetViewSUID)+'', method="PUT", body=body, verbose=verbose)
return response | Copy one network view layout onto another, setting the node location and view scale to match. This makes visually comparing networks simple.
:param sourceViewSUID: Source network view SUID (or "current")
:param targetViewSUID: Target network view SUID (or "current")
:param body: Clone the specified network view layout onto another network view -- Not required, can be None
:param verbose: print more
:returns: 200: successful operation; 404: Network View does not exist | Below is the instruction that describes the task:
### Input:
Copy one network view layout onto another, setting the node location and view scale to match. This makes visually comparing networks simple.
:param sourceViewSUID: Source network view SUID (or "current")
:param targetViewSUID: Target network view SUID (or "current")
:param body: Clone the specified network view layout onto another network view -- Not required, can be None
:param verbose: print more
:returns: 200: successful operation; 404: Network View does not exist
### Response:
def copyCurrentLayout(self, sourceViewSUID, targetViewSUID, body, verbose=None):
"""
Copy one network view layout onto another, setting the node location and view scale to match. This makes visually comparing networks simple.
:param sourceViewSUID: Source network view SUID (or "current")
:param targetViewSUID: Target network view SUID (or "current")
:param body: Clone the specified network view layout onto another network view -- Not required, can be None
:param verbose: print more
:returns: 200: successful operation; 404: Network View does not exist
"""
response=api(url=self.___url+'apply/layouts/copycat/'+str(sourceViewSUID)+'/'+str(targetViewSUID)+'', method="PUT", body=body, verbose=verbose)
return response |
def log_error(msg, logger="TaskLogger"):
"""Log an ERROR message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger
"""
tasklogger = get_tasklogger(logger)
tasklogger.error(msg)
return tasklogger | Log an ERROR message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger | Below is the instruction that describes the task:
### Input:
Log an ERROR message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger
### Response:
def log_error(msg, logger="TaskLogger"):
"""Log an ERROR message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger
"""
tasklogger = get_tasklogger(logger)
tasklogger.error(msg)
return tasklogger |
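A usage sketch, assuming this function is exposed at package level as in the tasklogger library:

import tasklogger  # assumption: the package that exposes log_error

tasklogger.log_error('failed to converge after 100 iterations')
# routes through the shared "TaskLogger" instance and returns it for chaining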
def remove_duplicates(vector_tuple):
"""
Remove duplicate rows from N equally-sized arrays
"""
array = np.column_stack(vector_tuple)
a = np.ascontiguousarray(array)
unique_a = np.unique(a.view([('', a.dtype)]*a.shape[1]))
b = unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
return list(b.T) | Remove duplicate rows from N equally-sized arrays | Below is the instruction that describes the task:
### Input:
Remove duplicate rows from N equally-sized arrays
### Response:
def remove_duplicates(vector_tuple):
"""
Remove duplicate rows from N equally-sized arrays
"""
array = np.column_stack(vector_tuple)
a = np.ascontiguousarray(array)
unique_a = np.unique(a.view([('', a.dtype)]*a.shape[1]))
b = unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
return list(b.T) |
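A small demo of the row-wise deduplication (values invented; assumes numpy and the function are in scope):

import numpy as np

x = np.array([1, 1, 2, 2])
y = np.array([3, 3, 4, 5])
ux, uy = remove_duplicates((x, y))
print(ux, uy)  # [1 2 2] [3 4 5] -- the repeated row (1, 3) collapsed to one entry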
def update_storage_policy(policy, policy_dict, service_instance=None):
'''
Updates a storage policy.
Supported capability types: scalar, set, range.
policy
Name of the policy to update.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.update_storage_policy policy='policy name'
policy_dict="$policy_dict"
'''
log.trace('updating storage policy, dict = %s', policy_dict)
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy])
if not policies:
raise VMwareObjectRetrievalError('Policy \'{0}\' was not found'
''.format(policy))
policy_ref = policies[0]
policy_update_spec = pbm.profile.CapabilityBasedProfileUpdateSpec()
log.trace('Setting policy values in policy_update_spec')
for prop in ['description', 'constraints']:
setattr(policy_update_spec, prop, getattr(policy_ref, prop))
_apply_policy_config(policy_update_spec, policy_dict)
salt.utils.pbm.update_storage_policy(profile_manager, policy_ref,
policy_update_spec)
return {'update_storage_policy': True} | Updates a storage policy.
Supported capability types: scalar, set, range.
policy
Name of the policy to update.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.update_storage_policy policy='policy name'
policy_dict="$policy_dict" | Below is the instruction that describes the task:
### Input:
Updates a storage policy.
Supported capability types: scalar, set, range.
policy
Name of the policy to update.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.update_storage_policy policy='policy name'
policy_dict="$policy_dict"
### Response:
def update_storage_policy(policy, policy_dict, service_instance=None):
'''
Updates a storage policy.
Supported capability types: scalar, set, range.
policy
Name of the policy to update.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.update_storage_policy policy='policy name'
policy_dict="$policy_dict"
'''
log.trace('updating storage policy, dict = %s', policy_dict)
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy])
if not policies:
raise VMwareObjectRetrievalError('Policy \'{0}\' was not found'
''.format(policy))
policy_ref = policies[0]
policy_update_spec = pbm.profile.CapabilityBasedProfileUpdateSpec()
log.trace('Setting policy values in policy_update_spec')
for prop in ['description', 'constraints']:
setattr(policy_update_spec, prop, getattr(policy_ref, prop))
_apply_policy_config(policy_update_spec, policy_dict)
salt.utils.pbm.update_storage_policy(profile_manager, policy_ref,
policy_update_spec)
return {'update_storage_policy': True} |
def _hash(self, obj, parent, parents_ids=EMPTY_FROZENSET):
"""The main diff method"""
try:
result = self[obj]
except (TypeError, KeyError):
pass
else:
return result
result = not_hashed
if self._skip_this(obj, parent):
return
elif obj is None:
result = 'NONE'
elif isinstance(obj, strings):
result = prepare_string_for_hashing(
obj, ignore_string_type_changes=self.ignore_string_type_changes,
ignore_string_case=self.ignore_string_case)
elif isinstance(obj, numbers):
result = self._prep_number(obj)
elif isinstance(obj, MutableMapping):
result = self._prep_dict(obj=obj, parent=parent, parents_ids=parents_ids)
elif isinstance(obj, tuple):
result = self._prep_tuple(obj=obj, parent=parent, parents_ids=parents_ids)
elif isinstance(obj, Iterable):
result = self._prep_iterable(obj=obj, parent=parent, parents_ids=parents_ids)
else:
result = self._prep_obj(obj=obj, parent=parent, parents_ids=parents_ids)
if result is not_hashed: # pragma: no cover
self[UNPROCESSED].append(obj)
elif result is unprocessed:
pass
elif self.apply_hash:
if isinstance(obj, strings):
result_cleaned = result
else:
result_cleaned = prepare_string_for_hashing(
result, ignore_string_type_changes=self.ignore_string_type_changes,
ignore_string_case=self.ignore_string_case)
result = self.hasher(result_cleaned)
# It is important to keep the hash of all objects.
# The hashes will be later used for comparing the objects.
try:
self[obj] = result
except TypeError:
obj_id = get_id(obj)
self[obj_id] = result
return result | The main diff method | Below is the instruction that describes the task:
### Input:
The main diff method
### Response:
def _hash(self, obj, parent, parents_ids=EMPTY_FROZENSET):
"""The main diff method"""
try:
result = self[obj]
except (TypeError, KeyError):
pass
else:
return result
result = not_hashed
if self._skip_this(obj, parent):
return
elif obj is None:
result = 'NONE'
elif isinstance(obj, strings):
result = prepare_string_for_hashing(
obj, ignore_string_type_changes=self.ignore_string_type_changes,
ignore_string_case=self.ignore_string_case)
elif isinstance(obj, numbers):
result = self._prep_number(obj)
elif isinstance(obj, MutableMapping):
result = self._prep_dict(obj=obj, parent=parent, parents_ids=parents_ids)
elif isinstance(obj, tuple):
result = self._prep_tuple(obj=obj, parent=parent, parents_ids=parents_ids)
elif isinstance(obj, Iterable):
result = self._prep_iterable(obj=obj, parent=parent, parents_ids=parents_ids)
else:
result = self._prep_obj(obj=obj, parent=parent, parents_ids=parents_ids)
if result is not_hashed: # pragma: no cover
self[UNPROCESSED].append(obj)
elif result is unprocessed:
pass
elif self.apply_hash:
if isinstance(obj, strings):
result_cleaned = result
else:
result_cleaned = prepare_string_for_hashing(
result, ignore_string_type_changes=self.ignore_string_type_changes,
ignore_string_case=self.ignore_string_case)
result = self.hasher(result_cleaned)
# It is important to keep the hash of all objects.
# The hashes will be later used for comparing the objects.
try:
self[obj] = result
except TypeError:
obj_id = get_id(obj)
self[obj_id] = result
return result |
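_hash is internal dispatch; if this is deepdiff's DeepHash (as the naming and structure suggest), the public entry point looks like this sketch:

from deepdiff import DeepHash  # assumption: the library this method belongs to

obj = {'a': [1, 2, {'b': 'text'}]}
hashes = DeepHash(obj)
print(hashes[obj])  # a stable content hash, independent of dict key order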
def _format_firewall_stdout(cmd_ret):
'''
Helper function to format the stdout from the get_firewall_status function.
cmd_ret
The return dictionary that comes from a cmd.run_all call.
'''
ret_dict = {'success': True,
'rulesets': {}}
for line in cmd_ret['stdout'].splitlines():
if line.startswith('Name'):
continue
if line.startswith('---'):
continue
ruleset_status = line.split()
ret_dict['rulesets'][ruleset_status[0]] = (ruleset_status[1].lower() == 'true')  # the column is 'true'/'false' text; bool() of a non-empty string is always True
return ret_dict | Helper function to format the stdout from the get_firewall_status function.
cmd_ret
The return dictionary that comes from a cmd.run_all call. | Below is the instruction that describes the task:
### Input:
Helper function to format the stdout from the get_firewall_status function.
cmd_ret
The return dictionary that comes from a cmd.run_all call.
### Response:
def _format_firewall_stdout(cmd_ret):
'''
Helper function to format the stdout from the get_firewall_status function.
cmd_ret
The return dictionary that comes from a cmd.run_all call.
'''
ret_dict = {'success': True,
'rulesets': {}}
for line in cmd_ret['stdout'].splitlines():
if line.startswith('Name'):
continue
if line.startswith('---'):
continue
ruleset_status = line.split()
ret_dict['rulesets'][ruleset_status[0]] = (ruleset_status[1].lower() == 'true')  # the column is 'true'/'false' text; bool() of a non-empty string is always True
return ret_dict |
def _array_setitem_with_key_seq(self, array_name, index, key_seq, value):
"""
Sets the array value in the TOML file located by the given key sequence.
Example:
self._array_setitem(array_name, index, ('key1', 'key2', 'key3'), 'text_value')
is equivalent to doing
self.array(array_name)[index]['key1']['key2']['key3'] = 'text_value'
"""
table = self.array(array_name)[index]
key_so_far = tuple()
for key in key_seq[:-1]:
key_so_far += (key,)
new_table = self._array_make_sure_table_exists(array_name, index, key_so_far)
if new_table is not None:
table = new_table
else:
table = table[key]
table[key_seq[-1]] = value | Sets the array value in the TOML file located by the given key sequence.
Example:
self._array_setitem(array_name, index, ('key1', 'key2', 'key3'), 'text_value')
is equivalent to doing
self.array(array_name)[index]['key1']['key2']['key3'] = 'text_value' | Below is the instruction that describes the task:
### Input:
Sets the array value in the TOML file located by the given key sequence.
Example:
self._array_setitem(array_name, index, ('key1', 'key2', 'key3'), 'text_value')
is equivalent to doing
self.array(array_name)[index]['key1']['key2']['key3'] = 'text_value'
### Response:
def _array_setitem_with_key_seq(self, array_name, index, key_seq, value):
"""
Sets the array value in the TOML file located by the given key sequence.
Example:
self._array_setitem(array_name, index, ('key1', 'key2', 'key3'), 'text_value')
is equivalent to doing
self.array(array_name)[index]['key1']['key2']['key3'] = 'text_value'
"""
table = self.array(array_name)[index]
key_so_far = tuple()
for key in key_seq[:-1]:
key_so_far += (key,)
new_table = self._array_make_sure_table_exists(array_name, index, key_so_far)
if new_table is not None:
table = new_table
else:
table = table[key]
table[key_seq[-1]] = value |
def get_turicreate_object_type(url):
'''
Given a url where a Turi Create object is persisted, return the Turi
Create object type: 'model', 'graph', 'sframe', or 'sarray'
'''
from .._connect import main as _glconnect
ret = _glconnect.get_unity().get_turicreate_object_type(_make_internal_url(url))
# to be consistent, we use sgraph instead of graph here
if ret == 'graph':
ret = 'sgraph'
return ret | Given a url where a Turi Create object is persisted, return the Turi
Create object type: 'model', 'graph', 'sframe', or 'sarray' | Below is the instruction that describes the task:
### Input:
Given a url where a Turi Create object is persisted, return the Turi
Create object type: 'model', 'graph', 'sframe', or 'sarray'
### Response:
def get_turicreate_object_type(url):
'''
Given a url where a Turi Create object is persisted, return the Turi
Create object type: 'model', 'graph', 'sframe', or 'sarray'
'''
from .._connect import main as _glconnect
ret = _glconnect.get_unity().get_turicreate_object_type(_make_internal_url(url))
# to be consistent, we use sgraph instead of graph here
if ret == 'graph':
ret = 'sgraph'
return ret |
def unapply_patch(self, patch_name, force=False):
""" Unapply patches up to patch_name. patch_name will end up as top
patch """
self._check(force)
patches = self.db.patches_after(Patch(patch_name))
for patch in reversed(patches):
self._unapply_patch(patch)
self.db.save()
self.unapplied(self.db.top_patch()) | Unapply patches up to patch_name. patch_name will end up as top
patch | Below is the instruction that describes the task:
### Input:
Unapply patches up to patch_name. patch_name will end up as top
patch
### Response:
def unapply_patch(self, patch_name, force=False):
""" Unapply patches up to patch_name. patch_name will end up as top
patch """
self._check(force)
patches = self.db.patches_after(Patch(patch_name))
for patch in reversed(patches):
self._unapply_patch(patch)
self.db.save()
self.unapplied(self.db.top_patch()) |
def compile_mako_files(self, app_config):
'''Compiles the Mako templates within the apps of this system'''
# go through the files in the templates, scripts, and styles directories
for subdir_name in self.SEARCH_DIRS:
subdir = subdir_name.format(
app_path=app_config.path,
app_name=app_config.name,
)
def recurse_path(path):
self.message('searching for Mako templates in {}'.format(path), 1)
if os.path.exists(path):
for filename in os.listdir(path):
filepath = os.path.join(path, filename)
_, ext = os.path.splitext(filename)
if filename.startswith('__'): # __dmpcache__, __pycache__
continue
elif os.path.isdir(filepath):
recurse_path(filepath)
elif ext.lower() in ( '.htm', '.html', '.mako' ):
# create the template object, which creates the compiled .py file
self.message('compiling {}'.format(filepath), 2)
try:
get_template_for_path(filepath)
except TemplateSyntaxError:
if not self.options.get('ignore_template_errors'):
raise
        recurse_path(subdir) | Compiles the Mako templates within the apps of this system | Below is the instruction that describes the task:
### Input:
Compiles the Mako templates within the apps of this system
### Response:
def compile_mako_files(self, app_config):
'''Compiles the Mako templates within the apps of this system'''
# go through the files in the templates, scripts, and styles directories
for subdir_name in self.SEARCH_DIRS:
subdir = subdir_name.format(
app_path=app_config.path,
app_name=app_config.name,
)
def recurse_path(path):
self.message('searching for Mako templates in {}'.format(path), 1)
if os.path.exists(path):
for filename in os.listdir(path):
filepath = os.path.join(path, filename)
_, ext = os.path.splitext(filename)
if filename.startswith('__'): # __dmpcache__, __pycache__
continue
elif os.path.isdir(filepath):
recurse_path(filepath)
elif ext.lower() in ( '.htm', '.html', '.mako' ):
# create the template object, which creates the compiled .py file
self.message('compiling {}'.format(filepath), 2)
try:
get_template_for_path(filepath)
except TemplateSyntaxError:
if not self.options.get('ignore_template_errors'):
raise
recurse_path(subdir) |
def orientation(self, value):
'''setter of orientation property.'''
for values in self.__orientation:
if value in values:
# can not set upside-down until api level 18.
self.server.jsonrpc.setOrientation(values[1])
break
else:
            raise ValueError("Invalid orientation.") | setter of orientation property. | Below is the instruction that describes the task:
### Input:
setter of orientation property.
### Response:
def orientation(self, value):
'''setter of orientation property.'''
for values in self.__orientation:
if value in values:
# can not set upside-down until api level 18.
self.server.jsonrpc.setOrientation(values[1])
break
else:
raise ValueError("Invalid orientation.") |
def drawcircle(self, x, y, r = 10, colour = None, label = None):
"""
Draws a circle centered on (x, y) with radius r. All these are in the coordinates of your initial image !
You give these x and y in the usual ds9 pixels, (0,0) is bottom left.
        I will convert this into the right PIL coordinates.
"""
self.checkforpilimage()
colour = self.defaultcolour(colour)
self.changecolourmode(colour)
self.makedraw()
(pilx, pily) = self.pilcoords((x,y))
pilr = self.pilscale(r)
self.draw.ellipse([(pilx-pilr+1, pily-pilr+1), (pilx+pilr+1, pily+pilr+1)], outline = colour)
if label != None:
            # Then we write it:
self.loadlabelfont()
textwidth = self.draw.textsize(label, font = self.labelfont)[0]
self.draw.text((pilx - float(textwidth)/2.0 + 2, pily + pilr + 4), label, fill = colour, font = self.labelfont) | Draws a circle centered on (x, y) with radius r. All these are in the coordinates of your initial image !
You give these x and y in the usual ds9 pixels, (0,0) is bottom left.
I will convert this into the right PIL coordinates. | Below is the instruction that describes the task:
### Input:
Draws a circle centered on (x, y) with radius r. All these are in the coordinates of your initial image !
You give these x and y in the usual ds9 pixels, (0,0) is bottom left.
I will convert this into the right PIL coordinates.
### Response:
def drawcircle(self, x, y, r = 10, colour = None, label = None):
"""
Draws a circle centered on (x, y) with radius r. All these are in the coordinates of your initial image !
You give these x and y in the usual ds9 pixels, (0,0) is bottom left.
        I will convert this into the right PIL coordinates.
"""
self.checkforpilimage()
colour = self.defaultcolour(colour)
self.changecolourmode(colour)
self.makedraw()
(pilx, pily) = self.pilcoords((x,y))
pilr = self.pilscale(r)
self.draw.ellipse([(pilx-pilr+1, pily-pilr+1), (pilx+pilr+1, pily+pilr+1)], outline = colour)
if label != None:
            # Then we write it:
self.loadlabelfont()
textwidth = self.draw.textsize(label, font = self.labelfont)[0]
self.draw.text((pilx - float(textwidth)/2.0 + 2, pily + pilr + 4), label, fill = colour, font = self.labelfont) |
def get_context(self, data, accepted_media_type, renderer_context):
"""
Returns the context used to render.
"""
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
renderer = self.get_default_renderer(view)
raw_data_post_form = self.get_raw_data_form(data, view, 'POST', request)
raw_data_put_form = self.get_raw_data_form(data, view, 'PUT', request)
raw_data_patch_form = self.get_raw_data_form(data, view, 'PATCH', request)
raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
response_headers = OrderedDict(sorted(response.items()))
renderer_content_type = ''
if renderer:
renderer_content_type = '%s' % renderer.media_type
if renderer.charset:
renderer_content_type += ' ;%s' % renderer.charset
response_headers['Content-Type'] = renderer_content_type
if getattr(view, 'paginator', None) and view.paginator.display_page_controls:
paginator = view.paginator
else:
paginator = None
context = {
'content': self.get_content(renderer, data, accepted_media_type, renderer_context),
'view': view,
'request': request,
'response': response,
'description': self.get_description(view, response.status_code),
'name': self.get_name(view),
'version': VERSION,
'paginator': paginator,
'breadcrumblist': self.get_breadcrumbs(request),
'allowed_methods': view.allowed_methods,
'available_formats': [renderer_cls.format for renderer_cls in view.renderer_classes],
'response_headers': response_headers,
'put_form': self.get_rendered_html_form(data, view, 'PUT', request),
'post_form': self.get_rendered_html_form(data, view, 'POST', request),
'delete_form': self.get_rendered_html_form(data, view, 'DELETE', request),
'options_form': self.get_rendered_html_form(data, view, 'OPTIONS', request),
'filter_form': self.get_filter_form(data, view, request),
'raw_data_put_form': raw_data_put_form,
'raw_data_post_form': raw_data_post_form,
'raw_data_patch_form': raw_data_patch_form,
'raw_data_put_or_patch_form': raw_data_put_or_patch_form,
'display_edit_forms': bool(response.status_code != 403),
'api_settings': api_settings
}
        return context | Returns the context used to render. | Below is the instruction that describes the task:
### Input:
Returns the context used to render.
### Response:
def get_context(self, data, accepted_media_type, renderer_context):
"""
Returns the context used to render.
"""
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
renderer = self.get_default_renderer(view)
raw_data_post_form = self.get_raw_data_form(data, view, 'POST', request)
raw_data_put_form = self.get_raw_data_form(data, view, 'PUT', request)
raw_data_patch_form = self.get_raw_data_form(data, view, 'PATCH', request)
raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
response_headers = OrderedDict(sorted(response.items()))
renderer_content_type = ''
if renderer:
renderer_content_type = '%s' % renderer.media_type
if renderer.charset:
renderer_content_type += ' ;%s' % renderer.charset
response_headers['Content-Type'] = renderer_content_type
if getattr(view, 'paginator', None) and view.paginator.display_page_controls:
paginator = view.paginator
else:
paginator = None
context = {
'content': self.get_content(renderer, data, accepted_media_type, renderer_context),
'view': view,
'request': request,
'response': response,
'description': self.get_description(view, response.status_code),
'name': self.get_name(view),
'version': VERSION,
'paginator': paginator,
'breadcrumblist': self.get_breadcrumbs(request),
'allowed_methods': view.allowed_methods,
'available_formats': [renderer_cls.format for renderer_cls in view.renderer_classes],
'response_headers': response_headers,
'put_form': self.get_rendered_html_form(data, view, 'PUT', request),
'post_form': self.get_rendered_html_form(data, view, 'POST', request),
'delete_form': self.get_rendered_html_form(data, view, 'DELETE', request),
'options_form': self.get_rendered_html_form(data, view, 'OPTIONS', request),
'filter_form': self.get_filter_form(data, view, request),
'raw_data_put_form': raw_data_put_form,
'raw_data_post_form': raw_data_post_form,
'raw_data_patch_form': raw_data_patch_form,
'raw_data_put_or_patch_form': raw_data_put_or_patch_form,
'display_edit_forms': bool(response.status_code != 403),
'api_settings': api_settings
}
return context |
def lookups(self):
"""
Access the Lookups Twilio Domain
:returns: Lookups Twilio Domain
:rtype: twilio.rest.lookups.Lookups
"""
if self._lookups is None:
from twilio.rest.lookups import Lookups
self._lookups = Lookups(self)
return self._lookups | Access the Lookups Twilio Domain
:returns: Lookups Twilio Domain
:rtype: twilio.rest.lookups.Lookups | Below is the instruction that describes the task:
### Input:
Access the Lookups Twilio Domain
:returns: Lookups Twilio Domain
:rtype: twilio.rest.lookups.Lookups
### Response:
def lookups(self):
"""
Access the Lookups Twilio Domain
:returns: Lookups Twilio Domain
:rtype: twilio.rest.lookups.Lookups
"""
if self._lookups is None:
from twilio.rest.lookups import Lookups
self._lookups = Lookups(self)
return self._lookups |
def register_validator(flag_name,
checker,
message='Flag validation failed',
flag_values=_flagvalues.FLAGS):
"""Adds a constraint, which will be enforced during program execution.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_name: str, name of the flag to be checked.
checker: callable, a function to validate the flag.
input - A single positional argument: The value of the corresponding
flag (string, boolean, etc. This value will be passed to checker
by the library).
output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise flags.ValidationError(desired_error_message).
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Raises:
AttributeError: Raised when flag_name is not registered as a valid flag
name.
"""
v = SingleFlagValidator(flag_name, checker, message)
_add_validator(flag_values, v) | Adds a constraint, which will be enforced during program execution.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_name: str, name of the flag to be checked.
checker: callable, a function to validate the flag.
input - A single positional argument: The value of the corresponding
flag (string, boolean, etc. This value will be passed to checker
by the library).
output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise flags.ValidationError(desired_error_message).
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Raises:
AttributeError: Raised when flag_name is not registered as a valid flag
name. | Below is the instruction that describes the task:
### Input:
Adds a constraint, which will be enforced during program execution.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_name: str, name of the flag to be checked.
checker: callable, a function to validate the flag.
input - A single positional argument: The value of the corresponding
flag (string, boolean, etc. This value will be passed to checker
by the library).
output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise flags.ValidationError(desired_error_message).
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Raises:
AttributeError: Raised when flag_name is not registered as a valid flag
name.
### Response:
def register_validator(flag_name,
checker,
message='Flag validation failed',
flag_values=_flagvalues.FLAGS):
"""Adds a constraint, which will be enforced during program execution.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_name: str, name of the flag to be checked.
checker: callable, a function to validate the flag.
input - A single positional argument: The value of the corresponding
flag (string, boolean, etc. This value will be passed to checker
by the library).
output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise flags.ValidationError(desired_error_message).
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Raises:
AttributeError: Raised when flag_name is not registered as a valid flag
name.
"""
v = SingleFlagValidator(flag_name, checker, message)
_add_validator(flag_values, v) |
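A minimal usage sketch, assuming the standard absl-style flags API this function belongs to; the flag name and bounds are illustrative:
from absl import app, flags
flags.DEFINE_integer('port', 8080, 'Port to listen on.')
flags.register_validator(
    'port',
    lambda value: 0 < value < 65536,
    message='--port must be in (0, 65536)')
def main(argv):
    del argv  # unused
    print('serving on port', flags.FLAGS.port)
if __name__ == '__main__':
    app.run(main)  # flag parsing triggers the validator; bad values abort with the message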
def calcDistMatchArr(matchArr, tKey, mKey):
"""Calculate the euclidean distance of all array positions in "matchArr".
:param matchArr: a dictionary of ``numpy.arrays`` containing at least two
entries that are treated as cartesian coordinates.
:param tKey: #TODO: docstring
:param mKey: #TODO: docstring
:returns: #TODO: docstring
{'eucDist': numpy.array([eucDistance, eucDistance, ...]),
'posPairs': numpy.array([[pos1, pos2], [pos1, pos2], ...])
}
"""
    # Calculate a sorted list of all euclidean feature distances
matchArrSize = listvalues(matchArr)[0].size
distInfo = {'posPairs': list(), 'eucDist': list()}
_matrix = numpy.swapaxes(numpy.array([matchArr[tKey], matchArr[mKey]]), 0, 1)
for pos1 in range(matchArrSize-1):
for pos2 in range(pos1+1, matchArrSize):
distInfo['posPairs'].append((pos1, pos2))
distInfo['posPairs'] = numpy.array(distInfo['posPairs'])
distInfo['eucDist'] = scipy.spatial.distance.pdist(_matrix)
distSort = numpy.argsort(distInfo['eucDist'])
for key in list(viewkeys(distInfo)):
distInfo[key] = distInfo[key][distSort]
return distInfo | Calculate the euclidean distance of all array positions in "matchArr".
:param matchArr: a dictionary of ``numpy.arrays`` containing at least two
entries that are treated as cartesian coordinates.
:param tKey: #TODO: docstring
:param mKey: #TODO: docstring
:returns: #TODO: docstring
{'eucDist': numpy.array([eucDistance, eucDistance, ...]),
'posPairs': numpy.array([[pos1, pos2], [pos1, pos2], ...])
} | Below is the instruction that describes the task:
### Input:
Calculate the euclidean distance of all array positions in "matchArr".
:param matchArr: a dictionary of ``numpy.arrays`` containing at least two
entries that are treated as cartesian coordinates.
:param tKey: #TODO: docstring
:param mKey: #TODO: docstring
:returns: #TODO: docstring
{'eucDist': numpy.array([eucDistance, eucDistance, ...]),
'posPairs': numpy.array([[pos1, pos2], [pos1, pos2], ...])
}
### Response:
def calcDistMatchArr(matchArr, tKey, mKey):
"""Calculate the euclidean distance of all array positions in "matchArr".
:param matchArr: a dictionary of ``numpy.arrays`` containing at least two
entries that are treated as cartesian coordinates.
:param tKey: #TODO: docstring
:param mKey: #TODO: docstring
:returns: #TODO: docstring
{'eucDist': numpy.array([eucDistance, eucDistance, ...]),
'posPairs': numpy.array([[pos1, pos2], [pos1, pos2], ...])
}
"""
    # Calculate a sorted list of all euclidean feature distances
matchArrSize = listvalues(matchArr)[0].size
distInfo = {'posPairs': list(), 'eucDist': list()}
_matrix = numpy.swapaxes(numpy.array([matchArr[tKey], matchArr[mKey]]), 0, 1)
for pos1 in range(matchArrSize-1):
for pos2 in range(pos1+1, matchArrSize):
distInfo['posPairs'].append((pos1, pos2))
distInfo['posPairs'] = numpy.array(distInfo['posPairs'])
distInfo['eucDist'] = scipy.spatial.distance.pdist(_matrix)
distSort = numpy.argsort(distInfo['eucDist'])
for key in list(viewkeys(distInfo)):
distInfo[key] = distInfo[key][distSort]
return distInfo |
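A hedged usage sketch, assuming calcDistMatchArr and its helpers (listvalues, viewkeys, scipy) are importable; the 'rt'/'mz' keys are made-up feature names:
import numpy
matchArr = {'rt': numpy.array([1.0, 2.0, 4.0]),
            'mz': numpy.array([100.0, 100.5, 103.0])}
distInfo = calcDistMatchArr(matchArr, 'rt', 'mz')
# position pairs come back sorted by ascending euclidean distance
print(distInfo['posPairs'][0], distInfo['eucDist'][0])  # -> [0 1] 1.118...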
def scale_dataset(self, dsid, variable, info):
"""Scale the data set, applying the attributes from the netCDF file"""
variable = remove_empties(variable)
scale = variable.attrs.get('scale_factor', np.array(1))
offset = variable.attrs.get('add_offset', np.array(0))
if np.issubdtype((scale + offset).dtype, np.floating) or np.issubdtype(variable.dtype, np.floating):
if '_FillValue' in variable.attrs:
variable = variable.where(
variable != variable.attrs['_FillValue'])
variable.attrs['_FillValue'] = np.nan
if 'valid_range' in variable.attrs:
variable = variable.where(
variable <= variable.attrs['valid_range'][1])
variable = variable.where(
variable >= variable.attrs['valid_range'][0])
if 'valid_max' in variable.attrs:
variable = variable.where(
variable <= variable.attrs['valid_max'])
if 'valid_min' in variable.attrs:
variable = variable.where(
variable >= variable.attrs['valid_min'])
attrs = variable.attrs
variable = variable * scale + offset
variable.attrs = attrs
variable.attrs.update({'platform_name': self.platform_name,
'sensor': self.sensor})
variable.attrs.setdefault('units', '1')
ancillary_names = variable.attrs.get('ancillary_variables', '')
try:
variable.attrs['ancillary_variables'] = ancillary_names.split()
except AttributeError:
pass
if 'palette_meanings' in variable.attrs:
variable.attrs['palette_meanings'] = [int(val)
for val in variable.attrs['palette_meanings'].split()]
if variable.attrs['palette_meanings'][0] == 1:
variable.attrs['palette_meanings'] = [0] + variable.attrs['palette_meanings']
variable = xr.DataArray(da.vstack((np.array(variable.attrs['fill_value_color']), variable.data)),
coords=variable.coords, dims=variable.dims, attrs=variable.attrs)
val, idx = np.unique(variable.attrs['palette_meanings'], return_index=True)
variable.attrs['palette_meanings'] = val
variable = variable[idx]
if 'standard_name' in info:
variable.attrs.setdefault('standard_name', info['standard_name'])
if self.sw_version == 'NWC/PPS version v2014' and dsid.name == 'ctth_alti':
# pps 2014 valid range and palette don't match
variable.attrs['valid_range'] = (0., 9000.)
if self.sw_version == 'NWC/PPS version v2014' and dsid.name == 'ctth_alti_pal':
# pps 2014 palette has the nodata color (black) first
variable = variable[1:, :]
        return variable | Scale the data set, applying the attributes from the netCDF file | Below is the instruction that describes the task:
### Input:
Scale the data set, applying the attributes from the netCDF file
### Response:
def scale_dataset(self, dsid, variable, info):
"""Scale the data set, applying the attributes from the netCDF file"""
variable = remove_empties(variable)
scale = variable.attrs.get('scale_factor', np.array(1))
offset = variable.attrs.get('add_offset', np.array(0))
if np.issubdtype((scale + offset).dtype, np.floating) or np.issubdtype(variable.dtype, np.floating):
if '_FillValue' in variable.attrs:
variable = variable.where(
variable != variable.attrs['_FillValue'])
variable.attrs['_FillValue'] = np.nan
if 'valid_range' in variable.attrs:
variable = variable.where(
variable <= variable.attrs['valid_range'][1])
variable = variable.where(
variable >= variable.attrs['valid_range'][0])
if 'valid_max' in variable.attrs:
variable = variable.where(
variable <= variable.attrs['valid_max'])
if 'valid_min' in variable.attrs:
variable = variable.where(
variable >= variable.attrs['valid_min'])
attrs = variable.attrs
variable = variable * scale + offset
variable.attrs = attrs
variable.attrs.update({'platform_name': self.platform_name,
'sensor': self.sensor})
variable.attrs.setdefault('units', '1')
ancillary_names = variable.attrs.get('ancillary_variables', '')
try:
variable.attrs['ancillary_variables'] = ancillary_names.split()
except AttributeError:
pass
if 'palette_meanings' in variable.attrs:
variable.attrs['palette_meanings'] = [int(val)
for val in variable.attrs['palette_meanings'].split()]
if variable.attrs['palette_meanings'][0] == 1:
variable.attrs['palette_meanings'] = [0] + variable.attrs['palette_meanings']
variable = xr.DataArray(da.vstack((np.array(variable.attrs['fill_value_color']), variable.data)),
coords=variable.coords, dims=variable.dims, attrs=variable.attrs)
val, idx = np.unique(variable.attrs['palette_meanings'], return_index=True)
variable.attrs['palette_meanings'] = val
variable = variable[idx]
if 'standard_name' in info:
variable.attrs.setdefault('standard_name', info['standard_name'])
if self.sw_version == 'NWC/PPS version v2014' and dsid.name == 'ctth_alti':
# pps 2014 valid range and palette don't match
variable.attrs['valid_range'] = (0., 9000.)
if self.sw_version == 'NWC/PPS version v2014' and dsid.name == 'ctth_alti_pal':
# pps 2014 palette has the nodata color (black) first
variable = variable[1:, :]
return variable |
def afterglow(self, src=None, event=None, dst=None, **kargs):
"""Experimental clone attempt of http://sourceforge.net/projects/afterglow
each datum is reduced as src -> event -> dst and the data are graphed.
by default we have IP.src -> IP.dport -> IP.dst"""
if src is None:
src = lambda x: x['IP'].src
if event is None:
event = lambda x: x['IP'].dport
if dst is None:
dst = lambda x: x['IP'].dst
sl = {}
el = {}
dl = {}
for i in self.res:
try:
s, e, d = src(i), event(i), dst(i)
if s in sl:
n, lst = sl[s]
n += 1
if e not in lst:
lst.append(e)
sl[s] = (n, lst)
else:
sl[s] = (1, [e])
if e in el:
n, lst = el[e]
n += 1
if d not in lst:
lst.append(d)
el[e] = (n, lst)
else:
el[e] = (1, [d])
dl[d] = dl.get(d, 0) + 1
except Exception:
continue
import math
def normalize(n):
return 2 + math.log(n) / 4.0
def minmax(x):
m, M = reduce(lambda a, b: (min(a[0], b[0]), max(a[1], b[1])),
((a, a) for a in x))
if m == M:
m = 0
if M == 0:
M = 1
return m, M
mins, maxs = minmax(x for x, _ in six.itervalues(sl))
mine, maxe = minmax(x for x, _ in six.itervalues(el))
mind, maxd = minmax(six.itervalues(dl))
gr = 'digraph "afterglow" {\n\tedge [len=2.5];\n'
gr += "# src nodes\n"
for s in sl:
n, _ = sl[s]
n = 1 + float(n - mins) / (maxs - mins)
gr += '"src.%s" [label = "%s", shape=box, fillcolor="#FF0000", style=filled, fixedsize=1, height=%.2f,width=%.2f];\n' % (repr(s), repr(s), n, n) # noqa: E501
gr += "# event nodes\n"
for e in el:
n, _ = el[e]
        n = 1 + float(n - mine) / (maxe - mine)
gr += '"evt.%s" [label = "%s", shape=circle, fillcolor="#00FFFF", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(e), repr(e), n, n) # noqa: E501
for d in dl:
n = dl[d]
        n = 1 + float(n - mind) / (maxd - mind)
gr += '"dst.%s" [label = "%s", shape=triangle, fillcolor="#0000ff", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(d), repr(d), n, n) # noqa: E501
gr += "###\n"
for s in sl:
n, lst = sl[s]
for e in lst:
gr += ' "src.%s" -> "evt.%s";\n' % (repr(s), repr(e))
for e in el:
n, lst = el[e]
for d in lst:
gr += ' "evt.%s" -> "dst.%s";\n' % (repr(e), repr(d))
gr += "}"
return do_graph(gr, **kargs) | Experimental clone attempt of http://sourceforge.net/projects/afterglow
each datum is reduced as src -> event -> dst and the data are graphed.
by default we have IP.src -> IP.dport -> IP.dst | Below is the instruction that describes the task:
### Input:
Experimental clone attempt of http://sourceforge.net/projects/afterglow
each datum is reduced as src -> event -> dst and the data are graphed.
by default we have IP.src -> IP.dport -> IP.dst
### Response:
def afterglow(self, src=None, event=None, dst=None, **kargs):
"""Experimental clone attempt of http://sourceforge.net/projects/afterglow
each datum is reduced as src -> event -> dst and the data are graphed.
by default we have IP.src -> IP.dport -> IP.dst"""
if src is None:
src = lambda x: x['IP'].src
if event is None:
event = lambda x: x['IP'].dport
if dst is None:
dst = lambda x: x['IP'].dst
sl = {}
el = {}
dl = {}
for i in self.res:
try:
s, e, d = src(i), event(i), dst(i)
if s in sl:
n, lst = sl[s]
n += 1
if e not in lst:
lst.append(e)
sl[s] = (n, lst)
else:
sl[s] = (1, [e])
if e in el:
n, lst = el[e]
n += 1
if d not in lst:
lst.append(d)
el[e] = (n, lst)
else:
el[e] = (1, [d])
dl[d] = dl.get(d, 0) + 1
except Exception:
continue
import math
def normalize(n):
return 2 + math.log(n) / 4.0
def minmax(x):
m, M = reduce(lambda a, b: (min(a[0], b[0]), max(a[1], b[1])),
((a, a) for a in x))
if m == M:
m = 0
if M == 0:
M = 1
return m, M
mins, maxs = minmax(x for x, _ in six.itervalues(sl))
mine, maxe = minmax(x for x, _ in six.itervalues(el))
mind, maxd = minmax(six.itervalues(dl))
gr = 'digraph "afterglow" {\n\tedge [len=2.5];\n'
gr += "# src nodes\n"
for s in sl:
n, _ = sl[s]
n = 1 + float(n - mins) / (maxs - mins)
gr += '"src.%s" [label = "%s", shape=box, fillcolor="#FF0000", style=filled, fixedsize=1, height=%.2f,width=%.2f];\n' % (repr(s), repr(s), n, n) # noqa: E501
gr += "# event nodes\n"
for e in el:
n, _ = el[e]
        n = 1 + float(n - mine) / (maxe - mine)
gr += '"evt.%s" [label = "%s", shape=circle, fillcolor="#00FFFF", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(e), repr(e), n, n) # noqa: E501
for d in dl:
n = dl[d]
        n = 1 + float(n - mind) / (maxd - mind)
gr += '"dst.%s" [label = "%s", shape=triangle, fillcolor="#0000ff", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(d), repr(d), n, n) # noqa: E501
gr += "###\n"
for s in sl:
n, lst = sl[s]
for e in lst:
gr += ' "src.%s" -> "evt.%s";\n' % (repr(s), repr(e))
for e in el:
n, lst = el[e]
for d in lst:
gr += ' "evt.%s" -> "dst.%s";\n' % (repr(e), repr(d))
gr += "}"
return do_graph(gr, **kargs) |
def displayhook(value):
"""
Runs all of the registered display hook methods with the given value.
Look at the sys.displayhook documentation for more information.
:param value | <variant>
"""
global _displayhooks
new_hooks = []
for hook_ref in _displayhooks:
hook = hook_ref()
if hook:
hook(value)
new_hooks.append(hook_ref)
_displayhooks = new_hooks
sys.__displayhook__(value) | Runs all of the registered display hook methods with the given value.
Look at the sys.displayhook documentation for more information.
:param value | <variant> | Below is the instruction that describes the task:
### Input:
Runs all of the registered display hook methods with the given value.
Look at the sys.displayhook documentation for more information.
:param value | <variant>
### Response:
def displayhook(value):
"""
Runs all of the registered display hook methods with the given value.
Look at the sys.displayhook documentation for more information.
:param value | <variant>
"""
global _displayhooks
new_hooks = []
for hook_ref in _displayhooks:
hook = hook_ref()
if hook:
hook(value)
new_hooks.append(hook_ref)
_displayhooks = new_hooks
sys.__displayhook__(value) |
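The registration side is not shown above; a hypothetical registration function matching the weak-reference convention the dispatcher expects could look like this (sketch only, not from the original module):
import sys
import weakref
def register_displayhook(hook):
    # mirror the weakref storage that displayhook() above iterates over
    _displayhooks.append(weakref.ref(hook))
def log_value(value):
    if value is not None:
        print('>> repr:', repr(value))
register_displayhook(log_value)
sys.displayhook = displayhook  # route interpreter output through the dispatcher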
def get_joystick_buttons(joy):
"""
Returns the state of all buttons of the specified joystick.
Wrapper for:
const unsigned char* glfwGetJoystickButtons(int joy, int* count);
"""
count_value = ctypes.c_int(0)
count = ctypes.pointer(count_value)
result = _glfw.glfwGetJoystickButtons(joy, count)
return result, count_value.value | Returns the state of all buttons of the specified joystick.
Wrapper for:
const unsigned char* glfwGetJoystickButtons(int joy, int* count); | Below is the instruction that describes the task:
### Input:
Returns the state of all buttons of the specified joystick.
Wrapper for:
const unsigned char* glfwGetJoystickButtons(int joy, int* count);
### Response:
def get_joystick_buttons(joy):
"""
Returns the state of all buttons of the specified joystick.
Wrapper for:
const unsigned char* glfwGetJoystickButtons(int joy, int* count);
"""
count_value = ctypes.c_int(0)
count = ctypes.pointer(count_value)
result = _glfw.glfwGetJoystickButtons(joy, count)
return result, count_value.value |
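A usage sketch, assuming a pyGLFW-style module exposing this wrapper along with the usual constants; note the returned buttons value may be a null pointer when no joystick is present:
import glfw
if glfw.init():
    buttons, count = glfw.get_joystick_buttons(glfw.JOYSTICK_1)
    if buttons:
        pressed = [i for i in range(count) if buttons[i] == glfw.PRESS]
        print('pressed buttons:', pressed)
    glfw.terminate()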
def _get_start_revision(self, graph, benchmark, entry_name):
"""
Compute the first revision allowed by asv.conf.json.
Revisions correspond to linearized commit history and the
regression detection runs on this order --- the starting commit
thus corresponds to a specific starting revision.
"""
start_revision = min(six.itervalues(self.revisions))
if graph.params.get('branch'):
branch_suffix = '@' + graph.params.get('branch')
else:
branch_suffix = ''
for regex, start_commit in six.iteritems(self.conf.regressions_first_commits):
if re.match(regex, entry_name + branch_suffix):
if start_commit is None:
# Disable regression detection completely
return None
if self.conf.branches == [None]:
key = (start_commit, None)
else:
key = (start_commit, graph.params.get('branch'))
if key not in self._start_revisions:
spec = self.repo.get_new_range_spec(*key)
start_hash = self.repo.get_hash_from_name(start_commit)
for commit in [start_hash] + self.repo.get_hashes_from_range(spec):
rev = self.revisions.get(commit)
if rev is not None:
self._start_revisions[key] = rev
break
else:
# Commit not found in the branch --- warn and ignore.
log.warning(("Commit {0} specified in `regressions_first_commits` "
"not found in branch").format(start_commit))
self._start_revisions[key] = -1
start_revision = max(start_revision, self._start_revisions[key] + 1)
return start_revision | Compute the first revision allowed by asv.conf.json.
Revisions correspond to linearized commit history and the
regression detection runs on this order --- the starting commit
thus corresponds to a specific starting revision. | Below is the instruction that describes the task:
### Input:
Compute the first revision allowed by asv.conf.json.
Revisions correspond to linearized commit history and the
regression detection runs on this order --- the starting commit
thus corresponds to a specific starting revision.
### Response:
def _get_start_revision(self, graph, benchmark, entry_name):
"""
Compute the first revision allowed by asv.conf.json.
Revisions correspond to linearized commit history and the
regression detection runs on this order --- the starting commit
thus corresponds to a specific starting revision.
"""
start_revision = min(six.itervalues(self.revisions))
if graph.params.get('branch'):
branch_suffix = '@' + graph.params.get('branch')
else:
branch_suffix = ''
for regex, start_commit in six.iteritems(self.conf.regressions_first_commits):
if re.match(regex, entry_name + branch_suffix):
if start_commit is None:
# Disable regression detection completely
return None
if self.conf.branches == [None]:
key = (start_commit, None)
else:
key = (start_commit, graph.params.get('branch'))
if key not in self._start_revisions:
spec = self.repo.get_new_range_spec(*key)
start_hash = self.repo.get_hash_from_name(start_commit)
for commit in [start_hash] + self.repo.get_hashes_from_range(spec):
rev = self.revisions.get(commit)
if rev is not None:
self._start_revisions[key] = rev
break
else:
# Commit not found in the branch --- warn and ignore.
log.warning(("Commit {0} specified in `regressions_first_commits` "
"not found in branch").format(start_commit))
self._start_revisions[key] = -1
start_revision = max(start_revision, self._start_revisions[key] + 1)
return start_revision |
def unescape_LDAP(ldap_string):
# type: (str) -> str
# pylint: disable=C0103
"""
    Unescapes an LDAP string
:param ldap_string: The string to unescape
:return: The unprotected string
"""
if ldap_string is None:
return None
if ESCAPE_CHARACTER not in ldap_string:
# No need to loop
return ldap_string
escaped = False
result = ""
for character in ldap_string:
if not escaped and character == ESCAPE_CHARACTER:
# Escape character found
escaped = True
else:
# Copy the character
escaped = False
result += character
    return result | Unescapes an LDAP string
:param ldap_string: The string to unescape
:return: The unprotected string | Below is the instruction that describes the task:
### Input:
Unescapes an LDAP string
:param ldap_string: The string to unescape
:return: The unprotected string
### Response:
def unescape_LDAP(ldap_string):
# type: (str) -> str
# pylint: disable=C0103
"""
    Unescapes an LDAP string
:param ldap_string: The string to unescape
:return: The unprotected string
"""
if ldap_string is None:
return None
if ESCAPE_CHARACTER not in ldap_string:
# No need to loop
return ldap_string
escaped = False
result = ""
for character in ldap_string:
if not escaped and character == ESCAPE_CHARACTER:
# Escape character found
escaped = True
else:
# Copy the character
escaped = False
result += character
return result |
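A quick behavior check, assuming ESCAPE_CHARACTER == '\\' (the usual LDAP filter escape) and the function above in scope:
assert unescape_LDAP(r'cn=A\*B') == 'cn=A*B'   # escape dropped, next char kept
assert unescape_LDAP(r'a\\b') == 'a\\b'        # escaped backslash survives
assert unescape_LDAP('plain') == 'plain'       # fast path: no escape present
assert unescape_LDAP(None) is None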
def datetime(self, to_timezone=None, naive=False):
"""Returns a timezone-aware datetime...
Defaulting to UTC (as it should).
Keyword Arguments:
to_timezone {str} -- timezone to convert to (default: None/UTC)
naive {bool} -- if True,
the tzinfo is simply dropped (default: False)
"""
if to_timezone:
dt = self.datetime().astimezone(pytz.timezone(to_timezone))
else:
dt = Datetime.utcfromtimestamp(self._epoch)
dt.replace(tzinfo=self._tz)
# Strip the timezone info if requested to do so.
if naive:
return dt.replace(tzinfo=None)
else:
if dt.tzinfo is None:
dt = dt.replace(tzinfo=self._tz)
return dt | Returns a timezone-aware datetime...
Defaulting to UTC (as it should).
Keyword Arguments:
to_timezone {str} -- timezone to convert to (default: None/UTC)
naive {bool} -- if True,
the tzinfo is simply dropped (default: False) | Below is the instruction that describes the task:
### Input:
Returns a timezone-aware datetime...
Defaulting to UTC (as it should).
Keyword Arguments:
to_timezone {str} -- timezone to convert to (default: None/UTC)
naive {bool} -- if True,
the tzinfo is simply dropped (default: False)
### Response:
def datetime(self, to_timezone=None, naive=False):
"""Returns a timezone-aware datetime...
Defaulting to UTC (as it should).
Keyword Arguments:
to_timezone {str} -- timezone to convert to (default: None/UTC)
naive {bool} -- if True,
the tzinfo is simply dropped (default: False)
"""
if to_timezone:
dt = self.datetime().astimezone(pytz.timezone(to_timezone))
else:
dt = Datetime.utcfromtimestamp(self._epoch)
dt.replace(tzinfo=self._tz)
# Strip the timezone info if requested to do so.
if naive:
return dt.replace(tzinfo=None)
else:
if dt.tzinfo is None:
dt = dt.replace(tzinfo=self._tz)
return dt |
def get_country_by_name(self, country_name) -> 'Country':
"""
Gets a country in this coalition by its name
Args:
country_name: country name
Returns: Country
"""
VALID_STR.validate(country_name, 'get_country_by_name', exc=ValueError)
if country_name not in self._countries_by_name.keys():
for country in self.countries:
if country.country_name == country_name:
return country
raise ValueError(country_name)
else:
return self._countries_by_name[country_name] | Gets a country in this coalition by its name
Args:
country_name: country name
Returns: Country | Below is the instruction that describes the task:
### Input:
Gets a country in this coalition by its name
Args:
country_name: country name
Returns: Country
### Response:
def get_country_by_name(self, country_name) -> 'Country':
"""
Gets a country in this coalition by its name
Args:
country_name: country name
Returns: Country
"""
VALID_STR.validate(country_name, 'get_country_by_name', exc=ValueError)
if country_name not in self._countries_by_name.keys():
for country in self.countries:
if country.country_name == country_name:
return country
raise ValueError(country_name)
else:
return self._countries_by_name[country_name] |
def get_server_networks(self, network, public=False, private=False,
key=None):
"""
Creates the dict of network UUIDs required by Cloud Servers when
creating a new server with isolated networks. By default, the UUID
values are returned with the key of "net-id", which is what novaclient
expects. Other tools may require different values, such as 'uuid'. If
that is the case, pass the desired key as the 'key' parameter.
By default only this network is included. If you wish to create a
server that has either the public (internet) or private (ServiceNet)
networks, you have to pass those parameters in with values of True.
"""
return _get_server_networks(network, public=public, private=private,
key=key) | Creates the dict of network UUIDs required by Cloud Servers when
creating a new server with isolated networks. By default, the UUID
values are returned with the key of "net-id", which is what novaclient
expects. Other tools may require different values, such as 'uuid'. If
that is the case, pass the desired key as the 'key' parameter.
By default only this network is included. If you wish to create a
server that has either the public (internet) or private (ServiceNet)
networks, you have to pass those parameters in with values of True. | Below is the instruction that describes the task:
### Input:
Creates the dict of network UUIDs required by Cloud Servers when
creating a new server with isolated networks. By default, the UUID
values are returned with the key of "net-id", which is what novaclient
expects. Other tools may require different values, such as 'uuid'. If
that is the case, pass the desired key as the 'key' parameter.
By default only this network is included. If you wish to create a
server that has either the public (internet) or private (ServiceNet)
networks, you have to pass those parameters in with values of True.
### Response:
def get_server_networks(self, network, public=False, private=False,
key=None):
"""
Creates the dict of network UUIDs required by Cloud Servers when
creating a new server with isolated networks. By default, the UUID
values are returned with the key of "net-id", which is what novaclient
expects. Other tools may require different values, such as 'uuid'. If
that is the case, pass the desired key as the 'key' parameter.
By default only this network is included. If you wish to create a
server that has either the public (internet) or private (ServiceNet)
networks, you have to pass those parameters in with values of True.
"""
return _get_server_networks(network, public=public, private=private,
key=key) |
def headerData(self, section, orientation, role):
"""Get the information to put in the header."""
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return Qt.AlignCenter | Qt.AlignBottom
else:
return Qt.AlignRight | Qt.AlignVCenter
if role != Qt.DisplayRole and role != Qt.ToolTipRole:
return None
if self.axis == 1 and self._shape[1] <= 1:
return None
orient_axis = 0 if orientation == Qt.Horizontal else 1
if self.model.header_shape[orient_axis] > 1:
header = section
else:
header = self.model.header(self.axis, section)
# Don't perform any conversion on strings
# because it leads to differences between
# the data present in the dataframe and
# what is shown by Spyder
if not is_type_text_string(header):
header = to_text_string(header)
        return header | Get the information to put in the header. | Below is the instruction that describes the task:
### Input:
Get the information to put in the header.
### Response:
def headerData(self, section, orientation, role):
"""Get the information to put in the header."""
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return Qt.AlignCenter | Qt.AlignBottom
else:
return Qt.AlignRight | Qt.AlignVCenter
if role != Qt.DisplayRole and role != Qt.ToolTipRole:
return None
if self.axis == 1 and self._shape[1] <= 1:
return None
orient_axis = 0 if orientation == Qt.Horizontal else 1
if self.model.header_shape[orient_axis] > 1:
header = section
else:
header = self.model.header(self.axis, section)
# Don't perform any conversion on strings
# because it leads to differences between
# the data present in the dataframe and
# what is shown by Spyder
if not is_type_text_string(header):
header = to_text_string(header)
return header |
def distL2(x1,y1,x2,y2):
"""Compute the L2-norm (Euclidean) distance between two points.
The distance is rounded to the closest integer, for compatibility
with the TSPLIB convention.
The two points are located on coordinates (x1,y1) and (x2,y2),
sent as parameters"""
xdiff = x2 - x1
ydiff = y2 - y1
return int(math.sqrt(xdiff*xdiff + ydiff*ydiff) + .5) | Compute the L2-norm (Euclidean) distance between two points.
The distance is rounded to the closest integer, for compatibility
with the TSPLIB convention.
The two points are located on coordinates (x1,y1) and (x2,y2),
sent as parameters | Below is the instruction that describes the task:
### Input:
Compute the L2-norm (Euclidean) distance between two points.
The distance is rounded to the closest integer, for compatibility
with the TSPLIB convention.
The two points are located on coordinates (x1,y1) and (x2,y2),
sent as parameters
### Response:
def distL2(x1,y1,x2,y2):
"""Compute the L2-norm (Euclidean) distance between two points.
The distance is rounded to the closest integer, for compatibility
with the TSPLIB convention.
The two points are located on coordinates (x1,y1) and (x2,y2),
sent as parameters"""
xdiff = x2 - x1
ydiff = y2 - y1
return int(math.sqrt(xdiff*xdiff + ydiff*ydiff) + .5) |
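Quick sanity check of the TSPLIB rounding convention (assuming distL2 from above is in scope):
assert distL2(0, 0, 3, 4) == 5   # exact 3-4-5 triangle
assert distL2(0, 0, 1, 1) == 1   # sqrt(2) ~= 1.414, rounds down
assert distL2(0, 0, 1, 2) == 2   # sqrt(5) ~= 2.236, rounds down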
def str_display_width(s):
'''
from elist.utils import *
str_display_width('a')
str_display_width('去')
'''
    s = str(s)
width = 0
len = s.__len__()
for i in range(0,len):
sublen = s[i].encode().__len__()
sublen = int(sublen/2 + 1/2)
width = width + sublen
return(width) | from elist.utils import *
str_display_width('a')
str_display_width('去') | Below is the instruction that describes the task:
### Input:
from elist.utils import *
str_display_width('a')
str_display_width('去')
### Response:
def str_display_width(s):
'''
from elist.utils import *
str_display_width('a')
str_display_width('去')
'''
    s = str(s)
width = 0
len = s.__len__()
for i in range(0,len):
sublen = s[i].encode().__len__()
sublen = int(sublen/2 + 1/2)
width = width + sublen
return(width) |
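The width heuristic is driven by UTF-8 byte length; a quick check assuming the function above is in scope:
assert str_display_width('abc') == 3   # 1 byte per char -> width 1 each
assert str_display_width('去') == 2    # 3 UTF-8 bytes -> int(3/2 + 1/2) == 2
assert str_display_width('a去') == 3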
def command_x(self, x, to=None):
"""
Sends a character to the currently active element with Command
pressed. This method takes care of pressing and releasing
Command.
"""
if to is None:
ActionChains(self.driver) \
.send_keys([Keys.COMMAND, x, Keys.COMMAND]) \
.perform()
else:
self.send_keys(to, [Keys.COMMAND, x, Keys.COMMAND]) | Sends a character to the currently active element with Command
pressed. This method takes care of pressing and releasing
Command. | Below is the instruction that describes the task:
### Input:
Sends a character to the currently active element with Command
pressed. This method takes care of pressing and releasing
Command.
### Response:
def command_x(self, x, to=None):
"""
Sends a character to the currently active element with Command
pressed. This method takes care of pressing and releasing
Command.
"""
if to is None:
ActionChains(self.driver) \
.send_keys([Keys.COMMAND, x, Keys.COMMAND]) \
.perform()
else:
self.send_keys(to, [Keys.COMMAND, x, Keys.COMMAND]) |
def get_concurrency(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/concurrency'.format(
self.client.sauce_username)
        return self.client.request(method, endpoint) | Check account concurrency limits. | Below is the instruction that describes the task:
### Input:
Check account concurrency limits.
### Response:
def get_concurrency(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/concurrency'.format(
self.client.sauce_username)
return self.client.request(method, endpoint) |
def parent_images(self):
"""
:return: list of parent images -- one image per each stage's FROM instruction
"""
parents = []
for instr in self.structure:
if instr['instruction'] != 'FROM':
continue
image, _ = image_from(instr['value'])
if image is not None:
parents.append(image)
return parents | :return: list of parent images -- one image per each stage's FROM instruction | Below is the the instruction that describes the task:
### Input:
:return: list of parent images -- one image per each stage's FROM instruction
### Response:
def parent_images(self):
"""
:return: list of parent images -- one image per each stage's FROM instruction
"""
parents = []
for instr in self.structure:
if instr['instruction'] != 'FROM':
continue
image, _ = image_from(instr['value'])
if image is not None:
parents.append(image)
return parents |
def constructRows(self, items):
"""
Build row objects that are serializable using Athena for sending to the
client.
@param items: an iterable of objects compatible with my columns'
C{extractValue} methods.
@return: a list of dictionaries, where each dictionary has a string key
for each column name in my list of columns.
"""
rows = []
for item in items:
row = dict((colname, col.extractValue(self, item))
for (colname, col) in self.columns.iteritems())
link = self.linkToItem(item)
if link is not None:
row[u'__id__'] = link
rows.append(row)
return rows | Build row objects that are serializable using Athena for sending to the
client.
@param items: an iterable of objects compatible with my columns'
C{extractValue} methods.
@return: a list of dictionaries, where each dictionary has a string key
for each column name in my list of columns. | Below is the instruction that describes the task:
### Input:
Build row objects that are serializable using Athena for sending to the
client.
@param items: an iterable of objects compatible with my columns'
C{extractValue} methods.
@return: a list of dictionaries, where each dictionary has a string key
for each column name in my list of columns.
### Response:
def constructRows(self, items):
"""
Build row objects that are serializable using Athena for sending to the
client.
@param items: an iterable of objects compatible with my columns'
C{extractValue} methods.
@return: a list of dictionaries, where each dictionary has a string key
for each column name in my list of columns.
"""
rows = []
for item in items:
row = dict((colname, col.extractValue(self, item))
for (colname, col) in self.columns.iteritems())
link = self.linkToItem(item)
if link is not None:
row[u'__id__'] = link
rows.append(row)
return rows |
def get_timing_signal_1d(length,
channels,
min_timescale=1.0,
max_timescale=1.0e4,
start_index=0):
"""Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
length: scalar, length of timing signal sequence.
channels: scalar, size of timing embeddings to create. The number of
different timescales is equal to channels / 2.
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor of timing signals [1, length, channels]
"""
position = tf.to_float(tf.range(length) + start_index)
num_timescales = channels // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
tf.maximum(tf.to_float(num_timescales) - 1, 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
signal = tf.reshape(signal, [1, length, channels])
return signal | Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
length: scalar, length of timing signal sequence.
channels: scalar, size of timing embeddings to create. The number of
different timescales is equal to channels / 2.
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor of timing signals [1, length, channels] | Below is the instruction that describes the task:
### Input:
Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
length: scalar, length of timing signal sequence.
channels: scalar, size of timing embeddings to create. The number of
different timescales is equal to channels / 2.
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor of timing signals [1, length, channels]
### Response:
def get_timing_signal_1d(length,
channels,
min_timescale=1.0,
max_timescale=1.0e4,
start_index=0):
"""Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
length: scalar, length of timing signal sequence.
channels: scalar, size of timing embeddings to create. The number of
different timescales is equal to channels / 2.
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor of timing signals [1, length, channels]
"""
position = tf.to_float(tf.range(length) + start_index)
num_timescales = channels // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
tf.maximum(tf.to_float(num_timescales) - 1, 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
signal = tf.reshape(signal, [1, length, channels])
return signal |
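For inspection without a TF session, a rough numpy transcription of the same geometric-timescale construction (a sketch, not part of the original library):
import math
import numpy as np
def timing_signal_1d_np(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = np.arange(length, dtype=np.float64)
    num_timescales = channels // 2
    log_inc = math.log(max_timescale / min_timescale) / max(num_timescales - 1, 1)
    inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_inc)
    scaled = position[:, None] * inv_timescales[None, :]
    signal = np.concatenate([np.sin(scaled), np.cos(scaled)], axis=1)
    signal = np.pad(signal, [(0, 0), (0, channels % 2)], mode='constant')
    return signal[None, :, :]  # shape [1, length, channels]
assert timing_signal_1d_np(10, 16).shape == (1, 10, 16)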
def normalizeGlyphNote(value):
"""
Normalizes Glyph Note.
* **value** must be a :ref:`type-string`.
* Returned value is an unencoded ``unicode`` string
"""
if not isinstance(value, basestring):
raise TypeError("Note must be a string, not %s."
% type(value).__name__)
return unicode(value) | Normalizes Glyph Note.
* **value** must be a :ref:`type-string`.
* Returned value is an unencoded ``unicode`` string | Below is the instruction that describes the task:
### Input:
Normalizes Glyph Note.
* **value** must be a :ref:`type-string`.
* Returned value is an unencoded ``unicode`` string
### Response:
def normalizeGlyphNote(value):
"""
Normalizes Glyph Note.
* **value** must be a :ref:`type-string`.
* Returned value is an unencoded ``unicode`` string
"""
if not isinstance(value, basestring):
raise TypeError("Note must be a string, not %s."
% type(value).__name__)
return unicode(value) |
def crack_secret_exponent_from_k(generator, signed_value, sig, k):
"""
Given a signature of a signed_value and a known k, return the secret exponent.
"""
r, s = sig
    return ((s * k - signed_value) * generator.inverse(r)) % generator.order() | Given a signature of a signed_value and a known k, return the secret exponent. | Below is the instruction that describes the task:
### Input:
Given a signature of a signed_value and a known k, return the secret exponent.
### Response:
def crack_secret_exponent_from_k(generator, signed_value, sig, k):
"""
Given a signature of a signed_value and a known k, return the secret exponent.
"""
r, s = sig
return ((s * k - signed_value) * generator.inverse(r)) % generator.order() |
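Why this works (standard ECDSA algebra, sketched as an editorial sanity note): a signature satisfies s = k^-1 * (z + r*d) mod n, with z the signed value, d the secret exponent and n the group order; multiplying by k and solving for d gives d = (s*k - z) * r^-1 mod n, which is exactly the return expression, with generator.inverse(r) supplying r^-1 mod n. A toy modular-arithmetic check with made-up numbers (not a real curve):
n, d, k, z = 97, 37, 11, 55           # order, secret, known nonce, message hash
r = 29                                 # stand-in for the signature's r value
s = pow(k, -1, n) * (z + r * d) % n    # forge the matching s (pow(x, -1, n) needs Python 3.8+)
assert (s * k - z) * pow(r, -1, n) % n == d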
def back_slash_to_front_converter(string):
"""
    Replaces all \ in the string with /
:param string: single string to modify
:type string: str
"""
try:
if not string or not isinstance(string, str):
return string
return string.replace('\\', '/')
except Exception:
        return string | Replaces all \ in the string with /
:param string: single string to modify
:type string: str | Below is the instruction that describes the task:
### Input:
Replaces all \ in the string with /
:param string: single string to modify
:type string: str
### Response:
def back_slash_to_front_converter(string):
"""
    Replaces all \ in the string with /
:param string: single string to modify
:type string: str
"""
try:
if not string or not isinstance(string, str):
return string
return string.replace('\\', '/')
except Exception:
return string |
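Behavior check (assuming the function above is in scope):
assert back_slash_to_front_converter('C:\\temp\\file.txt') == 'C:/temp/file.txt'
assert back_slash_to_front_converter(None) is None   # non-strings pass through
assert back_slash_to_front_converter(123) == 123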
def urlize(text, trim_url_limit=None, nofollow=False):
"""Converts any URLs in text into clickable links. Works on http://,
https:// and www. links. Links can have trailing punctuation (periods,
commas, close-parens) and leading punctuation (opening parens) and
it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text will be limited
to trim_url_limit characters.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
"""
trim_url = lambda x, limit=trim_url_limit: limit is not None \
and (x[:limit] + (len(x) >=limit and '...'
or '')) or x
words = _word_split_re.split(unicode(escape(text)))
nofollow_attr = nofollow and ' rel="nofollow"' or ''
for i, word in enumerate(words):
match = _punctuation_re.match(word)
if match:
lead, middle, trail = match.groups()
if middle.startswith('www.') or (
'@' not in middle and
not middle.startswith('http://') and
len(middle) > 0 and
middle[0] in _letters + _digits and (
middle.endswith('.org') or
middle.endswith('.net') or
middle.endswith('.com')
)):
middle = '<a href="http://%s"%s>%s</a>' % (middle,
nofollow_attr, trim_url(middle))
if middle.startswith('http://') or \
middle.startswith('https://'):
middle = '<a href="%s"%s>%s</a>' % (middle,
nofollow_attr, trim_url(middle))
if '@' in middle and not middle.startswith('www.') and \
not ':' in middle and _simple_email_re.match(middle):
middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
if lead + middle + trail != word:
words[i] = lead + middle + trail
return u''.join(words) | Converts any URLs in text into clickable links. Works on http://,
https:// and www. links. Links can have trailing punctuation (periods,
commas, close-parens) and leading punctuation (opening parens) and
it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text will be limited
to trim_url_limit characters.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute. | Below is the instruction that describes the task:
### Input:
Converts any URLs in text into clickable links. Works on http://,
https:// and www. links. Links can have trailing punctuation (periods,
commas, close-parens) and leading punctuation (opening parens) and
it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text will be limited
to trim_url_limit characters.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
### Response:
def urlize(text, trim_url_limit=None, nofollow=False):
"""Converts any URLs in text into clickable links. Works on http://,
https:// and www. links. Links can have trailing punctuation (periods,
commas, close-parens) and leading punctuation (opening parens) and
it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text will be limited
to trim_url_limit characters.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
"""
trim_url = lambda x, limit=trim_url_limit: limit is not None \
and (x[:limit] + (len(x) >= limit and '...'
or '')) or x
words = _word_split_re.split(unicode(escape(text)))
nofollow_attr = nofollow and ' rel="nofollow"' or ''
for i, word in enumerate(words):
match = _punctuation_re.match(word)
if match:
lead, middle, trail = match.groups()
if middle.startswith('www.') or (
'@' not in middle and
not middle.startswith('http://') and
len(middle) > 0 and
middle[0] in _letters + _digits and (
middle.endswith('.org') or
middle.endswith('.net') or
middle.endswith('.com')
)):
middle = '<a href="http://%s"%s>%s</a>' % (middle,
nofollow_attr, trim_url(middle))
if middle.startswith('http://') or \
middle.startswith('https://'):
middle = '<a href="%s"%s>%s</a>' % (middle,
nofollow_attr, trim_url(middle))
if '@' in middle and not middle.startswith('www.') and \
not ':' in middle and _simple_email_re.match(middle):
middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
if lead + middle + trail != word:
words[i] = lead + middle + trail
return u''.join(words) |
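An illustrative call; the helper regexes (_word_split_re and friends) live in the surrounding module, so this shows expected output rather than a standalone script:
text = u'see www.example.com or mail foo@bar.com'
urlize(text, nofollow=True)
# expected: u'see <a href="http://www.example.com" rel="nofollow">www.example.com</a>'
#           u' or mail <a href="mailto:foo@bar.com">foo@bar.com</a>'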
def relabel(label_list, projections):
"""
Relabel an entire :py:class:`~audiomate.annotations.LabelList` using user-defined projections.
Labels can be renamed, removed or overlapping labels can be flattened to a single label per segment.
Each entry in the dictionary of projections represents a single projection that maps a combination of labels (key)
to a single new label (value). The combination of labels to be mapped is a tuple of naturally sorted labels that
apply to one or more segments simultaneously. By defining a special wildcard projection using `('**',)` it is not
required to specify a projection for every single combination of labels.
This method raises a :py:class:`~audiomate.corpus.utils.labellist.UnmappedLabelsException` if a projection for one
or more combinations of labels is not defined.
Args:
label_list (audiomate.annotations.LabelList): The label list to relabel
projections (dict): A dictionary that maps tuples of label combinations to string
labels.
Returns:
audiomate.annotations.LabelList: New label list with remapped labels
Raises:
UnmappedLabelsException: If a projection for one or more combinations of labels is not defined.
Example:
>>> projections = {
... ('a',): 'a',
... ('b',): 'b',
... ('c',): 'c',
... ('a', 'b',): 'a_b',
... ('a', 'b', 'c',): 'a_b_c',
... ('**',): 'b_c',
... }
>>> label_list = annotations.LabelList(labels=[
... annotations.Label('a', 3.2, 4.5),
... annotations.Label('b', 4.0, 4.9),
... annotations.Label('c', 4.2, 5.1)
... ])
>>> ll = relabel(label_list, projections)
>>> [l.value for l in ll]
['a', 'a_b', 'a_b_c', 'b_c', 'c']
"""
unmapped_combinations = find_missing_projections(label_list, projections)
if len(unmapped_combinations) > 0:
raise UnmappedLabelsException('Unmapped combinations: {}'.format(unmapped_combinations))
new_labels = []
for labeled_segment in label_list.ranges():
combination = tuple(sorted([label.value for label in labeled_segment[2]]))
label_mapping = projections[combination] if combination in projections else projections[WILDCARD_COMBINATION]
if label_mapping == '':
continue
new_labels.append(annotations.Label(label_mapping, labeled_segment[0], labeled_segment[1]))
return annotations.LabelList(idx=label_list.idx, labels=new_labels) | Relabel an entire :py:class:`~audiomate.annotations.LabelList` using user-defined projections.
Labels can be renamed, removed or overlapping labels can be flattened to a single label per segment.
Each entry in the dictionary of projections represents a single projection that maps a combination of labels (key)
to a single new label (value). The combination of labels to be mapped is a tuple of naturally sorted labels that
apply to one or more segments simultaneously. By defining a special wildcard projection using `('**',)` it is not
required to specify a projection for every single combination of labels.
This method raises a :py:class:`~audiomate.corpus.utils.labellist.UnmappedLabelsException` if a projection for one
or more combinations of labels is not defined.
Args:
label_list (audiomate.annotations.LabelList): The label list to relabel
projections (dict): A dictionary that maps tuples of label combinations to string
labels.
Returns:
audiomate.annotations.LabelList: New label list with remapped labels
Raises:
UnmappedLabelsException: If a projection for one or more combinations of labels is not defined.
Example:
>>> projections = {
... ('a',): 'a',
... ('b',): 'b',
... ('c',): 'c',
... ('a', 'b',): 'a_b',
... ('a', 'b', 'c',): 'a_b_c',
... ('**',): 'b_c',
... }
>>> label_list = annotations.LabelList(labels=[
... annotations.Label('a', 3.2, 4.5),
... annotations.Label('b', 4.0, 4.9),
... annotations.Label('c', 4.2, 5.1)
... ])
>>> ll = relabel(label_list, projections)
>>> [l.value for l in ll]
['a', 'a_b', 'a_b_c', 'b_c', 'c'] | Below is the instruction that describes the task:
### Input:
Relabel an entire :py:class:`~audiomate.annotations.LabelList` using user-defined projections.
Labels can be renamed, removed or overlapping labels can be flattened to a single label per segment.
Each entry in the dictionary of projections represents a single projection that maps a combination of labels (key)
to a single new label (value). The combination of labels to be mapped is a tuple of naturally sorted labels that
apply to one or more segments simultaneously. By defining a special wildcard projection using `('**',)` it is not
required to specify a projection for every single combination of labels.
This method raises a :py:class:`~audiomate.corpus.utils.labellist.UnmappedLabelsException` if a projection for one
or more combinations of labels is not defined.
Args:
label_list (audiomate.annotations.LabelList): The label list to relabel
projections (dict): A dictionary that maps tuples of label combinations to string
labels.
Returns:
audiomate.annotations.LabelList: New label list with remapped labels
Raises:
UnmappedLabelsException: If a projection for one or more combinations of labels is not defined.
Example:
>>> projections = {
... ('a',): 'a',
... ('b',): 'b',
... ('c',): 'c',
... ('a', 'b',): 'a_b',
... ('a', 'b', 'c',): 'a_b_c',
... ('**',): 'b_c',
... }
>>> label_list = annotations.LabelList(labels=[
... annotations.Label('a', 3.2, 4.5),
... annotations.Label('b', 4.0, 4.9),
... annotations.Label('c', 4.2, 5.1)
... ])
>>> ll = relabel(label_list, projections)
>>> [l.value for l in ll]
['a', 'a_b', 'a_b_c', 'b_c', 'c']
### Response:
def relabel(label_list, projections):
"""
Relabel an entire :py:class:`~audiomate.annotations.LabelList` using user-defined projections.
Labels can be renamed, removed or overlapping labels can be flattened to a single label per segment.
Each entry in the dictionary of projections represents a single projection that maps a combination of labels (key)
to a single new label (value). The combination of labels to be mapped is a tuple of naturally sorted labels that
apply to one or more segments simultaneously. By defining a special wildcard projection using `('**',)` it is not
required to specify a projection for every single combination of labels.
This method raises a :py:class:`~audiomate.corpus.utils.labellist.UnmappedLabelsException` if a projection for one
or more combinations of labels is not defined.
Args:
label_list (audiomate.annotations.LabelList): The label list to relabel
projections (dict): A dictionary that maps tuples of label combinations to string
labels.
Returns:
audiomate.annotations.LabelList: New label list with remapped labels
Raises:
UnmappedLabelsException: If a projection for one or more combinations of labels is not defined.
Example:
>>> projections = {
... ('a',): 'a',
... ('b',): 'b',
... ('c',): 'c',
... ('a', 'b',): 'a_b',
... ('a', 'b', 'c',): 'a_b_c',
... ('**',): 'b_c',
... }
>>> label_list = annotations.LabelList(labels=[
... annotations.Label('a', 3.2, 4.5),
... annotations.Label('b', 4.0, 4.9),
... annotations.Label('c', 4.2, 5.1)
... ])
>>> ll = relabel(label_list, projections)
>>> [l.value for l in ll]
['a', 'a_b', 'a_b_c', 'b_c', 'c']
"""
unmapped_combinations = find_missing_projections(label_list, projections)
if len(unmapped_combinations) > 0:
raise UnmappedLabelsException('Unmapped combinations: {}'.format(unmapped_combinations))
new_labels = []
for labeled_segment in label_list.ranges():
combination = tuple(sorted([label.value for label in labeled_segment[2]]))
label_mapping = projections[combination] if combination in projections else projections[WILDCARD_COMBINATION]
if label_mapping == '':
continue
new_labels.append(annotations.Label(label_mapping, labeled_segment[0], labeled_segment[1]))
return annotations.LabelList(idx=label_list.idx, labels=new_labels) |
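One behaviour the doctest above does not show: mapping a combination to the empty string drops those segments entirely (a sketch using the same audiomate objects as above):
projections = {('noise',): '', ('speech',): 'speech', ('**',): 'mixed'}
label_list = annotations.LabelList(labels=[
    annotations.Label('noise', 0.0, 1.0),
    annotations.Label('speech', 1.0, 2.0),
])
[l.value for l in relabel(label_list, projections)]  # ['speech']; the noise segment is gone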
def _find_server(account, servername=None):
""" Find and return a PlexServer object. """
servers = [s for s in account.resources() if 'server' in s.provides]
# If servername specified find and return it
if servername is not None:
for server in servers:
if server.name == servername:
return server.connect()
raise SystemExit('Unknown server name: %s' % servername)
# If servername not specified; allow user to choose
return utils.choose('Choose a Server', servers, 'name').connect() | Find and return a PlexServer object. | Below is the instruction that describes the task:
### Input:
Find and return a PlexServer object.
### Response:
def _find_server(account, servername=None):
""" Find and return a PlexServer object. """
servers = [s for s in account.resources() if 'server' in s.provides]
# If servername specified find and return it
if servername is not None:
for server in servers:
if server.name == servername:
return server.connect()
raise SystemExit('Unknown server name: %s' % servername)
# If servername not specified; allow user to choose
return utils.choose('Choose a Server', servers, 'name').connect() |
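Hypothetical caller, assuming a plexapi-style MyPlexAccount whose resources expose .provides, .name and .connect() as the helper expects:
from plexapi.myplex import MyPlexAccount  # assumed dependency
account = MyPlexAccount('user', 'password')
plex = _find_server(account, servername='Office')  # omit servername to pick interactively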
def upload(self, remote_path, local_path, progress=None):
"""Uploads resource to remote path on WebDAV server.
In case resource is directory it will upload all nested files and directories.
More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PUT
:param remote_path: the path for uploading resources on WebDAV server. Can be file and directory.
:param local_path: the path to local resource for uploading.
:param progress: Progress function. Not supported now.
"""
if os.path.isdir(local_path):
self.upload_directory(local_path=local_path, remote_path=remote_path, progress=progress)
else:
self.upload_file(local_path=local_path, remote_path=remote_path) | Uploads resource to remote path on WebDAV server.
In case resource is directory it will upload all nested files and directories.
More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PUT
:param remote_path: the path for uploading resources on WebDAV server. Can be file and directory.
:param local_path: the path to local resource for uploading.
:param progress: Progress function. Not supported now. | Below is the instruction that describes the task:
### Input:
Uploads resource to remote path on WebDAV server.
In case resource is directory it will upload all nested files and directories.
More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PUT
:param remote_path: the path for uploading resources on WebDAV server. Can be file and directory.
:param local_path: the path to local resource for uploading.
:param progress: Progress function. Not supported now.
### Response:
def upload(self, remote_path, local_path, progress=None):
"""Uploads resource to remote path on WebDAV server.
In case resource is directory it will upload all nested files and directories.
More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PUT
:param remote_path: the path for uploading resources on WebDAV server. Can be file and directory.
:param local_path: the path to local resource for uploading.
:param progress: Progress function. Not supported now.
"""
if os.path.isdir(local_path):
self.upload_directory(local_path=local_path, remote_path=remote_path, progress=progress)
else:
self.upload_file(local_path=local_path, remote_path=remote_path) |
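Hypothetical usage with a webdavclient-style client; the method simply dispatches on whether local_path is a directory:
client.upload(remote_path='backups/photos', local_path='/home/demo/photos')     # recursive upload
client.upload(remote_path='notes/todo.txt', local_path='/home/demo/todo.txt')   # single file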
def iter_contributor_statistics(self, number=-1, etag=None):
"""Iterate over the contributors list.
See also: http://developer.github.com/v3/repos/statistics/
:param int number: (optional), number of weeks to return. Default -1
will return all of the weeks.
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`ContributorStats <github3.repos.stats.ContributorStats>`
.. note:: All statistics methods may return a 202. On those occasions,
you will not receive any objects. You should store your
iterator and check the new ``last_status`` attribute. If it
is a 202 you should wait before re-requesting.
.. versionadded:: 0.7
"""
url = self._build_url('stats', 'contributors', base_url=self._api)
return self._iter(int(number), url, ContributorStats, etag=etag) | Iterate over the contributors list.
See also: http://developer.github.com/v3/repos/statistics/
:param int number: (optional), number of weeks to return. Default -1
will return all of the weeks.
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`ContributorStats <github3.repos.stats.ContributorStats>`
.. note:: All statistics methods may return a 202. On those occasions,
you will not receive any objects. You should store your
iterator and check the new ``last_status`` attribute. If it
is a 202 you should wait before re-requesting.
.. versionadded:: 0.7 | Below is the instruction that describes the task:
### Input:
Iterate over the contributors list.
See also: http://developer.github.com/v3/repos/statistics/
:param int number: (optional), number of weeks to return. Default -1
will return all of the weeks.
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`ContributorStats <github3.repos.stats.ContributorStats>`
.. note:: All statistics methods may return a 202. On those occasions,
you will not receive any objects. You should store your
iterator and check the new ``last_status`` attribute. If it
is a 202 you should wait before re-requesting.
.. versionadded:: 0.7
### Response:
def iter_contributor_statistics(self, number=-1, etag=None):
"""Iterate over the contributors list.
See also: http://developer.github.com/v3/repos/statistics/
:param int number: (optional), number of weeks to return. Default -1
will return all of the weeks.
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`ContributorStats <github3.repos.stats.ContributorStats>`
.. note:: All statistics methods may return a 202. On those occasions,
you will not receive any objects. You should store your
iterator and check the new ``last_status`` attribute. If it
is a 202 you should wait before re-requesting.
.. versionadded:: 0.7
"""
url = self._build_url('stats', 'contributors', base_url=self._api)
return self._iter(int(number), url, ContributorStats, etag=etag) |
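A sketch of the 202 handling the note describes; repo is assumed to be a github3 Repository, and re-listing the stored iterator is assumed to re-request:
import time
stats = repo.iter_contributor_statistics()
contributors = list(stats)
while not contributors and stats.last_status == 202:
    time.sleep(2)               # GitHub is still computing the statistics
    contributors = list(stats)  # try again with the stored iterator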
def _getH2singleTrait(self, K, verbose=None):
"""
Internal function for parameter initialization
estimate variance components and fixed effect using a linear mixed model with an intercept and 2 random effects (one is noise)
Args:
K: covariance matrix of the non-noise random effect term
"""
verbose = dlimix.getVerbose(verbose)
# Fit single trait model
varg = sp.zeros(self.P)
varn = sp.zeros(self.P)
fixed = sp.zeros((1,self.P))
for p in range(self.P):
y = self.Y[:,p:p+1]
# check for null values
I = sp.isnan(y[:,0])
if I.sum()>0:
y = y[~I,:]
_K = K[~I,:][:,~I]
else:
_K = copy.copy(K)
lmm = dlimix.CLMM()
lmm.setK(_K)
lmm.setSNPs(sp.ones((y.shape[0],1)))
lmm.setPheno(y)
lmm.setCovs(sp.zeros((y.shape[0],1)))
lmm.setVarcompApprox0(-20, 20, 1000)
lmm.process()
delta = sp.exp(lmm.getLdelta0()[0,0])
Vtot = sp.exp(lmm.getLSigma()[0,0])
varg[p] = Vtot
varn[p] = delta*Vtot
fixed[:,p] = lmm.getBetaSNP()
if verbose: print(p)
sth = {}
sth['varg'] = varg
sth['varn'] = varn
sth['fixed'] = fixed
return sth | Internal function for parameter initialization
estimate variance components and fixed effect using a linear mixed model with an intercept and 2 random effects (one is noise)
Args:
K: covariance matrix of the non-noise random effect term | Below is the instruction that describes the task:
### Input:
Internal function for parameter initialization
estimate variance components and fixed effect using a linear mixed model with an intercept and 2 random effects (one is noise)
Args:
K: covariance matrix of the non-noise random effect term
### Response:
def _getH2singleTrait(self, K, verbose=None):
"""
Internal function for parameter initialization
estimate variance components and fixed effect using a linear mixed model with an intercept and 2 random effects (one is noise)
Args:
K: covariance matrix of the non-noise random effect term
"""
verbose = dlimix.getVerbose(verbose)
# Fit single trait model
varg = sp.zeros(self.P)
varn = sp.zeros(self.P)
fixed = sp.zeros((1,self.P))
for p in range(self.P):
y = self.Y[:,p:p+1]
# check for null values
I = sp.isnan(y[:,0])
if I.sum()>0:
y = y[~I,:]
_K = K[~I,:][:,~I]
else:
_K = copy.copy(K)
lmm = dlimix.CLMM()
lmm.setK(_K)
lmm.setSNPs(sp.ones((y.shape[0],1)))
lmm.setPheno(y)
lmm.setCovs(sp.zeros((y.shape[0],1)))
lmm.setVarcompApprox0(-20, 20, 1000)
lmm.process()
delta = sp.exp(lmm.getLdelta0()[0,0])
Vtot = sp.exp(lmm.getLSigma()[0,0])
varg[p] = Vtot
varn[p] = delta*Vtot
fixed[:,p] = lmm.getBetaSNP()
if verbose: print(p)
sth = {}
sth['varg'] = varg
sth['varn'] = varn
sth['fixed'] = fixed
return sth |
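The per-trait narrow-sense heritability implied by the returned variance components; a convenience sketch, not part of the original class:
sth = model._getH2singleTrait(K)                # `model` and `K` set up as elsewhere in the class
h2 = sth['varg'] / (sth['varg'] + sth['varn'])  # elementwise over the P traits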
def transform(self, blocks, y=None):
"""
Computes the content to tag ratio per block, smooths the values, then
predicts content (1) or not-content (0) using a fit k-means cluster model.
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
:class:`np.ndarray`: 2D array of shape (len(feature_mat), 1), where
values are either 0 or 1, corresponding to the kmeans prediction
of content (1) or not-content (0).
"""
preds = (self.kmeans.predict(make_weninger_features(blocks)) > 0).astype(int)
return np.reshape(preds, (-1, 1)) | Computes the content to tag ratio per block, smooths the values, then
predicts content (1) or not-content (0) using a fit k-means cluster model.
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
:class:`np.ndarray`: 2D array of shape (len(feature_mat), 1), where
values are either 0 or 1, corresponding to the kmeans prediction
of content (1) or not-content (0). | Below is the instruction that describes the task:
### Input:
Computes the content to tag ratio per block, smooths the values, then
predicts content (1) or not-content (0) using a fit k-means cluster model.
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
:class:`np.ndarray`: 2D array of shape (len(feature_mat), 1), where
values are either 0 or 1, corresponding to the kmeans prediction
of content (1) or not-content (0).
### Response:
def transform(self, blocks, y=None):
"""
Computes the content to tag ratio per block, smooths the values, then
predicts content (1) or not-content (0) using a fit k-means cluster model.
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
:class:`np.ndarray`: 2D array of shape (len(feature_mat), 1), where
values are either 0 or 1, corresponding to the kmeans prediction
of content (1) or not-content (0).
"""
preds = (self.kmeans.predict(make_weninger_features(blocks)) > 0).astype(int)
return np.reshape(preds, (-1, 1)) |
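Hypothetical end-to-end use with names taken from the docstring; `model` is a fitted instance:
blocks = Blockifier.blockify(html)   # html: raw page source
preds = model.transform(blocks)      # shape (len(blocks), 1), values 0/1
content = [b for b, keep in zip(blocks, preds[:, 0]) if keep]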
def job_exists(name=None):
'''
Check whether the job exists in configured Jenkins jobs.
:param name: The name of the job to check if it exists.
:return: True if job exists, False if job does not exist.
CLI Example:
.. code-block:: bash
salt '*' jenkins.job_exists jobname
'''
if not name:
raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect()
if server.job_exists(name):
return True
else:
return False | Check whether the job exists in configured Jenkins jobs.
:param name: The name of the job to check if it exists.
:return: True if job exists, False if job does not exist.
CLI Example:
.. code-block:: bash
salt '*' jenkins.job_exists jobname | Below is the instruction that describes the task:
### Input:
Check whether the job exists in configured Jenkins jobs.
:param name: The name of the job to check if it exists.
:return: True if job exists, False if job does not exist.
CLI Example:
.. code-block:: bash
salt '*' jenkins.job_exists jobname
### Response:
def job_exists(name=None):
'''
Check whether the job exists in configured Jenkins jobs.
:param name: The name of the job to check if it exists.
:return: True if job exists, False if job does not exist.
CLI Example:
.. code-block:: bash
salt '*' jenkins.job_exists jobname
'''
if not name:
raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect()
if server.job_exists(name):
return True
else:
return False |
def load_bytes(self,
bytes_data,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads bytes to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
filelike_buffer = BytesIO(bytes_data)
client = self.get_conn()
client.upload_fileobj(filelike_buffer, bucket_name, key, ExtraArgs=extra_args) | Loads bytes to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool | Below is the instruction that describes the task:
### Input:
Loads bytes to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
### Response:
def load_bytes(self,
bytes_data,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads bytes to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
filelike_buffer = BytesIO(bytes_data)
client = self.get_conn()
client.upload_fileobj(filelike_buffer, bucket_name, key, ExtraArgs=extra_args) |
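Hypothetical Airflow usage (hook import path and connection id are assumptions); the s3:// key form lets parse_s3_url split bucket and key:
from airflow.hooks.S3_hook import S3Hook  # assumed import path
hook = S3Hook(aws_conn_id='aws_default')
hook.load_bytes(b'hello world',
                key='s3://my-bucket/greetings/hello.txt',
                replace=True,
                encrypt=True)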
async def connect(url, *, apikey=None, insecure=False):
"""Connect to a remote MAAS instance with `apikey`.
Returns a new :class:`Profile` which has NOT been saved. To connect AND
save a new profile::
profile = connect(url, apikey=apikey)
profile = profile.replace(name="mad-hatter")
with profiles.ProfileStore.open() as config:
config.save(profile)
# Optionally, set it as the default.
config.default = profile.name
"""
url = api_url(url)
url = urlparse(url)
if url.username is not None:
raise ConnectError(
"Cannot provide user-name explicitly in URL (%r) when connecting; "
"use login instead." % url.username)
if url.password is not None:
raise ConnectError(
"Cannot provide password explicitly in URL (%r) when connecting; "
"use login instead." % url.username)
if apikey is None:
credentials = None # Anonymous access.
else:
credentials = Credentials.parse(apikey)
description = await fetch_api_description(url, insecure)
# Return a new (unsaved) profile.
return Profile(
name=url.netloc, url=url.geturl(), credentials=credentials,
description=description) | Connect to a remote MAAS instance with `apikey`.
Returns a new :class:`Profile` which has NOT been saved. To connect AND
save a new profile::
profile = connect(url, apikey=apikey)
profile = profile.replace(name="mad-hatter")
with profiles.ProfileStore.open() as config:
config.save(profile)
# Optionally, set it as the default.
config.default = profile.name | Below is the instruction that describes the task:
### Input:
Connect to a remote MAAS instance with `apikey`.
Returns a new :class:`Profile` which has NOT been saved. To connect AND
save a new profile::
profile = connect(url, apikey=apikey)
profile = profile.replace(name="mad-hatter")
with profiles.ProfileStore.open() as config:
config.save(profile)
# Optionally, set it as the default.
config.default = profile.name
### Response:
async def connect(url, *, apikey=None, insecure=False):
"""Connect to a remote MAAS instance with `apikey`.
Returns a new :class:`Profile` which has NOT been saved. To connect AND
save a new profile::
profile = connect(url, apikey=apikey)
profile = profile.replace(name="mad-hatter")
with profiles.ProfileStore.open() as config:
config.save(profile)
# Optionally, set it as the default.
config.default = profile.name
"""
url = api_url(url)
url = urlparse(url)
if url.username is not None:
raise ConnectError(
"Cannot provide user-name explicitly in URL (%r) when connecting; "
"use login instead." % url.username)
if url.password is not None:
raise ConnectError(
"Cannot provide password explicitly in URL (%r) when connecting; "
"use login instead." % url.username)
if apikey is None:
credentials = None # Anonymous access.
else:
credentials = Credentials.parse(apikey)
description = await fetch_api_description(url, insecure)
# Return a new (unsaved) profile.
return Profile(
name=url.netloc, url=url.geturl(), credentials=credentials,
description=description) |
def _sensoryComputeInferenceMode(self, anchorInput):
"""
Infer the location from sensory input. Activate any cells with enough active
synapses to this sensory input. Deactivate all other cells.
@param anchorInput (numpy array)
A sensory input. This will often come from a feature-location pair layer.
"""
if len(anchorInput) == 0:
return
overlaps = self.connections.computeActivity(anchorInput,
self.connectedPermanence)
activeSegments = np.where(overlaps >= self.activationThreshold)[0]
sensorySupportedCells = np.unique(
self.connections.mapSegmentsToCells(activeSegments))
self.bumpPhases = self.cellPhases[:,sensorySupportedCells]
self._computeActiveCells()
self.activeSegments = activeSegments
self.sensoryAssociatedCells = sensorySupportedCells | Infer the location from sensory input. Activate any cells with enough active
synapses to this sensory input. Deactivate all other cells.
@param anchorInput (numpy array)
A sensory input. This will often come from a feature-location pair layer. | Below is the instruction that describes the task:
### Input:
Infer the location from sensory input. Activate any cells with enough active
synapses to this sensory input. Deactivate all other cells.
@param anchorInput (numpy array)
A sensory input. This will often come from a feature-location pair layer.
### Response:
def _sensoryComputeInferenceMode(self, anchorInput):
"""
Infer the location from sensory input. Activate any cells with enough active
synapses to this sensory input. Deactivate all other cells.
@param anchorInput (numpy array)
A sensory input. This will often come from a feature-location pair layer.
"""
if len(anchorInput) == 0:
return
overlaps = self.connections.computeActivity(anchorInput,
self.connectedPermanence)
activeSegments = np.where(overlaps >= self.activationThreshold)[0]
sensorySupportedCells = np.unique(
self.connections.mapSegmentsToCells(activeSegments))
self.bumpPhases = self.cellPhases[:,sensorySupportedCells]
self._computeActiveCells()
self.activeSegments = activeSegments
self.sensoryAssociatedCells = sensorySupportedCells |
def __expand_cluster(self, index_point):
"""!
@brief Expands cluster from specified point in the input data space.
@param[in] index_point (list): Index of a point from the data.
@return (list) Return tuple of list of indexes that belong to the same cluster and list of points that are marked as noise: (cluster, noise), or None if nothing has been expanded.
"""
cluster = None
self.__visited[index_point] = True
neighbors = self.__neighbor_searcher(index_point)
if len(neighbors) >= self.__neighbors:
cluster = [index_point]
self.__belong[index_point] = True
for i in neighbors:
if self.__visited[i] is False:
self.__visited[i] = True
next_neighbors = self.__neighbor_searcher(i)
if len(next_neighbors) >= self.__neighbors:
neighbors += [k for k in next_neighbors if ( (k in neighbors) == False) and k != index_point]
if self.__belong[i] is False:
cluster.append(i)
self.__belong[i] = True
return cluster | !
@brief Expands cluster from specified point in the input data space.
@param[in] index_point (list): Index of a point from the data.
@return (list) Return tuple of list of indexes that belong to the same cluster and list of points that are marked as noise: (cluster, noise), or None if nothing has been expanded. | Below is the instruction that describes the task:
### Input:
!
@brief Expands cluster from specified point in the input data space.
@param[in] index_point (list): Index of a point from the data.
@return (list) Return tuple of list of indexes that belong to the same cluster and list of points that are marked as noise: (cluster, noise), or None if nothing has been expanded.
### Response:
def __expand_cluster(self, index_point):
"""!
@brief Expands cluster from specified point in the input data space.
@param[in] index_point (list): Index of a point from the data.
@return (list) Return tuple of list of indexes that belong to the same cluster and list of points that are marked as noise: (cluster, noise), or None if nothing has been expanded.
"""
cluster = None
self.__visited[index_point] = True
neighbors = self.__neighbor_searcher(index_point)
if len(neighbors) >= self.__neighbors:
cluster = [index_point]
self.__belong[index_point] = True
for i in neighbors:
if self.__visited[i] is False:
self.__visited[i] = True
next_neighbors = self.__neighbor_searcher(i)
if len(next_neighbors) >= self.__neighbors:
neighbors += [k for k in next_neighbors if ( (k in neighbors) == False) and k != index_point]
if self.__belong[i] is False:
cluster.append(i)
self.__belong[i] = True
return cluster |
def create_table(self, name, schema):
"""
Create a new table.
If the table already exists, nothing happens.
Example:
>>> db.create_table("foo", (("id", "integer primary key"),
("value", "text")))
Arguments:
name (str): The name of the table to create.
schema (sequence of tuples): A list of (name, type) tuples
representing each of the columns.
"""
columns = [" ".join(column) for column in schema]
self.execute("CREATE TABLE IF NOT EXISTS {name} ({columns})"
.format(name=name, columns=",".join(columns))) | Create a new table.
If the table already exists, nothing happens.
Example:
>>> db.create_table("foo", (("id", "integer primary key"),
("value", "text")))
Arguments:
name (str): The name of the table to create.
schema (sequence of tuples): A list of (name, type) tuples
representing each of the columns. | Below is the instruction that describes the task:
### Input:
Create a new table.
If the table already exists, nothing happens.
Example:
>>> db.create_table("foo", (("id", "integer primary key"),
("value", "text")))
Arguments:
name (str): The name of the table to create.
schema (sequence of tuples): A list of (name, type) tuples
representing each of the columns.
### Response:
def create_table(self, name, schema):
"""
Create a new table.
If the table already exists, nothing happens.
Example:
>>> db.create_table("foo", (("id", "integer primary key"),
("value", "text")))
Arguments:
name (str): The name of the table to create.
schema (sequence of tuples): A list of (name, type) tuples
representing each of the columns.
"""
columns = [" ".join(column) for column in schema]
self.execute("CREATE TABLE IF NOT EXISTS {name} ({columns})"
.format(name=name, columns=",".join(columns))) |
def update_association(self, association):
"""Add the GO parents of a gene's associated GO IDs to the gene's association."""
bad_goids = set()
# Loop through all sets of GO IDs for all genes
for goids in association.values():
parents = set()
# Iterate thru each GO ID in the current gene's association
for goid in goids:
try:
parents.update(self[goid].get_all_parents())
except:
bad_goids.add(goid.strip())
# Add the GO parents of all GO IDs in the current gene's association
goids.update(parents)
if bad_goids:
sys.stdout.write("{N} GO IDs in assc. are not found in the GO-DAG: {GOs}\n".format(
N=len(bad_goids), GOs=" ".join(bad_goids))) | Add the GO parents of a gene's associated GO IDs to the gene's association. | Below is the instruction that describes the task:
### Input:
Add the GO parents of a gene's associated GO IDs to the gene's association.
### Response:
def update_association(self, association):
"""Add the GO parents of a gene's associated GO IDs to the gene's association."""
bad_goids = set()
# Loop through all sets of GO IDs for all genes
for goids in association.values():
parents = set()
# Iterate thru each GO ID in the current gene's association
for goid in goids:
try:
parents.update(self[goid].get_all_parents())
except:
bad_goids.add(goid.strip())
# Add the GO parents of all GO IDs in the current gene's association
goids.update(parents)
if bad_goids:
sys.stdout.write("{N} GO IDs in assc. are not found in the GO-DAG: {GOs}\n".format(
N=len(bad_goids), GOs=" ".join(bad_goids))) |
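Minimal sketch; godag stands for a loaded goatools GODag and the association maps ids to sets of GO terms:
assoc = {'gene1': {'GO:0042254'}}
godag.update_association(assoc)
sorted(assoc['gene1'])  # now also contains every ancestor of GO:0042254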
def parse_query(self, query_string):
"""
Given a `query_string`, will attempt to return a xapian.Query
Required arguments:
``query_string`` -- A query string to parse
Returns a xapian.Query
"""
if query_string == '*':
return xapian.Query('') # Match everything
elif query_string == '':
return xapian.Query() # Match nothing
qp = xapian.QueryParser()
qp.set_database(self._database())
qp.set_stemmer(xapian.Stem(self.language))
qp.set_stemming_strategy(self.stemming_strategy)
qp.set_default_op(XAPIAN_OPTS[DEFAULT_OPERATOR])
qp.add_boolean_prefix(DJANGO_CT, TERM_PREFIXES[DJANGO_CT])
for field_dict in self.schema:
# since 'django_ct' has a boolean_prefix,
# we ignore it here.
if field_dict['field_name'] == DJANGO_CT:
continue
qp.add_prefix(
field_dict['field_name'],
TERM_PREFIXES['field'] + field_dict['field_name'].upper()
)
vrp = XHValueRangeProcessor(self)
qp.add_valuerangeprocessor(vrp)
return qp.parse_query(query_string, self.flags) | Given a `query_string`, will attempt to return a xapian.Query
Required arguments:
``query_string`` -- A query string to parse
Returns a xapian.Query | Below is the instruction that describes the task:
### Input:
Given a `query_string`, will attempt to return a xapian.Query
Required arguments:
``query_string`` -- A query string to parse
Returns a xapian.Query
### Response:
def parse_query(self, query_string):
"""
Given a `query_string`, will attempt to return a xapian.Query
Required arguments:
``query_string`` -- A query string to parse
Returns a xapian.Query
"""
if query_string == '*':
return xapian.Query('') # Match everything
elif query_string == '':
return xapian.Query() # Match nothing
qp = xapian.QueryParser()
qp.set_database(self._database())
qp.set_stemmer(xapian.Stem(self.language))
qp.set_stemming_strategy(self.stemming_strategy)
qp.set_default_op(XAPIAN_OPTS[DEFAULT_OPERATOR])
qp.add_boolean_prefix(DJANGO_CT, TERM_PREFIXES[DJANGO_CT])
for field_dict in self.schema:
# since 'django_ct' has a boolean_prefix,
# we ignore it here.
if field_dict['field_name'] == DJANGO_CT:
continue
qp.add_prefix(
field_dict['field_name'],
TERM_PREFIXES['field'] + field_dict['field_name'].upper()
)
vrp = XHValueRangeProcessor(self)
qp.add_valuerangeprocessor(vrp)
return qp.parse_query(query_string, self.flags) |
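Hypothetical backend call (a django-haystack-style xapian backend instance is assumed):
q = backend.parse_query('title:django AND NOT author:daniel')
# -> a xapian.Query combining the prefixed field terms with AND / AND_NOT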