code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k) | text (stringlengths 164–112k) |
---|---|---|
def visitValueSetValue(self, ctx: ShExDocParser.ValueSetValueContext):
""" valueSetValue: iriRange | literalRange | languageRange |
'.' (iriExclusion+ | literalExclusion+ | languageExclusion+) """
if ctx.iriRange() or ctx.literalRange() or ctx.languageRange():
self.visitChildren(ctx)
else: # '.' branch - wild card with exclusions
if ctx.iriExclusion():
vs_value = IriStemRange(Wildcard(), [])
self._iri_exclusions(vs_value, ctx.iriExclusion())
elif ctx.literalExclusion():
vs_value = LiteralStemRange(Wildcard(), [])
self._literal_exclusions(vs_value, ctx.literalExclusion())
else:
vs_value = LanguageStemRange(Wildcard(), [])
self._language_exclusions(vs_value, ctx.languageExclusion())
self.nodeconstraint.values.append(vs_value) | valueSetValue: iriRange | literalRange | languageRange |
'.' (iriExclusion+ | literalExclusion+ | languageExclusion+) | Below is the instruction that describes the task:
### Input:
valueSetValue: iriRange | literalRange | languageRange |
'.' (iriExclusion+ | literalExclusion+ | languageExclusion+)
### Response:
def visitValueSetValue(self, ctx: ShExDocParser.ValueSetValueContext):
""" valueSetValue: iriRange | literalRange | languageRange |
'.' (iriExclusion+ | literalExclusion+ | languageExclusion+) """
if ctx.iriRange() or ctx.literalRange() or ctx.languageRange():
self.visitChildren(ctx)
else: # '.' branch - wild card with exclusions
if ctx.iriExclusion():
vs_value = IriStemRange(Wildcard(), [])
self._iri_exclusions(vs_value, ctx.iriExclusion())
elif ctx.literalExclusion():
vs_value = LiteralStemRange(Wildcard(), [])
self._literal_exclusions(vs_value, ctx.literalExclusion())
else:
vs_value = LanguageStemRange(Wildcard(), [])
self._language_exclusions(vs_value, ctx.languageExclusion())
self.nodeconstraint.values.append(vs_value) |
def list_dir(root, prefix=False):
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = list(
filter(
lambda p: os.path.isdir(os.path.join(root, p)),
os.listdir(root)
)
)
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories | List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found | Below is the instruction that describes the task:
### Input:
List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
### Response:
def list_dir(root, prefix=False):
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = list(
filter(
lambda p: os.path.isdir(os.path.join(root, p)),
os.listdir(root)
)
)
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories |
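A hedged usage sketch for list_dir, assuming the function above and the os module are in scope; the scratch directory and its contents are made up:

import os
import tempfile

root = tempfile.mkdtemp()                             # hypothetical scratch directory
os.makedirs(os.path.join(root, "train"))
os.makedirs(os.path.join(root, "test"))
open(os.path.join(root, "README.txt"), "w").close()   # a plain file, which should be skipped

print(list_dir(root))               # ['test', 'train'] in os.listdir order; the file is excluded
print(list_dir(root, prefix=True))  # the same names joined onto root as full paths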
def get_alias(self):
"""
Gets the alias for the table or the auto_alias if one is set.
If there isn't any kind of alias, None is returned.
:returns: The table alias, auto_alias, or None
:rtype: str or None
"""
alias = None
if self.alias:
alias = self.alias
elif self.auto_alias:
alias = self.auto_alias
return alias | Gets the alias for the table or the auto_alias if one is set.
If there isn't any kind of alias, None is returned.
:returns: The table alias, auto_alias, or None
:rtype: str or None | Below is the instruction that describes the task:
### Input:
Gets the alias for the table or the auto_alias if one is set.
If there isn't any kind of alias, None is returned.
:returns: The table alias, auto_alias, or None
:rtype: str or None
### Response:
def get_alias(self):
"""
Gets the alias for the table or the auto_alias if one is set.
If there isn't any kind of alias, None is returned.
:returns: The table alias, auto_alias, or None
:rtype: str or None
"""
alias = None
if self.alias:
alias = self.alias
elif self.auto_alias:
alias = self.auto_alias
return alias |
def check_array(array, *args, **kwargs):
"""Validate inputs
Parameters
----------
accept_dask_array : bool, default True
accept_dask_dataframe : bool, default False
accept_unknown_chunks : bool, default False
For dask Arrays, whether to allow the `.chunks` attribute to contain
any unknown values
accept_multiple_blocks : bool, default False
For dask Arrays, whether to allow multiple blocks along the second
axis.
*args, **kwargs : tuple, dict
Passed through to scikit-learn
Returns
-------
array : obj
Same type as the input
Notes
-----
For dask.array, a small numpy array emulating ``array`` is created
and passed to scikit-learn's ``check_array`` with all the additional
arguments.
"""
accept_dask_array = kwargs.pop("accept_dask_array", True)
preserve_pandas_dataframe = kwargs.pop("preserve_pandas_dataframe", False)
accept_dask_dataframe = kwargs.pop("accept_dask_dataframe", False)
accept_unknown_chunks = kwargs.pop("accept_unknown_chunks", False)
accept_multiple_blocks = kwargs.pop("accept_multiple_blocks", False)
if isinstance(array, da.Array):
if not accept_dask_array:
raise TypeError
if not accept_unknown_chunks:
if np.isnan(array.shape[0]):
raise TypeError(
"Cannot operate on Dask array with unknown chunk sizes."
)
if not accept_multiple_blocks and array.ndim > 1:
if len(array.chunks[1]) > 1:
msg = (
"Chunking is only allowed on the first axis. "
"Use 'array.rechunk({1: array.shape[1]})' to "
"rechunk to a single block along the second axis."
)
raise TypeError(msg)
# hmmm, we want to catch things like shape errors.
# I'd like to make a small sample somehow
shape = array.shape
if len(shape) == 2:
shape = (min(10, shape[0]), shape[1])
elif len(shape) == 1:
shape = min(10, shape[0])
sample = np.ones(shape=shape, dtype=array.dtype)
sk_validation.check_array(sample, *args, **kwargs)
return array
elif isinstance(array, dd.DataFrame):
if not accept_dask_dataframe:
raise TypeError("This estimator does not support dask dataframes.")
# TODO: sample?
return array
elif isinstance(array, pd.DataFrame) and preserve_pandas_dataframe:
# TODO: validation?
return array
else:
return sk_validation.check_array(array, *args, **kwargs) | Validate inputs
Parameters
----------
accept_dask_array : bool, default True
accept_dask_dataframe : bool, default False
accept_unknown_chunks : bool, default False
For dask Arrays, whether to allow the `.chunks` attribute to contain
any unknown values
accept_multiple_blocks : bool, default False
For dask Arrays, whether to allow multiple blocks along the second
axis.
*args, **kwargs : tuple, dict
Passed through to scikit-learn
Returns
-------
array : obj
Same type as the input
Notes
-----
For dask.array, a small numpy array emulating ``array`` is created
and passed to scikit-learn's ``check_array`` with all the additional
arguments. | Below is the instruction that describes the task:
### Input:
Validate inputs
Parameters
----------
accept_dask_array : bool, default True
accept_dask_dataframe : bool, default False
accept_unknown_chunks : bool, default False
For dask Arrays, whether to allow the `.chunks` attribute to contain
any unknown values
accept_multiple_blocks : bool, default False
For dask Arrays, whether to allow multiple blocks along the second
axis.
*args, **kwargs : tuple, dict
Passed through to scikit-learn
Returns
-------
array : obj
Same type as the input
Notes
-----
For dask.array, a small numpy array emulating ``array`` is created
and passed to scikit-learn's ``check_array`` with all the additional
arguments.
### Response:
def check_array(array, *args, **kwargs):
"""Validate inputs
Parameters
----------
accept_dask_array : bool, default True
accept_dask_dataframe : bool, default False
accept_unknown_chunks : bool, default False
For dask Arrays, whether to allow the `.chunks` attribute to contain
any unknown values
accept_multiple_blocks : bool, default False
For dask Arrays, whether to allow multiple blocks along the second
axis.
*args, **kwargs : tuple, dict
Passed through to scikit-learn
Returns
-------
array : obj
Same type as the input
Notes
-----
For dask.array, a small numpy array emulating ``array`` is created
and passed to scikit-learn's ``check_array`` with all the additional
arguments.
"""
accept_dask_array = kwargs.pop("accept_dask_array", True)
preserve_pandas_dataframe = kwargs.pop("preserve_pandas_dataframe", False)
accept_dask_dataframe = kwargs.pop("accept_dask_dataframe", False)
accept_unknown_chunks = kwargs.pop("accept_unknown_chunks", False)
accept_multiple_blocks = kwargs.pop("accept_multiple_blocks", False)
if isinstance(array, da.Array):
if not accept_dask_array:
raise TypeError
if not accept_unknown_chunks:
if np.isnan(array.shape[0]):
raise TypeError(
"Cannot operate on Dask array with unknown chunk sizes."
)
if not accept_multiple_blocks and array.ndim > 1:
if len(array.chunks[1]) > 1:
msg = (
"Chunking is only allowed on the first axis. "
"Use 'array.rechunk({1: array.shape[1]})' to "
"rechunk to a single block along the second axis."
)
raise TypeError(msg)
# hmmm, we want to catch things like shape errors.
# I'd like to make a small sample somehow
shape = array.shape
if len(shape) == 2:
shape = (min(10, shape[0]), shape[1])
elif len(shape) == 1:
shape = min(10, shape[0])
sample = np.ones(shape=shape, dtype=array.dtype)
sk_validation.check_array(sample, *args, **kwargs)
return array
elif isinstance(array, dd.DataFrame):
if not accept_dask_dataframe:
raise TypeError("This estimator does not support dask dataframes.")
# TODO: sample?
return array
elif isinstance(array, pd.DataFrame) and preserve_pandas_dataframe:
# TODO: validation?
return array
else:
return sk_validation.check_array(array, *args, **kwargs) |
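A hedged usage sketch of check_array with dask arrays, assuming dask, numpy, and scikit-learn are installed and the function's module-level imports (da, np, sk_validation) are in scope; the shapes and chunking are made up:

import dask.array as da

X = da.ones((1000, 4), chunks=(100, 4))      # blocks only along the first axis
print(check_array(X) is X)                   # True: the dask array is returned unchanged

X_bad = da.ones((1000, 4), chunks=(100, 2))  # also blocked along the second axis
try:
    check_array(X_bad)
except TypeError as err:
    print(err)                               # asks you to rechunk to a single block along axis 1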
def get_icon(name, as_qicon=False):
"""Returns a `QPixmap` containing the given image, or a QIcon if `as_qicon`
is True"""
filename = name + ".png"
icon = icons.get(filename)
if not icon:
path = os.path.dirname(__file__)
path = os.path.join(path, "icons")
filepath = os.path.join(path, filename)
if not os.path.exists(filepath):
filepath = os.path.join(path, "pink.png")
icon = QtGui.QPixmap(filepath)
icons[filename] = icon
return QtGui.QIcon(icon) if as_qicon else icon | Returns a `QPixmap` containing the given image, or a QIcon if `as_qicon`
is True | Below is the instruction that describes the task:
### Input:
Returns a `QPixmap` containing the given image, or a QIcon if `as_qicon`
is True
### Response:
def get_icon(name, as_qicon=False):
"""Returns a `QPixmap` containing the given image, or a QIcon if `as_qicon`
is True"""
filename = name + ".png"
icon = icons.get(filename)
if not icon:
path = os.path.dirname(__file__)
path = os.path.join(path, "icons")
filepath = os.path.join(path, filename)
if not os.path.exists(filepath):
filepath = os.path.join(path, "pink.png")
icon = QtGui.QPixmap(filepath)
icons[filename] = icon
return QtGui.QIcon(icon) if as_qicon else icon |
def export(self, name, columns, points):
"""Write the points to the Prometheus exporter using Gauge."""
logger.debug("Export {} stats to Prometheus exporter".format(name))
# Remove non number stats and convert all to float (for Boolean)
data = {k: float(v) for (k, v) in iteritems(dict(zip(columns, points))) if isinstance(v, Number)}
# Write metrics to the Prometheus exporter
for k, v in iteritems(data):
# Prometheus metric name: prefix_<glances stats name>
metric_name = self.prefix + self.METRIC_SEPARATOR + str(name) + self.METRIC_SEPARATOR + str(k)
# Prometheus is very sensitive to the metric name
# See: https://prometheus.io/docs/practices/naming/
for c in ['.', '-', '/', ' ']:
metric_name = metric_name.replace(c, self.METRIC_SEPARATOR)
# Get the labels
labels = self.parse_tags(self.labels)
# Manage an internal dict between metric name and Gauge
if metric_name not in self._metric_dict:
self._metric_dict[metric_name] = Gauge(metric_name, k,
labelnames=listkeys(labels))
# Write the value
if hasattr(self._metric_dict[metric_name], 'labels'):
# Add the labels (see issue #1255)
self._metric_dict[metric_name].labels(**labels).set(v)
else:
self._metric_dict[metric_name].set(v) | Write the points to the Prometheus exporter using Gauge. | Below is the instruction that describes the task:
### Input:
Write the points to the Prometheus exporter using Gauge.
### Response:
def export(self, name, columns, points):
"""Write the points to the Prometheus exporter using Gauge."""
logger.debug("Export {} stats to Prometheus exporter".format(name))
# Remove non number stats and convert all to float (for Boolean)
data = {k: float(v) for (k, v) in iteritems(dict(zip(columns, points))) if isinstance(v, Number)}
# Write metrics to the Prometheus exporter
for k, v in iteritems(data):
# Prometheus metric name: prefix_<glances stats name>
metric_name = self.prefix + self.METRIC_SEPARATOR + str(name) + self.METRIC_SEPARATOR + str(k)
# Prometheus is very sensitive to the metric name
# See: https://prometheus.io/docs/practices/naming/
for c in ['.', '-', '/', ' ']:
metric_name = metric_name.replace(c, self.METRIC_SEPARATOR)
# Get the labels
labels = self.parse_tags(self.labels)
# Manage an internal dict between metric name and Gauge
if metric_name not in self._metric_dict:
self._metric_dict[metric_name] = Gauge(metric_name, k,
labelnames=listkeys(labels))
# Write the value
if hasattr(self._metric_dict[metric_name], 'labels'):
# Add the labels (see issue #1255)
self._metric_dict[metric_name].labels(**labels).set(v)
else:
self._metric_dict[metric_name].set(v) |
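A small standalone sketch of the metric-name sanitization step above; the prefix, stat name, and key are hypothetical and METRIC_SEPARATOR is assumed to be an underscore:

METRIC_SEPARATOR = "_"
prefix, name, k = "glances", "cpu", "user.percent"
metric_name = prefix + METRIC_SEPARATOR + str(name) + METRIC_SEPARATOR + str(k)
for c in ['.', '-', '/', ' ']:
    metric_name = metric_name.replace(c, METRIC_SEPARATOR)
print(metric_name)   # glances_cpu_user_percent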
def dict_has_all_keys(self, keys):
"""
Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has all of the given keys.
Fails on SArrays whose data type is not ``dict``.
Parameters
----------
keys : list
A list of key values to check each dictionary against.
Returns
-------
out : SArray
A SArray of int type, where each element indicates whether the
input SArray element contains all keys in the input list.
See Also
--------
dict_has_any_keys
Examples
--------
>>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_all_keys(["is", "this"])
dtype: int
Rows: 2
[1, 0]
"""
if not _is_non_string_iterable(keys):
keys = [keys]
with cython_context():
return SArray(_proxy=self.__proxy__.dict_has_all_keys(keys)) | Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has all of the given keys.
Fails on SArrays whose data type is not ``dict``.
Parameters
----------
keys : list
A list of key values to check each dictionary against.
Returns
-------
out : SArray
A SArray of int type, where each element indicates whether the
input SArray element contains all keys in the input list.
See Also
--------
dict_has_any_keys
Examples
--------
>>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_all_keys(["is", "this"])
dtype: int
Rows: 2
[1, 0] | Below is the instruction that describes the task:
### Input:
Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has all of the given keys.
Fails on SArrays whose data type is not ``dict``.
Parameters
----------
keys : list
A list of key values to check each dictionary against.
Returns
-------
out : SArray
A SArray of int type, where each element indicates whether the
input SArray element contains all keys in the input list.
See Also
--------
dict_has_any_keys
Examples
--------
>>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_all_keys(["is", "this"])
dtype: int
Rows: 2
[1, 0]
### Response:
def dict_has_all_keys(self, keys):
"""
Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has all of the given keys.
Fails on SArrays whose data type is not ``dict``.
Parameters
----------
keys : list
A list of key values to check each dictionary against.
Returns
-------
out : SArray
A SArray of int type, where each element indicates whether the
input SArray element contains all keys in the input list.
See Also
--------
dict_has_any_keys
Examples
--------
>>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_all_keys(["is", "this"])
dtype: int
Rows: 2
[1, 0]
"""
if not _is_non_string_iterable(keys):
keys = [keys]
with cython_context():
return SArray(_proxy=self.__proxy__.dict_has_all_keys(keys)) |
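A minimal pure-Python mirror of the same semantics, handy for checking the docstring example without turicreate; this is an illustrative re-implementation, not the SArray code path:

def dict_has_all_keys_py(dicts, keys):
    # 1 if every key is present in the element's dict, else 0
    return [int(all(k in d for k in keys)) for d in dicts]

print(dict_has_all_keys_py(
    [{"this": 1, "is": 5, "dog": 7}, {"this": 2, "are": 1, "cat": 5}],
    ["is", "this"]))   # [1, 0], matching the docstring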
def allVariantAnnotationSets(self):
"""
Return an iterator over all variant annotation sets
in the data repo
"""
for dataset in self.getDatasets():
for variantSet in dataset.getVariantSets():
for vaSet in variantSet.getVariantAnnotationSets():
yield vaSet | Return an iterator over all variant annotation sets
in the data repo | Below is the instruction that describes the task:
### Input:
Return an iterator over all variant annotation sets
in the data repo
### Response:
def allVariantAnnotationSets(self):
"""
Return an iterator over all variant annotation sets
in the data repo
"""
for dataset in self.getDatasets():
for variantSet in dataset.getVariantSets():
for vaSet in variantSet.getVariantAnnotationSets():
yield vaSet |
def lv_load_areas(self):
""" #TODO: description
"""
for lv_load_area in self._grid._graph.nodes():
if isinstance(lv_load_area, LVLoadAreaDing0):
if lv_load_area.ring == self:
yield lv_load_area | #TODO: description | Below is the instruction that describes the task:
### Input:
#TODO: description
### Response:
def lv_load_areas(self):
""" #TODO: description
"""
for lv_load_area in self._grid._graph.nodes():
if isinstance(lv_load_area, LVLoadAreaDing0):
if lv_load_area.ring == self:
yield lv_load_area |
def number(items):
"""Maps numbering onto given values"""
n = len(items)
if n == 0:
return items
places = str(int(math.log10(n) // 1 + 1))
format = '[{0[0]:' + str(int(places)) + 'd}] {0[1]}'
return map(
lambda x: format.format(x),
enumerate(items)
) | Maps numbering onto given values | Below is the instruction that describes the task:
### Input:
Maps numbering onto given values
### Response:
def number(items):
"""Maps numbering onto given values"""
n = len(items)
if n == 0:
return items
places = str(int(math.log10(n) // 1 + 1))
format = '[{0[0]:' + str(int(places)) + 'd}] {0[1]}'
return map(
lambda x: format.format(x),
enumerate(items)
) |
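A quick usage sketch, assuming number and its math import are in scope; the items are arbitrary:

print(list(number(["alpha", "beta", "gamma"])))
# ['[0] alpha', '[1] beta', '[2] gamma']  (zero-based; the index width grows with len(items))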
def get_entity(self, ilx_id: str) -> dict:
""" Gets full meta data (expect their annotations and relationships) from is ILX ID """
ilx_id = self.fix_ilx(ilx_id)
url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
identifier = ilx_id,
api_key = self.api_key,
)
return self.get(url) | Gets full metadata (except for their annotations and relationships) from its ILX ID | Below is the instruction that describes the task:
### Input:
Gets full metadata (except for their annotations and relationships) from its ILX ID
### Response:
def get_entity(self, ilx_id: str) -> dict:
""" Gets full meta data (expect their annotations and relationships) from is ILX ID """
ilx_id = self.fix_ilx(ilx_id)
url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
identifier = ilx_id,
api_key = self.api_key,
)
return self.get(url) |
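A sketch of just the URL construction above; the base URL, API key, and identifier are hypothetical, and the fix_ilx normalization step is skipped:

base_url = "https://example.org/api/1/"   # hypothetical
api_key = "MY_KEY"                        # hypothetical
ilx_id = "ilx_0101431"                    # hypothetical
url = base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
    identifier=ilx_id, api_key=api_key)
print(url)   # https://example.org/api/1/ilx/search/identifier/ilx_0101431?key=MY_KEY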
def nfa_dot_importer(input_file: str) -> dict:
""" Imports a NFA from a DOT file.
The following attributes of .dot files are recognized:
• nodeX shape=doublecircle -> accepting node;
• nodeX root=true -> initial node;
• edgeX label="a" -> action in alphabet;
• fakeX style=invisible -> dummy invisible nodes pointing
to initial state (it will be skipped);
• fakeX->S [style=bold] -> dummy transitions to draw arrows
pointing to initial states (they will be skipped).
All invisible nodes are skipped.
Forbidden names:
• 'fake' used for graphical purposes to draw the arrow of
the initial state
• 'sink' used as additional state when completing a NFA
Forbidden characters:
• "
• '
• (
• )
• spaces
:param str input_file: Path to input DOT file;
:return: *(dict)* representing a NFA.
"""
# pyDot Object
g = pydot.graph_from_dot_file(input_file)[0]
states = set()
initial_states = set()
accepting_states = set()
replacements = {'"': '', "'": '', '(': '', ')': '', ' ': ''}
for node in g.get_nodes():
attributes = node.get_attributes()
if node.get_name() == 'fake' \
or node.get_name() == 'None' \
or node.get_name() == 'graph' \
or node.get_name() == 'node':
continue
if 'style' in attributes \
and attributes['style'] == 'invisible':
continue
node_reference = __replace_all(replacements,
node.get_name()).split(',')
if len(node_reference) > 1:
node_reference = tuple(node_reference)
else:
node_reference = node_reference[0]
states.add(node_reference)
for attribute in attributes:
if attribute == 'root':
initial_states.add(node_reference)
if attribute == 'shape' \
and attributes['shape'] == 'doublecircle':
accepting_states.add(node_reference)
alphabet = set()
transitions = {}
for edge in g.get_edges():
source = __replace_all(replacements,
edge.get_source()).split(',')
if len(source) > 1:
source = tuple(source)
else:
source = source[0]
destination = __replace_all(replacements,
edge.get_destination()).split(',')
if len(destination) > 1:
destination = tuple(destination)
else:
destination = destination[0]
if source not in states or destination not in states:
continue
label = __replace_all(replacements, edge.get_label())
alphabet.add(label)
transitions.setdefault((source, label), set()).add(
destination)
nfa = {
'alphabet': alphabet,
'states': states,
'initial_states': initial_states,
'accepting_states': accepting_states,
'transitions': transitions
}
return nfa | Imports a NFA from a DOT file.
The following attributes of .dot files are recognized:
• nodeX shape=doublecircle -> accepting node;
• nodeX root=true -> initial node;
• edgeX label="a" -> action in alphabet;
• fakeX style=invisible -> dummy invisible nodes pointing
to initial state (it will be skipped);
• fakeX->S [style=bold] -> dummy transitions to draw arrows
pointing to initial states (they will be skipped).
All invisible nodes are skipped.
Forbidden names:
• 'fake' used for graphical purposes to draw the arrow of
the initial state
• 'sink' used as additional state when completing a NFA
Forbidden characters:
• "
• '
• (
• )
• spaces
:param str input_file: Path to input DOT file;
:return: *(dict)* representing a NFA. | Below is the instruction that describes the task:
### Input:
Imports a NFA from a DOT file.
The following attributes of .dot files are recognized:
• nodeX shape=doublecircle -> accepting node;
• nodeX root=true -> initial node;
• edgeX label="a" -> action in alphabet;
• fakeX style=invisible -> dummy invisible nodes pointing
to initial state (it will be skipped);
• fakeX->S [style=bold] -> dummy transitions to draw arrows
pointing to initial states (they will be skipped).
All invisible nodes are skipped.
Forbidden names:
• 'fake' used for graphical purposes to draw the arrow of
the initial state
• 'sink' used as additional state when completing a NFA
Forbidden characters:
• "
• '
• (
• )
• spaces
:param str input_file: Path to input DOT file;
:return: *(dict)* representing a NFA.
### Response:
def nfa_dot_importer(input_file: str) -> dict:
""" Imports a NFA from a DOT file.
The following attributes of .dot files are recognized:
• nodeX shape=doublecircle -> accepting node;
• nodeX root=true -> initial node;
• edgeX label="a" -> action in alphabet;
• fakeX style=invisible -> dummy invisible nodes pointing
to initial state (it will be skipped);
• fakeX->S [style=bold] -> dummy transitions to draw arrows
pointing to initial states (they will be skipped).
All invisible nodes are skipped.
Forbidden names:
• 'fake' used for graphical purposes to draw the arrow of
the initial state
• 'sink' used as additional state when completing a NFA
Forbidden characters:
• "
• '
• (
• )
• spaces
:param str input_file: Path to input DOT file;
:return: *(dict)* representing a NFA.
"""
# pyDot Object
g = pydot.graph_from_dot_file(input_file)[0]
states = set()
initial_states = set()
accepting_states = set()
replacements = {'"': '', "'": '', '(': '', ')': '', ' ': ''}
for node in g.get_nodes():
attributes = node.get_attributes()
if node.get_name() == 'fake' \
or node.get_name() == 'None' \
or node.get_name() == 'graph' \
or node.get_name() == 'node':
continue
if 'style' in attributes \
and attributes['style'] == 'invisible':
continue
node_reference = __replace_all(replacements,
node.get_name()).split(',')
if len(node_reference) > 1:
node_reference = tuple(node_reference)
else:
node_reference = node_reference[0]
states.add(node_reference)
for attribute in attributes:
if attribute == 'root':
initial_states.add(node_reference)
if attribute == 'shape' \
and attributes['shape'] == 'doublecircle':
accepting_states.add(node_reference)
alphabet = set()
transitions = {}
for edge in g.get_edges():
source = __replace_all(replacements,
edge.get_source()).split(',')
if len(source) > 1:
source = tuple(source)
else:
source = source[0]
destination = __replace_all(replacements,
edge.get_destination()).split(',')
if len(destination) > 1:
destination = tuple(destination)
else:
destination = destination[0]
if source not in states or destination not in states:
continue
label = __replace_all(replacements, edge.get_label())
alphabet.add(label)
transitions.setdefault((source, label), set()).add(
destination)
nfa = {
'alphabet': alphabet,
'states': states,
'initial_states': initial_states,
'accepting_states': accepting_states,
'transitions': transitions
}
return nfa |
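A hedged end-to-end sketch, assuming nfa_dot_importer and its __replace_all helper are in scope and pydot is installed; the DOT source, file name, and state names are made up:

dot_source = '''digraph {
fake [style=invisible]
s0 [root=true]
s1 [shape=doublecircle]
fake -> s0 [style=bold]
s0 -> s1 [label="a"]
}'''
with open("tiny_nfa.dot", "w") as f:
    f.write(dot_source)

nfa = nfa_dot_importer("tiny_nfa.dot")
print(nfa["initial_states"], nfa["accepting_states"])   # {'s0'} {'s1'}
print(nfa["transitions"])                               # {('s0', 'a'): {'s1'}}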
def result(self, result):
"""
Query result post processing.
@param result: A query result.
@type result: L{sxbase.SchemaObject}
"""
if result is None:
log.debug('%s, not-found', self.ref)
return
if self.resolved:
result = result.resolve()
log.debug('%s, found as: %s', self.ref, Repr(result))
self.history.append(result)
return result | Query result post processing.
@param result: A query result.
@type result: L{sxbase.SchemaObject} | Below is the instruction that describes the task:
### Input:
Query result post processing.
@param result: A query result.
@type result: L{sxbase.SchemaObject}
### Response:
def result(self, result):
"""
Query result post processing.
@param result: A query result.
@type result: L{sxbase.SchemaObject}
"""
if result is None:
log.debug('%s, not-found', self.ref)
return
if self.resolved:
result = result.resolve()
log.debug('%s, found as: %s', self.ref, Repr(result))
self.history.append(result)
return result |
def schedule_to_proto_dicts(schedule: Schedule) -> Iterable[Dict]:
"""Convert a schedule into an iterable of proto dictionaries.
Args:
schedule: The schedule to convert to a proto dict. Must contain only
gates that can be cast to xmon gates.
Yields:
A proto dictionary corresponding to an Operation proto.
"""
last_time_picos = None # type: Optional[int]
for so in schedule.scheduled_operations:
op = gate_to_proto_dict(
cast(ops.GateOperation, so.operation).gate,
so.operation.qubits)
time_picos = so.time.raw_picos()
if last_time_picos is None:
op['incremental_delay_picoseconds'] = time_picos
else:
op['incremental_delay_picoseconds'] = time_picos - last_time_picos
last_time_picos = time_picos
yield op | Convert a schedule into an iterable of proto dictionaries.
Args:
schedule: The schedule to convert to a proto dict. Must contain only
gates that can be cast to xmon gates.
Yields:
A proto dictionary corresponding to an Operation proto. | Below is the instruction that describes the task:
### Input:
Convert a schedule into an iterable of proto dictionaries.
Args:
schedule: The schedule to convert to a proto dict. Must contain only
gates that can be cast to xmon gates.
Yields:
A proto dictionary corresponding to an Operation proto.
### Response:
def schedule_to_proto_dicts(schedule: Schedule) -> Iterable[Dict]:
"""Convert a schedule into an iterable of proto dictionaries.
Args:
schedule: The schedule to convert to a proto dict. Must contain only
gates that can be cast to xmon gates.
Yields:
A proto dictionary corresponding to an Operation proto.
"""
last_time_picos = None # type: Optional[int]
for so in schedule.scheduled_operations:
op = gate_to_proto_dict(
cast(ops.GateOperation, so.operation).gate,
so.operation.qubits)
time_picos = so.time.raw_picos()
if last_time_picos is None:
op['incremental_delay_picoseconds'] = time_picos
else:
op['incremental_delay_picoseconds'] = time_picos - last_time_picos
last_time_picos = time_picos
yield op |
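A toy illustration of the incremental-delay bookkeeping in isolation; the picosecond timestamps are invented and this is not the cirq code path:

times_picos = [0, 1000, 2500, 2500]
last = None
delays = []
for t in times_picos:
    delays.append(t if last is None else t - last)
    last = t
print(delays)   # [0, 1000, 1500, 0]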
def from_string(string):
"""
Construct an AdfKey object from the string.
Parameters
----------
string : str
A string.
Returns
-------
adfkey : AdfKey
An AdfKey object recovered from the string.
Raises
------
ValueError
Currently nested subkeys are not supported. If ``subend`` was found
a ValueError would be raised.
Notes
-----
Only the first block key will be returned.
"""
def is_float(s):
if '.' in s or 'E' in s or 'e' in s:
return True
else:
return False
if string.find("\n") == -1:
el = string.split()
if len(el) > 1:
if string.find("=") != -1:
options = list(map(lambda s: s.split("="), el[1:]))
else:
options = el[1:]
for i, op in enumerate(options):
if isinstance(op, list) and is_numeric(op[1]):
op[1] = float(op[1]) if is_float(op[1]) else int(op[1])
elif is_numeric(op):
options[i] = float(op) if is_float(op) else int(op)
else:
options = None
return AdfKey(el[0], options)
if string.find('subend') != -1:
raise ValueError("Nested subkeys are not supported!")
key = None
for line in iterlines(string):
if line == "":
continue
el = line.strip().split()
if len(el) == 0:
continue
if el[0].upper() in AdfKey.block_keys:
if key is None:
key = AdfKey.from_string(line)
else:
return key
elif el[0].upper() == 'END':
return key
elif key is not None:
key.add_subkey(AdfKey.from_string(line))
else:
raise Exception("IncompleteKey: 'END' is missing!") | Construct an AdfKey object from the string.
Parameters
----------
string : str
A string.
Returns
-------
adfkey : AdfKey
An AdfKey object recovered from the string.
Raises
------
ValueError
Currently nested subkeys are not supported. If ``subend`` was found
a ValueError would be raised.
Notes
-----
Only the first block key will be returned. | Below is the instruction that describes the task:
### Input:
Construct an AdfKey object from the string.
Parameters
----------
string : str
A string.
Returns
-------
adfkey : AdfKey
An AdfKey object recovered from the string.
Raises
------
ValueError
Currently nested subkeys are not supported. If ``subend`` was found
a ValueError would be raised.
Notes
-----
Only the first block key will be returned.
### Response:
def from_string(string):
"""
Construct an AdfKey object from the string.
Parameters
----------
string : str
A string.
Returns
-------
adfkey : AdfKey
An AdfKey object recovered from the string.
Raises
------
ValueError
Currently nested subkeys are not supported. If ``subend`` was found
a ValueError would be raised.
Notes
-----
Only the first block key will be returned.
"""
def is_float(s):
if '.' in s or 'E' in s or 'e' in s:
return True
else:
return False
if string.find("\n") == -1:
el = string.split()
if len(el) > 1:
if string.find("=") != -1:
options = list(map(lambda s: s.split("="), el[1:]))
else:
options = el[1:]
for i, op in enumerate(options):
if isinstance(op, list) and is_numeric(op[1]):
op[1] = float(op[1]) if is_float(op[1]) else int(op[1])
elif is_numeric(op):
options[i] = float(op) if is_float(op) else int(op)
else:
options = None
return AdfKey(el[0], options)
if string.find('subend') != -1:
raise ValueError("Nested subkeys are not supported!")
key = None
for line in iterlines(string):
if line == "":
continue
el = line.strip().split()
if len(el) == 0:
continue
if el[0].upper() in AdfKey.block_keys:
if key is None:
key = AdfKey.from_string(line)
else:
return key
elif el[0].upper() == 'END':
return key
elif key is not None:
key.add_subkey(AdfKey.from_string(line))
else:
raise Exception("IncompleteKey: 'END' is missing!") |
def inverted(self):
"""Return the inverse of the transform."""
# This is a bit of hackery so that we can put a single "inverse"
# function here. If we just made "self._inverse_type" point to the class
# in question, it wouldn't be defined yet. This way, it's done
# at runtime and we avoid the definition problem. Hackish, but better
# than repeating code everywhere or making a relatively complex
# metaclass.
inverse_type = globals()[self._inverse_type]
return inverse_type(self._center_longitude, self._center_latitude,
self._resolution) | Return the inverse of the transform. | Below is the instruction that describes the task:
### Input:
Return the inverse of the transform.
### Response:
def inverted(self):
"""Return the inverse of the transform."""
# This is a bit of hackery so that we can put a single "inverse"
# function here. If we just made "self._inverse_type" point to the class
# in question, it wouldn't be defined yet. This way, it's done
# at runtime and we avoid the definition problem. Hackish, but better
# than repeating code everywhere or making a relatively complex
# metaclass.
inverse_type = globals()[self._inverse_type]
return inverse_type(self._center_longitude, self._center_latitude,
self._resolution) |
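A minimal standalone illustration of the globals()-lookup trick used above; the class names are invented and the constructor arguments are dropped for brevity:

class Forward:
    _inverse_type = "Backward"
    def inverted(self):
        # resolve the inverse class by name at runtime, once both classes exist
        return globals()[self._inverse_type]()

class Backward:
    pass

print(type(Forward().inverted()).__name__)   # Backward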
def call_fan(tstat):
"""
Toggles the fan
"""
old_fan = tstat.fan
tstat.write({
'fan': not old_fan,
})
def restore():
tstat.write({
'fan': old_fan,
})
return restore | Toggles the fan | Below is the instruction that describes the task:
### Input:
Toggles the fan
### Response:
def call_fan(tstat):
"""
Toggles the fan
"""
old_fan = tstat.fan
tstat.write({
'fan': not old_fan,
})
def restore():
tstat.write({
'fan': old_fan,
})
return restore |
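A usage sketch with a stand-in thermostat object, since the real tstat interface is not shown here; FakeTstat is hypothetical:

class FakeTstat:
    def __init__(self):
        self.fan = False
    def write(self, payload):
        self.fan = payload.get("fan", self.fan)

tstat = FakeTstat()
restore = call_fan(tstat)
print(tstat.fan)   # True: the fan was toggled
restore()
print(tstat.fan)   # False: the original state was restored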
def get_field_type(field):
"""
Returns field type/possible values.
"""
if isinstance(field, core_filters.MappedMultipleChoiceFilter):
return ' | '.join(['"%s"' % f for f in sorted(field.mapped_to_model)])
if isinstance(field, OrderingFilter) or isinstance(field, ChoiceFilter):
return ' | '.join(['"%s"' % f[0] for f in field.extra['choices']])
if isinstance(field, ChoiceField):
return ' | '.join(['"%s"' % f for f in sorted(field.choices)])
if isinstance(field, HyperlinkedRelatedField):
if field.view_name.endswith('detail'):
return 'link to %s' % reverse(field.view_name,
kwargs={'%s' % field.lookup_field: "'%s'" % field.lookup_field})
return reverse(field.view_name)
if isinstance(field, structure_filters.ServiceTypeFilter):
return ' | '.join(['"%s"' % f for f in SupportedServices.get_filter_mapping().keys()])
if isinstance(field, ResourceTypeFilter):
return ' | '.join(['"%s"' % f for f in SupportedServices.get_resource_models().keys()])
if isinstance(field, core_serializers.GenericRelatedField):
links = []
for model in field.related_models:
detail_view_name = core_utils.get_detail_view_name(model)
for f in field.lookup_fields:
try:
link = reverse(detail_view_name, kwargs={'%s' % f: "'%s'" % f})
except NoReverseMatch:
pass
else:
links.append(link)
break
path = ', '.join(links)
if path:
return 'link to any: %s' % path
if isinstance(field, core_filters.ContentTypeFilter):
return "string in form 'app_label'.'model_name'"
if isinstance(field, ModelMultipleChoiceFilter):
return get_field_type(field.field)
if isinstance(field, ListSerializer):
return 'list of [%s]' % get_field_type(field.child)
if isinstance(field, ManyRelatedField):
return 'list of [%s]' % get_field_type(field.child_relation)
if isinstance(field, ModelField):
return get_field_type(field.model_field)
name = field.__class__.__name__
for w in ('Filter', 'Field', 'Serializer'):
name = name.replace(w, '')
return FIELDS.get(name, name) | Returns field type/possible values. | Below is the instruction that describes the task:
### Input:
Returns field type/possible values.
### Response:
def get_field_type(field):
"""
Returns field type/possible values.
"""
if isinstance(field, core_filters.MappedMultipleChoiceFilter):
return ' | '.join(['"%s"' % f for f in sorted(field.mapped_to_model)])
if isinstance(field, OrderingFilter) or isinstance(field, ChoiceFilter):
return ' | '.join(['"%s"' % f[0] for f in field.extra['choices']])
if isinstance(field, ChoiceField):
return ' | '.join(['"%s"' % f for f in sorted(field.choices)])
if isinstance(field, HyperlinkedRelatedField):
if field.view_name.endswith('detail'):
return 'link to %s' % reverse(field.view_name,
kwargs={'%s' % field.lookup_field: "'%s'" % field.lookup_field})
return reverse(field.view_name)
if isinstance(field, structure_filters.ServiceTypeFilter):
return ' | '.join(['"%s"' % f for f in SupportedServices.get_filter_mapping().keys()])
if isinstance(field, ResourceTypeFilter):
return ' | '.join(['"%s"' % f for f in SupportedServices.get_resource_models().keys()])
if isinstance(field, core_serializers.GenericRelatedField):
links = []
for model in field.related_models:
detail_view_name = core_utils.get_detail_view_name(model)
for f in field.lookup_fields:
try:
link = reverse(detail_view_name, kwargs={'%s' % f: "'%s'" % f})
except NoReverseMatch:
pass
else:
links.append(link)
break
path = ', '.join(links)
if path:
return 'link to any: %s' % path
if isinstance(field, core_filters.ContentTypeFilter):
return "string in form 'app_label'.'model_name'"
if isinstance(field, ModelMultipleChoiceFilter):
return get_field_type(field.field)
if isinstance(field, ListSerializer):
return 'list of [%s]' % get_field_type(field.child)
if isinstance(field, ManyRelatedField):
return 'list of [%s]' % get_field_type(field.child_relation)
if isinstance(field, ModelField):
return get_field_type(field.model_field)
name = field.__class__.__name__
for w in ('Filter', 'Field', 'Serializer'):
name = name.replace(w, '')
return FIELDS.get(name, name) |
def kl_divergence(self, logits_q, logits_p):
"""
Categorical distribution KL divergence calculation
KL(Q || P) = sum Q_i log (Q_i / P_i)
When talking about logits this is:
sum exp(Q_i) * (Q_i - P_i)
"""
return (torch.exp(logits_q) * (logits_q - logits_p)).sum(1, keepdim=True) | Categorical distribution KL divergence calculation
KL(Q || P) = sum Q_i log (Q_i / P_i)
When talking about logits this is:
sum exp(Q_i) * (Q_i - P_i) | Below is the instruction that describes the task:
### Input:
Categorical distribution KL divergence calculation
KL(Q || P) = sum Q_i log (Q_i / P_i)
When talking about logits this is:
sum exp(Q_i) * (Q_i - P_i)
### Response:
def kl_divergence(self, logits_q, logits_p):
"""
Categorical distribution KL divergence calculation
KL(Q || P) = sum Q_i log (Q_i / P_i)
When talking about logits this is:
sum exp(Q_i) * (Q_i - P_i)
"""
return (torch.exp(logits_q) * (logits_q - logits_p)).sum(1, keepdim=True) |
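A small sanity check of the logits-based formula against torch.nn.functional.kl_div, assuming PyTorch 1.6+ for the log_target flag; the logits are arbitrary log-probabilities:

import torch
import torch.nn.functional as F

logits_q = F.log_softmax(torch.tensor([[2.0, 0.5, -1.0]]), dim=1)
logits_p = F.log_softmax(torch.tensor([[0.1, 0.2, 0.3]]), dim=1)
kl_manual = (torch.exp(logits_q) * (logits_q - logits_p)).sum(1, keepdim=True)
kl_builtin = F.kl_div(logits_p, logits_q, log_target=True, reduction="none").sum(1, keepdim=True)
print(torch.allclose(kl_manual, kl_builtin))   # True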
def delete_files_and_sync_sources(self, owner, id, name, **kwargs):
"""
Delete files
Delete one or more files from a dataset by their name, including files added via URL. **Batching** Note that the `name` parameter can be included multiple times in the query string, once for each file that is to be deleted together in a single request.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_files_and_sync_sources(owner, id, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:param list[str] name: Names of files to be deleted. Multiple can be provided in a single request by repeating the query string parameter name as many times as necessary. (required)
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_files_and_sync_sources_with_http_info(owner, id, name, **kwargs)
else:
(data) = self.delete_files_and_sync_sources_with_http_info(owner, id, name, **kwargs)
return data | Delete files
Delete one or more files from a dataset by their name, including files added via URL. **Batching** Note that the `name` parameter can be included multiple times in the query string, once for each file that is to be deleted together in a single request.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_files_and_sync_sources(owner, id, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:param list[str] name: Names of files to be deleted. Multiple can be provided in a single request by repeating the query string parameter name as many times as necessary. (required)
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Delete files
Delete one or more files from a dataset by their name, including files added via URL. **Batching** Note that the `name` parameter can be included multiple times in the query string, once for each file that is to be deleted together in a single request.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_files_and_sync_sources(owner, id, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:param list[str] name: Names of files to be deleted. Multiple can be provided in a single request by repeating the query string parameter name as many times as necessary. (required)
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_files_and_sync_sources(self, owner, id, name, **kwargs):
"""
Delete files
Delete one or more files from a dataset by their name, including files added via URL. **Batching** Note that the `name` parameter can be included multiple times in the query string, once for each file that is to be deleted together in a single request.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_files_and_sync_sources(owner, id, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:param list[str] name: Names of files to be deleted. Multiple can be provided in a single request by repeating the query string parameter name as many times as necessary. (required)
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_files_and_sync_sources_with_http_info(owner, id, name, **kwargs)
else:
(data) = self.delete_files_and_sync_sources_with_http_info(owner, id, name, **kwargs)
return data |
def get_form_context(self, obj, ns=None):
"""Return a dict: form instance, action button, submit url...
Used by macro m_tags_form(entity)
"""
return {
"url": url_for("entity_tags.edit", object_id=obj.id),
"form": self.entity_tags_form(obj)(obj=obj, ns=ns),
"buttons": [EDIT_BUTTON],
} | Return a dict: form instance, action button, submit url...
Used by macro m_tags_form(entity) | Below is the instruction that describes the task:
### Input:
Return a dict: form instance, action button, submit url...
Used by macro m_tags_form(entity)
### Response:
def get_form_context(self, obj, ns=None):
"""Return a dict: form instance, action button, submit url...
Used by macro m_tags_form(entity)
"""
return {
"url": url_for("entity_tags.edit", object_id=obj.id),
"form": self.entity_tags_form(obj)(obj=obj, ns=ns),
"buttons": [EDIT_BUTTON],
} |
def _drag_col(self, event):
"""Continue dragging a column"""
x = self._dx + event.x # get dragged column new left x coordinate
self._visual_drag.place_configure(x=x) # update column preview position
# if one border of the dragged column is beyon the middle of the
# neighboring column, swap them
if (self._dragged_col_neighbor_widths[0] is not None and
x < self._dragged_col_x - self._dragged_col_neighbor_widths[0] / 2):
self._swap_columns('left')
elif (self._dragged_col_neighbor_widths[1] is not None and
x > self._dragged_col_x + self._dragged_col_neighbor_widths[1] / 2):
self._swap_columns('right')
# horizontal scrolling if the cursor reaches the side of the table
if x < 0 and self.xview()[0] > 0:
# scroll left and update dragged column x coordinate
self.xview_scroll(-10, 'units')
self._dragged_col_x += 10
elif x + self._dragged_col_width / 2 > self.winfo_width() and self.xview()[1] < 1:
# scroll right and update dragged column x coordinate
self.xview_scroll(10, 'units')
self._dragged_col_x -= 10 | Continue dragging a column | Below is the the instruction that describes the task:
### Input:
Continue dragging a column
### Response:
def _drag_col(self, event):
"""Continue dragging a column"""
x = self._dx + event.x # get dragged column new left x coordinate
self._visual_drag.place_configure(x=x) # update column preview position
# if one border of the dragged column is beyon the middle of the
# neighboring column, swap them
if (self._dragged_col_neighbor_widths[0] is not None and
x < self._dragged_col_x - self._dragged_col_neighbor_widths[0] / 2):
self._swap_columns('left')
elif (self._dragged_col_neighbor_widths[1] is not None and
x > self._dragged_col_x + self._dragged_col_neighbor_widths[1] / 2):
self._swap_columns('right')
# horizontal scrolling if the cursor reaches the side of the table
if x < 0 and self.xview()[0] > 0:
# scroll left and update dragged column x coordinate
self.xview_scroll(-10, 'units')
self._dragged_col_x += 10
elif x + self._dragged_col_width / 2 > self.winfo_width() and self.xview()[1] < 1:
# scroll right and update dragged column x coordinate
self.xview_scroll(10, 'units')
self._dragged_col_x -= 10 |
def _generate_cfgnode(self, cfg_job, current_function_addr):
"""
Generate a CFGNode that starts at `cfg_job.addr`.
Since lifting machine code to IRSBs is slow, self._nodes is used as a cache of CFGNodes.
If the current architecture is ARM, this method will try to lift the block in the mode specified by the address
(determined by the parity of the address: even for ARM, odd for THUMB), and in case of decoding failures, try
the other mode. If the basic block is successfully decoded in the other mode (different from the initial one),
`addr` and `current_function_addr` are updated.
:param CFGJob cfg_job: The CFGJob instance.
:param int current_function_addr: Address of the current function.
:return: A 4-tuple of (new address, new function address, CFGNode instance, IRSB object)
:rtype: tuple
"""
addr = cfg_job.addr
try:
if addr in self._nodes:
cfg_node = self._nodes[addr]
irsb = cfg_node.irsb
if cfg_node.function_address != current_function_addr:
# the node has been assigned to another function before.
# we should update the function address.
current_function_addr = cfg_node.function_address
return addr, current_function_addr, cfg_node, irsb
is_x86_x64_arch = self.project.arch.name in ('X86', 'AMD64')
if is_arm_arch(self.project.arch):
real_addr = addr & (~1)
else:
real_addr = addr
# if possible, check the distance between `addr` and the end of this section
distance = VEX_IRSB_MAX_SIZE
obj = self.project.loader.find_object_containing(addr, membership_check=False)
if obj:
# is there a section?
has_executable_section = len([ sec for sec in obj.sections if sec.is_executable ]) > 0 # pylint:disable=len-as-condition
section = self.project.loader.find_section_containing(addr)
if has_executable_section and section is None:
# the basic block should not exist here...
return None, None, None, None
if section is not None:
if not section.is_executable:
# the section is not executable...
return None, None, None, None
distance = section.vaddr + section.memsize - real_addr
distance = min(distance, VEX_IRSB_MAX_SIZE)
# TODO: handle segment information as well
# also check the distance between `addr` and the closest function.
# we don't want to have a basic block that spans across function boundaries
next_func = self.functions.ceiling_func(addr + 1)
if next_func is not None:
distance_to_func = (next_func.addr & (~1) if is_arm_arch(self.project.arch) else next_func.addr) - real_addr
if distance_to_func != 0:
if distance is None:
distance = distance_to_func
else:
distance = min(distance, distance_to_func)
# in the end, check the distance between `addr` and the closest occupied region in segment list
next_noncode_addr = self._seg_list.next_pos_with_sort_not_in(addr, { "code" }, max_distance=distance)
if next_noncode_addr is not None:
distance_to_noncode_addr = next_noncode_addr - addr
distance = min(distance, distance_to_noncode_addr)
# Let's try to create the pyvex IRSB directly, since it's much faster
nodecode = False
irsb = None
irsb_string = None
try:
lifted_block = self._lift(addr, size=distance, opt_level=self._iropt_level, collect_data_refs=True)
irsb = lifted_block.vex_nostmt
irsb_string = lifted_block.bytes[:irsb.size]
except SimTranslationError:
nodecode = True
if (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode') and \
is_arm_arch(self.project.arch) and \
self._arch_options.switch_mode_on_nodecode:
# maybe the current mode is wrong?
nodecode = False
if addr % 2 == 0:
addr_0 = addr + 1
else:
addr_0 = addr - 1
if addr_0 in self._nodes:
# it has been analyzed before
cfg_node = self._nodes[addr_0]
irsb = cfg_node.irsb
return addr_0, cfg_node.function_address, cfg_node, irsb
try:
lifted_block = self._lift(addr_0, size=distance, opt_level=self._iropt_level,
collect_data_refs=True)
irsb = lifted_block.vex_nostmt
irsb_string = lifted_block.bytes[:irsb.size]
except SimTranslationError:
nodecode = True
if not (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode'):
# it is decodeable
if current_function_addr == addr:
current_function_addr = addr_0
addr = addr_0
if nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode':
# decoding error
# we still occupy that location since it cannot be decoded anyways
if irsb is None:
irsb_size = 0
else:
irsb_size = irsb.size
# special handling for ud, ud1, and ud2 on x86 and x86-64
if is_x86_x64_arch \
and len(irsb_string) >= 2 \
and irsb_string[-2:] in {
b'\x0f\xff', # ud0
b'\x0f\xb9', # ud1
b'\x0f\x0b', # ud2
}:
# ud0, ud1, and ud2 are actually valid instructions.
valid_ins = True
nodecode_size = 2
else:
valid_ins = False
nodecode_size = 1
self._seg_list.occupy(addr, irsb_size, 'code')
self._seg_list.occupy(addr + irsb_size, nodecode_size, 'nodecode')
if not valid_ins:
l.error("Decoding error occurred at address %#x of function %#x.",
addr + irsb_size,
current_function_addr
)
return None, None, None, None
is_thumb = False
# Occupy the block in segment list
if irsb.size > 0:
if is_arm_arch(self.project.arch) and addr % 2 == 1:
# thumb mode
is_thumb=True
self._seg_list.occupy(real_addr, irsb.size, "code")
# Create a CFG node, and add it to the graph
cfg_node = CFGNode(addr, irsb.size, self.model,
function_address=current_function_addr,
block_id=addr,
irsb=irsb,
thumb=is_thumb,
byte_string=irsb_string,
)
if self._cfb is not None:
self._cfb.add_obj(addr, lifted_block)
self._nodes[addr] = cfg_node
self._nodes_by_addr[addr].append(cfg_node)
return addr, current_function_addr, cfg_node, irsb
except (SimMemoryError, SimEngineError):
return None, None, None, None | Generate a CFGNode that starts at `cfg_job.addr`.
Since lifting machine code to IRSBs is slow, self._nodes is used as a cache of CFGNodes.
If the current architecture is ARM, this method will try to lift the block in the mode specified by the address
(determined by the parity of the address: even for ARM, odd for THUMB), and in case of decoding failures, try
the other mode. If the basic block is successfully decoded in the other mode (different from the initial one),
`addr` and `current_function_addr` are updated.
:param CFGJob cfg_job: The CFGJob instance.
:param int current_function_addr: Address of the current function.
:return: A 4-tuple of (new address, new function address, CFGNode instance, IRSB object)
:rtype: tuple | Below is the instruction that describes the task:
### Input:
Generate a CFGNode that starts at `cfg_job.addr`.
Since lifting machine code to IRSBs is slow, self._nodes is used as a cache of CFGNodes.
If the current architecture is ARM, this method will try to lift the block in the mode specified by the address
(determined by the parity of the address: even for ARM, odd for THUMB), and in case of decoding failures, try
the other mode. If the basic block is successfully decoded in the other mode (different from the initial one),
`addr` and `current_function_addr` are updated.
:param CFGJob cfg_job: The CFGJob instance.
:param int current_function_addr: Address of the current function.
:return: A 4-tuple of (new address, new function address, CFGNode instance, IRSB object)
:rtype: tuple
### Response:
def _generate_cfgnode(self, cfg_job, current_function_addr):
"""
Generate a CFGNode that starts at `cfg_job.addr`.
Since lifting machine code to IRSBs is slow, self._nodes is used as a cache of CFGNodes.
If the current architecture is ARM, this method will try to lift the block in the mode specified by the address
(determined by the parity of the address: even for ARM, odd for THUMB), and in case of decoding failures, try
the other mode. If the basic block is successfully decoded in the other mode (different from the initial one),
`addr` and `current_function_addr` are updated.
:param CFGJob cfg_job: The CFGJob instance.
:param int current_function_addr: Address of the current function.
:return: A 4-tuple of (new address, new function address, CFGNode instance, IRSB object)
:rtype: tuple
"""
addr = cfg_job.addr
try:
if addr in self._nodes:
cfg_node = self._nodes[addr]
irsb = cfg_node.irsb
if cfg_node.function_address != current_function_addr:
# the node has been assigned to another function before.
# we should update the function address.
current_function_addr = cfg_node.function_address
return addr, current_function_addr, cfg_node, irsb
is_x86_x64_arch = self.project.arch.name in ('X86', 'AMD64')
if is_arm_arch(self.project.arch):
real_addr = addr & (~1)
else:
real_addr = addr
# if possible, check the distance between `addr` and the end of this section
distance = VEX_IRSB_MAX_SIZE
obj = self.project.loader.find_object_containing(addr, membership_check=False)
if obj:
# is there a section?
has_executable_section = len([ sec for sec in obj.sections if sec.is_executable ]) > 0 # pylint:disable=len-as-condition
section = self.project.loader.find_section_containing(addr)
if has_executable_section and section is None:
# the basic block should not exist here...
return None, None, None, None
if section is not None:
if not section.is_executable:
# the section is not executable...
return None, None, None, None
distance = section.vaddr + section.memsize - real_addr
distance = min(distance, VEX_IRSB_MAX_SIZE)
# TODO: handle segment information as well
# also check the distance between `addr` and the closest function.
# we don't want to have a basic block that spans across function boundaries
next_func = self.functions.ceiling_func(addr + 1)
if next_func is not None:
distance_to_func = (next_func.addr & (~1) if is_arm_arch(self.project.arch) else next_func.addr) - real_addr
if distance_to_func != 0:
if distance is None:
distance = distance_to_func
else:
distance = min(distance, distance_to_func)
# in the end, check the distance between `addr` and the closest occupied region in segment list
next_noncode_addr = self._seg_list.next_pos_with_sort_not_in(addr, { "code" }, max_distance=distance)
if next_noncode_addr is not None:
distance_to_noncode_addr = next_noncode_addr - addr
distance = min(distance, distance_to_noncode_addr)
# Let's try to create the pyvex IRSB directly, since it's much faster
nodecode = False
irsb = None
irsb_string = None
try:
lifted_block = self._lift(addr, size=distance, opt_level=self._iropt_level, collect_data_refs=True)
irsb = lifted_block.vex_nostmt
irsb_string = lifted_block.bytes[:irsb.size]
except SimTranslationError:
nodecode = True
if (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode') and \
is_arm_arch(self.project.arch) and \
self._arch_options.switch_mode_on_nodecode:
# maybe the current mode is wrong?
nodecode = False
if addr % 2 == 0:
addr_0 = addr + 1
else:
addr_0 = addr - 1
if addr_0 in self._nodes:
# it has been analyzed before
cfg_node = self._nodes[addr_0]
irsb = cfg_node.irsb
return addr_0, cfg_node.function_address, cfg_node, irsb
try:
lifted_block = self._lift(addr_0, size=distance, opt_level=self._iropt_level,
collect_data_refs=True)
irsb = lifted_block.vex_nostmt
irsb_string = lifted_block.bytes[:irsb.size]
except SimTranslationError:
nodecode = True
if not (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode'):
# it is decodeable
if current_function_addr == addr:
current_function_addr = addr_0
addr = addr_0
if nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode':
# decoding error
# we still occupy that location since it cannot be decoded anyways
if irsb is None:
irsb_size = 0
else:
irsb_size = irsb.size
# special handling for ud, ud1, and ud2 on x86 and x86-64
if is_x86_x64_arch \
and len(irsb_string) >= 2 \
and irsb_string[-2:] in {
b'\x0f\xff', # ud0
b'\x0f\xb9', # ud1
b'\x0f\x0b', # ud2
}:
# ud0, ud1, and ud2 are actually valid instructions.
valid_ins = True
nodecode_size = 2
else:
valid_ins = False
nodecode_size = 1
self._seg_list.occupy(addr, irsb_size, 'code')
self._seg_list.occupy(addr + irsb_size, nodecode_size, 'nodecode')
if not valid_ins:
l.error("Decoding error occurred at address %#x of function %#x.",
addr + irsb_size,
current_function_addr
)
return None, None, None, None
is_thumb = False
# Occupy the block in segment list
if irsb.size > 0:
if is_arm_arch(self.project.arch) and addr % 2 == 1:
# thumb mode
is_thumb=True
self._seg_list.occupy(real_addr, irsb.size, "code")
# Create a CFG node, and add it to the graph
cfg_node = CFGNode(addr, irsb.size, self.model,
function_address=current_function_addr,
block_id=addr,
irsb=irsb,
thumb=is_thumb,
byte_string=irsb_string,
)
if self._cfb is not None:
self._cfb.add_obj(addr, lifted_block)
self._nodes[addr] = cfg_node
self._nodes_by_addr[addr].append(cfg_node)
return addr, current_function_addr, cfg_node, irsb
except (SimMemoryError, SimEngineError):
return None, None, None, None |
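A minimal standalone sketch of the ARM/THUMB address-parity convention the docstring above relies on; the address value is hypothetical and nothing here calls into the analysis itself.

# An odd address selects THUMB mode; masking off the lowest bit gives the
# address that is actually lifted, and flipping it gives the other-mode retry.
addr = 0x104F1
is_thumb = addr % 2 == 1                       # odd -> THUMB
real_addr = addr & ~1                          # 0x104F0, address actually lifted
addr_0 = addr - 1 if is_thumb else addr + 1    # retried in the other mode on decode failure
print(is_thumb, hex(real_addr), hex(addr_0))   # True 0x104f0 0x104f0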
def payload_element_name(element_name):
"""Class decorator generator for decorating
`StanzaPayload` subclasses.
:Parameters:
- `element_name`: XML element qname handled by the class
:Types:
- `element_name`: `unicode`
"""
def decorator(klass):
"""The payload_element_name decorator."""
# pylint: disable-msg=W0212,W0404
from .stanzapayload import STANZA_PAYLOAD_CLASSES
from .stanzapayload import STANZA_PAYLOAD_ELEMENTS
if hasattr(klass, "_pyxmpp_payload_element_name"):
klass._pyxmpp_payload_element_name.append(element_name)
else:
klass._pyxmpp_payload_element_name = [element_name]
if element_name in STANZA_PAYLOAD_CLASSES:
logger = logging.getLogger('pyxmpp.payload_element_name')
logger.warning("Overriding payload class for {0!r}".format(
element_name))
STANZA_PAYLOAD_CLASSES[element_name] = klass
STANZA_PAYLOAD_ELEMENTS[klass].append(element_name)
return klass
return decorator | Class decorator generator for decorating
`StanzaPayload` subclasses.
:Parameters:
- `element_name`: XML element qname handled by the class
:Types:
- `element_name`: `unicode` | Below is the the instruction that describes the task:
### Input:
Class decorator generator for decorating
`StanzaPayload` subclasses.
:Parameters:
- `element_name`: XML element qname handled by the class
:Types:
- `element_name`: `unicode`
### Response:
def payload_element_name(element_name):
"""Class decorator generator for decorating
`StanzaPayload` subclasses.
:Parameters:
- `element_name`: XML element qname handled by the class
:Types:
- `element_name`: `unicode`
"""
def decorator(klass):
"""The payload_element_name decorator."""
# pylint: disable-msg=W0212,W0404
from .stanzapayload import STANZA_PAYLOAD_CLASSES
from .stanzapayload import STANZA_PAYLOAD_ELEMENTS
if hasattr(klass, "_pyxmpp_payload_element_name"):
klass._pyxmpp_payload_element_name.append(element_name)
else:
klass._pyxmpp_payload_element_name = [element_name]
if element_name in STANZA_PAYLOAD_CLASSES:
logger = logging.getLogger('pyxmpp.payload_element_name')
logger.warning("Overriding payload class for {0!r}".format(
element_name))
STANZA_PAYLOAD_CLASSES[element_name] = klass
STANZA_PAYLOAD_ELEMENTS[klass].append(element_name)
return klass
return decorator |
def _chi_lr(self,r, phi, nl,nr,beta):
"""
computes the generalized polar basis function in the convention of Massey&Refregier eqn 8
:param nl: left basis
:type nl: int
:param nr: right basis
:type nr: int
:param beta: beta -- the characteristic scale typically chosen to be close to the size of the object.
:type beta: float.
:param coord: coordinates [r,phi]
:type coord: array(n,2)
:returns: values at positions of coordinates.
:raises: AttributeError, KeyError
"""
m=int((nr-nl).real)
n=int((nr+nl).real)
p=int((n-abs(m))/2)
p2=int((n+abs(m))/2)
q=int(abs(m))
if p % 2==0: #if p is even
prefac=1
else:
prefac=-1
prefactor=prefac/beta**(abs(m)+1)*np.sqrt(math.factorial(p)/(np.pi*math.factorial(p2)))
poly=self.poly[p][q]
return prefactor*r**q*poly((r/beta)**2)*np.exp(-(r/beta)**2/2)*np.exp(-1j*m*phi) | computes the generalized polar basis function in the convention of Massey&Refregier eqn 8
:param nl: left basis
:type nl: int
:param nr: right basis
:type nr: int
:param beta: beta -- the characteristic scale typically chosen to be close to the size of the object.
:type beta: float.
:param coord: coordinates [r,phi]
:type coord: array(n,2)
:returns: values at positions of coordinates.
:raises: AttributeError, KeyError | Below is the the instruction that describes the task:
### Input:
computes the generalized polar basis function in the convention of Massey&Refregier eqn 8
:param nl: left basis
:type nl: int
:param nr: right basis
:type nr: int
:param beta: beta -- the characteristic scale typically chosen to be close to the size of the object.
:type beta: float.
:param coord: coordinates [r,phi]
:type coord: array(n,2)
:returns: values at positions of coordinates.
:raises: AttributeError, KeyError
### Response:
def _chi_lr(self,r, phi, nl,nr,beta):
"""
computes the generalized polar basis function in the convention of Massey&Refregier eqn 8
:param nl: left basis
:type nl: int
:param nr: right basis
:type nr: int
:param beta: beta -- the characteristic scale typically chosen to be close to the size of the object.
:type beta: float.
:param coord: coordinates [r,phi]
:type coord: array(n,2)
:returns: values at positions of coordinates.
:raises: AttributeError, KeyError
"""
m=int((nr-nl).real)
n=int((nr+nl).real)
p=int((n-abs(m))/2)
p2=int((n+abs(m))/2)
q=int(abs(m))
if p % 2==0: #if p is even
prefac=1
else:
prefac=-1
prefactor=prefac/beta**(abs(m)+1)*np.sqrt(math.factorial(p)/(np.pi*math.factorial(p2)))
poly=self.poly[p][q]
return prefactor*r**q*poly((r/beta)**2)*np.exp(-(r/beta)**2/2)*np.exp(-1j*m*phi) |
def set_widgets(self):
"""Set widgets on the LayerMode tab."""
self.clear_further_steps()
# Set widgets
self.lblBandSelector.setText(tr(
'Please select which band that contains the data that you want to '
'use for this layer.'))
self.lstBands.clear()
band_num = self.parent.layer.bandCount()
for i in range(band_num):
item = QListWidgetItem(
self.parent.layer.bandName(i + 1),
self.lstBands)
item.setData(QtCore.Qt.UserRole, i + 1)
self.lstBands.addItem(item)
existing_band = self.parent.get_existing_keyword('active_band')
if existing_band:
self.lstBands.setCurrentRow(existing_band - 1)
else:
# Set to Band 1 / index 0
self.lstBands.setCurrentRow(0) | Set widgets on the LayerMode tab. | Below is the the instruction that describes the task:
### Input:
Set widgets on the LayerMode tab.
### Response:
def set_widgets(self):
"""Set widgets on the LayerMode tab."""
self.clear_further_steps()
# Set widgets
self.lblBandSelector.setText(tr(
'Please select which band that contains the data that you want to '
'use for this layer.'))
self.lstBands.clear()
band_num = self.parent.layer.bandCount()
for i in range(band_num):
item = QListWidgetItem(
self.parent.layer.bandName(i + 1),
self.lstBands)
item.setData(QtCore.Qt.UserRole, i + 1)
self.lstBands.addItem(item)
existing_band = self.parent.get_existing_keyword('active_band')
if existing_band:
self.lstBands.setCurrentRow(existing_band - 1)
else:
# Set to Band 1 / index 0
self.lstBands.setCurrentRow(0) |
def get_instance(self, payload):
"""
Build an instance of EngagementInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.studio.v1.flow.engagement.EngagementInstance
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance
"""
return EngagementInstance(self._version, payload, flow_sid=self._solution['flow_sid'], ) | Build an instance of EngagementInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.studio.v1.flow.engagement.EngagementInstance
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance | Below is the the instruction that describes the task:
### Input:
Build an instance of EngagementInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.studio.v1.flow.engagement.EngagementInstance
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of EngagementInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.studio.v1.flow.engagement.EngagementInstance
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance
"""
return EngagementInstance(self._version, payload, flow_sid=self._solution['flow_sid'], ) |
def process_values(self):
"""Takes a set of angles and converts them to the x,y,z coordinates in the internal representation of the class, ready for plotting.
:param vals: the values that are being modelled."""
if self.padding>0:
channels = np.zeros((self.vals.shape[0], self.vals.shape[1]+self.padding))
channels[:, 0:self.vals.shape[0]] = self.vals
else:
channels = self.vals
vals_mat = self.skel.to_xyz(channels.flatten())
self.vals = np.zeros_like(vals_mat)
# Flip the Y and Z axes
self.vals[:, 0] = vals_mat[:, 0].copy()
self.vals[:, 1] = vals_mat[:, 2].copy()
self.vals[:, 2] = vals_mat[:, 1].copy() | Takes a set of angles and converts them to the x,y,z coordinates in the internal representation of the class, ready for plotting.
:param vals: the values that are being modelled. | Below is the the instruction that describes the task:
### Input:
Takes a set of angles and converts them to the x,y,z coordinates in the internal representation of the class, ready for plotting.
:param vals: the values that are being modelled.
### Response:
def process_values(self):
"""Takes a set of angles and converts them to the x,y,z coordinates in the internal representation of the class, ready for plotting.
:param vals: the values that are being modelled."""
if self.padding>0:
channels = np.zeros((self.vals.shape[0], self.vals.shape[1]+self.padding))
channels[:, 0:self.vals.shape[0]] = self.vals
else:
channels = self.vals
vals_mat = self.skel.to_xyz(channels.flatten())
self.vals = np.zeros_like(vals_mat)
# Flip the Y and Z axes
self.vals[:, 0] = vals_mat[:, 0].copy()
self.vals[:, 1] = vals_mat[:, 2].copy()
self.vals[:, 2] = vals_mat[:, 1].copy() |
def import_styles(self, subs, overwrite=True):
"""
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
"""
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style | Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True). | Below is the the instruction that describes the task:
### Input:
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
### Response:
def import_styles(self, subs, overwrite=True):
"""
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
"""
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style |
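A usage sketch, assuming this SSAFile comes from the pysubs2 library (its API matches) and that the two .ass files exist on disk; the file names are hypothetical.

import pysubs2  # assumed library providing SSAFile and load()

subs = pysubs2.load("episode.ass")         # hypothetical target file
template = pysubs2.load("template.ass")    # hypothetical style template
# Merge the template's styles in, keeping the target's own style on a name clash.
subs.import_styles(template, overwrite=False)
subs.save("episode_with_styles.ass")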
def bytes(self, count):
"""Returns a bytearray of length `count`. Works unaligned."""
if count < 0:
raise ValueError
# fast path
if self._bits == 0:
data = self._fileobj.read(count)
if len(data) != count:
raise BitReaderError("not enough data")
return data
return bytes(bytearray(self.bits(8) for _ in xrange(count))) | Returns a bytearray of length `count`. Works unaligned. | Below is the the instruction that describes the task:
### Input:
Returns a bytearray of length `count`. Works unaligned.
### Response:
def bytes(self, count):
"""Returns a bytearray of length `count`. Works unaligned."""
if count < 0:
raise ValueError
# fast path
if self._bits == 0:
data = self._fileobj.read(count)
if len(data) != count:
raise BitReaderError("not enough data")
return data
return bytes(bytearray(self.bits(8) for _ in xrange(count))) |
def obfn_cns(self):
r"""Compute constraint violation measure :math:`\| P(\mathbf{y})
- \mathbf{y}\|_2`.
"""
Y = self.obfn_gvar()
return np.linalg.norm((self.Pcn(Y) - Y)) | r"""Compute constraint violation measure :math:`\| P(\mathbf{y})
- \mathbf{y}\|_2`. | Below is the the instruction that describes the task:
### Input:
r"""Compute constraint violation measure :math:`\| P(\mathbf{y})
- \mathbf{y}\|_2`.
### Response:
def obfn_cns(self):
r"""Compute constraint violation measure :math:`\| P(\mathbf{y})
- \mathbf{y}\|_2`.
"""
Y = self.obfn_gvar()
return np.linalg.norm((self.Pcn(Y) - Y)) |
def add_module(self, ref, text, format=None,
expect_modulename=None, expect_revision=None,
expect_failure_error=True):
"""Parse a module text and add the module data to the context
`ref` is a string which is used to identify the source of
the text for the user. used in error messages
`text` is the raw text data
`format` is one of 'yang' or 'yin'.
Returns the parsed and validated module on success, and None on error.
"""
if format == None:
format = util.guess_format(text)
if format == 'yin':
p = yin_parser.YinParser()
else:
p = yang_parser.YangParser()
module = p.parse(self, ref, text)
if module is None:
return None
if expect_modulename is not None:
if not re.match(syntax.re_identifier, expect_modulename):
error.err_add(self.errors, module.pos,
'FILENAME_BAD_MODULE_NAME',
(ref, expect_modulename, syntax.identifier))
elif expect_modulename != module.arg:
if expect_failure_error:
error.err_add(self.errors, module.pos, 'BAD_MODULE_NAME',
(module.arg, ref, expect_modulename))
return None
else:
error.err_add(self.errors, module.pos, 'WBAD_MODULE_NAME',
(module.arg, ref, expect_modulename))
latest_rev = util.get_latest_revision(module)
if expect_revision is not None:
if not re.match(syntax.re_date, expect_revision):
error.err_add(self.errors, module.pos, 'FILENAME_BAD_REVISION',
(ref, expect_revision, 'YYYY-MM-DD'))
elif expect_revision != latest_rev:
if expect_failure_error:
error.err_add(self.errors, module.pos, 'BAD_REVISION',
(latest_rev, ref, expect_revision))
return None
else:
error.err_add(self.errors, module.pos, 'WBAD_REVISION',
(latest_rev, ref, expect_revision))
if module.arg not in self.revs:
self.revs[module.arg] = []
revs = self.revs[module.arg]
revs.append((latest_rev, None))
return self.add_parsed_module(module) | Parse a module text and add the module data to the context
`ref` is a string which is used to identify the source of
the text for the user. used in error messages
`text` is the raw text data
`format` is one of 'yang' or 'yin'.
Returns the parsed and validated module on success, and None on error. | Below is the the instruction that describes the task:
### Input:
Parse a module text and add the module data to the context
`ref` is a string which is used to identify the source of
the text for the user. used in error messages
`text` is the raw text data
`format` is one of 'yang' or 'yin'.
Returns the parsed and validated module on success, and None on error.
### Response:
def add_module(self, ref, text, format=None,
expect_modulename=None, expect_revision=None,
expect_failure_error=True):
"""Parse a module text and add the module data to the context
`ref` is a string which is used to identify the source of
the text for the user. used in error messages
`text` is the raw text data
`format` is one of 'yang' or 'yin'.
Returns the parsed and validated module on success, and None on error.
"""
if format == None:
format = util.guess_format(text)
if format == 'yin':
p = yin_parser.YinParser()
else:
p = yang_parser.YangParser()
module = p.parse(self, ref, text)
if module is None:
return None
if expect_modulename is not None:
if not re.match(syntax.re_identifier, expect_modulename):
error.err_add(self.errors, module.pos,
'FILENAME_BAD_MODULE_NAME',
(ref, expect_modulename, syntax.identifier))
elif expect_modulename != module.arg:
if expect_failure_error:
error.err_add(self.errors, module.pos, 'BAD_MODULE_NAME',
(module.arg, ref, expect_modulename))
return None
else:
error.err_add(self.errors, module.pos, 'WBAD_MODULE_NAME',
(module.arg, ref, expect_modulename))
latest_rev = util.get_latest_revision(module)
if expect_revision is not None:
if not re.match(syntax.re_date, expect_revision):
error.err_add(self.errors, module.pos, 'FILENAME_BAD_REVISION',
(ref, expect_revision, 'YYYY-MM-DD'))
elif expect_revision != latest_rev:
if expect_failure_error:
error.err_add(self.errors, module.pos, 'BAD_REVISION',
(latest_rev, ref, expect_revision))
return None
else:
error.err_add(self.errors, module.pos, 'WBAD_REVISION',
(latest_rev, ref, expect_revision))
if module.arg not in self.revs:
self.revs[module.arg] = []
revs = self.revs[module.arg]
revs.append((latest_rev, None))
return self.add_parsed_module(module) |
def _strip_tag(tree, tag):
"""
Remove all tags that have the tag name ``tag``
"""
for el in tree.iter():
if el.tag == tag:
el.getparent().remove(el) | Remove all tags that have the tag name ``tag`` | Below is the the instruction that describes the task:
### Input:
Remove all tags that have the tag name ``tag``
### Response:
def _strip_tag(tree, tag):
"""
Remove all tags that have the tag name ``tag``
"""
for el in tree.iter():
if el.tag == tag:
el.getparent().remove(el) |
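A small demonstration of the helper on a throwaway in-memory tree; it assumes lxml, which the getparent() call above implies.

from lxml import etree  # assumed: getparent() is an lxml Element method

tree = etree.fromstring("<root><keep>a</keep><drop>b</drop><keep>c</keep></root>")
_strip_tag(tree, "drop")
print(etree.tostring(tree))  # b'<root><keep>a</keep><keep>c</keep></root>'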
def writeProxy(self, obj):
"""
Encodes a proxied object to the stream.
@since: 0.6
"""
proxy = self.context.getProxyForObject(obj)
self.writeObject(proxy, is_proxy=True) | Encodes a proxied object to the stream.
@since: 0.6 | Below is the the instruction that describes the task:
### Input:
Encodes a proxied object to the stream.
@since: 0.6
### Response:
def writeProxy(self, obj):
"""
Encodes a proxied object to the stream.
@since: 0.6
"""
proxy = self.context.getProxyForObject(obj)
self.writeObject(proxy, is_proxy=True) |
def add_option(self, parser):
""" Add option group and all children options. """
group = parser.add_argument_group(self.name)
for stat in self.stats:
stat.add_option(group)
group.add_argument(
"--{0}".format(self.option), action="store_true", help="All above") | Add option group and all children options. | Below is the the instruction that describes the task:
### Input:
Add option group and all children options.
### Response:
def add_option(self, parser):
""" Add option group and all children options. """
group = parser.add_argument_group(self.name)
for stat in self.stats:
stat.add_option(group)
group.add_argument(
"--{0}".format(self.option), action="store_true", help="All above") |
def add_wikilink(self, title, href, **attrs):
"""
Add a Wiki link to the project and returns a :class:`WikiLink` object.
:param title: title of the :class:`WikiLink`
:param href: href of the :class:`WikiLink`
:param attrs: optional attributes for :class:`WikiLink`
"""
return WikiLinks(self.requester).create(self.id, title, href, **attrs) | Add a Wiki link to the project and returns a :class:`WikiLink` object.
:param title: title of the :class:`WikiLink`
:param href: href of the :class:`WikiLink`
:param attrs: optional attributes for :class:`WikiLink` | Below is the the instruction that describes the task:
### Input:
Add a Wiki link to the project and returns a :class:`WikiLink` object.
:param title: title of the :class:`WikiLink`
:param href: href of the :class:`WikiLink`
:param attrs: optional attributes for :class:`WikiLink`
### Response:
def add_wikilink(self, title, href, **attrs):
"""
Add a Wiki link to the project and returns a :class:`WikiLink` object.
:param title: title of the :class:`WikiLink`
:param href: href of the :class:`WikiLink`
:param attrs: optional attributes for :class:`WikiLink`
"""
return WikiLinks(self.requester).create(self.id, title, href, **attrs) |
def get_historical_klines(symbol, interval, start_str, end_str=None):
"""Get Historical Klines from Binance
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format
:type start_str: str
:param end_str: optional - end date string in UTC format
:type end_str: str
:return: list of OHLCV values
"""
# create the Binance client, no need for api key
client = Client("", "")
# init our list
output_data = []
# setup the max limit
limit = 500
# convert interval to useful value in seconds
timeframe = interval_to_milliseconds(interval)
# convert our date strings to milliseconds
start_ts = date_to_milliseconds(start_str)
# if an end time was passed convert it
end_ts = None
if end_str:
end_ts = date_to_milliseconds(end_str)
idx = 0
# it can be difficult to know when a symbol was listed on Binance so allow start time to be before list date
symbol_existed = False
while True:
# fetch the klines from start_ts up to max 500 entries or the end_ts if set
temp_data = client.get_klines(
symbol=symbol,
interval=interval,
limit=limit,
startTime=start_ts,
endTime=end_ts
)
# handle the case where our start date is before the symbol pair listed on Binance
if not symbol_existed and len(temp_data):
symbol_existed = True
if symbol_existed:
# append this loops data to our output data
output_data += temp_data
# update our start timestamp using the last value in the array and add the interval timeframe
start_ts = temp_data[len(temp_data) - 1][0] + timeframe
else:
# it wasn't listed yet, increment our start date
start_ts += timeframe
idx += 1
# check if we received less than the required limit and exit the loop
if len(temp_data) < limit:
# exit the while loop
break
# sleep after every 3rd call to be kind to the API
if idx % 3 == 0:
time.sleep(1)
return output_data | Get Historical Klines from Binance
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format
:type start_str: str
:param end_str: optional - end date string in UTC format
:type end_str: str
:return: list of OHLCV values | Below is the the instruction that describes the task:
### Input:
Get Historical Klines from Binance
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format
:type start_str: str
:param end_str: optional - end date string in UTC format
:type end_str: str
:return: list of OHLCV values
### Response:
def get_historical_klines(symbol, interval, start_str, end_str=None):
"""Get Historical Klines from Binance
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format
:type start_str: str
:param end_str: optional - end date string in UTC format
:type end_str: str
:return: list of OHLCV values
"""
# create the Binance client, no need for api key
client = Client("", "")
# init our list
output_data = []
# setup the max limit
limit = 500
# convert interval to useful value in seconds
timeframe = interval_to_milliseconds(interval)
# convert our date strings to milliseconds
start_ts = date_to_milliseconds(start_str)
# if an end time was passed convert it
end_ts = None
if end_str:
end_ts = date_to_milliseconds(end_str)
idx = 0
# it can be difficult to know when a symbol was listed on Binance so allow start time to be before list date
symbol_existed = False
while True:
# fetch the klines from start_ts up to max 500 entries or the end_ts if set
temp_data = client.get_klines(
symbol=symbol,
interval=interval,
limit=limit,
startTime=start_ts,
endTime=end_ts
)
# handle the case where our start date is before the symbol pair listed on Binance
if not symbol_existed and len(temp_data):
symbol_existed = True
if symbol_existed:
# append this loops data to our output data
output_data += temp_data
# update our start timestamp using the last value in the array and add the interval timeframe
start_ts = temp_data[len(temp_data) - 1][0] + timeframe
else:
# it wasn't listed yet, increment our start date
start_ts += timeframe
idx += 1
# check if we received less than the required limit and exit the loop
if len(temp_data) < limit:
# exit the while loop
break
# sleep after every 3rd call to be kind to the API
if idx % 3 == 0:
time.sleep(1)
return output_data |
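A usage sketch for the helper above, assuming python-binance is installed; the symbol and date range are arbitrary examples and the call performs real network requests.

from binance.client import Client  # interval constants such as KLINE_INTERVAL_1HOUR live here

# Hourly candles for one month; dateparser-style strings like "1 week ago UTC" also work.
klines = get_historical_klines("BNBBTC", Client.KLINE_INTERVAL_1HOUR,
                               "1 Dec, 2017", "1 Jan, 2018")
print(len(klines))                        # number of candles returned
print(klines[0][4] if klines else None)   # close price of the first candle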
def ReadHuntLogEntries(self,
hunt_id,
offset,
count,
with_substring=None,
cursor=None):
"""Reads hunt log entries of a given hunt using given query options."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = ("SELECT client_id, flow_id, message, UNIX_TIMESTAMP(timestamp) "
"FROM flow_log_entries "
"FORCE INDEX(flow_log_entries_by_hunt) "
"WHERE hunt_id = %s AND flow_id = hunt_id ")
args = [hunt_id_int]
if with_substring is not None:
query += "AND message LIKE %s "
args.append("%" + db_utils.EscapeWildcards(with_substring) + "%")
query += "ORDER BY timestamp ASC LIMIT %s OFFSET %s"
args.append(count)
args.append(offset)
cursor.execute(query, args)
flow_log_entries = []
for client_id_int, flow_id_int, message, timestamp in cursor.fetchall():
flow_log_entries.append(
rdf_flow_objects.FlowLogEntry(
client_id=db_utils.IntToClientID(client_id_int),
flow_id=db_utils.IntToFlowID(flow_id_int),
hunt_id=hunt_id,
message=message,
timestamp=mysql_utils.TimestampToRDFDatetime(timestamp)))
return flow_log_entries | Reads hunt log entries of a given hunt using given query options. | Below is the the instruction that describes the task:
### Input:
Reads hunt log entries of a given hunt using given query options.
### Response:
def ReadHuntLogEntries(self,
hunt_id,
offset,
count,
with_substring=None,
cursor=None):
"""Reads hunt log entries of a given hunt using given query options."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = ("SELECT client_id, flow_id, message, UNIX_TIMESTAMP(timestamp) "
"FROM flow_log_entries "
"FORCE INDEX(flow_log_entries_by_hunt) "
"WHERE hunt_id = %s AND flow_id = hunt_id ")
args = [hunt_id_int]
if with_substring is not None:
query += "AND message LIKE %s "
args.append("%" + db_utils.EscapeWildcards(with_substring) + "%")
query += "ORDER BY timestamp ASC LIMIT %s OFFSET %s"
args.append(count)
args.append(offset)
cursor.execute(query, args)
flow_log_entries = []
for client_id_int, flow_id_int, message, timestamp in cursor.fetchall():
flow_log_entries.append(
rdf_flow_objects.FlowLogEntry(
client_id=db_utils.IntToClientID(client_id_int),
flow_id=db_utils.IntToFlowID(flow_id_int),
hunt_id=hunt_id,
message=message,
timestamp=mysql_utils.TimestampToRDFDatetime(timestamp)))
return flow_log_entries |
def _list_dir(self, path):
"""returns absolute paths for all entries in a directory"""
try:
elements = [
os.path.join(path, x) for x in os.listdir(path)
] if os.path.isdir(path) else []
elements.sort()
except OSError:
elements = None
return elements | returns absolute paths for all entries in a directory | Below is the the instruction that describes the task:
### Input:
returns absolute paths for all entries in a directory
### Response:
def _list_dir(self, path):
"""returns absolute paths for all entries in a directory"""
try:
elements = [
os.path.join(path, x) for x in os.listdir(path)
] if os.path.isdir(path) else []
elements.sort()
except OSError:
elements = None
return elements |
def clone(self, _, scene):
"""
Create a clone of this Frame into a new Screen.
:param _: ignored.
:param scene: The new Scene object to clone into.
"""
# Assume that the application creates a new set of Frames and so we need to match up the
# data from the old object to the new (using the name).
if self._name is not None:
for effect in scene.effects:
if isinstance(effect, Frame):
if effect._name == self._name:
effect.data = self.data
for layout in self._layouts:
layout.update_widgets(new_frame=effect) | Create a clone of this Frame into a new Screen.
:param _: ignored.
:param scene: The new Scene object to clone into. | Below is the the instruction that describes the task:
### Input:
Create a clone of this Frame into a new Screen.
:param _: ignored.
:param scene: The new Scene object to clone into.
### Response:
def clone(self, _, scene):
"""
Create a clone of this Frame into a new Screen.
:param _: ignored.
:param scene: The new Scene object to clone into.
"""
# Assume that the application creates a new set of Frames and so we need to match up the
# data from the old object to the new (using the name).
if self._name is not None:
for effect in scene.effects:
if isinstance(effect, Frame):
if effect._name == self._name:
effect.data = self.data
for layout in self._layouts:
layout.update_widgets(new_frame=effect) |
def update_plan(self, updated_plan, project, id):
"""UpdatePlan.
Update the information for the specified plan
:param :class:`<UpdatePlan> <azure.devops.v5_0.work.models.UpdatePlan>` updated_plan: Plan definition to be updated
:param str project: Project ID or project name
:param str id: Identifier of the plan
:rtype: :class:`<Plan> <azure.devops.v5_0.work.models.Plan>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
content = self._serialize.body(updated_plan, 'UpdatePlan')
response = self._send(http_method='PUT',
location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
version='5.0',
route_values=route_values,
content=content)
return self._deserialize('Plan', response) | UpdatePlan.
Update the information for the specified plan
:param :class:`<UpdatePlan> <azure.devops.v5_0.work.models.UpdatePlan>` updated_plan: Plan definition to be updated
:param str project: Project ID or project name
:param str id: Identifier of the plan
:rtype: :class:`<Plan> <azure.devops.v5_0.work.models.Plan>` | Below is the the instruction that describes the task:
### Input:
UpdatePlan.
Update the information for the specified plan
:param :class:`<UpdatePlan> <azure.devops.v5_0.work.models.UpdatePlan>` updated_plan: Plan definition to be updated
:param str project: Project ID or project name
:param str id: Identifier of the plan
:rtype: :class:`<Plan> <azure.devops.v5_0.work.models.Plan>`
### Response:
def update_plan(self, updated_plan, project, id):
"""UpdatePlan.
Update the information for the specified plan
:param :class:`<UpdatePlan> <azure.devops.v5_0.work.models.UpdatePlan>` updated_plan: Plan definition to be updated
:param str project: Project ID or project name
:param str id: Identifier of the plan
:rtype: :class:`<Plan> <azure.devops.v5_0.work.models.Plan>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
content = self._serialize.body(updated_plan, 'UpdatePlan')
response = self._send(http_method='PUT',
location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
version='5.0',
route_values=route_values,
content=content)
return self._deserialize('Plan', response) |
def save_and_validate_logo(logo_stream, logo_filename, community_id):
"""Validate that the community's logo is within the size limit and save it."""
cfg = current_app.config
logos_bucket_id = cfg['COMMUNITIES_BUCKET_UUID']
logo_max_size = cfg['COMMUNITIES_LOGO_MAX_SIZE']
logos_bucket = Bucket.query.get(logos_bucket_id)
ext = os.path.splitext(logo_filename)[1]
ext = ext[1:] if ext.startswith('.') else ext
logo_stream.seek(SEEK_SET, SEEK_END) # Seek from beginning to end
logo_size = logo_stream.tell()
if logo_size > logo_max_size:
return None
if ext in cfg['COMMUNITIES_LOGO_EXTENSIONS']:
key = "{0}/logo.{1}".format(community_id, ext)
logo_stream.seek(0) # Rewind the stream to the beginning
ObjectVersion.create(logos_bucket, key, stream=logo_stream,
size=logo_size)
return ext
else:
return None | Validate that the community's logo is within the size limit and save it.
### Input:
Validate that the community's logo is within the size limit and save it.
### Response:
def save_and_validate_logo(logo_stream, logo_filename, community_id):
"""Validate that the community's logo is within the size limit and save it."""
cfg = current_app.config
logos_bucket_id = cfg['COMMUNITIES_BUCKET_UUID']
logo_max_size = cfg['COMMUNITIES_LOGO_MAX_SIZE']
logos_bucket = Bucket.query.get(logos_bucket_id)
ext = os.path.splitext(logo_filename)[1]
ext = ext[1:] if ext.startswith('.') else ext
logo_stream.seek(SEEK_SET, SEEK_END) # Seek from beginning to end
logo_size = logo_stream.tell()
if logo_size > logo_max_size:
return None
if ext in cfg['COMMUNITIES_LOGO_EXTENSIONS']:
key = "{0}/logo.{1}".format(community_id, ext)
logo_stream.seek(0) # Rewind the stream to the beginning
ObjectVersion.create(logos_bucket, key, stream=logo_stream,
size=logo_size)
return ext
else:
return None |
def Reset(self):
"""Resets the internal state of the analyzer."""
hasher_names = hashers_manager.HashersManager.GetHasherNamesFromString(
self._hasher_names_string)
self._hashers = hashers_manager.HashersManager.GetHashers(hasher_names) | Resets the internal state of the analyzer. | Below is the the instruction that describes the task:
### Input:
Resets the internal state of the analyzer.
### Response:
def Reset(self):
"""Resets the internal state of the analyzer."""
hasher_names = hashers_manager.HashersManager.GetHasherNamesFromString(
self._hasher_names_string)
self._hashers = hashers_manager.HashersManager.GetHashers(hasher_names) |
def fetch(url, dest, force=False):
"""Retrieve data from a URL and store it into dest.
Parameters
----------
url: str
Link to the remote data
dest: str
Path where the file must be stored
force: bool (default=False)
Overwrite if the file exists
Returns
-------
cached: bool
True if the file already exists
dest: str
The same string of the parameter
"""
cached = True
if force or not os.path.exists(dest):
cached = False
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(dest, 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
return cached, dest | Retrieve data from a URL and store it into dest.
Parameters
----------
url: str
Link to the remote data
dest: str
Path where the file must be stored
force: bool (default=False)
Overwrite if the file exists
Returns
-------
cached: bool
True if the file already exists
dest: str
The same string of the parameter | Below is the the instruction that describes the task:
### Input:
Retrieve data from a URL and store it into dest.
Parameters
----------
url: str
Link to the remote data
dest: str
Path where the file must be stored
force: bool (default=False)
Overwrite if the file exists
Returns
-------
cached: bool
True if the file already exists
dest: str
The same string of the parameter
### Response:
def fetch(url, dest, force=False):
"""Retrieve data from a URL and store it into dest.
Parameters
----------
url: str
Link to the remote data
dest: str
Path where the file must be stored
force: bool (default=False)
Overwrite if the file exists
Returns
-------
cached: bool
True if the file already exists
dest: str
The same string of the parameter
"""
cached = True
if force or not os.path.exists(dest):
cached = False
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(dest, 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
return cached, dest |
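A quick usage sketch with a hypothetical URL and destination path.

cached, path = fetch("https://example.com/data.csv", "/tmp/data.csv")
# cached is False on a fresh download; provided the download succeeded, a second
# call with the same dest finds the file on disk and returns True without fetching.
cached_again, _ = fetch("https://example.com/data.csv", "/tmp/data.csv")
print(cached, cached_again, path)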
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList | inverse function of numList2blocks. | Below is the the instruction that describes the task:
### Input:
inverse function of numList2blocks.
### Response:
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList |
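A worked example of the unpacking: each block holds n bytes packed big-endian into one integer, so with n = 3 the block 0x010203 expands back into the byte values [1, 2, 3].

print(blocks2numList([0x010203, 0x616263], 3))  # [1, 2, 3, 97, 98, 99]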
def move_datetime_week(dt, direction, num_shifts):
"""
Move datetime 1 week in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(weeks=+num_shifts)
return _move_datetime(dt, direction, delta) | Move datetime 1 week in the chosen direction.
unit is a no-op, to keep the API the same as the day case | Below is the the instruction that describes the task:
### Input:
Move datetime 1 week in the chosen direction.
unit is a no-op, to keep the API the same as the day case
### Response:
def move_datetime_week(dt, direction, num_shifts):
"""
Move datetime 1 week in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(weeks=+num_shifts)
return _move_datetime(dt, direction, delta) |
def validate_response_type(self, client_id, response_type, client, request,
*args, **kwargs):
"""Ensure client is authorized to use the response type requested.
It will allow any of the two (`code`, `token`) response types by
default. Implement `allowed_response_types` on the client object
to authorize the request.
"""
if response_type not in ('code', 'token'):
return False
if hasattr(client, 'allowed_response_types'):
return response_type in client.allowed_response_types
return True | Ensure client is authorized to use the response type requested.
It will allow any of the two (`code`, `token`) response types by
default. Implement `allowed_response_types` on the client object
to authorize the request. | Below is the the instruction that describes the task:
### Input:
Ensure client is authorized to use the response type requested.
It will allow any of the two (`code`, `token`) response types by
default. Implement `allowed_response_types` on the client object
to authorize the request.
### Response:
def validate_response_type(self, client_id, response_type, client, request,
*args, **kwargs):
"""Ensure client is authorized to use the response type requested.
It will allow any of the two (`code`, `token`) response types by
default. Implement `allowed_response_types` on the client object
to authorize the request.
"""
if response_type not in ('code', 'token'):
return False
if hasattr(client, 'allowed_response_types'):
return response_type in client.allowed_response_types
return True |
def get_deployment_by_slot(self, service_name, deployment_slot):
'''
Returns configuration information, status, and system properties for
a deployment.
service_name:
Name of the hosted service.
deployment_slot:
The environment to which the hosted service is deployed. Valid
values are: staging, production
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_slot', deployment_slot)
return self._perform_get(
self._get_deployment_path_using_slot(
service_name, deployment_slot),
Deployment) | Returns configuration information, status, and system properties for
a deployment.
service_name:
Name of the hosted service.
deployment_slot:
The environment to which the hosted service is deployed. Valid
values are: staging, production | Below is the the instruction that describes the task:
### Input:
Returns configuration information, status, and system properties for
a deployment.
service_name:
Name of the hosted service.
deployment_slot:
The environment to which the hosted service is deployed. Valid
values are: staging, production
### Response:
def get_deployment_by_slot(self, service_name, deployment_slot):
'''
Returns configuration information, status, and system properties for
a deployment.
service_name:
Name of the hosted service.
deployment_slot:
The environment to which the hosted service is deployed. Valid
values are: staging, production
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_slot', deployment_slot)
return self._perform_get(
self._get_deployment_path_using_slot(
service_name, deployment_slot),
Deployment) |
def wheel_delta_discrete(self):
"""The delta for the wheel in discrete steps (e.g. wheel clicks) and
whether it has changed in this event.
Returns:
(int, bool): The delta of the wheel, in discrete steps, compared to
the last event and whether it has changed.
"""
delta = self._libinput. \
libinput_event_tablet_tool_get_wheel_delta_discrete(self._handle)
changed = self._libinput.libinput_event_tablet_tool_wheel_has_changed(
self._handle)
return delta, changed | The delta for the wheel in discrete steps (e.g. wheel clicks) and
whether it has changed in this event.
Returns:
(int, bool): The delta of the wheel, in discrete steps, compared to
the last event and whether it has changed. | Below is the the instruction that describes the task:
### Input:
The delta for the wheel in discrete steps (e.g. wheel clicks) and
whether it has changed in this event.
Returns:
(int, bool): The delta of the wheel, in discrete steps, compared to
the last event and whether it has changed.
### Response:
def wheel_delta_discrete(self):
"""The delta for the wheel in discrete steps (e.g. wheel clicks) and
whether it has changed in this event.
Returns:
(int, bool): The delta of the wheel, in discrete steps, compared to
the last event and whether it has changed.
"""
delta = self._libinput. \
libinput_event_tablet_tool_get_wheel_delta_discrete(self._handle)
changed = self._libinput.libinput_event_tablet_tool_wheel_has_changed(
self._handle)
return delta, changed |
def set_nodes_vlan(site, nodes, interface, vlan_id):
"""Set the interface of the nodes in a specific vlan.
It is assumed that the same interface name is available on the node.
Args:
site(str): site to consider
nodes(list): nodes to consider
interface(str): the network interface to put in the vlan
vlan_id(str): the id of the vlan
"""
def _to_network_address(host):
"""Translate a host to a network address
e.g:
paranoia-20.rennes.grid5000.fr -> paranoia-20-eth2.rennes.grid5000.fr
"""
splitted = host.split('.')
splitted[0] = splitted[0] + "-" + interface
return ".".join(splitted)
gk = get_api_client()
network_addresses = [_to_network_address(n) for n in nodes]
gk.sites[site].vlans[str(vlan_id)].submit({"nodes": network_addresses}) | Set the interface of the nodes in a specific vlan.
It is assumed that the same interface name is available on the node.
Args:
site(str): site to consider
nodes(list): nodes to consider
interface(str): the network interface to put in the vlan
vlan_id(str): the id of the vlan | Below is the the instruction that describes the task:
### Input:
Set the interface of the nodes in a specific vlan.
It is assumed that the same interface name is available on the node.
Args:
site(str): site to consider
nodes(list): nodes to consider
interface(str): the network interface to put in the vlan
vlan_id(str): the id of the vlan
### Response:
def set_nodes_vlan(site, nodes, interface, vlan_id):
"""Set the interface of the nodes in a specific vlan.
It is assumed that the same interface name is available on the node.
Args:
site(str): site to consider
nodes(list): nodes to consider
interface(str): the network interface to put in the vlan
vlan_id(str): the id of the vlan
"""
def _to_network_address(host):
"""Translate a host to a network address
e.g:
paranoia-20.rennes.grid5000.fr -> paranoia-20-eth2.rennes.grid5000.fr
"""
splitted = host.split('.')
splitted[0] = splitted[0] + "-" + interface
return ".".join(splitted)
gk = get_api_client()
network_addresses = [_to_network_address(n) for n in nodes]
gk.sites[site].vlans[str(vlan_id)].submit({"nodes": network_addresses}) |
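The nested helper's hostname rewrite, shown standalone with a hypothetical node and the interface name "eth2":

host, interface = "paranoia-20.rennes.grid5000.fr", "eth2"
parts = host.split(".")
parts[0] += "-" + interface
print(".".join(parts))  # paranoia-20-eth2.rennes.grid5000.fr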
def connection_made(self, transport: asyncio.BaseTransport) -> None:
"""
Configure write buffer limits.
The high-water limit is defined by ``self.write_limit``.
The low-water limit currently defaults to ``self.write_limit // 4`` in
:meth:`~asyncio.WriteTransport.set_write_buffer_limits`, which should
be all right for reasonable use cases of this library.
This is the earliest point where we can get hold of the transport,
which means it's the best point for configuring it.
"""
logger.debug("%s - event = connection_made(%s)", self.side, transport)
# mypy thinks transport is a BaseTransport, not a Transport.
transport.set_write_buffer_limits(self.write_limit) # type: ignore
super().connection_made(transport) | Configure write buffer limits.
The high-water limit is defined by ``self.write_limit``.
The low-water limit currently defaults to ``self.write_limit // 4`` in
:meth:`~asyncio.WriteTransport.set_write_buffer_limits`, which should
be all right for reasonable use cases of this library.
This is the earliest point where we can get hold of the transport,
which means it's the best point for configuring it. | Below is the the instruction that describes the task:
### Input:
Configure write buffer limits.
The high-water limit is defined by ``self.write_limit``.
The low-water limit currently defaults to ``self.write_limit // 4`` in
:meth:`~asyncio.WriteTransport.set_write_buffer_limits`, which should
be all right for reasonable use cases of this library.
This is the earliest point where we can get hold of the transport,
which means it's the best point for configuring it.
### Response:
def connection_made(self, transport: asyncio.BaseTransport) -> None:
"""
Configure write buffer limits.
The high-water limit is defined by ``self.write_limit``.
The low-water limit currently defaults to ``self.write_limit // 4`` in
:meth:`~asyncio.WriteTransport.set_write_buffer_limits`, which should
be all right for reasonable use cases of this library.
This is the earliest point where we can get hold of the transport,
which means it's the best point for configuring it.
"""
logger.debug("%s - event = connection_made(%s)", self.side, transport)
# mypy thinks transport is a BaseTransport, not a Transport.
transport.set_write_buffer_limits(self.write_limit) # type: ignore
super().connection_made(transport) |
def superreload(module, reload=reload, old_objects={}):
"""Enhanced version of the builtin reload function.
superreload remembers objects previously in the module, and
- upgrades the class dictionary of every old class in the module
- upgrades the code object of every old function and method
- clears the module's namespace before reloading
"""
# collect old objects in the module
for name, obj in list(module.__dict__.items()):
if not hasattr(obj, '__module__') or obj.__module__ != module.__name__:
continue
key = (module.__name__, name)
try:
old_objects.setdefault(key, []).append(weakref.ref(obj))
except TypeError:
pass
# reload module
try:
# clear namespace first from old cruft
old_dict = module.__dict__.copy()
old_name = module.__name__
module.__dict__.clear()
module.__dict__['__name__'] = old_name
module.__dict__['__loader__'] = old_dict['__loader__']
except (TypeError, AttributeError, KeyError):
pass
try:
module = reload(module)
except:
# restore module dictionary on failed reload
module.__dict__.update(old_dict)
raise
# iterate over all objects and update functions & classes
for name, new_obj in list(module.__dict__.items()):
key = (module.__name__, name)
if key not in old_objects: continue
new_refs = []
for old_ref in old_objects[key]:
old_obj = old_ref()
if old_obj is None: continue
new_refs.append(old_ref)
update_generic(old_obj, new_obj)
if new_refs:
old_objects[key] = new_refs
else:
del old_objects[key]
return module | Enhanced version of the builtin reload function.
superreload remembers objects previously in the module, and
- upgrades the class dictionary of every old class in the module
- upgrades the code object of every old function and method
- clears the module's namespace before reloading | Below is the the instruction that describes the task:
### Input:
Enhanced version of the builtin reload function.
superreload remembers objects previously in the module, and
- upgrades the class dictionary of every old class in the module
- upgrades the code object of every old function and method
- clears the module's namespace before reloading
### Response:
def superreload(module, reload=reload, old_objects={}):
"""Enhanced version of the builtin reload function.
superreload remembers objects previously in the module, and
- upgrades the class dictionary of every old class in the module
- upgrades the code object of every old function and method
- clears the module's namespace before reloading
"""
# collect old objects in the module
for name, obj in list(module.__dict__.items()):
if not hasattr(obj, '__module__') or obj.__module__ != module.__name__:
continue
key = (module.__name__, name)
try:
old_objects.setdefault(key, []).append(weakref.ref(obj))
except TypeError:
pass
# reload module
try:
# clear namespace first from old cruft
old_dict = module.__dict__.copy()
old_name = module.__name__
module.__dict__.clear()
module.__dict__['__name__'] = old_name
module.__dict__['__loader__'] = old_dict['__loader__']
except (TypeError, AttributeError, KeyError):
pass
try:
module = reload(module)
except:
# restore module dictionary on failed reload
module.__dict__.update(old_dict)
raise
# iterate over all objects and update functions & classes
for name, new_obj in list(module.__dict__.items()):
key = (module.__name__, name)
if key not in old_objects: continue
new_refs = []
for old_ref in old_objects[key]:
old_obj = old_ref()
if old_obj is None: continue
new_refs.append(old_ref)
update_generic(old_obj, new_obj)
if new_refs:
old_objects[key] = new_refs
else:
del old_objects[key]
return module |
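A hypothetical hot-reload workflow built around the function above; mymodule and Widget are stand-in names for your own code, so this is a sketch rather than something runnable as-is.

import mymodule                      # hypothetical module under development
w = mymodule.Widget()                # hypothetical class defined in it
# ... edit mymodule.py on disk ...
mymodule = superreload(mymodule)     # w now runs the updated Widget method code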
def _run_cell_text(self, text, line):
"""Run cell code in the console.
Cell code is run in the console by copying it to the console if
`self.run_cell_copy` is ``True`` otherwise by using the `run_cell`
function.
Parameters
----------
text : str
The code in the cell as a string.
line : int
The starting line number of the cell in the file.
"""
finfo = self.get_current_finfo()
editor = self.get_current_editor()
oe_data = editor.highlighter.get_outlineexplorer_data()
try:
cell_name = oe_data.get(line-1).def_name
except AttributeError:
cell_name = ''
if finfo.editor.is_python() and text:
self.run_cell_in_ipyclient.emit(text, cell_name,
finfo.filename,
self.run_cell_copy)
editor.setFocus() | Run cell code in the console.
Cell code is run in the console by copying it to the console if
`self.run_cell_copy` is ``True`` otherwise by using the `run_cell`
function.
Parameters
----------
text : str
The code in the cell as a string.
line : int
The starting line number of the cell in the file. | Below is the the instruction that describes the task:
### Input:
Run cell code in the console.
Cell code is run in the console by copying it to the console if
`self.run_cell_copy` is ``True`` otherwise by using the `run_cell`
function.
Parameters
----------
text : str
The code in the cell as a string.
line : int
The starting line number of the cell in the file.
### Response:
def _run_cell_text(self, text, line):
"""Run cell code in the console.
Cell code is run in the console by copying it to the console if
`self.run_cell_copy` is ``True`` otherwise by using the `run_cell`
function.
Parameters
----------
text : str
The code in the cell as a string.
line : int
The starting line number of the cell in the file.
"""
finfo = self.get_current_finfo()
editor = self.get_current_editor()
oe_data = editor.highlighter.get_outlineexplorer_data()
try:
cell_name = oe_data.get(line-1).def_name
except AttributeError:
cell_name = ''
if finfo.editor.is_python() and text:
self.run_cell_in_ipyclient.emit(text, cell_name,
finfo.filename,
self.run_cell_copy)
editor.setFocus() |
def select_neighbors_by_layer(docgraph, node, layer, data=False):
"""
Get all neighboring nodes belonging to (any of) the given layer(s).
A neighboring node is a node that the given node connects to with an
outgoing edge.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
layer : str or collection of str
name(s) of the layer(s)
data : bool
If True, results will include node attributes.
Yields
------
nodes : generator of str or generator of (str, dict) tuple
If data is False (default), a generator of neighbor node IDs
that are present in the given layer. If data is True,
a generator of (node ID, node attrib dict) tuples.
"""
for node_id in docgraph.neighbors_iter(node):
node_layers = docgraph.node[node_id]['layers']
if isinstance(layer, (str, unicode)):
condition = layer in node_layers
else: # ``layer`` is a list/set/dict of layers
condition = any(l in node_layers for l in layer)
if condition:
yield (node_id, docgraph.node[node_id]) if data else (node_id) | Get all neighboring nodes belonging to (any of) the given layer(s),
A neighboring node is a node that the given node connects to with an
outgoing edge.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
layer : str or collection of str
name(s) of the layer(s)
data : bool
If True, results will include node attributes.
Yields
------
nodes : generator of str or generator of (str, dict) tuple
If data is False (default), a generator of neighbor node IDs
that are present in the given layer. If data is True,
a generator of (node ID, node attrib dict) tuples. | Below is the the instruction that describes the task:
### Input:
Get all neighboring nodes belonging to (any of) the given layer(s),
A neighboring node is a node that the given node connects to with an
outgoing edge.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
layer : str or collection of str
name(s) of the layer(s)
data : bool
If True, results will include node attributes.
Yields
------
nodes : generator of str or generator of (str, dict) tuple
If data is False (default), a generator of neighbor node IDs
that are present in the given layer. If data is True,
a generator of (node ID, node attrib dict) tuples.
### Response:
def select_neighbors_by_layer(docgraph, node, layer, data=False):
"""
Get all neighboring nodes belonging to (any of) the given layer(s),
A neighboring node is a node that the given node connects to with an
outgoing edge.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
layer : str or collection of str
name(s) of the layer(s)
data : bool
If True, results will include node attributes.
Yields
------
nodes : generator of str or generator of (str, dict) tuple
If data is False (default), a generator of neighbor node IDs
that are present in the given layer. If data is True,
a generator of (node ID, node attrib dict) tuples.
"""
for node_id in docgraph.neighbors_iter(node):
node_layers = docgraph.node[node_id]['layers']
if isinstance(layer, (str, unicode)):
condition = layer in node_layers
else: # ``layer`` is a list/set/dict of layers
condition = any(l in node_layers for l in layer)
if condition:
yield (node_id, docgraph.node[node_id]) if data else (node_id) |
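A minimal usage sketch for select_neighbors_by_layer (not part of the dataset row above); the graph object dg, the node ID and the layer names are illustrative placeholders for a discoursegraphs-style document graph.

# assuming dg is a DiscourseDocumentGraph and 'tiger:token' is one of its layers
for token_id in select_neighbors_by_layer(dg, 's1_root', layer='tiger:token'):
    print(token_id)

# with data=True each item is a (node_id, attribute_dict) pair
for node_id, attrs in select_neighbors_by_layer(dg, 's1_root', layer={'tiger', 'rst'}, data=True):
    print(node_id, sorted(attrs['layers']))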
def analytics(account=None, *args, **kwargs):
"""
Simple Google Analytics integration.
First looks for an ``account`` parameter. If not supplied, uses
Django ``GOOGLE_ANALYTICS_ACCOUNT`` setting. If account not set,
raises ``TemplateSyntaxError``.
:param account:
Google Analytics account id to be used.
"""
if not account:
try:
account = settings.GOOGLE_ANALYTICS_ACCOUNT
except:
raise template.TemplateSyntaxError(
"Analytics account could not found either "
"in tag parameters or settings")
return {'account': account, 'params':kwargs } | Simple Google Analytics integration.
First looks for an ``account`` parameter. If not supplied, uses
Django ``GOOGLE_ANALYTICS_ACCOUNT`` setting. If account not set,
raises ``TemplateSyntaxError``.
:param account:
Google Analytics account id to be used. | Below is the the instruction that describes the task:
### Input:
Simple Google Analytics integration.
First looks for an ``account`` parameter. If not supplied, uses
Django ``GOOGLE_ANALYTICS_ACCOUNT`` setting. If account not set,
raises ``TemplateSyntaxError``.
:param account:
Google Analytics account id to be used.
### Response:
def analytics(account=None, *args, **kwargs):
"""
Simple Google Analytics integration.
First looks for an ``account`` parameter. If not supplied, uses
Django ``GOOGLE_ANALYTICS_ACCOUNT`` setting. If account not set,
raises ``TemplateSyntaxError``.
:param account:
Google Analytics account id to be used.
"""
if not account:
try:
account = settings.GOOGLE_ANALYTICS_ACCOUNT
except:
raise template.TemplateSyntaxError(
"Analytics account could not found either "
"in tag parameters or settings")
return {'account': account, 'params':kwargs } |
def vol_tetra(vt1, vt2, vt3, vt4):
"""
Calculate the volume of a tetrahedron, given the four vertices of vt1,
vt2, vt3 and vt4.
Args:
vt1 (array-like): coordinates of vertex 1.
vt2 (array-like): coordinates of vertex 2.
vt3 (array-like): coordinates of vertex 3.
vt4 (array-like): coordinates of vertex 4.
Returns:
(float): volume of the tetrahedron.
"""
vol_tetra = np.abs(np.dot((vt1 - vt4),
np.cross((vt2 - vt4), (vt3 - vt4)))) / 6
return vol_tetra | Calculate the volume of a tetrahedron, given the four vertices of vt1,
vt2, vt3 and vt4.
Args:
vt1 (array-like): coordinates of vertex 1.
vt2 (array-like): coordinates of vertex 2.
vt3 (array-like): coordinates of vertex 3.
vt4 (array-like): coordinates of vertex 4.
Returns:
(float): volume of the tetrahedron. | Below is the the instruction that describes the task:
### Input:
Calculate the volume of a tetrahedron, given the four vertices of vt1,
vt2, vt3 and vt4.
Args:
vt1 (array-like): coordinates of vertex 1.
vt2 (array-like): coordinates of vertex 2.
vt3 (array-like): coordinates of vertex 3.
vt4 (array-like): coordinates of vertex 4.
Returns:
(float): volume of the tetrahedron.
### Response:
def vol_tetra(vt1, vt2, vt3, vt4):
"""
Calculate the volume of a tetrahedron, given the four vertices of vt1,
vt2, vt3 and vt4.
Args:
vt1 (array-like): coordinates of vertex 1.
vt2 (array-like): coordinates of vertex 2.
vt3 (array-like): coordinates of vertex 3.
vt4 (array-like): coordinates of vertex 4.
Returns:
(float): volume of the tetrahedron.
"""
vol_tetra = np.abs(np.dot((vt1 - vt4),
np.cross((vt2 - vt4), (vt3 - vt4)))) / 6
return vol_tetra |
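A quick worked check for vol_tetra: the corner tetrahedron of the unit cube has volume 1/6, which the scalar triple product above reproduces (values below are made up for illustration).

import numpy as np

vt1, vt2, vt3, vt4 = map(np.array, [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)])
vol_tetra(vt1, vt2, vt3, vt4)  # 0.1666..., i.e. |(vt1-vt4) . ((vt2-vt4) x (vt3-vt4))| / 6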
def set_extra_info(self, username, extra_info):
"""Set extra info for the given user.
Raise a ServerError if an error occurs in the request process.
@param username The username for the user to update.
@param info The extra info as a JSON encoded string, or as a Python
dictionary like object.
"""
url = self._get_extra_info_url(username)
make_request(url, method='PUT', body=extra_info, timeout=self.timeout) | Set extra info for the given user.
Raise a ServerError if an error occurs in the request process.
@param username The username for the user to update.
@param info The extra info as a JSON encoded string, or as a Python
dictionary like object. | Below is the the instruction that describes the task:
### Input:
Set extra info for the given user.
Raise a ServerError if an error occurs in the request process.
@param username The username for the user to update.
@param info The extra info as a JSON encoded string, or as a Python
dictionary like object.
### Response:
def set_extra_info(self, username, extra_info):
"""Set extra info for the given user.
Raise a ServerError if an error occurs in the request process.
@param username The username for the user to update.
@param info The extra info as a JSON encoded string, or as a Python
dictionary like object.
"""
url = self._get_extra_info_url(username)
make_request(url, method='PUT', body=extra_info, timeout=self.timeout) |
def multiple_sources(stmt):
'''Return True if statement is supported by multiple sources.
Note: this is currently not used and replaced by BeliefEngine score cutoff
'''
sources = list(set([e.source_api for e in stmt.evidence]))
if len(sources) > 1:
return True
return False | Return True if statement is supported by multiple sources.
Note: this is currently not used and replaced by BeliefEngine score cutoff | Below is the the instruction that describes the task:
### Input:
Return True if statement is supported by multiple sources.
Note: this is currently not used and replaced by BeliefEngine score cutoff
### Response:
def multiple_sources(stmt):
'''Return True if statement is supported by multiple sources.
Note: this is currently not used and replaced by BeliefEngine score cutoff
'''
sources = list(set([e.source_api for e in stmt.evidence]))
if len(sources) > 1:
return True
return False |
def add_javascripts(self, *js_files):
"""add javascripts files in HTML body"""
# create the script tag if it doesn't exist
if self.main_soup.script is None:
script_tag = self.main_soup.new_tag('script')
self.main_soup.body.append(script_tag)
for js_file in js_files:
self.main_soup.script.append(self._text_file(js_file)) | add javascript files to the HTML body | Below is the the instruction that describes the task:
### Input:
add javascript files to the HTML body
### Response:
def add_javascripts(self, *js_files):
"""add javascripts files in HTML body"""
# create the script tag if it doesn't exist
if self.main_soup.script is None:
script_tag = self.main_soup.new_tag('script')
self.main_soup.body.append(script_tag)
for js_file in js_files:
self.main_soup.script.append(self._text_file(js_file)) |
def write_sequences_to_fasta(path, seqs):
"""
Create a FASTA file listing the given sequences.
Arguments
=========
path: str or pathlib.Path
The name of the file to create.
seqs: dict
A mapping of names to sequences, which can be either protein or DNA.
"""
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
path = Path(path)
records = []
for id, seq in seqs.items():
record = SeqRecord(Seq(seq), id=id, description='')
records.append(record)
SeqIO.write(records, str(path), 'fasta') | Create a FASTA file listing the given sequences.
Arguments
=========
path: str or pathlib.Path
The name of the file to create.
seqs: dict
A mapping of names to sequences, which can be either protein or DNA. | Below is the the instruction that describes the task:
### Input:
Create a FASTA file listing the given sequences.
Arguments
=========
path: str or pathlib.Path
The name of the file to create.
seqs: dict
A mapping of names to sequences, which can be either protein or DNA.
### Response:
def write_sequences_to_fasta(path, seqs):
"""
Create a FASTA file listing the given sequences.
Arguments
=========
path: str or pathlib.Path
The name of the file to create.
seqs: dict
A mapping of names to sequences, which can be either protein or DNA.
"""
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
path = Path(path)
records = []
for id, seq in seqs.items():
record = SeqRecord(Seq(seq), id=id, description='')
records.append(record)
SeqIO.write(records, str(path), 'fasta') |
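An illustrative call for write_sequences_to_fasta (file name and sequences are made up); it requires Biopython, as the imports inside the function show.

seqs = {'prot1': 'MKTAYIAKQR', 'prot2': 'MADEEKLPPG'}
write_sequences_to_fasta('example.fasta', seqs)
# example.fasta now holds two records, '>prot1' and '>prot2'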
def patch(self, request, format=None):
"""
Update an existing Channel
"""
data = request.data.copy()
# Get chat type record
try:
ct = ChatType.objects.get(id=data.pop("chat_type"))
data["chat_type"] = ct
except ChatType.DoesNotExist:
return typeNotFound404
if not self.is_path_unique(
data["id"], data["publish_path"], ct.publish_path
):
return notUnique400
# Get channel record
try:
c = Channel.objects.get(id=data.pop("id"))
except Channel.DoesNotExist:
return channelNotFound404
# Save new data
for key, value in data.items():
setattr(c, key, value)
c.save()
self.handle_webhook(c)
return Response(
{
"text": "Channel saved.",
"method": "PATCH",
"saved": ChannelCMSSerializer(c).data,
},
200,
) | Update an existing Channel | Below is the the instruction that describes the task:
### Input:
Update an existing Channel
### Response:
def patch(self, request, format=None):
"""
Update an existing Channel
"""
data = request.data.copy()
# Get chat type record
try:
ct = ChatType.objects.get(id=data.pop("chat_type"))
data["chat_type"] = ct
except ChatType.DoesNotExist:
return typeNotFound404
if not self.is_path_unique(
data["id"], data["publish_path"], ct.publish_path
):
return notUnique400
# Get channel record
try:
c = Channel.objects.get(id=data.pop("id"))
except Channel.DoesNotExist:
return channelNotFound404
# Save new data
for key, value in data.items():
setattr(c, key, value)
c.save()
self.handle_webhook(c)
return Response(
{
"text": "Channel saved.",
"method": "PATCH",
"saved": ChannelCMSSerializer(c).data,
},
200,
) |
def business_hours_schedule_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/schedules#create-a-schedule"
api_path = "/api/v2/business_hours/schedules.json"
return self.call(api_path, method="POST", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/schedules#create-a-schedule | Below is the the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/schedules#create-a-schedule
### Response:
def business_hours_schedule_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/schedules#create-a-schedule"
api_path = "/api/v2/business_hours/schedules.json"
return self.call(api_path, method="POST", data=data, **kwargs) |
def filesavebox(msg=None, title=None, argInitialFile=None):
"""Original doc: A file to get the name of a file to save.
Returns the name of a file, or None if user chose to cancel.
if argInitialFile contains a valid filename, the dialog will
be positioned at that file when it appears.
"""
return psidialogs.ask_file(message=msg, title=title, default=argInitialFile, save=True) | Original doc: A file to get the name of a file to save.
Returns the name of a file, or None if user chose to cancel.
if argInitialFile contains a valid filename, the dialog will
be positioned at that file when it appears. | Below is the the instruction that describes the task:
### Input:
Original doc: A file to get the name of a file to save.
Returns the name of a file, or None if user chose to cancel.
if argInitialFile contains a valid filename, the dialog will
be positioned at that file when it appears.
### Response:
def filesavebox(msg=None, title=None, argInitialFile=None):
"""Original doc: A file to get the name of a file to save.
Returns the name of a file, or None if user chose to cancel.
if argInitialFile contains a valid filename, the dialog will
be positioned at that file when it appears.
"""
return psidialogs.ask_file(message=msg, title=title, default=argInitialFile, save=True) |
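A hypothetical call for filesavebox; it opens a native save dialog via psidialogs and returns the chosen path, or None if the user cancels. The file name below is illustrative.

path = filesavebox(msg='Save report as...', title='Save', argInitialFile='report.txt')
if path is not None:
    with open(path, 'w') as f:
        f.write('report body')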
def badge_label(self, badge):
'''Display the badge label for a given kind'''
kind = badge.kind if isinstance(badge, Badge) else badge
return self.__badges__[kind] | Display the badge label for a given kind | Below is the the instruction that describes the task:
### Input:
Display the badge label for a given kind
### Response:
def badge_label(self, badge):
'''Display the badge label for a given kind'''
kind = badge.kind if isinstance(badge, Badge) else badge
return self.__badges__[kind] |
def verify_param(self, param, must=[], r=None):
'''return Code.ARGUMENT_MISSING if any key in must is not found in param'''
if APIKEY not in param:
param[APIKEY] = self.apikey()
r = Result() if r is None else r
for p in must:
if p not in param:
r.code(Code.ARGUMENT_MISSING).detail('missing-' + p)
break
return r | return Code.ARGUMENT_MISSING if any key in must is not found in param | Below is the the instruction that describes the task:
### Input:
return Code.ARGUMENT_MISSING if any key in must is not found in param
### Response:
def verify_param(self, param, must=[], r=None):
'''return Code.ARGUMENT_MISSING if any key in must is not found in param'''
if APIKEY not in param:
param[APIKEY] = self.apikey()
r = Result() if r is None else r
for p in must:
if p not in param:
r.code(Code.ARGUMENT_MISSING).detail('missing-' + p)
break
return r |
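A sketch of verify_param used as a guard before an API call; the client object, parameter names and the Result/Code behaviour are inferred from the snippet above, not documented.

param = {'mobile': '1861602xxxx'}
r = client.verify_param(param, must=['mobile', 'text'])
# r now carries Code.ARGUMENT_MISSING with detail 'missing-text', and the
# API key has been injected into param under the APIKEY constant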
def parents(self, lhs, rhs):
"""Find nodes in rhs which have parents in lhs."""
return [node for node in rhs if node.parent in lhs] | Find nodes in rhs which have parents in lhs. | Below is the the instruction that describes the task:
### Input:
Find nodes in rhs which have parents in lhs.
### Response:
def parents(self, lhs, rhs):
"""Find nodes in rhs which have parents in lhs."""
return [node for node in rhs if node.parent in lhs] |
def _extract_hemispheric_difference(image, mask = slice(None), sigma_active = 7, sigma_reference = 7, cut_plane = 0, voxelspacing = None):
"""
Internal, single-image version of `hemispheric_difference`.
"""
# constants
INTERPOLATION_RANGE = int(10) # how many neighbouring values to take into account when interpolating the medial longitudinal fissure slice
# check arguments
if cut_plane >= image.ndim:
raise ArgumentError('The supplied cut-plane ({}) is invalid, the image has only {} dimensions.'.format(cut_plane, image.ndim))
# set voxel spacing
if voxelspacing is None:
voxelspacing = [1.] * image.ndim
# compute the (presumed) location of the medial longitudinal fissure, treating also the special case of an odd number of slices, in which case a cut into two equal halves is not possible
medial_longitudinal_fissure = int(image.shape[cut_plane] / 2)
medial_longitudinal_fissure_excluded = image.shape[cut_plane] % 2
# split the head into a dexter and sinister half along the saggital plane
# this is assumed to be consistent with a cut of the brain along the medial longitudinal fissure, thus separating it into its hemispheres
slicer = [slice(None)] * image.ndim
slicer[cut_plane] = slice(None, medial_longitudinal_fissure)
left_hemisphere = image[slicer]
slicer[cut_plane] = slice(medial_longitudinal_fissure + medial_longitudinal_fissure_excluded, None)
right_hemisphere = image[slicer]
# flip right hemisphere image along cut plane
slicer[cut_plane] = slice(None, None, -1)
right_hemisphere = right_hemisphere[slicer]
# subtract once left from right and once right from left hemisphere, including smoothing steps
right_hemisphere_difference = _substract_hemispheres(right_hemisphere, left_hemisphere, sigma_active, sigma_reference, voxelspacing)
left_hemisphere_difference = _substract_hemispheres(left_hemisphere, right_hemisphere, sigma_active, sigma_reference, voxelspacing)
# re-flip right hemisphere image to original orientation
right_hemisphere_difference = right_hemisphere_difference[slicer]
# estimate the medial longitudinal fissure if required
if 1 == medial_longitudinal_fissure_excluded:
left_slicer = [slice(None)] * image.ndim
right_slicer = [slice(None)] * image.ndim
left_slicer[cut_plane] = slice(-1 * INTERPOLATION_RANGE, None)
right_slicer[cut_plane] = slice(None, INTERPOLATION_RANGE)
interp_data_left = left_hemisphere_difference[left_slicer]
interp_data_right = right_hemisphere_difference[right_slicer]
interp_indices_left = list(range(-1 * interp_data_left.shape[cut_plane], 0))
interp_indices_right = list(range(1, interp_data_right.shape[cut_plane] + 1))
interp_data = numpy.concatenate((left_hemisphere_difference[left_slicer], right_hemisphere_difference[right_slicer]), cut_plane)
interp_indices = numpy.concatenate((interp_indices_left, interp_indices_right), 0)
medial_longitudinal_fissure_estimated = interp1d(interp_indices, interp_data, kind='cubic', axis=cut_plane)(0)
# add singleton dimension
slicer[cut_plane] = numpy.newaxis
medial_longitudinal_fissure_estimated = medial_longitudinal_fissure_estimated[slicer]
# stitch images back together
if 1 == medial_longitudinal_fissure_excluded:
hemisphere_difference = numpy.concatenate((left_hemisphere_difference, medial_longitudinal_fissure_estimated, right_hemisphere_difference), cut_plane)
else:
hemisphere_difference = numpy.concatenate((left_hemisphere_difference, right_hemisphere_difference), cut_plane)
# extract intensities and return
return _extract_intensities(hemisphere_difference, mask) | Internal, single-image version of `hemispheric_difference`. | Below is the the instruction that describes the task:
### Input:
Internal, single-image version of `hemispheric_difference`.
### Response:
def _extract_hemispheric_difference(image, mask = slice(None), sigma_active = 7, sigma_reference = 7, cut_plane = 0, voxelspacing = None):
"""
Internal, single-image version of `hemispheric_difference`.
"""
# constants
INTERPOLATION_RANGE = int(10) # how many neighbouring values to take into account when interpolating the medial longitudinal fissure slice
# check arguments
if cut_plane >= image.ndim:
raise ArgumentError('The supplied cut-plane ({}) is invalid, the image has only {} dimensions.'.format(cut_plane, image.ndim))
# set voxel spacing
if voxelspacing is None:
voxelspacing = [1.] * image.ndim
# compute the (presumed) location of the medial longitudinal fissure, treating also the special case of an odd number of slices, in which case a cut into two equal halves is not possible
medial_longitudinal_fissure = int(image.shape[cut_plane] / 2)
medial_longitudinal_fissure_excluded = image.shape[cut_plane] % 2
# split the head into a dexter and sinister half along the saggital plane
# this is assumed to be consistent with a cut of the brain along the medial longitudinal fissure, thus separating it into its hemispheres
slicer = [slice(None)] * image.ndim
slicer[cut_plane] = slice(None, medial_longitudinal_fissure)
left_hemisphere = image[slicer]
slicer[cut_plane] = slice(medial_longitudinal_fissure + medial_longitudinal_fissure_excluded, None)
right_hemisphere = image[slicer]
# flip right hemisphere image along cut plane
slicer[cut_plane] = slice(None, None, -1)
right_hemisphere = right_hemisphere[slicer]
# subtract once left from right and once right from left hemisphere, including smoothing steps
right_hemisphere_difference = _substract_hemispheres(right_hemisphere, left_hemisphere, sigma_active, sigma_reference, voxelspacing)
left_hemisphere_difference = _substract_hemispheres(left_hemisphere, right_hemisphere, sigma_active, sigma_reference, voxelspacing)
# re-flip right hemisphere image to original orientation
right_hemisphere_difference = right_hemisphere_difference[slicer]
# estimate the medial longitudinal fissure if required
if 1 == medial_longitudinal_fissure_excluded:
left_slicer = [slice(None)] * image.ndim
right_slicer = [slice(None)] * image.ndim
left_slicer[cut_plane] = slice(-1 * INTERPOLATION_RANGE, None)
right_slicer[cut_plane] = slice(None, INTERPOLATION_RANGE)
interp_data_left = left_hemisphere_difference[left_slicer]
interp_data_right = right_hemisphere_difference[right_slicer]
interp_indices_left = list(range(-1 * interp_data_left.shape[cut_plane], 0))
interp_indices_right = list(range(1, interp_data_right.shape[cut_plane] + 1))
interp_data = numpy.concatenate((left_hemisphere_difference[left_slicer], right_hemisphere_difference[right_slicer]), cut_plane)
interp_indices = numpy.concatenate((interp_indices_left, interp_indices_right), 0)
medial_longitudinal_fissure_estimated = interp1d(interp_indices, interp_data, kind='cubic', axis=cut_plane)(0)
# add singleton dimension
slicer[cut_plane] = numpy.newaxis
medial_longitudinal_fissure_estimated = medial_longitudinal_fissure_estimated[slicer]
# stitch images back together
if 1 == medial_longitudinal_fissure_excluded:
hemisphere_difference = numpy.concatenate((left_hemisphere_difference, medial_longitudinal_fissure_estimated, right_hemisphere_difference), cut_plane)
else:
hemisphere_difference = numpy.concatenate((left_hemisphere_difference, right_hemisphere_difference), cut_plane)
# extract intensities and return
return _extract_intensities(hemisphere_difference, mask) |
def get_table_location(self, database_name, table_name):
"""
Get the physical location of the table
:param database_name: Name of hive database (schema) @table belongs to
:type database_name: str
:param table_name: Name of hive table
:type table_name: str
:return: str
"""
table = self.get_table(database_name, table_name)
return table['StorageDescriptor']['Location'] | Get the physical location of the table
:param database_name: Name of hive database (schema) @table belongs to
:type database_name: str
:param table_name: Name of hive table
:type table_name: str
:return: str | Below is the the instruction that describes the task:
### Input:
Get the physical location of the table
:param database_name: Name of hive database (schema) @table belongs to
:type database_name: str
:param table_name: Name of hive table
:type table_name: str
:return: str
### Response:
def get_table_location(self, database_name, table_name):
"""
Get the physical location of the table
:param database_name: Name of hive database (schema) @table belongs to
:type database_name: str
:param table_name: Name of hive table
:type table_name: str
:return: str
"""
table = self.get_table(database_name, table_name)
return table['StorageDescriptor']['Location'] |
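A hedged usage sketch for get_table_location, assuming the method belongs to an Airflow AWS Glue catalog hook; the connection ID, region and table names are illustrative.

hook = AwsGlueCatalogHook(aws_conn_id='aws_default', region_name='us-east-1')
s3_path = hook.get_table_location('analytics_db', 'events')
# e.g. 's3://my-bucket/warehouse/events/'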
def as_dict(self):
"""
Bson-serializable dict representation of the MultiWeightsChemenvStrategy object.
:return: Bson-serializable dict representation of the MultiWeightsChemenvStrategy object.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"additional_condition": self._additional_condition,
"symmetry_measure_type": self.symmetry_measure_type,
"dist_ang_area_weight": self.dist_ang_area_weight.as_dict()
if self.dist_ang_area_weight is not None else None,
"self_csm_weight": self.self_csm_weight.as_dict()
if self.self_csm_weight is not None else None,
"delta_csm_weight": self.delta_csm_weight.as_dict()
if self.delta_csm_weight is not None else None,
"cn_bias_weight": self.cn_bias_weight.as_dict()
if self.cn_bias_weight is not None else None,
"angle_weight": self.angle_weight.as_dict()
if self.angle_weight is not None else None,
"normalized_angle_distance_weight": self.normalized_angle_distance_weight.as_dict()
if self.normalized_angle_distance_weight is not None else None,
"ce_estimator": self.ce_estimator,
} | Bson-serializable dict representation of the MultiWeightsChemenvStrategy object.
:return: Bson-serializable dict representation of the MultiWeightsChemenvStrategy object. | Below is the the instruction that describes the task:
### Input:
Bson-serializable dict representation of the MultiWeightsChemenvStrategy object.
:return: Bson-serializable dict representation of the MultiWeightsChemenvStrategy object.
### Response:
def as_dict(self):
"""
Bson-serializable dict representation of the MultiWeightsChemenvStrategy object.
:return: Bson-serializable dict representation of the MultiWeightsChemenvStrategy object.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"additional_condition": self._additional_condition,
"symmetry_measure_type": self.symmetry_measure_type,
"dist_ang_area_weight": self.dist_ang_area_weight.as_dict()
if self.dist_ang_area_weight is not None else None,
"self_csm_weight": self.self_csm_weight.as_dict()
if self.self_csm_weight is not None else None,
"delta_csm_weight": self.delta_csm_weight.as_dict()
if self.delta_csm_weight is not None else None,
"cn_bias_weight": self.cn_bias_weight.as_dict()
if self.cn_bias_weight is not None else None,
"angle_weight": self.angle_weight.as_dict()
if self.angle_weight is not None else None,
"normalized_angle_distance_weight": self.normalized_angle_distance_weight.as_dict()
if self.normalized_angle_distance_weight is not None else None,
"ce_estimator": self.ce_estimator,
} |
def execute(self, input_data):
''' Execute the PEIndicators worker '''
raw_bytes = input_data['sample']['raw_bytes']
# Analyze the output of pefile for any anomalous conditions.
# Have the PE File module process the file
try:
self.pefile_handle = pefile.PE(data=raw_bytes, fast_load=False)
except (AttributeError, pefile.PEFormatError), error:
return {'error': str(error), 'indicator_list': [{'Error': 'PE module failed!'}]}
indicators = []
indicators += [{'description': warn, 'severity': 2, 'category': 'PE_WARN'}
for warn in self.pefile_handle.get_warnings()]
# Automatically invoke any method of this class that starts with 'check'
check_methods = self._get_check_methods()
for check_method in check_methods:
hit_data = check_method()
if hit_data:
indicators.append(hit_data)
return {'indicator_list': indicators} | Execute the PEIndicators worker | Below is the the instruction that describes the task:
### Input:
Execute the PEIndicators worker
### Response:
def execute(self, input_data):
''' Execute the PEIndicators worker '''
raw_bytes = input_data['sample']['raw_bytes']
# Analyze the output of pefile for any anomalous conditions.
# Have the PE File module process the file
try:
self.pefile_handle = pefile.PE(data=raw_bytes, fast_load=False)
except (AttributeError, pefile.PEFormatError), error:
return {'error': str(error), 'indicator_list': [{'Error': 'PE module failed!'}]}
indicators = []
indicators += [{'description': warn, 'severity': 2, 'category': 'PE_WARN'}
for warn in self.pefile_handle.get_warnings()]
# Automatically invoke any method of this class that starts with 'check'
check_methods = self._get_check_methods()
for check_method in check_methods:
hit_data = check_method()
if hit_data:
indicators.append(hit_data)
return {'indicator_list': indicators} |
def preemptable(self):
"""
Whether the job can be run on a preemptable node.
"""
if self._preemptable is not None:
return self._preemptable
elif self._config is not None:
return self._config.defaultPreemptable
else:
raise AttributeError("Default value for 'preemptable' cannot be determined") | Whether the job can be run on a preemptable node. | Below is the the instruction that describes the task:
### Input:
Whether the job can be run on a preemptable node.
### Response:
def preemptable(self):
"""
Whether the job can be run on a preemptable node.
"""
if self._preemptable is not None:
return self._preemptable
elif self._config is not None:
return self._config.defaultPreemptable
else:
raise AttributeError("Default value for 'preemptable' cannot be determined") |
def addSubprocess(self, fds, name, factory):
"""
Public method for _addSubprocess.
Wraps reactor.adoptStreamConnection in
a simple DeferredLock to guarantee
workers play well together.
"""
self._lock.run(self._addSubprocess, self, fds, name, factory) | Public method for _addSubprocess.
Wraps reactor.adoptStreamConnection in
a simple DeferredLock to guarantee
workers play well together. | Below is the the instruction that describes the task:
### Input:
Public method for _addSubprocess.
Wraps reactor.adoptStreamConnection in
a simple DeferredLock to guarantee
workers play well together.
### Response:
def addSubprocess(self, fds, name, factory):
"""
Public method for _addSubprocess.
Wraps reactor.adoptStreamConnection in
a simple DeferredLock to guarantee
workers play well together.
"""
self._lock.run(self._addSubprocess, self, fds, name, factory) |
def after_log(logger, log_level, sec_format="%0.3f"):
"""After call strategy that logs to some logger the finished attempt."""
log_tpl = ("Finished call to '%s' after " + str(sec_format) + "(s), "
"this was the %s time calling it.")
def log_it(retry_state):
logger.log(log_level, log_tpl,
_utils.get_callback_name(retry_state.fn),
retry_state.seconds_since_start,
_utils.to_ordinal(retry_state.attempt_number))
return log_it | After call strategy that logs to some logger the finished attempt. | Below is the the instruction that describes the task:
### Input:
After call strategy that logs to some logger the finished attempt.
### Response:
def after_log(logger, log_level, sec_format="%0.3f"):
"""After call strategy that logs to some logger the finished attempt."""
log_tpl = ("Finished call to '%s' after " + str(sec_format) + "(s), "
"this was the %s time calling it.")
def log_it(retry_state):
logger.log(log_level, log_tpl,
_utils.get_callback_name(retry_state.fn),
retry_state.seconds_since_start,
_utils.to_ordinal(retry_state.attempt_number))
return log_it |
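A minimal wiring sketch for after_log with a tenacity-style retry decorator; the import path and logger are assumptions rather than part of the row above.

import logging
from tenacity import retry, stop_after_attempt, after_log

logger = logging.getLogger(__name__)

@retry(stop=stop_after_attempt(3), after=after_log(logger, logging.DEBUG))
def flaky_call():
    ...  # each finished attempt is logged at DEBUG level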
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result | Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. | Below is the the instruction that describes the task:
### Input:
Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids.
### Response:
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result |
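An illustrative input line and call for read_follower_file: the parser splits on whitespace, ignores the first column, lower-cases the screen name in the second column and reads the remaining columns as integer follower IDs.

# followers.txt (made-up content):
# 3 JustinBieber 1001 1002 1003
followers = read_follower_file('followers.txt', min_followers=2)
# -> {'justinbieber': {1001, 1002, 1003}}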
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existent. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount'] | Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existent. This will also add the payment amount to the respective
batch total. | Below is the the instruction that describes the task:
### Input:
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existent. This will also add the payment amount to the respective
batch total.
### Response:
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
not existent. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount'] |
def _deserialize_audience(audience_map):
""" Helper method to de-serialize and populate audience map with the condition list and structure.
Args:
audience_map: Dict mapping audience ID to audience object.
Returns:
Dict additionally consisting of condition list and structure on every audience object.
"""
for audience in audience_map.values():
condition_structure, condition_list = condition_helper.loads(audience.conditions)
audience.__dict__.update({
'conditionStructure': condition_structure,
'conditionList': condition_list
})
return audience_map | Helper method to de-serialize and populate audience map with the condition list and structure.
Args:
audience_map: Dict mapping audience ID to audience object.
Returns:
Dict additionally consisting of condition list and structure on every audience object. | Below is the the instruction that describes the task:
### Input:
Helper method to de-serialize and populate audience map with the condition list and structure.
Args:
audience_map: Dict mapping audience ID to audience object.
Returns:
Dict additionally consisting of condition list and structure on every audience object.
### Response:
def _deserialize_audience(audience_map):
""" Helper method to de-serialize and populate audience map with the condition list and structure.
Args:
audience_map: Dict mapping audience ID to audience object.
Returns:
Dict additionally consisting of condition list and structure on every audience object.
"""
for audience in audience_map.values():
condition_structure, condition_list = condition_helper.loads(audience.conditions)
audience.__dict__.update({
'conditionStructure': condition_structure,
'conditionList': condition_list
})
return audience_map |
def get_context_from_cmdln(args, desc="Run scriptworker"):
"""Create a Context object from args.
Args:
args (list): the commandline args. Generally sys.argv
Returns:
tuple: ``scriptworker.context.Context`` with populated config, and
credentials frozendict
"""
context = Context()
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
"config_path", type=str, nargs="?", default="scriptworker.yaml",
help="the path to the config file"
)
parsed_args = parser.parse_args(args)
context.config, credentials = create_config(config_path=parsed_args.config_path)
update_logging_config(context)
return context, credentials | Create a Context object from args.
Args:
args (list): the commandline args. Generally sys.argv
Returns:
tuple: ``scriptworker.context.Context`` with populated config, and
credentials frozendict | Below is the the instruction that describes the task:
### Input:
Create a Context object from args.
Args:
args (list): the commandline args. Generally sys.argv
Returns:
tuple: ``scriptworker.context.Context`` with populated config, and
credentials frozendict
### Response:
def get_context_from_cmdln(args, desc="Run scriptworker"):
"""Create a Context object from args.
Args:
args (list): the commandline args. Generally sys.argv
Returns:
tuple: ``scriptworker.context.Context`` with populated config, and
credentials frozendict
"""
context = Context()
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
"config_path", type=str, nargs="?", default="scriptworker.yaml",
help="the path to the config file"
)
parsed_args = parser.parse_args(args)
context.config, credentials = create_config(config_path=parsed_args.config_path)
update_logging_config(context)
return context, credentials |
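A sketch of the typical call to get_context_from_cmdln from a worker entry point (the argv slice is illustrative); credentials are returned separately so they are not logged together with the config.

import sys

context, credentials = get_context_from_cmdln(sys.argv[1:])
# context.config now holds the settings parsed from scriptworker.yaml
# (or from the config path given on the command line)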
def infos(self, type=None, failed=False):
"""Get infos that originate from this node.
Type must be a subclass of :class:`~dallinger.models.Info`, the default is
``Info``. Failed can be True, False or "all".
"""
if type is None:
type = Info
if not issubclass(type, Info):
raise TypeError(
"Cannot get infos of type {} " "as it is not a valid type.".format(type)
)
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid vector failed".format(failed))
if failed == "all":
return type.query.filter_by(origin_id=self.id).all()
else:
return type.query.filter_by(origin_id=self.id, failed=failed).all() | Get infos that originate from this node.
Type must be a subclass of :class:`~dallinger.models.Info`, the default is
``Info``. Failed can be True, False or "all". | Below is the the instruction that describes the task:
### Input:
Get infos that originate from this node.
Type must be a subclass of :class:`~dallinger.models.Info`, the default is
``Info``. Failed can be True, False or "all".
### Response:
def infos(self, type=None, failed=False):
"""Get infos that originate from this node.
Type must be a subclass of :class:`~dallinger.models.Info`, the default is
``Info``. Failed can be True, False or "all".
"""
if type is None:
type = Info
if not issubclass(type, Info):
raise TypeError(
"Cannot get infos of type {} " "as it is not a valid type.".format(type)
)
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid vector failed".format(failed))
if failed == "all":
return type.query.filter_by(origin_id=self.id).all()
else:
return type.query.filter_by(origin_id=self.id, failed=failed).all() |
def _verify_shape_bounds(shape, bounds):
"""Verify that shape corresponds to bounds apect ratio."""
if not isinstance(shape, (tuple, list)) or len(shape) != 2:
raise TypeError(
"shape must be a tuple or list with two elements: %s" % str(shape)
)
if not isinstance(bounds, (tuple, list)) or len(bounds) != 4:
raise TypeError(
"bounds must be a tuple or list with four elements: %s" % str(bounds)
)
shape = Shape(*shape)
bounds = Bounds(*bounds)
shape_ratio = shape.width / shape.height
bounds_ratio = (bounds.right - bounds.left) / (bounds.top - bounds.bottom)
if abs(shape_ratio - bounds_ratio) > DELTA:
min_length = min([
(bounds.right - bounds.left) / shape.width,
(bounds.top - bounds.bottom) / shape.height
])
proposed_bounds = Bounds(
bounds.left,
bounds.bottom,
bounds.left + shape.width * min_length,
bounds.bottom + shape.height * min_length
)
raise ValueError(
"shape ratio (%s) must equal bounds ratio (%s); try %s" % (
shape_ratio, bounds_ratio, proposed_bounds
)
) | Verify that shape corresponds to bounds aspect ratio. | Below is the the instruction that describes the task:
### Input:
Verify that shape corresponds to bounds aspect ratio.
### Response:
def _verify_shape_bounds(shape, bounds):
"""Verify that shape corresponds to bounds apect ratio."""
if not isinstance(shape, (tuple, list)) or len(shape) != 2:
raise TypeError(
"shape must be a tuple or list with two elements: %s" % str(shape)
)
if not isinstance(bounds, (tuple, list)) or len(bounds) != 4:
raise TypeError(
"bounds must be a tuple or list with four elements: %s" % str(bounds)
)
shape = Shape(*shape)
bounds = Bounds(*bounds)
shape_ratio = shape.width / shape.height
bounds_ratio = (bounds.right - bounds.left) / (bounds.top - bounds.bottom)
if abs(shape_ratio - bounds_ratio) > DELTA:
min_length = min([
(bounds.right - bounds.left) / shape.width,
(bounds.top - bounds.bottom) / shape.height
])
proposed_bounds = Bounds(
bounds.left,
bounds.bottom,
bounds.left + shape.width * min_length,
bounds.bottom + shape.height * min_length
)
raise ValueError(
"shape ratio (%s) must equal bounds ratio (%s); try %s" % (
shape_ratio, bounds_ratio, proposed_bounds
)
) |
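A worked check of _verify_shape_bounds with made-up numbers: a square shape against square bounds passes silently, while mismatched aspect ratios raise ValueError with a proposed corrected bounds.

_verify_shape_bounds(shape=(256, 256), bounds=(0, 0, 10, 10))   # ok: both ratios are 1.0
_verify_shape_bounds(shape=(256, 256), bounds=(0, 0, 20, 10))   # ValueError: ratio 1.0 vs 2.0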
def client_pause(self, timeout):
"""Stop processing commands from clients for *timeout* milliseconds.
:raises TypeError: if timeout is not int
:raises ValueError: if timeout is less than 0
"""
if not isinstance(timeout, int):
raise TypeError("timeout argument must be int")
if timeout < 0:
raise ValueError("timeout must be greater equal 0")
fut = self.execute(b'CLIENT', b'PAUSE', timeout)
return wait_ok(fut) | Stop processing commands from clients for *timeout* milliseconds.
:raises TypeError: if timeout is not int
:raises ValueError: if timeout is less than 0 | Below is the the instruction that describes the task:
### Input:
Stop processing commands from clients for *timeout* milliseconds.
:raises TypeError: if timeout is not int
:raises ValueError: if timeout is less than 0
### Response:
def client_pause(self, timeout):
"""Stop processing commands from clients for *timeout* milliseconds.
:raises TypeError: if timeout is not int
:raises ValueError: if timeout is less than 0
"""
if not isinstance(timeout, int):
raise TypeError("timeout argument must be int")
if timeout < 0:
raise ValueError("timeout must be greater equal 0")
fut = self.execute(b'CLIENT', b'PAUSE', timeout)
return wait_ok(fut) |
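A hedged usage sketch assuming the aioredis-style async client this method belongs to; the redis object is illustrative.

await redis.client_pause(1000)   # suspend command processing for one second
await redis.client_pause(-1)     # raises ValueError (negative timeout)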
def is_reversible(T, mu=None, tol=1e-12):
r"""Check reversibility of the given transition matrix.
Parameters
----------
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
mu : (M,) ndarray (optional)
Test reversibility with respect to this vector
tol : float (optional)
Floating point tolerance to check with
Returns
-------
is_reversible : bool
True, if T is reversible, False otherwise
Notes
-----
A transition matrix :math:`T=(t_{ij})` is reversible with respect
to a probability vector :math:`\mu=(\mu_i)` if the following holds,
.. math:: \mu_i \, t_{ij}= \mu_j \, t_{ji}.
In this case :math:`\mu` is the stationary vector for :math:`T`,
so that :math:`\mu^T T = \mu^T`.
If the stationary vector is unknown it is computed from :math:`T`
before reversibility is checked.
A reversible transition matrix has purely real eigenvalues. The
left eigenvectors :math:`(l_i)` can be computed from right
eigenvectors :math:`(r_i)` via :math:`l_i=\mu_i r_i`.
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import is_reversible
>>> P = np.array([[0.8, 0.1, 0.1], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> is_reversible(P)
False
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> is_reversible(T)
True
"""
# check input
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
mu = _types.ensure_float_vector_or_None(mu, require_order=True)
# go
if _issparse(T):
return sparse.assessment.is_reversible(T, mu, tol)
else:
return dense.assessment.is_reversible(T, mu, tol) | r"""Check reversibility of the given transition matrix.
Parameters
----------
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
mu : (M,) ndarray (optional)
Test reversibility with respect to this vector
tol : float (optional)
Floating point tolerance to check with
Returns
-------
is_reversible : bool
True, if T is reversible, False otherwise
Notes
-----
A transition matrix :math:`T=(t_{ij})` is reversible with respect
to a probability vector :math:`\mu=(\mu_i)` if the following holds,
.. math:: \mu_i \, t_{ij}= \mu_j \, t_{ji}.
In this case :math:`\mu` is the stationary vector for :math:`T`,
so that :math:`\mu^T T = \mu^T`.
If the stationary vector is unknown it is computed from :math:`T`
before reversibility is checked.
A reversible transition matrix has purely real eigenvalues. The
left eigenvectors :math:`(l_i)` can be computed from right
eigenvectors :math:`(r_i)` via :math:`l_i=\mu_i r_i`.
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import is_reversible
>>> P = np.array([[0.8, 0.1, 0.1], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> is_reversible(P)
False
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> is_reversible(T)
True | Below is the the instruction that describes the task:
### Input:
r"""Check reversibility of the given transition matrix.
Parameters
----------
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
mu : (M,) ndarray (optional)
Test reversibility with respect to this vector
tol : float (optional)
Floating point tolerance to check with
Returns
-------
is_reversible : bool
True, if T is reversible, False otherwise
Notes
-----
A transition matrix :math:`T=(t_{ij})` is reversible with respect
to a probability vector :math:`\mu=(\mu_i)` if the following holds,
.. math:: \mu_i \, t_{ij}= \mu_j \, t_{ji}.
In this case :math:`\mu` is the stationary vector for :math:`T`,
so that :math:`\mu^T T = \mu^T`.
If the stationary vector is unknown it is computed from :math:`T`
before reversibility is checked.
A reversible transition matrix has purely real eigenvalues. The
left eigenvectors :math:`(l_i)` can be computed from right
eigenvectors :math:`(r_i)` via :math:`l_i=\mu_i r_i`.
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import is_reversible
>>> P = np.array([[0.8, 0.1, 0.1], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> is_reversible(P)
False
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> is_reversible(T)
True
### Response:
def is_reversible(T, mu=None, tol=1e-12):
r"""Check reversibility of the given transition matrix.
Parameters
----------
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
mu : (M,) ndarray (optional)
Test reversibility with respect to this vector
tol : float (optional)
Floating point tolerance to check with
Returns
-------
is_reversible : bool
True, if T is reversible, False otherwise
Notes
-----
A transition matrix :math:`T=(t_{ij})` is reversible with respect
to a probability vector :math:`\mu=(\mu_i)` if the following holds,
.. math:: \mu_i \, t_{ij}= \mu_j \, t_{ji}.
In this case :math:`\mu` is the stationary vector for :math:`T`,
so that :math:`\mu^T T = \mu^T`.
If the stationary vector is unknown it is computed from :math:`T`
before reversibility is checked.
A reversible transition matrix has purely real eigenvalues. The
left eigenvectors :math:`(l_i)` can be computed from right
eigenvectors :math:`(r_i)` via :math:`l_i=\mu_i r_i`.
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import is_reversible
>>> P = np.array([[0.8, 0.1, 0.1], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> is_reversible(P)
False
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> is_reversible(T)
True
"""
# check input
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
mu = _types.ensure_float_vector_or_None(mu, require_order=True)
# go
if _issparse(T):
return sparse.assessment.is_reversible(T, mu, tol)
else:
return dense.assessment.is_reversible(T, mu, tol) |
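The property checked above is detailed balance; a manual spot check (illustrative, relying on msmtools' stationary_distribution) makes the condition explicit.

import numpy as np
from msmtools.analysis import stationary_distribution

T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
pi = stationary_distribution(T)
flux = pi[:, None] * T        # entries pi_i * T_ij
np.allclose(flux, flux.T)     # True, so T is reversible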
def _record(self, value, rank, delta, successor):
"""Catalogs a sample."""
self._observations += 1
self._items += 1
return _Sample(value, rank, delta, successor) | Catalogs a sample. | Below is the the instruction that describes the task:
### Input:
Catalogs a sample.
### Response:
def _record(self, value, rank, delta, successor):
"""Catalogs a sample."""
self._observations += 1
self._items += 1
return _Sample(value, rank, delta, successor) |
def location_once_scrolled_into_view(self):
"""THIS PROPERTY MAY CHANGE WITHOUT WARNING. Use this to discover
where on the screen an element is so that we can click it. This method
should cause the element to be scrolled into view.
Returns the top lefthand corner location on the screen, or ``None`` if
the element is not visible.
"""
if self._w3c:
old_loc = self._execute(Command.W3C_EXECUTE_SCRIPT, {
'script': "arguments[0].scrollIntoView(true); return arguments[0].getBoundingClientRect()",
'args': [self]})['value']
return {"x": round(old_loc['x']),
"y": round(old_loc['y'])}
else:
return self._execute(Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW)['value'] | THIS PROPERTY MAY CHANGE WITHOUT WARNING. Use this to discover
where on the screen an element is so that we can click it. This method
should cause the element to be scrolled into view.
Returns the top lefthand corner location on the screen, or ``None`` if
the element is not visible. | Below is the the instruction that describes the task:
### Input:
THIS PROPERTY MAY CHANGE WITHOUT WARNING. Use this to discover
where on the screen an element is so that we can click it. This method
should cause the element to be scrolled into view.
Returns the top lefthand corner location on the screen, or ``None`` if
the element is not visible.
### Response:
def location_once_scrolled_into_view(self):
"""THIS PROPERTY MAY CHANGE WITHOUT WARNING. Use this to discover
where on the screen an element is so that we can click it. This method
should cause the element to be scrolled into view.
Returns the top lefthand corner location on the screen, or ``None`` if
the element is not visible.
"""
if self._w3c:
old_loc = self._execute(Command.W3C_EXECUTE_SCRIPT, {
'script': "arguments[0].scrollIntoView(true); return arguments[0].getBoundingClientRect()",
'args': [self]})['value']
return {"x": round(old_loc['x']),
"y": round(old_loc['y'])}
else:
return self._execute(Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW)['value'] |
def maximum_vline_bundle(self, x0, y0, y1):
"""Compute a maximum set of vertical lines in the unit cells ``(x0,y)``
for :math:`y0 \leq y \leq y1`.
INPUTS:
x0, y0, y1: int
OUTPUT:
list of lists of qubits
"""
y_range = range(y1, y0 - 1, -1) if y0 < y1 else range(y1, y0 + 1)
vlines = [[(x0, y, 1, k) for y in y_range] for k in range(self.L)]
return list(filter(self._contains_line, vlines)) | Compute a maximum set of vertical lines in the unit cells ``(x0,y)``
for :math:`y0 \leq y \leq y1`.
INPUTS:
x0, y0, y1: int
OUTPUT:
list of lists of qubits | Below is the the instruction that describes the task:
### Input:
Compute a maximum set of vertical lines in the unit cells ``(x0,y)``
for :math:`y0 \leq y \leq y1`.
INPUTS:
x0, y0, y1: int
OUTPUT:
list of lists of qubits
### Response:
def maximum_vline_bundle(self, x0, y0, y1):
"""Compute a maximum set of vertical lines in the unit cells ``(x0,y)``
for :math:`y0 \leq y \leq y1`.
INPUTS:
x0, y0, y1: int
OUTPUT:
list of lists of qubits
"""
y_range = range(y1, y0 - 1, -1) if y0 < y1 else range(y1, y0 + 1)
vlines = [[(x0, y, 1, k) for y in y_range] for k in range(self.L)]
return list(filter(self._contains_line, vlines)) |
def create(self, name, *args, **kwargs):
"""
Create an instance of this resource type.
"""
resource_name = self._resource_name(name)
log.info(
"Creating {} '{}'...".format(self._model_name, resource_name))
resource = self.collection.create(*args, name=resource_name, **kwargs)
self._ids.add(resource.id)
return resource | Create an instance of this resource type. | Below is the the instruction that describes the task:
### Input:
Create an instance of this resource type.
### Response:
def create(self, name, *args, **kwargs):
"""
Create an instance of this resource type.
"""
resource_name = self._resource_name(name)
log.info(
"Creating {} '{}'...".format(self._model_name, resource_name))
resource = self.collection.create(*args, name=resource_name, **kwargs)
self._ids.add(resource.id)
return resource |
def _parse_protocol_port(name, protocol, port):
'''
.. versionadded:: 2019.2.0
Validates and parses the protocol and port/port range from the name
if both protocol and port are not provided.
If the name is in a valid format, the protocol and port are ignored if provided
Examples: tcp/8080 or udp/20-21
'''
protocol_port_pattern = r'^(tcp|udp)\/(([\d]+)\-?[\d]+)$'
name_parts = re.match(protocol_port_pattern, name)
if not name_parts:
name_parts = re.match(protocol_port_pattern, '{0}/{1}'.format(protocol, port))
if not name_parts:
raise SaltInvocationError(
'Invalid name "{0}" format and protocol and port not provided or invalid: "{1}" "{2}".'.format(
name, protocol, port))
return name_parts.group(1), name_parts.group(2) | .. versionadded:: 2019.2.0
Validates and parses the protocol and port/port range from the name
if both protocol and port are not provided.
If the name is in a valid format, the protocol and port are ignored if provided
Examples: tcp/8080 or udp/20-21 | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
Validates and parses the protocol and port/port range from the name
if both protocol and port are not provided.
If the name is in a valid format, the protocol and port are ignored if provided
Examples: tcp/8080 or udp/20-21
### Response:
def _parse_protocol_port(name, protocol, port):
'''
.. versionadded:: 2019.2.0
Validates and parses the protocol and port/port range from the name
if both protocol and port are not provided.
If the name is in a valid format, the protocol and port are ignored if provided
Examples: tcp/8080 or udp/20-21
'''
protocol_port_pattern = r'^(tcp|udp)\/(([\d]+)\-?[\d]+)$'
name_parts = re.match(protocol_port_pattern, name)
if not name_parts:
name_parts = re.match(protocol_port_pattern, '{0}/{1}'.format(protocol, port))
if not name_parts:
raise SaltInvocationError(
'Invalid name "{0}" format and protocol and port not provided or invalid: "{1}" "{2}".'.format(
name, protocol, port))
return name_parts.group(1), name_parts.group(2) |
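A behaviour sketch for _parse_protocol_port with illustrative inputs: the name wins when it already encodes protocol/port; otherwise the separate protocol and port are combined and re-checked against the same pattern.

_parse_protocol_port('tcp/8080', None, None)     # ('tcp', '8080')
_parse_protocol_port('my-rule', 'udp', '20-21')  # ('udp', '20-21')
_parse_protocol_port('my-rule', None, None)      # raises SaltInvocationError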
def _is_active_model(cls, model):
""" Check is model app name is in list of INSTALLED_APPS """
# We need to use such tricky way to check because of inconsistent apps names:
# some apps are included in format "<module_name>.<app_name>" like "waldur_core.openstack"
# other apps are included in format "<app_name>" like "nodecondcutor_sugarcrm"
return ('.'.join(model.__module__.split('.')[:2]) in settings.INSTALLED_APPS or
'.'.join(model.__module__.split('.')[:1]) in settings.INSTALLED_APPS) | Check if the model app name is in the list of INSTALLED_APPS | Below is the the instruction that describes the task:
### Input:
Check if the model app name is in the list of INSTALLED_APPS
### Response:
def _is_active_model(cls, model):
""" Check is model app name is in list of INSTALLED_APPS """
# We need to use such tricky way to check because of inconsistent apps names:
# some apps are included in format "<module_name>.<app_name>" like "waldur_core.openstack"
# other apps are included in format "<app_name>" like "nodecondcutor_sugarcrm"
return ('.'.join(model.__module__.split('.')[:2]) in settings.INSTALLED_APPS or
'.'.join(model.__module__.split('.')[:1]) in settings.INSTALLED_APPS) |
def read_text(self, name):
"""Read text string from cur_dir/name using open_readable()."""
with self.open_readable(name) as fp:
res = fp.read() # StringIO or file object
# try:
# res = fp.getvalue() # StringIO returned by FtpTarget
# except AttributeError:
# res = fp.read() # file object returned by FsTarget
res = res.decode("utf-8")
return res | Read text string from cur_dir/name using open_readable(). | Below is the the instruction that describes the task:
### Input:
Read text string from cur_dir/name using open_readable().
### Response:
def read_text(self, name):
"""Read text string from cur_dir/name using open_readable()."""
with self.open_readable(name) as fp:
res = fp.read() # StringIO or file object
# try:
# res = fp.getvalue() # StringIO returned by FtpTarget
# except AttributeError:
# res = fp.read() # file object returned by FsTarget
res = res.decode("utf-8")
return res |
def show_help(command_name: str = None, raw_args: str = '') -> Response:
""" Prints the basic command help to the console """
response = Response()
cmds = fetch()
if command_name and command_name in cmds:
parser, result = parse.get_parser(
cmds[command_name],
parse.explode_line(raw_args),
dict()
)
if parser is not None:
out = parser.format_help()
return response.notify(
kind='INFO',
code='COMMAND_DESCRIPTION'
).kernel(
commands=out
).console(
out,
whitespace=1
).response
environ.log_header('Available Commands')
response.consume(print_module_help())
return response.fail(
code='NO_SUCH_COMMAND',
message='Failed to show command help for "{}"'.format(command_name)
).console(
"""
For more information on the various commands, enter help on the
specific command:
help [COMMAND]
""",
whitespace_bottom=1
).response | Prints the basic command help to the console | Below is the the instruction that describes the task:
### Input:
Prints the basic command help to the console
### Response:
def show_help(command_name: str = None, raw_args: str = '') -> Response:
""" Prints the basic command help to the console """
response = Response()
cmds = fetch()
if command_name and command_name in cmds:
parser, result = parse.get_parser(
cmds[command_name],
parse.explode_line(raw_args),
dict()
)
if parser is not None:
out = parser.format_help()
return response.notify(
kind='INFO',
code='COMMAND_DESCRIPTION'
).kernel(
commands=out
).console(
out,
whitespace=1
).response
environ.log_header('Available Commands')
response.consume(print_module_help())
return response.fail(
code='NO_SUCH_COMMAND',
message='Failed to show command help for "{}"'.format(command_name)
).console(
"""
For more information on the various commands, enter help on the
specific command:
help [COMMAND]
""",
whitespace_bottom=1
).response |
def create_setter(func, attrs):
"""Create the __set__ method for the descriptor."""
def _set(self, instance, value, name=None):
args = [getattr(self, attr) for attr in attrs]
if not func(value, *args):
raise ValueError(self.err_msg(instance, value))
return _set | Create the __set__ method for the descriptor. | Below is the the instruction that describes the task:
### Input:
Create the __set__ method for the descriptor.
### Response:
def create_setter(func, attrs):
"""Create the __set__ method for the descriptor."""
def _set(self, instance, value, name=None):
args = [getattr(self, attr) for attr in attrs]
if not func(value, *args):
raise ValueError(self.err_msg(instance, value))
return _set |
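A sketch of how the factory might be wired into a validating class; only create_setter comes from the record, while BoundedBelow, its err_msg method, and the validator lambda are assumptions for illustration.

def create_setter(func, attrs):
    def _set(self, instance, value, name=None):
        args = [getattr(self, attr) for attr in attrs]
        if not func(value, *args):
            raise ValueError(self.err_msg(instance, value))
    return _set

class BoundedBelow(object):
    def __init__(self, minimum):
        self.minimum = minimum

    def err_msg(self, instance, value):
        return '{0!r} is below the minimum {1!r}'.format(value, self.minimum)

    # Validation hook built by the factory: checks value >= self.minimum.
    validate = create_setter(lambda value, minimum: value >= minimum, ['minimum'])

bound = BoundedBelow(0)
bound.validate(None, 5)        # passes silently
try:
    bound.validate(None, -1)   # raises ValueError
except ValueError as exc:
    print(exc)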
def experimentVaryingSynapseSampling(expParams,
sampleSizeDistalList,
sampleSizeProximalList):
"""
Test multi-column convergence with varying amount of proximal/distal sampling
:return:
"""
numRpts = 20
df = None
args = []
for sampleSizeProximal in sampleSizeProximalList:
for sampleSizeDistal in sampleSizeDistalList:
for rpt in range(numRpts):
l4Params = getL4Params()
l2Params = getL2Params()
l2Params["sampleSizeProximal"] = sampleSizeProximal
l2Params["minThresholdProximal"] = sampleSizeProximal
l2Params["sampleSizeDistal"] = sampleSizeDistal
l2Params["activationThresholdDistal"] = sampleSizeDistal
args.append(
{
"numObjects": expParams['numObjects'],
"numLocations": expParams['numLocations'],
"numFeatures": expParams['numFeatures'],
"numColumns": expParams['numColumns'],
"trialNum": rpt,
"l4Params": l4Params,
"l2Params": l2Params,
"profile": True,
"objectSeed": rpt,
}
)
pool = Pool(processes=expParams['numWorkers'])
result = pool.map(runExperiment, args)
#
# if df is None:
# df = pd.DataFrame.from_dict(result, orient='index')
# else:
# df = pd.concat([df, pd.DataFrame.from_dict(result, orient='index')], axis=1)
#
# df = df.transpose()
return result | Test multi-column convergence with varying amount of proximal/distal sampling
:return: | Below is the the instruction that describes the task:
### Input:
Test multi-column convergence with varying amount of proximal/distal sampling
:return:
### Response:
def experimentVaryingSynapseSampling(expParams,
sampleSizeDistalList,
sampleSizeProximalList):
"""
Test multi-column convergence with varying amount of proximal/distal sampling
:return:
"""
numRpts = 20
df = None
args = []
for sampleSizeProximal in sampleSizeProximalList:
for sampleSizeDistal in sampleSizeDistalList:
for rpt in range(numRpts):
l4Params = getL4Params()
l2Params = getL2Params()
l2Params["sampleSizeProximal"] = sampleSizeProximal
l2Params["minThresholdProximal"] = sampleSizeProximal
l2Params["sampleSizeDistal"] = sampleSizeDistal
l2Params["activationThresholdDistal"] = sampleSizeDistal
args.append(
{
"numObjects": expParams['numObjects'],
"numLocations": expParams['numLocations'],
"numFeatures": expParams['numFeatures'],
"numColumns": expParams['numColumns'],
"trialNum": rpt,
"l4Params": l4Params,
"l2Params": l2Params,
"profile": True,
"objectSeed": rpt,
}
)
pool = Pool(processes=expParams['numWorkers'])
result = pool.map(runExperiment, args)
#
# if df is None:
# df = pd.DataFrame.from_dict(result, orient='index')
# else:
# df = pd.concat([df, pd.DataFrame.from_dict(result, orient='index')], axis=1)
#
# df = df.transpose()
return result |
def standardize_cell(cell,
to_primitive=False,
no_idealize=False,
symprec=1e-5,
angle_tolerance=-1.0):
"""Return standardized cell.
Args:
cell, symprec, angle_tolerance:
See the docstring of get_symmetry.
to_primitive:
bool: If True, the standardized primitive cell is created.
no_idealize:
bool: If True, it is disabled to idealize lengths and angles of
basis vectors and positions of atoms according to crystal
symmetry.
Return:
The standardized unit cell or primitive cell is returned by a tuple of
(lattice, positions, numbers).
If it fails, None is returned.
"""
_set_no_error()
lattice, _positions, _numbers, _ = _expand_cell(cell)
if lattice is None:
return None
# Atomic positions have to be specified by scaled positions for spglib.
num_atom = len(_positions)
positions = np.zeros((num_atom * 4, 3), dtype='double', order='C')
positions[:num_atom] = _positions
numbers = np.zeros(num_atom * 4, dtype='intc')
numbers[:num_atom] = _numbers
num_atom_std = spg.standardize_cell(lattice,
positions,
numbers,
num_atom,
to_primitive * 1,
no_idealize * 1,
symprec,
angle_tolerance)
_set_error_message()
if num_atom_std > 0:
return (np.array(lattice.T, dtype='double', order='C'),
np.array(positions[:num_atom_std], dtype='double', order='C'),
np.array(numbers[:num_atom_std], dtype='intc'))
else:
return None | Return standardized cell.
Args:
cell, symprec, angle_tolerance:
See the docstring of get_symmetry.
to_primitive:
bool: If True, the standardized primitive cell is created.
no_idealize:
bool: If True, it is disabled to idealize lengths and angles of
basis vectors and positions of atoms according to crystal
symmetry.
Return:
The standardized unit cell or primitive cell is returned by a tuple of
(lattice, positions, numbers).
If it fails, None is returned. | Below is the the instruction that describes the task:
### Input:
Return standardized cell.
Args:
cell, symprec, angle_tolerance:
See the docstring of get_symmetry.
to_primitive:
bool: If True, the standardized primitive cell is created.
no_idealize:
bool: If True, it is disabled to idealize lengths and angles of
basis vectors and positions of atoms according to crystal
symmetry.
Return:
The standardized unit cell or primitive cell is returned by a tuple of
(lattice, positions, numbers).
If it fails, None is returned.
### Response:
def standardize_cell(cell,
to_primitive=False,
no_idealize=False,
symprec=1e-5,
angle_tolerance=-1.0):
"""Return standardized cell.
Args:
cell, symprec, angle_tolerance:
See the docstring of get_symmetry.
to_primitive:
bool: If True, the standardized primitive cell is created.
no_idealize:
bool: If True, it is disabled to idealize lengths and angles of
basis vectors and positions of atoms according to crystal
symmetry.
Return:
The standardized unit cell or primitive cell is returned by a tuple of
(lattice, positions, numbers).
If it fails, None is returned.
"""
_set_no_error()
lattice, _positions, _numbers, _ = _expand_cell(cell)
if lattice is None:
return None
# Atomic positions have to be specified by scaled positions for spglib.
num_atom = len(_positions)
positions = np.zeros((num_atom * 4, 3), dtype='double', order='C')
positions[:num_atom] = _positions
numbers = np.zeros(num_atom * 4, dtype='intc')
numbers[:num_atom] = _numbers
num_atom_std = spg.standardize_cell(lattice,
positions,
numbers,
num_atom,
to_primitive * 1,
no_idealize * 1,
symprec,
angle_tolerance)
_set_error_message()
if num_atom_std > 0:
return (np.array(lattice.T, dtype='double', order='C'),
np.array(positions[:num_atom_std], dtype='double', order='C'),
np.array(numbers[:num_atom_std], dtype='intc'))
else:
return None |
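A hedged usage sketch, assuming spglib is installed and that the function above is spglib's own standardize_cell wrapper; the body-centered cubic cell below uses the (lattice, scaled positions, atomic numbers) tuple layout the wrapper expects.

import spglib

lattice = [[4.0, 0.0, 0.0],
           [0.0, 4.0, 0.0],
           [0.0, 0.0, 4.0]]
positions = [[0.0, 0.0, 0.0],
             [0.5, 0.5, 0.5]]
numbers = [1, 1]
cell = (lattice, positions, numbers)

# Conventional standardized cell vs. primitive cell of the same structure.
std_lattice, std_positions, std_numbers = spglib.standardize_cell(cell)
prim_lattice, prim_positions, prim_numbers = spglib.standardize_cell(
    cell, to_primitive=True)
print(len(std_numbers), len(prim_numbers))  # expected: 2 1 for this bcc cell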
def generate_defect_structure(self, supercell=(1, 1, 1)):
"""
Returns Defective Vacancy structure, decorated with charge
Args:
supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
"""
defect_structure = self.bulk_structure.copy()
defect_structure.make_supercell(supercell)
#create a trivial defect structure to find where supercell transformation moves the lattice
struct_for_defect_site = Structure( self.bulk_structure.copy().lattice,
[self.site.specie],
[self.site.frac_coords],
to_unit_cell=True)
struct_for_defect_site.make_supercell(supercell)
defect_site = struct_for_defect_site[0]
poss_deflist = sorted(
defect_structure.get_sites_in_sphere(defect_site.coords, 2, include_index=True), key=lambda x: x[1])
defindex = poss_deflist[0][2]
defect_structure.remove_sites([defindex])
defect_structure.set_charge(self.charge)
return defect_structure | Returns Defective Vacancy structure, decorated with charge
Args:
supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix | Below is the the instruction that describes the task:
### Input:
Returns Defective Vacancy structure, decorated with charge
Args:
supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
### Response:
def generate_defect_structure(self, supercell=(1, 1, 1)):
"""
Returns Defective Vacancy structure, decorated with charge
Args:
supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
"""
defect_structure = self.bulk_structure.copy()
defect_structure.make_supercell(supercell)
#create a trivial defect structure to find where supercell transformation moves the lattice
struct_for_defect_site = Structure( self.bulk_structure.copy().lattice,
[self.site.specie],
[self.site.frac_coords],
to_unit_cell=True)
struct_for_defect_site.make_supercell(supercell)
defect_site = struct_for_defect_site[0]
poss_deflist = sorted(
defect_structure.get_sites_in_sphere(defect_site.coords, 2, include_index=True), key=lambda x: x[1])
defindex = poss_deflist[0][2]
defect_structure.remove_sites([defindex])
defect_structure.set_charge(self.charge)
return defect_structure |
def get_property(elt, key, ctx=None):
"""Get elt key property.
:param elt: property elt. Not None methods.
:param key: property key to get from elt.
:param ctx: elt ctx from where get properties. Equals elt if None. It
allows to get function properties related to a class or instance if
related function is defined in base class.
:return: list of property values by elt.
:rtype: list
"""
result = []
properties = get_properties(elt=elt, ctx=ctx, keys=key)
if key in properties:
result = properties[key]
return result | Get elt key property.
:param elt: property elt. Not None methods.
:param key: property key to get from elt.
:param ctx: elt ctx from where get properties. Equals elt if None. It
allows to get function properties related to a class or instance if
related function is defined in base class.
:return: list of property values by elt.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Get elt key property.
:param elt: property elt. Not None methods.
:param key: property key to get from elt.
:param ctx: elt ctx from where get properties. Equals elt if None. It
allows to get function properties related to a class or instance if
related function is defined in base class.
:return: list of property values by elt.
:rtype: list
### Response:
def get_property(elt, key, ctx=None):
"""Get elt key property.
:param elt: property elt. Not None methods.
:param key: property key to get from elt.
:param ctx: elt ctx from where get properties. Equals elt if None. It
allows to get function properties related to a class or instance if
related function is defined in base class.
:return: list of property values by elt.
:rtype: list
"""
result = []
properties = get_properties(elt=elt, ctx=ctx, keys=key)
if key in properties:
result = properties[key]
return result |
def max_projection(self, axis=2):
"""
Compute maximum projections of images along a dimension.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along.
"""
if axis >= size(self.value_shape):
raise Exception('Axis for projection (%s) exceeds '
'image dimensions (%s-%s)' % (axis, 0, size(self.value_shape)-1))
new_value_shape = list(self.value_shape)
del new_value_shape[axis]
return self.map(lambda x: amax(x, axis), value_shape=new_value_shape) | Compute maximum projections of images along a dimension.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along. | Below is the the instruction that describes the task:
### Input:
Compute maximum projections of images along a dimension.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along.
### Response:
def max_projection(self, axis=2):
"""
Compute maximum projections of images along a dimension.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along.
"""
if axis >= size(self.value_shape):
raise Exception('Axis for projection (%s) exceeds '
'image dimensions (%s-%s)' % (axis, 0, size(self.value_shape)-1))
new_value_shape = list(self.value_shape)
del new_value_shape[axis]
return self.map(lambda x: amax(x, axis), value_shape=new_value_shape) |
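The method delegates to numpy's amax along the chosen axis; a minimal sketch of the underlying operation, independent of the images container, looks like this.

import numpy as np

volume = np.random.rand(4, 5, 6)       # e.g. an x-y-z image stack
projection = np.amax(volume, axis=2)   # collapse the z axis
print(volume.shape, '->', projection.shape)  # (4, 5, 6) -> (4, 5)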
def discombobulate(self, filehash):
""" prepare napiprojekt scrambled hash """
idx = [0xe, 0x3, 0x6, 0x8, 0x2]
mul = [2, 2, 5, 4, 3]
add = [0, 0xd, 0x10, 0xb, 0x5]
b = []
for i in xrange(len(idx)):
a = add[i]
m = mul[i]
i = idx[i]
t = a + int(filehash[i], 16)
v = int(filehash[t:t + 2], 16)
b.append(("%x" % (v * m))[-1])
return ''.join(b) | prepare napiprojekt scrambled hash | Below is the the instruction that describes the task:
### Input:
prepare napiprojekt scrambled hash
### Response:
def discombobulate(self, filehash):
""" prepare napiprojekt scrambled hash """
idx = [0xe, 0x3, 0x6, 0x8, 0x2]
mul = [2, 2, 5, 4, 3]
add = [0, 0xd, 0x10, 0xb, 0x5]
b = []
for i in xrange(len(idx)):
a = add[i]
m = mul[i]
i = idx[i]
t = a + int(filehash[i], 16)
v = int(filehash[t:t + 2], 16)
b.append(("%x" % (v * m))[-1])
return ''.join(b) |
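The scrambling is self-contained, so it can be sketched as a standalone Python 3 function (range instead of xrange); the sample hash below is an arbitrary 32-character hex string, not a real napiprojekt value.

def scramble_hash(filehash):
    idx = [0xe, 0x3, 0x6, 0x8, 0x2]
    mul = [2, 2, 5, 4, 3]
    add = [0, 0xd, 0x10, 0xb, 0x5]
    out = []
    for i in range(len(idx)):
        a, m, pos = add[i], mul[i], idx[i]
        t = a + int(filehash[pos], 16)      # offset derived from one hex digit
        v = int(filehash[t:t + 2], 16)      # two hex digits at that offset
        out.append(('%x' % (v * m))[-1])    # keep the last hex digit of v * m
    return ''.join(out)

# Arbitrary 32-character hex string standing in for an MD5 file hash.
print(scramble_hash('9e107d9d372bb6826bd81d3542a419d6'))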
def _labeledInput(activeInputs, cellsPerCol=32):
"""Print the list of [column, cellIdx] indices for each of the active
cells in activeInputs.
"""
if cellsPerCol == 0:
cellsPerCol = 1
cols = activeInputs.size / cellsPerCol
activeInputs = activeInputs.reshape(cols, cellsPerCol)
(cols, cellIdxs) = activeInputs.nonzero()
if len(cols) == 0:
return "NONE"
items = ["(%d): " % (len(cols))]
prevCol = -1
for (col,cellIdx) in zip(cols, cellIdxs):
if col != prevCol:
if prevCol != -1:
items.append("] ")
items.append("Col %d: [" % col)
prevCol = col
items.append("%d," % cellIdx)
items.append("]")
return " ".join(items) | Print the list of [column, cellIdx] indices for each of the active
cells in activeInputs. | Below is the the instruction that describes the task:
### Input:
Print the list of [column, cellIdx] indices for each of the active
cells in activeInputs.
### Response:
def _labeledInput(activeInputs, cellsPerCol=32):
"""Print the list of [column, cellIdx] indices for each of the active
cells in activeInputs.
"""
if cellsPerCol == 0:
cellsPerCol = 1
cols = activeInputs.size / cellsPerCol
activeInputs = activeInputs.reshape(cols, cellsPerCol)
(cols, cellIdxs) = activeInputs.nonzero()
if len(cols) == 0:
return "NONE"
items = ["(%d): " % (len(cols))]
prevCol = -1
for (col,cellIdx) in zip(cols, cellIdxs):
if col != prevCol:
if prevCol != -1:
items.append("] ")
items.append("Col %d: [" % col)
prevCol = col
items.append("%d," % cellIdx)
items.append("]")
return " ".join(items) |
def get_whois(self, asn_registry='arin', retry_count=3, server=None,
port=43, extra_blacklist=None):
"""
The function for retrieving whois or rwhois information for an IP
address via any port. Defaults to port 43/tcp (WHOIS).
Args:
asn_registry (:obj:`str`): The NIC to run the query against.
Defaults to 'arin'.
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
server (:obj:`str`): An optional server to connect to. If
provided, asn_registry will be ignored.
port (:obj:`int`): The network port to connect on. Defaults to 43.
extra_blacklist (:obj:`list` of :obj:`str`): Blacklisted whois
servers in addition to the global BLACKLIST. Defaults to None.
Returns:
str: The raw whois data.
Raises:
BlacklistError: Raised if the whois server provided is in the
global BLACKLIST or extra_blacklist.
WhoisLookupError: The whois lookup failed.
WhoisRateLimitError: The Whois request rate limited and retries
were exhausted.
"""
try:
extra_bl = extra_blacklist if extra_blacklist else []
if any(server in srv for srv in (BLACKLIST, extra_bl)):
raise BlacklistError(
'The server {0} is blacklisted.'.format(server)
)
if server is None:
server = RIR_WHOIS[asn_registry]['server']
# Create the connection for the whois query.
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.settimeout(self.timeout)
log.debug('WHOIS query for {0} at {1}:{2}'.format(
self.address_str, server, port))
conn.connect((server, port))
# Prep the query.
query = self.address_str + '\r\n'
if asn_registry == 'arin':
query = 'n + {0}'.format(query)
# Query the whois server, and store the results.
conn.send(query.encode())
response = ''
while True:
d = conn.recv(4096).decode('ascii', 'ignore')
response += d
if not d:
break
conn.close()
if 'Query rate limit exceeded' in response: # pragma: no cover
if retry_count > 0:
log.debug('WHOIS query rate limit exceeded. Waiting...')
sleep(1)
return self.get_whois(
asn_registry=asn_registry, retry_count=retry_count-1,
server=server, port=port,
extra_blacklist=extra_blacklist
)
else:
raise WhoisRateLimitError(
'Whois lookup failed for {0}. Rate limit '
'exceeded, wait and try again (possibly a '
'temporary block).'.format(self.address_str))
elif ('error 501' in response or 'error 230' in response
): # pragma: no cover
log.debug('WHOIS query error: {0}'.format(response))
raise ValueError
return str(response)
except (socket.timeout, socket.error) as e:
log.debug('WHOIS query socket error: {0}'.format(e))
if retry_count > 0:
log.debug('WHOIS query retrying (count: {0})'.format(
str(retry_count)))
return self.get_whois(
asn_registry=asn_registry, retry_count=retry_count-1,
server=server, port=port, extra_blacklist=extra_blacklist
)
else:
raise WhoisLookupError(
'WHOIS lookup failed for {0}.'.format(self.address_str)
)
except WhoisRateLimitError: # pragma: no cover
raise
except BlacklistError:
raise
except: # pragma: no cover
raise WhoisLookupError(
'WHOIS lookup failed for {0}.'.format(self.address_str)
) | The function for retrieving whois or rwhois information for an IP
address via any port. Defaults to port 43/tcp (WHOIS).
Args:
asn_registry (:obj:`str`): The NIC to run the query against.
Defaults to 'arin'.
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
server (:obj:`str`): An optional server to connect to. If
provided, asn_registry will be ignored.
port (:obj:`int`): The network port to connect on. Defaults to 43.
extra_blacklist (:obj:`list` of :obj:`str`): Blacklisted whois
servers in addition to the global BLACKLIST. Defaults to None.
Returns:
str: The raw whois data.
Raises:
BlacklistError: Raised if the whois server provided is in the
global BLACKLIST or extra_blacklist.
WhoisLookupError: The whois lookup failed.
WhoisRateLimitError: The Whois request rate limited and retries
were exhausted. | Below is the the instruction that describes the task:
### Input:
The function for retrieving whois or rwhois information for an IP
address via any port. Defaults to port 43/tcp (WHOIS).
Args:
asn_registry (:obj:`str`): The NIC to run the query against.
Defaults to 'arin'.
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
server (:obj:`str`): An optional server to connect to. If
provided, asn_registry will be ignored.
port (:obj:`int`): The network port to connect on. Defaults to 43.
extra_blacklist (:obj:`list` of :obj:`str`): Blacklisted whois
servers in addition to the global BLACKLIST. Defaults to None.
Returns:
str: The raw whois data.
Raises:
BlacklistError: Raised if the whois server provided is in the
global BLACKLIST or extra_blacklist.
WhoisLookupError: The whois lookup failed.
WhoisRateLimitError: The Whois request rate limited and retries
were exhausted.
### Response:
def get_whois(self, asn_registry='arin', retry_count=3, server=None,
port=43, extra_blacklist=None):
"""
The function for retrieving whois or rwhois information for an IP
address via any port. Defaults to port 43/tcp (WHOIS).
Args:
asn_registry (:obj:`str`): The NIC to run the query against.
Defaults to 'arin'.
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
server (:obj:`str`): An optional server to connect to. If
provided, asn_registry will be ignored.
port (:obj:`int`): The network port to connect on. Defaults to 43.
extra_blacklist (:obj:`list` of :obj:`str`): Blacklisted whois
servers in addition to the global BLACKLIST. Defaults to None.
Returns:
str: The raw whois data.
Raises:
BlacklistError: Raised if the whois server provided is in the
global BLACKLIST or extra_blacklist.
WhoisLookupError: The whois lookup failed.
WhoisRateLimitError: The Whois request rate limited and retries
were exhausted.
"""
try:
extra_bl = extra_blacklist if extra_blacklist else []
if any(server in srv for srv in (BLACKLIST, extra_bl)):
raise BlacklistError(
'The server {0} is blacklisted.'.format(server)
)
if server is None:
server = RIR_WHOIS[asn_registry]['server']
# Create the connection for the whois query.
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.settimeout(self.timeout)
log.debug('WHOIS query for {0} at {1}:{2}'.format(
self.address_str, server, port))
conn.connect((server, port))
# Prep the query.
query = self.address_str + '\r\n'
if asn_registry == 'arin':
query = 'n + {0}'.format(query)
# Query the whois server, and store the results.
conn.send(query.encode())
response = ''
while True:
d = conn.recv(4096).decode('ascii', 'ignore')
response += d
if not d:
break
conn.close()
if 'Query rate limit exceeded' in response: # pragma: no cover
if retry_count > 0:
log.debug('WHOIS query rate limit exceeded. Waiting...')
sleep(1)
return self.get_whois(
asn_registry=asn_registry, retry_count=retry_count-1,
server=server, port=port,
extra_blacklist=extra_blacklist
)
else:
raise WhoisRateLimitError(
'Whois lookup failed for {0}. Rate limit '
'exceeded, wait and try again (possibly a '
'temporary block).'.format(self.address_str))
elif ('error 501' in response or 'error 230' in response
): # pragma: no cover
log.debug('WHOIS query error: {0}'.format(response))
raise ValueError
return str(response)
except (socket.timeout, socket.error) as e:
log.debug('WHOIS query socket error: {0}'.format(e))
if retry_count > 0:
log.debug('WHOIS query retrying (count: {0})'.format(
str(retry_count)))
return self.get_whois(
asn_registry=asn_registry, retry_count=retry_count-1,
server=server, port=port, extra_blacklist=extra_blacklist
)
else:
raise WhoisLookupError(
'WHOIS lookup failed for {0}.'.format(self.address_str)
)
except WhoisRateLimitError: # pragma: no cover
raise
except BlacklistError:
raise
except: # pragma: no cover
raise WhoisLookupError(
'WHOIS lookup failed for {0}.'.format(self.address_str)
) |
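The method reads like part of ipwhois's Net class; a hedged usage sketch follows, where the import path and constructor are assumptions based on that reading, and the lookup needs outbound network access on port 43.

from ipwhois.net import Net

net = Net('74.125.225.229')                 # sample public IPv4 address
raw = net.get_whois(asn_registry='arin', retry_count=3)
print(raw[:200])                            # first part of the raw WHOIS text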
def user_type_registered(self, keyspace, user_type, klass):
"""
Called by the parent Cluster instance when the user registers a new
mapping from a user-defined type to a class. Intended for internal
use only.
"""
try:
ks_meta = self.cluster.metadata.keyspaces[keyspace]
except KeyError:
raise UserTypeDoesNotExist(
'Keyspace %s does not exist or has not been discovered by the driver' % (keyspace,))
try:
type_meta = ks_meta.user_types[user_type]
except KeyError:
raise UserTypeDoesNotExist(
'User type %s does not exist in keyspace %s' % (user_type, keyspace))
field_names = type_meta.field_names
if six.PY2:
# go from unicode to string to avoid decode errors from implicit
# decode when formatting non-ascii values
field_names = [fn.encode('utf-8') for fn in field_names]
def encode(val):
return '{ %s }' % ' , '.join('%s : %s' % (
field_name,
self.encoder.cql_encode_all_types(getattr(val, field_name, None))
) for field_name in field_names)
self.encoder.mapping[klass] = encode | Called by the parent Cluster instance when the user registers a new
mapping from a user-defined type to a class. Intended for internal
use only. | Below is the the instruction that describes the task:
### Input:
Called by the parent Cluster instance when the user registers a new
mapping from a user-defined type to a class. Intended for internal
use only.
### Response:
def user_type_registered(self, keyspace, user_type, klass):
"""
Called by the parent Cluster instance when the user registers a new
mapping from a user-defined type to a class. Intended for internal
use only.
"""
try:
ks_meta = self.cluster.metadata.keyspaces[keyspace]
except KeyError:
raise UserTypeDoesNotExist(
'Keyspace %s does not exist or has not been discovered by the driver' % (keyspace,))
try:
type_meta = ks_meta.user_types[user_type]
except KeyError:
raise UserTypeDoesNotExist(
'User type %s does not exist in keyspace %s' % (user_type, keyspace))
field_names = type_meta.field_names
if six.PY2:
# go from unicode to string to avoid decode errors from implicit
# decode when formatting non-ascii values
field_names = [fn.encode('utf-8') for fn in field_names]
def encode(val):
return '{ %s }' % ' , '.join('%s : %s' % (
field_name,
self.encoder.cql_encode_all_types(getattr(val, field_name, None))
) for field_name in field_names)
self.encoder.mapping[klass] = encode |
def _inserts(self):
"""thwe"""
return {concat(a, c, b)
for a, b in self.slices
for c in ALPHABET} | thwe | Below is the the instruction that describes the task:
### Input:
thwe
### Response:
def _inserts(self):
"""thwe"""
return {concat(a, c, b)
for a, b in self.slices
for c in ALPHABET} |
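The comprehension generates every single-character insertion edit of a word; a standalone sketch with plain-string stand-ins for slices, concat, and ALPHABET (all assumptions about what the surrounding class provides).

ALPHABET = 'abcdefghijklmnopqrstuvwxyz'

def inserts(word):
    # Every (prefix, suffix) split point, including the empty prefix/suffix.
    slices = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    return {a + c + b for a, b in slices for c in ALPHABET}

edits = inserts('cat')
print(len(edits))            # 4 split points x 26 letters, minus duplicates
print(sorted(edits)[:5])     # ['acat', 'bcat', 'caat', 'cabt', 'cact']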