code (stringlengths 75 to 104k) | docstring (stringlengths 1 to 46.9k) | text (stringlengths 164 to 112k) |
---|---|---|
def _learn(connections, rng, learningSegments, activeInput,
potentialOverlaps, initialPermanence, sampleSize,
permanenceIncrement, permanenceDecrement, maxSynapsesPerSegment):
"""
Adjust synapse permanences, grow new synapses, and grow new segments.
@param learningActiveSegments (numpy array)
@param learningMatchingSegments (numpy array)
@param segmentsToPunish (numpy array)
@param activeInput (numpy array)
@param potentialOverlaps (numpy array)
"""
# Learn on existing segments
connections.adjustSynapses(learningSegments, activeInput,
permanenceIncrement, -permanenceDecrement)
# Grow new synapses. Calculate "maxNew", the maximum number of synapses to
# grow per segment. "maxNew" might be a number or it might be a list of
# numbers.
if sampleSize == -1:
maxNew = len(activeInput)
else:
maxNew = sampleSize - potentialOverlaps[learningSegments]
if maxSynapsesPerSegment != -1:
synapseCounts = connections.mapSegmentsToSynapseCounts(
learningSegments)
numSynapsesToReachMax = maxSynapsesPerSegment - synapseCounts
maxNew = np.where(maxNew <= numSynapsesToReachMax,
maxNew, numSynapsesToReachMax)
connections.growSynapsesToSample(learningSegments, activeInput,
maxNew, initialPermanence, rng) | Adjust synapse permanences, grow new synapses, and grow new segments.
@param learningActiveSegments (numpy array)
@param learningMatchingSegments (numpy array)
@param segmentsToPunish (numpy array)
@param activeInput (numpy array)
@param potentialOverlaps (numpy array) | Below is the instruction that describes the task:
### Input:
Adjust synapse permanences, grow new synapses, and grow new segments.
@param learningActiveSegments (numpy array)
@param learningMatchingSegments (numpy array)
@param segmentsToPunish (numpy array)
@param activeInput (numpy array)
@param potentialOverlaps (numpy array)
### Response:
def _learn(connections, rng, learningSegments, activeInput,
potentialOverlaps, initialPermanence, sampleSize,
permanenceIncrement, permanenceDecrement, maxSynapsesPerSegment):
"""
Adjust synapse permanences, grow new synapses, and grow new segments.
@param learningActiveSegments (numpy array)
@param learningMatchingSegments (numpy array)
@param segmentsToPunish (numpy array)
@param activeInput (numpy array)
@param potentialOverlaps (numpy array)
"""
# Learn on existing segments
connections.adjustSynapses(learningSegments, activeInput,
permanenceIncrement, -permanenceDecrement)
# Grow new synapses. Calculate "maxNew", the maximum number of synapses to
# grow per segment. "maxNew" might be a number or it might be a list of
# numbers.
if sampleSize == -1:
maxNew = len(activeInput)
else:
maxNew = sampleSize - potentialOverlaps[learningSegments]
if maxSynapsesPerSegment != -1:
synapseCounts = connections.mapSegmentsToSynapseCounts(
learningSegments)
numSynapsesToReachMax = maxSynapsesPerSegment - synapseCounts
maxNew = np.where(maxNew <= numSynapsesToReachMax,
maxNew, numSynapsesToReachMax)
connections.growSynapsesToSample(learningSegments, activeInput,
maxNew, initialPermanence, rng) |
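The capping comment in the row above amounts to an element-wise minimum. A minimal numpy-only sketch of that step, using hypothetical per-segment values rather than a real `connections` object:

```python
import numpy as np

# Hypothetical values: requested growth per segment, and how many synapses
# each segment can still accept before hitting maxSynapsesPerSegment.
max_new = np.array([12, 3, 40])
num_synapses_to_reach_max = np.array([10, 10, 10])

# Never grow more synapses than a segment has room for.
capped = np.where(max_new <= num_synapses_to_reach_max,
                  max_new, num_synapses_to_reach_max)
print(capped)  # [10  3 10]
```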
def fractional_base(fractional_part, input_base=10, output_base=10,
max_depth=100):
"""
Convert the fractional part of a number from any base to any base.
Args:
fractional_part(iterable container): The fractional part of a number in
the following form: ( ".", int, int, int, ...)
input_base(int): The base to convert from (default 10).
output_base(int): The base to convert to (default 10).
max_depth(int): The maximum number of decimal digits to output.
Returns:
The converted number as a tuple of digits.
Example:
>>> fractional_base((".", 6,),10,16,10)
('.', 9, 9, 9, 9, 9, 9, 9, 9, 9, 9)
"""
fractional_part = fractional_part[1:]
fractional_digits = len(fractional_part)
numerator = 0
for i, value in enumerate(fractional_part, 1):
numerator += value * input_base ** (fractional_digits - i)
denominator = input_base ** fractional_digits
i = 1
digits = []
while(i < max_depth + 1):
numerator *= output_base ** i
digit = numerator // denominator
numerator -= digit * denominator
denominator *= output_base ** i
digits.append(digit)
i += 1
greatest_common_divisor = gcd(numerator, denominator)
numerator //= greatest_common_divisor
denominator //= greatest_common_divisor
return (".",) + tuple(digits) | Convert the fractional part of a number from any base to any base.
Args:
fractional_part(iterable container): The fractional part of a number in
the following form: ( ".", int, int, int, ...)
input_base(int): The base to convert from (default 10).
output_base(int): The base to convert to (default 10).
max_depth(int): The maximum number of decimal digits to output.
Returns:
The converted number as a tuple of digits.
Example:
>>> fractional_base((".", 6,),10,16,10)
('.', 9, 9, 9, 9, 9, 9, 9, 9, 9, 9) | Below is the instruction that describes the task:
### Input:
Convert the fractional part of a number from any base to any base.
Args:
fractional_part(iterable container): The fractional part of a number in
the following form: ( ".", int, int, int, ...)
input_base(int): The base to convert from (default 10).
output_base(int): The base to convert to (default 10).
max_depth(int): The maximum number of decimal digits to output.
Returns:
The converted number as a tuple of digits.
Example:
>>> fractional_base((".", 6,),10,16,10)
('.', 9, 9, 9, 9, 9, 9, 9, 9, 9, 9)
### Response:
def fractional_base(fractional_part, input_base=10, output_base=10,
max_depth=100):
"""
Convert the fractional part of a number from any base to any base.
Args:
fractional_part(iterable container): The fractional part of a number in
the following form: ( ".", int, int, int, ...)
input_base(int): The base to convert from (default 10).
output_base(int): The base to convert to (default 10).
max_depth(int): The maximum number of decimal digits to output.
Returns:
The converted number as a tuple of digits.
Example:
>>> fractional_base((".", 6,),10,16,10)
('.', 9, 9, 9, 9, 9, 9, 9, 9, 9, 9)
"""
fractional_part = fractional_part[1:]
fractional_digits = len(fractional_part)
numerator = 0
for i, value in enumerate(fractional_part, 1):
numerator += value * input_base ** (fractional_digits - i)
denominator = input_base ** fractional_digits
i = 1
digits = []
while(i < max_depth + 1):
numerator *= output_base ** i
digit = numerator // denominator
numerator -= digit * denominator
denominator *= output_base ** i
digits.append(digit)
i += 1
greatest_common_divisor = gcd(numerator, denominator)
numerator //= greatest_common_divisor
denominator //= greatest_common_divisor
return (".",) + tuple(digits) |
def fix_list_arguments(self):
"""Find arguments that should accumulate values and fix them."""
either = [list(c.children) for c in self.either.children]
for case in either:
case = [c for c in case if case.count(c) > 1]
for a in [e for e in case if type(e) == Argument]:
a.value = []
return self | Find arguments that should accumulate values and fix them. | Below is the instruction that describes the task:
### Input:
Find arguments that should accumulate values and fix them.
### Response:
def fix_list_arguments(self):
"""Find arguments that should accumulate values and fix them."""
either = [list(c.children) for c in self.either.children]
for case in either:
case = [c for c in case if case.count(c) > 1]
for a in [e for e in case if type(e) == Argument]:
a.value = []
return self |
async def handle_json_response(responses):
"""
get the json data response
:param responses: the json response
:return the json data without 'root' node
"""
json_data = {}
if responses.status != 200:
err_msg = HttpProcessingError(code=responses.status,
message=await responses.json())
logging.error("Wallabag: aiohttp error {err_msg}".format(
err_msg=err_msg))
else:
try:
json_data = responses.json()
except ClientResponseError as e:
# sometimes json_data does not return any json() without
# any error. This is due to the grabbing URL which "rejects"
# the URL
logging.error("Wallabag: aiohttp error {code} {message}"
.format(code=e.code, message=e.message))
return await json_data | get the json data response
:param responses: the json response
:return the json data without 'root' node | Below is the instruction that describes the task:
### Input:
get the json data response
:param responses: the json response
:return the json data without 'root' node
### Response:
async def handle_json_response(responses):
"""
get the json data response
:param responses: the json response
:return the json data without 'root' node
"""
json_data = {}
if responses.status != 200:
err_msg = HttpProcessingError(code=responses.status,
message=await responses.json())
logging.error("Wallabag: aiohttp error {err_msg}".format(
err_msg=err_msg))
else:
try:
json_data = responses.json()
except ClientResponseError as e:
# sometimes json_data does not return any json() without
# any error. This is due to the grabbing URL which "rejects"
# the URL
logging.error("Wallabag: aiohttp error {code} {message}"
.format(code=e.code, message=e.message))
return await json_data |
def tabulate(data, # type: List[List[Any]]
header=None, # type: Optional[List[Any]]
col_align=None, # type: Union[str, List[str]]
):
# type: (...) -> List[str]
""" Format data as a table without any fancy features.
col_align: l/r/c or a list/string of l/r/c. l = left, r = right, c = center
Return a list of strings (lines of the table).
"""
if not data and not header:
return []
if data:
n_cols = len(data[0])
else:
assert header is not None
n_cols = len(header)
if not all(len(row) == n_cols for row in data):
raise ValueError('data is not rectangular')
if col_align is None:
col_align = ['l'] * n_cols
elif isinstance(col_align, six.string_types) and len(col_align) == 1:
col_align = [col_align] * n_cols
else:
col_align = list(col_align)
if len(col_align) != n_cols:
raise ValueError('col_align length does not match number of columns')
if header and len(header) != n_cols:
raise ValueError('header length does not match number of columns')
if header:
data = [header] + data
data = [[six.text_type(x) for x in row] for row in data]
col_width = [max(len(row[col_i]) for row in data) for col_i in range(n_cols)]
if header:
data.insert(1, ['-' * width for width in col_width])
line_tpl = u' '.join(
u'{:%s%s}' % ({'l': '', 'r': '>', 'c': '^'}[align], width)
for align, width in zip(col_align, col_width))
return [line_tpl.format(*row) for row in data] | Format data as a table without any fancy features.
col_align: l/r/c or a list/string of l/r/c. l = left, r = right, c = center
Return a list of strings (lines of the table). | Below is the instruction that describes the task:
### Input:
Format data as a table without any fancy features.
col_align: l/r/c or a list/string of l/r/c. l = left, r = right, c = center
Return a list of strings (lines of the table).
### Response:
def tabulate(data, # type: List[List[Any]]
header=None, # type: Optional[List[Any]]
col_align=None, # type: Union[str, List[str]]
):
# type: (...) -> List[str]
""" Format data as a table without any fancy features.
col_align: l/r/c or a list/string of l/r/c. l = left, r = right, c = center
Return a list of strings (lines of the table).
"""
if not data and not header:
return []
if data:
n_cols = len(data[0])
else:
assert header is not None
n_cols = len(header)
if not all(len(row) == n_cols for row in data):
raise ValueError('data is not rectangular')
if col_align is None:
col_align = ['l'] * n_cols
elif isinstance(col_align, six.string_types) and len(col_align) == 1:
col_align = [col_align] * n_cols
else:
col_align = list(col_align)
if len(col_align) != n_cols:
raise ValueError('col_align length does not match number of columns')
if header and len(header) != n_cols:
raise ValueError('header length does not match number of columns')
if header:
data = [header] + data
data = [[six.text_type(x) for x in row] for row in data]
col_width = [max(len(row[col_i]) for row in data) for col_i in range(n_cols)]
if header:
data.insert(1, ['-' * width for width in col_width])
line_tpl = u' '.join(
u'{:%s%s}' % ({'l': '', 'r': '>', 'c': '^'}[align], width)
for align, width in zip(col_align, col_width))
return [line_tpl.format(*row) for row in data] |
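Assuming the `tabulate` function above and the `six` package are available, a small usage example; `col_align="lrr"` left-aligns the first column and right-aligns the numeric ones:

```python
rows = [["apples", 3, 0.5], ["pears", 12, 1.25]]
for line in tabulate(rows, header=["fruit", "qty", "price"], col_align="lrr"):
    print(line)
# fruit  qty price
# ------ --- -----
# apples   3   0.5
# pears   12  1.25
```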
def OnLabelSizeIntCtrl(self, event):
"""Label size IntCtrl event handler"""
self.attrs["labelsize"] = event.GetValue()
post_command_event(self, self.DrawChartMsg) | Label size IntCtrl event handler | Below is the instruction that describes the task:
### Input:
Label size IntCtrl event handler
### Response:
def OnLabelSizeIntCtrl(self, event):
"""Label size IntCtrl event handler"""
self.attrs["labelsize"] = event.GetValue()
post_command_event(self, self.DrawChartMsg) |
def lsattr(path):
'''
.. versionadded:: 2018.3.0
.. versionchanged:: 2018.3.1
If ``lsattr`` is not installed on the system, ``None`` is returned.
.. versionchanged:: 2018.3.4
If on ``AIX``, ``None`` is returned even if in filesystem as lsattr on ``AIX``
is not the same thing as the linux version.
Obtain the modifiable attributes of the given file. If path
is to a directory, an empty list is returned.
path
path to file to obtain attributes of. File/directory must exist.
CLI Example:
.. code-block:: bash
salt '*' file.lsattr foo1.txt
'''
if not salt.utils.path.which('lsattr') or salt.utils.platform.is_aix():
return None
if not os.path.exists(path):
raise SaltInvocationError("File or directory does not exist: " + path)
cmd = ['lsattr', path]
result = __salt__['cmd.run'](cmd, ignore_retcode=True, python_shell=False)
results = {}
for line in result.splitlines():
if not line.startswith('lsattr: '):
vals = line.split(None, 1)
results[vals[1]] = re.findall(r"[aAcCdDeijPsStTu]", vals[0])
return results | .. versionadded:: 2018.3.0
.. versionchanged:: 2018.3.1
If ``lsattr`` is not installed on the system, ``None`` is returned.
.. versionchanged:: 2018.3.4
If on ``AIX``, ``None`` is returned even if in filesystem as lsattr on ``AIX``
is not the same thing as the linux version.
Obtain the modifiable attributes of the given file. If path
is to a directory, an empty list is returned.
path
path to file to obtain attributes of. File/directory must exist.
CLI Example:
.. code-block:: bash
salt '*' file.lsattr foo1.txt | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2018.3.0
.. versionchanged:: 2018.3.1
If ``lsattr`` is not installed on the system, ``None`` is returned.
.. versionchanged:: 2018.3.4
If on ``AIX``, ``None`` is returned even if in filesystem as lsattr on ``AIX``
is not the same thing as the linux version.
Obtain the modifiable attributes of the given file. If path
is to a directory, an empty list is returned.
path
path to file to obtain attributes of. File/directory must exist.
CLI Example:
.. code-block:: bash
salt '*' file.lsattr foo1.txt
### Response:
def lsattr(path):
'''
.. versionadded:: 2018.3.0
.. versionchanged:: 2018.3.1
If ``lsattr`` is not installed on the system, ``None`` is returned.
.. versionchanged:: 2018.3.4
If on ``AIX``, ``None`` is returned even if in filesystem as lsattr on ``AIX``
is not the same thing as the linux version.
Obtain the modifiable attributes of the given file. If path
is to a directory, an empty list is returned.
path
path to file to obtain attributes of. File/directory must exist.
CLI Example:
.. code-block:: bash
salt '*' file.lsattr foo1.txt
'''
if not salt.utils.path.which('lsattr') or salt.utils.platform.is_aix():
return None
if not os.path.exists(path):
raise SaltInvocationError("File or directory does not exist: " + path)
cmd = ['lsattr', path]
result = __salt__['cmd.run'](cmd, ignore_retcode=True, python_shell=False)
results = {}
for line in result.splitlines():
if not line.startswith('lsattr: '):
vals = line.split(None, 1)
results[vals[1]] = re.findall(r"[aAcCdDeijPsStTu]", vals[0])
return results |
def calculate_item_depth(self, tree_alias, item_id, depth=0):
"""Calculates depth of the item in the tree.
:param str|unicode tree_alias:
:param int item_id:
:param int depth:
:rtype: int
"""
item = self.get_item_by_id(tree_alias, item_id)
if hasattr(item, 'depth'):
depth = item.depth + depth
else:
if item.parent is not None:
depth = self.calculate_item_depth(tree_alias, item.parent.id, depth + 1)
return depth | Calculates depth of the item in the tree.
:param str|unicode tree_alias:
:param int item_id:
:param int depth:
:rtype: int | Below is the instruction that describes the task:
### Input:
Calculates depth of the item in the tree.
:param str|unicode tree_alias:
:param int item_id:
:param int depth:
:rtype: int
### Response:
def calculate_item_depth(self, tree_alias, item_id, depth=0):
"""Calculates depth of the item in the tree.
:param str|unicode tree_alias:
:param int item_id:
:param int depth:
:rtype: int
"""
item = self.get_item_by_id(tree_alias, item_id)
if hasattr(item, 'depth'):
depth = item.depth + depth
else:
if item.parent is not None:
depth = self.calculate_item_depth(tree_alias, item.parent.id, depth + 1)
return depth |
def call(self, url, method=None, args=None):
"""Calls the first function matching the urls pattern and method.
Args:
url (str): Url for which to call a matching function.
method (str, optional): The method used while registering a
function.
Defaults to None
args (dict, optional): Additional args to be passed to the
matching function.
Returns:
The functions return value or `None` if no function was called.
"""
if not args:
args = {}
if sys.version_info.major == 3:
data = urllib.parse.urlparse(url)
path = data.path.rstrip('/') + '/'
_args = dict(urllib.parse.parse_qs(data.query,
keep_blank_values=True))
elif sys.version_info.major == 2:
data = urlparse.urlparse(url)
path = data.path.rstrip('/') + '/'
_args = dict(urlparse.parse_qs(data.query,
keep_blank_values=True))
for elem in self._data_store:
pattern = elem['pattern']
function = elem['function']
_method = elem['method']
type_cast = elem['type_cast']
result = re.match(pattern, path)
# Found matching method
if result and _method == method:
_args = dict(_args, **result.groupdict())
# Unpack value lists (due to urllib.parse.parse_qs) in case
# there's only one value available
for key, val in _args.items():
if isinstance(_args[key], list) and len(_args[key]) == 1:
_args[key] = _args[key][0]
# Apply type-casting if necessary
for key, val in type_cast.items():
# Not within available _args, no type-cast required
if key not in _args:
continue
# Is None or empty, no type-cast required
if not _args[key]:
continue
# Try and cast the values
if isinstance(_args[key], list):
for i, _val in enumerate(_args[key]):
_args[key][i] = self._cast(_val, val)
else:
_args[key] = self._cast(_args[key], val)
requiered_args = self._get_function_args(function)
for key, val in args.items():
if key in requiered_args:
_args[key] = val
return function(**_args)
return None | Calls the first function matching the urls pattern and method.
Args:
url (str): Url for which to call a matching function.
method (str, optional): The method used while registering a
function.
Defaults to None
args (dict, optional): Additional args to be passed to the
matching function.
Returns:
The functions return value or `None` if no function was called. | Below is the instruction that describes the task:
### Input:
Calls the first function matching the urls pattern and method.
Args:
url (str): Url for which to call a matching function.
method (str, optional): The method used while registering a
function.
Defaults to None
args (dict, optional): Additional args to be passed to the
matching function.
Returns:
The functions return value or `None` if no function was called.
### Response:
def call(self, url, method=None, args=None):
"""Calls the first function matching the urls pattern and method.
Args:
url (str): Url for which to call a matching function.
method (str, optional): The method used while registering a
function.
Defaults to None
args (dict, optional): Additional args to be passed to the
matching function.
Returns:
The functions return value or `None` if no function was called.
"""
if not args:
args = {}
if sys.version_info.major == 3:
data = urllib.parse.urlparse(url)
path = data.path.rstrip('/') + '/'
_args = dict(urllib.parse.parse_qs(data.query,
keep_blank_values=True))
elif sys.version_info.major == 2:
data = urlparse.urlparse(url)
path = data.path.rstrip('/') + '/'
_args = dict(urlparse.parse_qs(data.query,
keep_blank_values=True))
for elem in self._data_store:
pattern = elem['pattern']
function = elem['function']
_method = elem['method']
type_cast = elem['type_cast']
result = re.match(pattern, path)
# Found matching method
if result and _method == method:
_args = dict(_args, **result.groupdict())
# Unpack value lists (due to urllib.parse.parse_qs) in case
# there's only one value available
for key, val in _args.items():
if isinstance(_args[key], list) and len(_args[key]) == 1:
_args[key] = _args[key][0]
# Apply type-casting if necessary
for key, val in type_cast.items():
# Not within available _args, no type-cast required
if key not in _args:
continue
# Is None or empty, no type-cast required
if not _args[key]:
continue
# Try and cast the values
if isinstance(_args[key], list):
for i, _val in enumerate(_args[key]):
_args[key][i] = self._cast(_val, val)
else:
_args[key] = self._cast(_args[key], val)
requiered_args = self._get_function_args(function)
for key, val in args.items():
if key in requiered_args:
_args[key] = val
return function(**_args)
return None |
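The `call` method above belongs to a router-style class whose registration side is not shown in this row. A simplified, Python-3-only standalone sketch of the same dispatch idea (regex path matching merged with query-string arguments), using a hypothetical route and handler:

```python
import re
from urllib.parse import urlparse, parse_qs

# One hypothetical route: the named group becomes a keyword argument.
routes = [(r"^/users/(?P<user_id>\d+)/$",
           lambda user_id, verbose="0": (int(user_id), verbose))]

def dispatch(url):
    parts = urlparse(url)
    path = parts.path.rstrip("/") + "/"
    # Unpack single-element lists produced by parse_qs.
    args = {k: v[0] if len(v) == 1 else v
            for k, v in parse_qs(parts.query, keep_blank_values=True).items()}
    for pattern, handler in routes:
        match = re.match(pattern, path)
        if match:
            return handler(**dict(args, **match.groupdict()))
    return None

print(dispatch("/users/42?verbose=1"))  # (42, '1')
```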
def which(program, paths=None):
""" takes a program name or full path, plus an optional collection of search
paths, and returns the full path of the requested executable. if paths is
specified, it is the entire list of search paths, and the PATH env is not
used at all. otherwise, PATH env is used to look for the program """
def is_exe(fpath):
return (os.path.exists(fpath) and
os.access(fpath, os.X_OK) and
os.path.isfile(os.path.realpath(fpath)))
found_path = None
fpath, fname = os.path.split(program)
# if there's a path component, then we've specified a path to the program,
# and we should just test if that program is executable. if it is, return
if fpath:
program = os.path.abspath(os.path.expanduser(program))
if is_exe(program):
found_path = program
# otherwise, we've just passed in the program name, and we need to search
# the paths to find where it actually lives
else:
paths_to_search = []
if isinstance(paths, (tuple, list)):
paths_to_search.extend(paths)
else:
env_paths = os.environ.get("PATH", "").split(os.pathsep)
paths_to_search.extend(env_paths)
for path in paths_to_search:
exe_file = os.path.join(path, program)
if is_exe(exe_file):
found_path = exe_file
break
return found_path | takes a program name or full path, plus an optional collection of search
paths, and returns the full path of the requested executable. if paths is
specified, it is the entire list of search paths, and the PATH env is not
used at all. otherwise, PATH env is used to look for the program | Below is the instruction that describes the task:
### Input:
takes a program name or full path, plus an optional collection of search
paths, and returns the full path of the requested executable. if paths is
specified, it is the entire list of search paths, and the PATH env is not
used at all. otherwise, PATH env is used to look for the program
### Response:
def which(program, paths=None):
""" takes a program name or full path, plus an optional collection of search
paths, and returns the full path of the requested executable. if paths is
specified, it is the entire list of search paths, and the PATH env is not
used at all. otherwise, PATH env is used to look for the program """
def is_exe(fpath):
return (os.path.exists(fpath) and
os.access(fpath, os.X_OK) and
os.path.isfile(os.path.realpath(fpath)))
found_path = None
fpath, fname = os.path.split(program)
# if there's a path component, then we've specified a path to the program,
# and we should just test if that program is executable. if it is, return
if fpath:
program = os.path.abspath(os.path.expanduser(program))
if is_exe(program):
found_path = program
# otherwise, we've just passed in the program name, and we need to search
# the paths to find where it actually lives
else:
paths_to_search = []
if isinstance(paths, (tuple, list)):
paths_to_search.extend(paths)
else:
env_paths = os.environ.get("PATH", "").split(os.pathsep)
paths_to_search.extend(env_paths)
for path in paths_to_search:
exe_file = os.path.join(path, program)
if is_exe(exe_file):
found_path = exe_file
break
return found_path |
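Assuming the `which` function above (and its `import os`) is in scope, typical calls look like this; results depend on the machine, so the outputs shown are only indicative:

```python
print(which("python3"))                         # e.g. '/usr/bin/python3', or None
print(which("ls", paths=["/bin", "/usr/bin"]))  # e.g. '/bin/ls'
print(which("no-such-program"))                 # None
```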
def corr(x, y=None, method=None):
"""
Compute the correlation (matrix) for the input RDD(s) using the
specified method.
Methods currently supported: I{pearson (default), spearman}.
If a single RDD of Vectors is passed in, a correlation matrix
comparing the columns in the input RDD is returned. Use C{method=}
to specify the method to be used for single RDD input.
If two RDDs of floats are passed in, a single float is returned.
:param x: an RDD of vector for which the correlation matrix is to be computed,
or an RDD of float of the same cardinality as y when y is specified.
:param y: an RDD of float of the same cardinality as x.
:param method: String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`
:return: Correlation matrix comparing columns in x.
>>> x = sc.parallelize([1.0, 0.0, -2.0], 2)
>>> y = sc.parallelize([4.0, 5.0, 3.0], 2)
>>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2)
>>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7
True
>>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson")
True
>>> Statistics.corr(x, y, "spearman")
0.5
>>> from math import isnan
>>> isnan(Statistics.corr(x, zeros))
True
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])])
>>> pearsonCorr = Statistics.corr(rdd)
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
[[ 1. 0.05564149 NaN 0.40047142]
[ 0.05564149 1. NaN 0.91359586]
[ NaN NaN 1. NaN]
[ 0.40047142 0.91359586 NaN 1. ]]
>>> spearmanCorr = Statistics.corr(rdd, method="spearman")
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
[[ 1. 0.10540926 NaN 0.4 ]
[ 0.10540926 1. NaN 0.9486833 ]
[ NaN NaN 1. NaN]
[ 0.4 0.9486833 NaN 1. ]]
>>> try:
... Statistics.corr(rdd, "spearman")
... print("Method name as second argument without 'method=' shouldn't be allowed.")
... except TypeError:
... pass
"""
# Check inputs to determine whether a single value or a matrix is needed for output.
# Since it's legal for users to use the method name as the second argument, we need to
# check if y is used to specify the method name instead.
if type(y) == str:
raise TypeError("Use 'method=' to specify method name.")
if not y:
return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray()
else:
return callMLlibFunc("corr", x.map(float), y.map(float), method) | Compute the correlation (matrix) for the input RDD(s) using the
specified method.
Methods currently supported: I{pearson (default), spearman}.
If a single RDD of Vectors is passed in, a correlation matrix
comparing the columns in the input RDD is returned. Use C{method=}
to specify the method to be used for single RDD input.
If two RDDs of floats are passed in, a single float is returned.
:param x: an RDD of vector for which the correlation matrix is to be computed,
or an RDD of float of the same cardinality as y when y is specified.
:param y: an RDD of float of the same cardinality as x.
:param method: String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`
:return: Correlation matrix comparing columns in x.
>>> x = sc.parallelize([1.0, 0.0, -2.0], 2)
>>> y = sc.parallelize([4.0, 5.0, 3.0], 2)
>>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2)
>>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7
True
>>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson")
True
>>> Statistics.corr(x, y, "spearman")
0.5
>>> from math import isnan
>>> isnan(Statistics.corr(x, zeros))
True
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])])
>>> pearsonCorr = Statistics.corr(rdd)
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
[[ 1. 0.05564149 NaN 0.40047142]
[ 0.05564149 1. NaN 0.91359586]
[ NaN NaN 1. NaN]
[ 0.40047142 0.91359586 NaN 1. ]]
>>> spearmanCorr = Statistics.corr(rdd, method="spearman")
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
[[ 1. 0.10540926 NaN 0.4 ]
[ 0.10540926 1. NaN 0.9486833 ]
[ NaN NaN 1. NaN]
[ 0.4 0.9486833 NaN 1. ]]
>>> try:
... Statistics.corr(rdd, "spearman")
... print("Method name as second argument without 'method=' shouldn't be allowed.")
... except TypeError:
... pass | Below is the instruction that describes the task:
### Input:
Compute the correlation (matrix) for the input RDD(s) using the
specified method.
Methods currently supported: I{pearson (default), spearman}.
If a single RDD of Vectors is passed in, a correlation matrix
comparing the columns in the input RDD is returned. Use C{method=}
to specify the method to be used for single RDD input.
If two RDDs of floats are passed in, a single float is returned.
:param x: an RDD of vector for which the correlation matrix is to be computed,
or an RDD of float of the same cardinality as y when y is specified.
:param y: an RDD of float of the same cardinality as x.
:param method: String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`
:return: Correlation matrix comparing columns in x.
>>> x = sc.parallelize([1.0, 0.0, -2.0], 2)
>>> y = sc.parallelize([4.0, 5.0, 3.0], 2)
>>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2)
>>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7
True
>>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson")
True
>>> Statistics.corr(x, y, "spearman")
0.5
>>> from math import isnan
>>> isnan(Statistics.corr(x, zeros))
True
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])])
>>> pearsonCorr = Statistics.corr(rdd)
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
[[ 1. 0.05564149 NaN 0.40047142]
[ 0.05564149 1. NaN 0.91359586]
[ NaN NaN 1. NaN]
[ 0.40047142 0.91359586 NaN 1. ]]
>>> spearmanCorr = Statistics.corr(rdd, method="spearman")
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
[[ 1. 0.10540926 NaN 0.4 ]
[ 0.10540926 1. NaN 0.9486833 ]
[ NaN NaN 1. NaN]
[ 0.4 0.9486833 NaN 1. ]]
>>> try:
... Statistics.corr(rdd, "spearman")
... print("Method name as second argument without 'method=' shouldn't be allowed.")
... except TypeError:
... pass
### Response:
def corr(x, y=None, method=None):
"""
Compute the correlation (matrix) for the input RDD(s) using the
specified method.
Methods currently supported: I{pearson (default), spearman}.
If a single RDD of Vectors is passed in, a correlation matrix
comparing the columns in the input RDD is returned. Use C{method=}
to specify the method to be used for single RDD inout.
If two RDDs of floats are passed in, a single float is returned.
:param x: an RDD of vector for which the correlation matrix is to be computed,
or an RDD of float of the same cardinality as y when y is specified.
:param y: an RDD of float of the same cardinality as x.
:param method: String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`
:return: Correlation matrix comparing columns in x.
>>> x = sc.parallelize([1.0, 0.0, -2.0], 2)
>>> y = sc.parallelize([4.0, 5.0, 3.0], 2)
>>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2)
>>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7
True
>>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson")
True
>>> Statistics.corr(x, y, "spearman")
0.5
>>> from math import isnan
>>> isnan(Statistics.corr(x, zeros))
True
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])])
>>> pearsonCorr = Statistics.corr(rdd)
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
[[ 1. 0.05564149 NaN 0.40047142]
[ 0.05564149 1. NaN 0.91359586]
[ NaN NaN 1. NaN]
[ 0.40047142 0.91359586 NaN 1. ]]
>>> spearmanCorr = Statistics.corr(rdd, method="spearman")
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
[[ 1. 0.10540926 NaN 0.4 ]
[ 0.10540926 1. NaN 0.9486833 ]
[ NaN NaN 1. NaN]
[ 0.4 0.9486833 NaN 1. ]]
>>> try:
... Statistics.corr(rdd, "spearman")
... print("Method name as second argument without 'method=' shouldn't be allowed.")
... except TypeError:
... pass
"""
# Check inputs to determine whether a single value or a matrix is needed for output.
# Since it's legal for users to use the method name as the second argument, we need to
# check if y is used to specify the method name instead.
if type(y) == str:
raise TypeError("Use 'method=' to specify method name.")
if not y:
return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray()
else:
return callMLlibFunc("corr", x.map(float), y.map(float), method) |
def open(self, session, resource_name,
access_mode=constants.AccessModes.no_lock, open_timeout=constants.VI_TMO_IMMEDIATE):
"""Opens a session to the specified resource.
Corresponds to viOpen function of the VISA library.
:param session: Resource Manager session (should always be a session returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:param access_mode: Specifies the mode by which the resource is to be accessed.
:type access_mode: :class:`pyvisa.constants.AccessModes`
:param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits
before returning an error.
:return: Unique logical identifier reference to a session, return value of the library call.
:rtype: session, :class:`pyvisa.constants.StatusCode`
"""
raise NotImplementedError | Opens a session to the specified resource.
Corresponds to viOpen function of the VISA library.
:param session: Resource Manager session (should always be a session returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:param access_mode: Specifies the mode by which the resource is to be accessed.
:type access_mode: :class:`pyvisa.constants.AccessModes`
:param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits
before returning an error.
:return: Unique logical identifier reference to a session, return value of the library call.
:rtype: session, :class:`pyvisa.constants.StatusCode` | Below is the instruction that describes the task:
### Input:
Opens a session to the specified resource.
Corresponds to viOpen function of the VISA library.
:param session: Resource Manager session (should always be a session returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:param access_mode: Specifies the mode by which the resource is to be accessed.
:type access_mode: :class:`pyvisa.constants.AccessModes`
:param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits
before returning an error.
:return: Unique logical identifier reference to a session, return value of the library call.
:rtype: session, :class:`pyvisa.constants.StatusCode`
### Response:
def open(self, session, resource_name,
access_mode=constants.AccessModes.no_lock, open_timeout=constants.VI_TMO_IMMEDIATE):
"""Opens a session to the specified resource.
Corresponds to viOpen function of the VISA library.
:param session: Resource Manager session (should always be a session returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:param access_mode: Specifies the mode by which the resource is to be accessed.
:type access_mode: :class:`pyvisa.constants.AccessModes`
:param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits
before returning an error.
:return: Unique logical identifier reference to a session, return value of the library call.
:rtype: session, :class:`pyvisa.constants.StatusCode`
"""
raise NotImplementedError |
def update(self, value):
"""Add a value to the sample."""
super(UniformSample, self).update(value)
self.count += 1
c = self.count
if c < len(self.sample):
self.sample[c-1] = value
else:
r = random.randint(0, c)
if r < len(self.sample):
self.sample[r] = value | Add a value to the sample. | Below is the instruction that describes the task:
### Input:
Add a value to the sample.
### Response:
def update(self, value):
"""Add a value to the sample."""
super(UniformSample, self).update(value)
self.count += 1
c = self.count
if c < len(self.sample):
self.sample[c-1] = value
else:
r = random.randint(0, c)
if r < len(self.sample):
self.sample[r] = value |
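The update above implements uniform ("reservoir") sampling over a stream. A self-contained sketch of the same idea, detached from the metrics class (it uses the conventional `randint(0, count - 1)` bound):

```python
import random

def reservoir_sample(values, size, rng=random):
    """Keep a fixed-size sample in which every value seen is equally likely."""
    sample = []
    for count, value in enumerate(values, 1):
        if count <= size:
            sample.append(value)           # fill the reservoir first
        else:
            r = rng.randint(0, count - 1)  # replace with probability size/count
            if r < size:
                sample[r] = value
    return sample

print(reservoir_sample(range(1000), 5))  # e.g. [523, 1, 847, 66, 390]
```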
def validate_allowed_values(allowed_values, value):
"""Support a variable defining which values it allows.
Args:
allowed_values (Optional[list]): A list of allowed values from the
variable definition
value (obj): The object representing the value provided for the
variable
Returns:
bool: Boolean for whether or not the value is valid.
"""
# ignore CFNParameter, troposphere handles these for us
if not allowed_values or isinstance(value, CFNParameter):
return True
return value in allowed_values | Support a variable defining which values it allows.
Args:
allowed_values (Optional[list]): A list of allowed values from the
variable definition
value (obj): The object representing the value provided for the
variable
Returns:
bool: Boolean for whether or not the value is valid. | Below is the instruction that describes the task:
### Input:
Support a variable defining which values it allows.
Args:
allowed_values (Optional[list]): A list of allowed values from the
variable definition
value (obj): The object representing the value provided for the
variable
Returns:
bool: Boolean for whether or not the value is valid.
### Response:
def validate_allowed_values(allowed_values, value):
"""Support a variable defining which values it allows.
Args:
allowed_values (Optional[list]): A list of allowed values from the
variable definition
value (obj): The object representing the value provided for the
variable
Returns:
bool: Boolean for whether or not the value is valid.
"""
# ignore CFNParameter, troposphere handles these for us
if not allowed_values or isinstance(value, CFNParameter):
return True
return value in allowed_values |
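Setting aside the stacker-specific `CFNParameter` short-circuit, the check above reduces to plain membership; a standalone sketch:

```python
def validate_allowed_values(allowed_values, value):
    # No restriction declared: everything passes.
    if not allowed_values:
        return True
    return value in allowed_values

print(validate_allowed_values(["dev", "prod"], "prod"))   # True
print(validate_allowed_values(["dev", "prod"], "stage"))  # False
print(validate_allowed_values(None, "anything"))          # True
```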
def send(self, message):
"""Send the supplied *message* (xml string) to NETCONF server."""
if not self.connected:
raise TransportError('Not connected to NETCONF server')
self.logger.debug('queueing %s', message)
self._q.put(message) | Send the supplied *message* (xml string) to NETCONF server. | Below is the instruction that describes the task:
### Input:
Send the supplied *message* (xml string) to NETCONF server.
### Response:
def send(self, message):
"""Send the supplied *message* (xml string) to NETCONF server."""
if not self.connected:
raise TransportError('Not connected to NETCONF server')
self.logger.debug('queueing %s', message)
self._q.put(message) |
def add_standard_attention_hparams(hparams):
"""Adds the hparams used by get_standardized_layers."""
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
# hparams used and which should have been defined outside (in
# common_hparams):
# Global flags
# hparams.mode
# hparams.hidden_size
# Pre-post processing flags
# hparams.layer_preprocess_sequence
# hparams.layer_postprocess_sequence
# hparams.layer_prepostprocess_dropout
# hparams.norm_type
# hparams.norm_epsilon
# Mixture-of-Expert flags
# hparams.moe_hidden_sizes
# hparams.moe_num_experts
# hparams.moe_k
# hparams.moe_loss_coef
# Attention layers flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("attention_dropout", 0.0)
# Attention: Local
hparams.add_hparam("attention_loc_block_length", 256)
# Attention: Local (unmasked only): How much to look left.
hparams.add_hparam("attention_loc_block_width", 128)
# Attention: Memory-compressed
hparams.add_hparam("attention_red_factor", 3)
hparams.add_hparam("attention_red_type", "conv")
hparams.add_hparam("attention_red_nonlinearity", "none")
# Fully connected layers flags
# To be more consistent, should use filter_size to also control the MOE
# size if moe_hidden_sizes not set.
hparams.add_hparam("filter_size", 2048)
hparams.add_hparam("relu_dropout", 0.0)
return hparams | Adds the hparams used by get_standardized_layers. | Below is the instruction that describes the task:
### Input:
Adds the hparams used by get_standardized_layers.
### Response:
def add_standard_attention_hparams(hparams):
"""Adds the hparams used by get_standardized_layers."""
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
# hparams used and which should have been defined outside (in
# common_hparams):
# Global flags
# hparams.mode
# hparams.hidden_size
# Pre-post processing flags
# hparams.layer_preprocess_sequence
# hparams.layer_postprocess_sequence
# hparams.layer_prepostprocess_dropout
# hparams.norm_type
# hparams.norm_epsilon
# Mixture-of-Expert flags
# hparams.moe_hidden_sizes
# hparams.moe_num_experts
# hparams.moe_k
# hparams.moe_loss_coef
# Attention layers flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("attention_dropout", 0.0)
# Attention: Local
hparams.add_hparam("attention_loc_block_length", 256)
# Attention: Local (unmasked only): How much to look left.
hparams.add_hparam("attention_loc_block_width", 128)
# Attention: Memory-compressed
hparams.add_hparam("attention_red_factor", 3)
hparams.add_hparam("attention_red_type", "conv")
hparams.add_hparam("attention_red_nonlinearity", "none")
# Fully connected layers flags
# To be more consistent, should use filter_size to also control the MOE
# size if moe_hidden_sizes not set.
hparams.add_hparam("filter_size", 2048)
hparams.add_hparam("relu_dropout", 0.0)
return hparams |
def show_kernel_error(self, error):
"""Show kernel initialization errors in infowidget."""
# Replace end of line chars with <br>
eol = sourcecode.get_eol_chars(error)
if eol:
error = error.replace(eol, '<br>')
# Don't break lines in hyphens
# From https://stackoverflow.com/q/7691569/438386
error = error.replace('-', '‑')
# Create error page
message = _("An error ocurred while starting the kernel")
kernel_error_template = Template(KERNEL_ERROR)
self.info_page = kernel_error_template.substitute(
css_path=self.css_path,
message=message,
error=error)
# Show error
self.set_info_page()
self.shellwidget.hide()
self.infowidget.show()
# Tell the client we're in error mode
self.is_error_shown = True | Show kernel initialization errors in infowidget. | Below is the instruction that describes the task:
### Input:
Show kernel initialization errors in infowidget.
### Response:
def show_kernel_error(self, error):
"""Show kernel initialization errors in infowidget."""
# Replace end of line chars with <br>
eol = sourcecode.get_eol_chars(error)
if eol:
error = error.replace(eol, '<br>')
# Don't break lines in hyphens
# From https://stackoverflow.com/q/7691569/438386
error = error.replace('-', '‑')
# Create error page
message = _("An error ocurred while starting the kernel")
kernel_error_template = Template(KERNEL_ERROR)
self.info_page = kernel_error_template.substitute(
css_path=self.css_path,
message=message,
error=error)
# Show error
self.set_info_page()
self.shellwidget.hide()
self.infowidget.show()
# Tell the client we're in error mode
self.is_error_shown = True |
def get_session(config=None):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(config=config, make_default=True)
return sess | Get default session or create one with a given config | Below is the instruction that describes the task:
### Input:
Get default session or create one with a given config
### Response:
def get_session(config=None):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(config=config, make_default=True)
return sess |
def upload(self, path, docs, **params):
"""
A deprecated alias for post(path, docs=docs), included only for
backward compatibility.
"""
logger.warning('The upload method is deprecated; use post instead.')
return self.post(path, docs=docs) | A deprecated alias for post(path, docs=docs), included only for
backward compatibility. | Below is the the instruction that describes the task:
### Input:
A deprecated alias for post(path, docs=docs), included only for
backward compatibility.
### Response:
def upload(self, path, docs, **params):
"""
A deprecated alias for post(path, docs=docs), included only for
backward compatibility.
"""
logger.warning('The upload method is deprecated; use post instead.')
return self.post(path, docs=docs) |
def press(self):
'''
press key via name or key code. Supported key name includes:
home, back, left, right, up, down, center, menu, search, enter,
delete(or del), recent(recent apps), volume_up, volume_down,
volume_mute, camera, power.
Usage:
d.press.back() # press back key
d.press.menu() # press menu key
d.press(89) # press keycode
'''
@param_to_property(
key=["home", "back", "left", "right", "up", "down", "center",
"menu", "search", "enter", "delete", "del", "recent",
"volume_up", "volume_down", "volume_mute", "camera", "power"]
)
def _press(key, meta=None):
if isinstance(key, int):
return self.server.jsonrpc.pressKeyCode(key, meta) if meta else self.server.jsonrpc.pressKeyCode(key)
else:
return self.server.jsonrpc.pressKey(str(key))
return _press | press key via name or key code. Supported key name includes:
home, back, left, right, up, down, center, menu, search, enter,
delete(or del), recent(recent apps), volume_up, volume_down,
volume_mute, camera, power.
Usage:
d.press.back() # press back key
d.press.menu() # press menu key
d.press(89) # press keycode | Below is the instruction that describes the task:
### Input:
press key via name or key code. Supported key name includes:
home, back, left, right, up, down, center, menu, search, enter,
delete(or del), recent(recent apps), volume_up, volume_down,
volume_mute, camera, power.
Usage:
d.press.back() # press back key
d.press.menu() # press menu key
d.press(89) # press keycode
### Response:
def press(self):
'''
press key via name or key code. Supported key name includes:
home, back, left, right, up, down, center, menu, search, enter,
delete(or del), recent(recent apps), volume_up, volume_down,
volume_mute, camera, power.
Usage:
d.press.back() # press back key
d.press.menu() # press menu key
d.press(89) # press keycode
'''
@param_to_property(
key=["home", "back", "left", "right", "up", "down", "center",
"menu", "search", "enter", "delete", "del", "recent",
"volume_up", "volume_down", "volume_mute", "camera", "power"]
)
def _press(key, meta=None):
if isinstance(key, int):
return self.server.jsonrpc.pressKeyCode(key, meta) if meta else self.server.jsonrpc.pressKeyCode(key)
else:
return self.server.jsonrpc.pressKey(str(key))
return _press |
def zero_year_special_case(from_date, to_date, start, end):
"""strptime does not resolve a 0000 year, we must handle this."""
if start == 'pos' and end == 'pos':
# always interval from earlier to later
if from_date.startswith('0000') and not to_date.startswith('0000'):
return True
# always interval from later to earlier
if not from_date.startswith('0000') and to_date.startswith('0000'):
return False
# an interval from 0000-MM-DD/0000-MM-DD ??? PARSE !!!
if from_date.startswith('0000') and to_date.startswith('0000'):
# fill from date assuming first subsequent date object if missing
# missing m+d, assume jan 1
if len(from_date) == 4:
fm, fd = 1, 1
# missing d, assume the 1st
elif len(from_date) == 7:
fm, fd = int(from_date[5:7]), 1
# not missing any date objects
elif len(from_date) == 10:
fm, fd = int(from_date[5:7]), int(from_date[8:10])
# fill to date assuming first subsequent date object if missing
# missing m+d, assume jan 1
if len(to_date) == 4:
tm, td = 1, 1
# missing d, assume the 1st
elif len(to_date) == 7:
tm, td = int(to_date[5:7]), 1
# not missing any date objects
elif len(to_date) == 10:
tm, td = int(to_date[5:7]), int(to_date[8:10])
# equality check
if from_date == to_date:
return True
# compare the dates
if fm <= tm:
if fd <= td:
return True
else:
return False
else:
return False
# these cases are always one way or the other
# "-0000" is an invalid edtf
elif start == 'neg' and end == 'neg':
return False
# False unless start is not "0000"
elif start == 'neg' and end == 'pos':
if from_date.startswith("0000"):
return False
else:
return True | strptime does not resolve a 0000 year, we must handle this. | Below is the instruction that describes the task:
### Input:
strptime does not resolve a 0000 year, we must handle this.
### Response:
def zero_year_special_case(from_date, to_date, start, end):
"""strptime does not resolve a 0000 year, we must handle this."""
if start == 'pos' and end == 'pos':
# always interval from earlier to later
if from_date.startswith('0000') and not to_date.startswith('0000'):
return True
# always interval from later to earlier
if not from_date.startswith('0000') and to_date.startswith('0000'):
return False
# an interval from 0000-MM-DD/0000-MM-DD ??? PARSE !!!
if from_date.startswith('0000') and to_date.startswith('0000'):
# fill from date assuming first subsequent date object if missing
# missing m+d, assume jan 1
if len(from_date) == 4:
fm, fd = 1, 1
# missing d, assume the 1st
elif len(from_date) == 7:
fm, fd = int(from_date[5:7]), 1
# not missing any date objects
elif len(from_date) == 10:
fm, fd = int(from_date[5:7]), int(from_date[8:10])
# fill to date assuming first subsequent date object if missing
# missing m+d, assume jan 1
if len(to_date) == 4:
tm, td = 1, 1
# missing d, assume the 1st
elif len(to_date) == 7:
tm, td = int(to_date[5:7]), 1
# not missing any date objects
elif len(to_date) == 10:
tm, td = int(to_date[5:7]), int(to_date[8:10])
# equality check
if from_date == to_date:
return True
# compare the dates
if fm <= tm:
if fd <= td:
return True
else:
return False
else:
return False
# these cases are always one way or the other
# "-0000" is an invalid edtf
elif start == 'neg' and end == 'neg':
return False
# False unless start is not "0000"
elif start == 'neg' and end == 'pos':
if from_date.startswith("0000"):
return False
else:
return True |
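Assuming the `zero_year_special_case` function above is in scope, two illustrative calls covering the year-0000 cases it handles (`start`/`end` flag whether each endpoint is positive or negative):

```python
# Both endpoints in year 0000: the month/day comparison decides.
print(zero_year_special_case("0000-01-01", "0000-12-31", "pos", "pos"))  # True

# A later year back to year 0000 is never a forward interval.
print(zero_year_special_case("0001", "0000", "pos", "pos"))              # False
```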
def allocate_hosting_port(self, context, router_id, port_db, network_type,
hosting_device_id):
"""Get the VLAN and port for this hosting device
The VLAN used between the APIC and the external router is stored
by the APIC driver. This calls into the APIC driver to first get
the ACI VRF information associated with this port, then uses that
to look up the VLAN to use for this port to the external router
(kept as part of the L3 Out policy in ACI).
"""
# If this is a router interface, the VLAN comes from APIC.
# If it's the gateway, the VLAN comes from the segment ID
if port_db.get('device_owner') == DEVICE_OWNER_ROUTER_GW:
ext_dict, net = self._get_external_network_dict(context, port_db)
# If an OpFlex network is used on the external network,
# the actual segment ID comes from the config file
if net and net.get('provider:network_type') == 'opflex':
if ext_dict.get('segmentation_id'):
return {'allocated_port_id': port_db.id,
'allocated_vlan': ext_dict['segmentation_id']}
else:
raise AciDriverConfigMissingSegmentationId(ext_net=net)
return super(AciVLANTrunkingPlugDriver,
self).allocate_hosting_port(
context, router_id,
port_db, network_type, hosting_device_id)
# shouldn't happen, but just in case
if port_db.get('device_owner') != DEVICE_OWNER_ROUTER_INTF:
return
# get the external network that this port connects to.
# if there isn't an external gateway yet on the router,
# then don't allocate a port
router = self.l3_plugin.get_router(context, router_id)
gw_info = router[EXTERNAL_GW_INFO]
if not gw_info:
return
network_id = gw_info.get('network_id')
networks = self._core_plugin.get_networks(
context.elevated(), {'id': [network_id]})
l3out_network = networks[0]
l3out_name = self.get_ext_net_name(l3out_network['name'])
# For VLAN apic driver provides VLAN tag
details = self.get_vrf_context(context, router_id, port_db)
if details is None:
LOG.debug('aci_vlan_trunking_driver: No vrf_details')
return
vrf_name = details.get('vrf_name')
vrf_tenant = details.get('vrf_tenant')
allocated_vlan = self.apic_driver.l3out_vlan_alloc.get_vlan_allocated(
l3out_name, vrf_name, vrf_tenant=vrf_tenant)
if allocated_vlan is None:
if not vrf_tenant:
# TODO(tbachman): I can't remember why this is here
return super(AciVLANTrunkingPlugDriver,
self).allocate_hosting_port(
context, router_id,
port_db, network_type, hosting_device_id
)
# Database must have been messed up if this happens ...
return
return {'allocated_port_id': port_db.id,
'allocated_vlan': allocated_vlan} | Get the VLAN and port for this hosting device
The VLAN used between the APIC and the external router is stored
by the APIC driver. This calls into the APIC driver to first get
the ACI VRF information associated with this port, then uses that
to look up the VLAN to use for this port to the external router
(kept as part of the L3 Out policy in ACI). | Below is the instruction that describes the task:
### Input:
Get the VLAN and port for this hosting device
The VLAN used between the APIC and the external router is stored
by the APIC driver. This calls into the APIC driver to first get
the ACI VRF information associated with this port, then uses that
to look up the VLAN to use for this port to the external router
(kept as part of the L3 Out policy in ACI).
### Response:
def allocate_hosting_port(self, context, router_id, port_db, network_type,
hosting_device_id):
"""Get the VLAN and port for this hosting device
The VLAN used between the APIC and the external router is stored
by the APIC driver. This calls into the APIC driver to first get
the ACI VRF information associated with this port, then uses that
to look up the VLAN to use for this port to the external router
(kept as part of the L3 Out policy in ACI).
"""
# If this is a router interface, the VLAN comes from APIC.
# If it's the gateway, the VLAN comes from the segment ID
if port_db.get('device_owner') == DEVICE_OWNER_ROUTER_GW:
ext_dict, net = self._get_external_network_dict(context, port_db)
# If an OpFlex network is used on the external network,
# the actual segment ID comes from the config file
if net and net.get('provider:network_type') == 'opflex':
if ext_dict.get('segmentation_id'):
return {'allocated_port_id': port_db.id,
'allocated_vlan': ext_dict['segmentation_id']}
else:
raise AciDriverConfigMissingSegmentationId(ext_net=net)
return super(AciVLANTrunkingPlugDriver,
self).allocate_hosting_port(
context, router_id,
port_db, network_type, hosting_device_id)
# shouldn't happen, but just in case
if port_db.get('device_owner') != DEVICE_OWNER_ROUTER_INTF:
return
# get the external network that this port connects to.
# if there isn't an external gateway yet on the router,
# then don't allocate a port
router = self.l3_plugin.get_router(context, router_id)
gw_info = router[EXTERNAL_GW_INFO]
if not gw_info:
return
network_id = gw_info.get('network_id')
networks = self._core_plugin.get_networks(
context.elevated(), {'id': [network_id]})
l3out_network = networks[0]
l3out_name = self.get_ext_net_name(l3out_network['name'])
# For VLAN apic driver provides VLAN tag
details = self.get_vrf_context(context, router_id, port_db)
if details is None:
LOG.debug('aci_vlan_trunking_driver: No vrf_details')
return
vrf_name = details.get('vrf_name')
vrf_tenant = details.get('vrf_tenant')
allocated_vlan = self.apic_driver.l3out_vlan_alloc.get_vlan_allocated(
l3out_name, vrf_name, vrf_tenant=vrf_tenant)
if allocated_vlan is None:
if not vrf_tenant:
# TODO(tbachman): I can't remember why this is here
return super(AciVLANTrunkingPlugDriver,
self).allocate_hosting_port(
context, router_id,
port_db, network_type, hosting_device_id
)
# Database must have been messed up if this happens ...
return
return {'allocated_port_id': port_db.id,
'allocated_vlan': allocated_vlan} |
def set_volume_level(self, volume):
"""Set volume level."""
if self._volume_level is not None:
if volume > self._volume_level:
num = int(self._max_volume * (volume - self._volume_level))
self._volume_level = volume
self._device.vol_up(num=num)
elif volume < self._volume_level:
num = int(self._max_volume * (self._volume_level - volume))
self._volume_level = volume
self._device.vol_down(num=num) | Set volume level. | Below is the instruction that describes the task:
### Input:
Set volume level.
### Response:
def set_volume_level(self, volume):
"""Set volume level."""
if self._volume_level is not None:
if volume > self._volume_level:
num = int(self._max_volume * (volume - self._volume_level))
self._volume_level = volume
self._device.vol_up(num=num)
elif volume < self._volume_level:
num = int(self._max_volume * (self._volume_level - volume))
self._volume_level = volume
self._device.vol_down(num=num) |
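A short worked example of the step arithmetic above (illustrative only; the step count of 30 is an assumption, not a value from the original entry):

# With 30 discrete device steps, moving from 50% to 75% volume:
max_volume = 30
current_level = 0.5
target_level = 0.75
steps = int(max_volume * (target_level - current_level))
print(steps)  # 7 -- the method would call vol_up(num=7) and store the new level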
def replace_store_credit_payment_by_id(cls, store_credit_payment_id, store_credit_payment, **kwargs):
"""Replace StoreCreditPayment
Replace all attributes of StoreCreditPayment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_store_credit_payment_by_id(store_credit_payment_id, store_credit_payment, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_payment_id: ID of storeCreditPayment to replace (required)
:param StoreCreditPayment store_credit_payment: Attributes of storeCreditPayment to replace (required)
:return: StoreCreditPayment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_store_credit_payment_by_id_with_http_info(store_credit_payment_id, store_credit_payment, **kwargs)
else:
(data) = cls._replace_store_credit_payment_by_id_with_http_info(store_credit_payment_id, store_credit_payment, **kwargs)
return data | Replace StoreCreditPayment
Replace all attributes of StoreCreditPayment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_store_credit_payment_by_id(store_credit_payment_id, store_credit_payment, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_payment_id: ID of storeCreditPayment to replace (required)
:param StoreCreditPayment store_credit_payment: Attributes of storeCreditPayment to replace (required)
:return: StoreCreditPayment
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Replace StoreCreditPayment
Replace all attributes of StoreCreditPayment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_store_credit_payment_by_id(store_credit_payment_id, store_credit_payment, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_payment_id: ID of storeCreditPayment to replace (required)
:param StoreCreditPayment store_credit_payment: Attributes of storeCreditPayment to replace (required)
:return: StoreCreditPayment
If the method is called asynchronously,
returns the request thread.
### Response:
def replace_store_credit_payment_by_id(cls, store_credit_payment_id, store_credit_payment, **kwargs):
"""Replace StoreCreditPayment
Replace all attributes of StoreCreditPayment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_store_credit_payment_by_id(store_credit_payment_id, store_credit_payment, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_payment_id: ID of storeCreditPayment to replace (required)
:param StoreCreditPayment store_credit_payment: Attributes of storeCreditPayment to replace (required)
:return: StoreCreditPayment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_store_credit_payment_by_id_with_http_info(store_credit_payment_id, store_credit_payment, **kwargs)
else:
(data) = cls._replace_store_credit_payment_by_id_with_http_info(store_credit_payment_id, store_credit_payment, **kwargs)
return data |
def runSamplesPermu(self, df, gmt=None):
"""Single Sample GSEA workflow with permutation procedure"""
assert self.min_size <= self.max_size
mkdirs(self.outdir)
self.resultsOnSamples = OrderedDict()
outdir = self.outdir
# iterate through each sample
for name, ser in df.iteritems():
self.outdir = os.path.join(outdir, str(name))
self._logger.info("Run Sample: %s " % name)
mkdirs(self.outdir)
# sort ranking values from high to low or reverse
dat2 = ser.sort_values(ascending=self.ascending)
# reset integer index, or caused unwanted problems
# df.reset_index(drop=True, inplace=True)
# compute ES, NES, pval, FDR, RES
gsea_results, hit_ind,rank_ES, subsets = gsea_compute(data=dat2, n=self.permutation_num, gmt=gmt,
weighted_score_type=self.weighted_score_type,
permutation_type='gene_set', method=None,
pheno_pos='', pheno_neg='',
classes=None, ascending=self.ascending,
processes=self._processes,
seed=self.seed, single=True, scale=self.scale)
# write file
res_zip = zip(subsets, list(gsea_results), hit_ind, rank_ES)
self._save_results(zipdata=res_zip, outdir=self.outdir, module=self.module,
gmt=gmt, rank_metric=dat2, permutation_type="gene_sets")
self.resultsOnSamples[name] = self.res2d.es
# plotting
if self._noplot: continue
self._logger.info("Plotting Sample: %s \n" % name)
self._plotting(rank_metric=dat2, results=self.results,
graph_num=self.graph_num, outdir=self.outdir,
figsize=self.figsize, format=self.format)
# save es, nes to file
self._save(outdir)
return | Single Sample GSEA workflow with permutation procedure | Below is the instruction that describes the task:
### Input:
Single Sample GSEA workflow with permutation procedure
### Response:
def runSamplesPermu(self, df, gmt=None):
"""Single Sample GSEA workflow with permutation procedure"""
assert self.min_size <= self.max_size
mkdirs(self.outdir)
self.resultsOnSamples = OrderedDict()
outdir = self.outdir
# iterate through each sample
for name, ser in df.iteritems():
self.outdir = os.path.join(outdir, str(name))
self._logger.info("Run Sample: %s " % name)
mkdirs(self.outdir)
# sort ranking values from high to low or reverse
dat2 = ser.sort_values(ascending=self.ascending)
# reset integer index, or caused unwanted problems
# df.reset_index(drop=True, inplace=True)
# compute ES, NES, pval, FDR, RES
gsea_results, hit_ind,rank_ES, subsets = gsea_compute(data=dat2, n=self.permutation_num, gmt=gmt,
weighted_score_type=self.weighted_score_type,
permutation_type='gene_set', method=None,
pheno_pos='', pheno_neg='',
classes=None, ascending=self.ascending,
processes=self._processes,
seed=self.seed, single=True, scale=self.scale)
# write file
res_zip = zip(subsets, list(gsea_results), hit_ind, rank_ES)
self._save_results(zipdata=res_zip, outdir=self.outdir, module=self.module,
gmt=gmt, rank_metric=dat2, permutation_type="gene_sets")
self.resultsOnSamples[name] = self.res2d.es
# plotting
if self._noplot: continue
self._logger.info("Plotting Sample: %s \n" % name)
self._plotting(rank_metric=dat2, results=self.results,
graph_num=self.graph_num, outdir=self.outdir,
figsize=self.figsize, format=self.format)
# save es, nes to file
self._save(outdir)
return |
def convert(self, vroot, entry_variables):
"""Convert a given graph.
Convert a given graph using the `converters` in the order of the registration, i.e., sequentially.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
"""
for converter in self.converters:
vroot = converter.convert(vroot, entry_variables)
return vroot | Convert a given graph.
Convert a given graph using the `converters` in the order of the registration, i.e., sequentially.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | Below is the instruction that describes the task:
### Input:
Convert a given graph.
Convert a given graph using the `converters` in the order of the registration, i.e., sequentially.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
### Response:
def convert(self, vroot, entry_variables):
"""Convert a given graph.
Convert a given graph using the `converters` in the order of the registration, i.e., sequentially.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
"""
for converter in self.converters:
vroot = converter.convert(vroot, entry_variables)
return vroot |
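The loop above is a plain sequential pipeline over registered converters; a minimal standalone sketch of the same pattern (the converter classes here are made up, not NNabla's):

class UpperCase:
    def convert(self, value, _entry):
        return value.upper()

class Exclaim:
    def convert(self, value, _entry):
        return value + "!"

def run_pipeline(converters, value, entry=None):
    # Apply converters in registration order, feeding each result into the next.
    for converter in converters:
        value = converter.convert(value, entry)
    return value

print(run_pipeline([UpperCase(), Exclaim()], "hello"))  # HELLO!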
def elapsed(self):
"""elapsed time since initial submission"""
if self.ready():
return self.wall_time
now = submitted = datetime.now()
for msg_id in self.msg_ids:
if msg_id in self._client.metadata:
stamp = self._client.metadata[msg_id]['submitted']
if stamp and stamp < submitted:
submitted = stamp
return _total_seconds(now-submitted) | elapsed time since initial submission | Below is the instruction that describes the task:
### Input:
elapsed time since initial submission
### Response:
def elapsed(self):
"""elapsed time since initial submission"""
if self.ready():
return self.wall_time
now = submitted = datetime.now()
for msg_id in self.msg_ids:
if msg_id in self._client.metadata:
stamp = self._client.metadata[msg_id]['submitted']
if stamp and stamp < submitted:
submitted = stamp
return _total_seconds(now-submitted) |
def im2mat(I):
"""Converts and image to matrix (one pixel per line)"""
return I.reshape((I.shape[0] * I.shape[1], I.shape[2])) | Converts an image to matrix (one pixel per line) | Below is the instruction that describes the task:
### Input:
Converts an image to matrix (one pixel per line)
### Response:
def im2mat(I):
"""Converts and image to matrix (one pixel per line)"""
return I.reshape((I.shape[0] * I.shape[1], I.shape[2])) |
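A quick shape check of the reshape above (assumes only NumPy):

import numpy as np

I = np.zeros((4, 5, 3))  # height=4, width=5, 3 colour channels
X = I.reshape((I.shape[0] * I.shape[1], I.shape[2]))
print(X.shape)  # (20, 3): one row per pixel, one column per channel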
def set_observable(self,tseq,qseq):
"""Set the observable sequence data
:param tseq: target sequence (from the homopolymer)
:param qseq: query sequence ( from the homopolymer)
:type tseq: string
:type qseq: string
"""
tnt = None
qnt = None
if len(tseq) > 0: tnt = tseq[0]
if len(qseq) > 0: qnt = qseq[0]
self._observable.set(len(tseq),len(qseq),tnt,qnt) | Set the observable sequence data
:param tseq: target sequence (from the homopolymer)
:param qseq: query sequence ( from the homopolymer)
:type tseq: string
:type qseq: string | Below is the instruction that describes the task:
### Input:
Set the observable sequence data
:param tseq: target sequence (from the homopolymer)
:param qseq: query sequence ( from the homopolymer)
:type tseq: string
:type qseq: string
### Response:
def set_observable(self,tseq,qseq):
"""Set the observable sequence data
:param tseq: target sequence (from the homopolymer)
:param qseq: query sequence ( from the homopolymer)
:type tseq: string
:type qseq: string
"""
tnt = None
qnt = None
if len(tseq) > 0: tnt = tseq[0]
if len(qseq) > 0: qnt = qseq[0]
self._observable.set(len(tseq),len(qseq),tnt,qnt) |
def _update_events(self):
"""Update our cached list of latest activity events."""
events = self._skybell.dev_cache(self, CONST.EVENT) or {}
for activity in self._activities:
event = activity.get(CONST.EVENT)
created_at = activity.get(CONST.CREATED_AT)
old_event = events.get(event)
if old_event and created_at < old_event.get(CONST.CREATED_AT):
continue
else:
events[event] = activity
self._skybell.update_dev_cache(
self,
{
CONST.EVENT: events
}) | Update our cached list of latest activity events. | Below is the instruction that describes the task:
### Input:
Update our cached list of latest activity events.
### Response:
def _update_events(self):
"""Update our cached list of latest activity events."""
events = self._skybell.dev_cache(self, CONST.EVENT) or {}
for activity in self._activities:
event = activity.get(CONST.EVENT)
created_at = activity.get(CONST.CREATED_AT)
old_event = events.get(event)
if old_event and created_at < old_event.get(CONST.CREATED_AT):
continue
else:
events[event] = activity
self._skybell.update_dev_cache(
self,
{
CONST.EVENT: events
}) |
def is_sortable_index(self, index_name, catalog):
"""Returns whether the index is sortable
"""
index = self.get_index(index_name, catalog)
if not index:
return False
return index.meta_type in ["FieldIndex", "DateIndex"] | Returns whether the index is sortable | Below is the instruction that describes the task:
### Input:
Returns whether the index is sortable
### Response:
def is_sortable_index(self, index_name, catalog):
"""Returns whether the index is sortable
"""
index = self.get_index(index_name, catalog)
if not index:
return False
return index.meta_type in ["FieldIndex", "DateIndex"] |
def bounce(sequence):
''' Return a driver function that can advance a "bounced" sequence
of values.
.. code-block:: none
seq = [0, 1, 2, 3]
# bounce(seq) => [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, ...]
Args:
sequence (seq) : a sequence of values for the driver to bounce
'''
N = len(sequence)
def f(i):
div, mod = divmod(i, N)
if div % 2 == 0:
return sequence[mod]
else:
return sequence[N-mod-1]
return partial(force, sequence=_advance(f)) | Return a driver function that can advance a "bounced" sequence
of values.
.. code-block:: none
seq = [0, 1, 2, 3]
# bounce(seq) => [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, ...]
Args:
sequence (seq) : a sequence of values for the driver to bounce | Below is the instruction that describes the task:
### Input:
Return a driver function that can advance a "bounced" sequence
of values.
.. code-block:: none
seq = [0, 1, 2, 3]
# bounce(seq) => [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, ...]
Args:
sequence (seq) : a sequence of values for the driver to bounce
### Response:
def bounce(sequence):
''' Return a driver function that can advance a "bounced" sequence
of values.
.. code-block:: none
seq = [0, 1, 2, 3]
# bounce(seq) => [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, ...]
Args:
sequence (seq) : a sequence of values for the driver to bounce
'''
N = len(sequence)
def f(i):
div, mod = divmod(i, N)
if div % 2 == 0:
return sequence[mod]
else:
return sequence[N-mod-1]
return partial(force, sequence=_advance(f)) |
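The divmod bookkeeping can be checked on its own; a minimal standalone version of the index function, without the partial(force, ...) wrapper used by the library:

def bounce_index(sequence, i):
    # Even passes walk forward through the sequence, odd passes walk backward.
    N = len(sequence)
    div, mod = divmod(i, N)
    return sequence[mod] if div % 2 == 0 else sequence[N - mod - 1]

seq = [0, 1, 2, 3]
print([bounce_index(seq, i) for i in range(10)])  # [0, 1, 2, 3, 3, 2, 1, 0, 0, 1]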
def text_pb(tag, data, description=None):
"""Create a text tf.Summary protobuf.
Arguments:
tag: String tag for the summary.
data: A Python bytestring (of type bytes), a Unicode string, or a numpy data
array of those types.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Raises:
TypeError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object.
"""
try:
tensor = tensor_util.make_tensor_proto(data, dtype=np.object)
except TypeError as e:
raise TypeError('tensor must be of type string', e)
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description)
summary = summary_pb2.Summary()
summary.value.add(tag=tag,
metadata=summary_metadata,
tensor=tensor)
return summary | Create a text tf.Summary protobuf.
Arguments:
tag: String tag for the summary.
data: A Python bytestring (of type bytes), a Unicode string, or a numpy data
array of those types.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Raises:
TypeError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object. | Below is the instruction that describes the task:
### Input:
Create a text tf.Summary protobuf.
Arguments:
tag: String tag for the summary.
data: A Python bytestring (of type bytes), a Unicode string, or a numpy data
array of those types.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Raises:
TypeError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object.
### Response:
def text_pb(tag, data, description=None):
"""Create a text tf.Summary protobuf.
Arguments:
tag: String tag for the summary.
data: A Python bytestring (of type bytes), a Unicode string, or a numpy data
array of those types.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Raises:
TypeError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object.
"""
try:
tensor = tensor_util.make_tensor_proto(data, dtype=np.object)
except TypeError as e:
raise TypeError('tensor must be of type string', e)
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description)
summary = summary_pb2.Summary()
summary.value.add(tag=tag,
metadata=summary_metadata,
tensor=tensor)
return summary |
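Assuming the helper above is importable (it needs TensorFlow and the TensorBoard text plugin's metadata module), usage is a single call; a hedged sketch:

summary = text_pb("notes/overview", u"hello **markdown** world",
                  description="Free-form experiment notes")
print(summary.value[0].tag)  # notes/overview; the proto can then be written by a summary writer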
def init(self, force_deploy=False):
"""Reserve and deploys the vagrant boxes.
Args:
force_deploy (bool): True iff new machines should be started
"""
machines = self.provider_conf.machines
networks = self.provider_conf.networks
_networks = []
for network in networks:
ipnet = IPNetwork(network.cidr)
_networks.append({
"netpool": list(ipnet)[10:-10],
"cidr": network.cidr,
"roles": network.roles,
"gateway": ipnet.ip
})
vagrant_machines = []
vagrant_roles = {}
j = 0
for machine in machines:
for _ in range(machine.number):
vagrant_machine = {
"name": "enos-%s" % j,
"cpu": machine.flavour_desc["core"],
"mem": machine.flavour_desc["mem"],
"ips": [n["netpool"].pop() for n in _networks],
}
vagrant_machines.append(vagrant_machine)
# Assign the machines to the right roles
for role in machine.roles:
vagrant_roles.setdefault(role, []).append(vagrant_machine)
j = j + 1
logger.debug(vagrant_roles)
loader = FileSystemLoader(searchpath=TEMPLATE_DIR)
env = Environment(loader=loader, autoescape=True)
template = env.get_template('Vagrantfile.j2')
vagrantfile = template.render(machines=vagrant_machines,
provider_conf=self.provider_conf)
vagrantfile_path = os.path.join(os.getcwd(), "Vagrantfile")
with open(vagrantfile_path, 'w') as f:
f.write(vagrantfile)
# Build env for Vagrant with a copy of env variables (needed by
# subprocess opened by vagrant
v_env = dict(os.environ)
v_env['VAGRANT_DEFAULT_PROVIDER'] = self.provider_conf.backend
v = vagrant.Vagrant(root=os.getcwd(),
quiet_stdout=False,
quiet_stderr=False,
env=v_env)
if force_deploy:
v.destroy()
v.up()
v.provision()
roles = {}
for role, machines in vagrant_roles.items():
for machine in machines:
keyfile = v.keyfile(vm_name=machine['name'])
port = v.port(vm_name=machine['name'])
address = v.hostname(vm_name=machine['name'])
roles.setdefault(role, []).append(
Host(address,
alias=machine['name'],
user=self.provider_conf.user,
port=port,
keyfile=keyfile))
networks = [{
'cidr': str(n["cidr"]),
'start': str(n["netpool"][0]),
'end': str(n["netpool"][-1]),
'dns': '8.8.8.8',
'gateway': str(n["gateway"]),
'roles': n["roles"]
} for n in _networks]
logger.debug(roles)
logger.debug(networks)
return (roles, networks) | Reserves and deploys the vagrant boxes.
Args:
force_deploy (bool): True iff new machines should be started | Below is the instruction that describes the task:
### Input:
Reserves and deploys the vagrant boxes.
Args:
force_deploy (bool): True iff new machines should be started
### Response:
def init(self, force_deploy=False):
"""Reserve and deploys the vagrant boxes.
Args:
force_deploy (bool): True iff new machines should be started
"""
machines = self.provider_conf.machines
networks = self.provider_conf.networks
_networks = []
for network in networks:
ipnet = IPNetwork(network.cidr)
_networks.append({
"netpool": list(ipnet)[10:-10],
"cidr": network.cidr,
"roles": network.roles,
"gateway": ipnet.ip
})
vagrant_machines = []
vagrant_roles = {}
j = 0
for machine in machines:
for _ in range(machine.number):
vagrant_machine = {
"name": "enos-%s" % j,
"cpu": machine.flavour_desc["core"],
"mem": machine.flavour_desc["mem"],
"ips": [n["netpool"].pop() for n in _networks],
}
vagrant_machines.append(vagrant_machine)
# Assign the machines to the right roles
for role in machine.roles:
vagrant_roles.setdefault(role, []).append(vagrant_machine)
j = j + 1
logger.debug(vagrant_roles)
loader = FileSystemLoader(searchpath=TEMPLATE_DIR)
env = Environment(loader=loader, autoescape=True)
template = env.get_template('Vagrantfile.j2')
vagrantfile = template.render(machines=vagrant_machines,
provider_conf=self.provider_conf)
vagrantfile_path = os.path.join(os.getcwd(), "Vagrantfile")
with open(vagrantfile_path, 'w') as f:
f.write(vagrantfile)
# Build env for Vagrant with a copy of env variables (needed by
# subprocess opened by vagrant
v_env = dict(os.environ)
v_env['VAGRANT_DEFAULT_PROVIDER'] = self.provider_conf.backend
v = vagrant.Vagrant(root=os.getcwd(),
quiet_stdout=False,
quiet_stderr=False,
env=v_env)
if force_deploy:
v.destroy()
v.up()
v.provision()
roles = {}
for role, machines in vagrant_roles.items():
for machine in machines:
keyfile = v.keyfile(vm_name=machine['name'])
port = v.port(vm_name=machine['name'])
address = v.hostname(vm_name=machine['name'])
roles.setdefault(role, []).append(
Host(address,
alias=machine['name'],
user=self.provider_conf.user,
port=port,
keyfile=keyfile))
networks = [{
'cidr': str(n["cidr"]),
'start': str(n["netpool"][0]),
'end': str(n["netpool"][-1]),
'dns': '8.8.8.8',
'gateway': str(n["gateway"]),
'roles': n["roles"]
} for n in _networks]
logger.debug(roles)
logger.debug(networks)
return (roles, networks) |
def visit_project(self, item):
"""
Adds create project command to task runner if project doesn't already exist.
"""
if not item.remote_id:
command = CreateProjectCommand(self.settings, item)
self.task_runner_add(None, item, command)
else:
self.settings.project_id = item.remote_id | Adds create project command to task runner if project doesn't already exist. | Below is the instruction that describes the task:
### Input:
Adds create project command to task runner if project doesn't already exist.
### Response:
def visit_project(self, item):
"""
Adds create project command to task runner if project doesn't already exist.
"""
if not item.remote_id:
command = CreateProjectCommand(self.settings, item)
self.task_runner_add(None, item, command)
else:
self.settings.project_id = item.remote_id |
def list_attributes(self):
"""
Returns the Node attributes names.
Usage::
>>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(), attributeB=Attribute())
>>> node_a.list_attributes()
['attributeB', 'attributeA']
:return: Attributes names.
:rtype: list
"""
return [attribute for attribute, value in self.iteritems() if issubclass(value.__class__, Attribute)] | Returns the Node attributes names.
Usage::
>>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(), attributeB=Attribute())
>>> node_a.list_attributes()
['attributeB', 'attributeA']
:return: Attributes names.
:rtype: list | Below is the instruction that describes the task:
### Input:
Returns the Node attributes names.
Usage::
>>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(), attributeB=Attribute())
>>> node_a.list_attributes()
['attributeB', 'attributeA']
:return: Attributes names.
:rtype: list
### Response:
def list_attributes(self):
"""
Returns the Node attributes names.
Usage::
>>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(), attributeB=Attribute())
>>> node_a.list_attributes()
['attributeB', 'attributeA']
:return: Attributes names.
:rtype: list
"""
return [attribute for attribute, value in self.iteritems() if issubclass(value.__class__, Attribute)] |
def _set_data(self, action):
"""
Set category member data from API response
"""
data = self._load_response(action)
self._handle_continuations(data, 'category')
if action == 'category':
members = data.get('query').get('categorymembers')
if members:
self._add_members(members)
if action == 'random':
rand = data['query']['random'][0]
data = {'pageid': rand.get('id'),
'title': rand.get('title')}
self.data.update(data)
self.params.update(data) | Set category member data from API response | Below is the instruction that describes the task:
### Input:
Set category member data from API response
### Response:
def _set_data(self, action):
"""
Set category member data from API response
"""
data = self._load_response(action)
self._handle_continuations(data, 'category')
if action == 'category':
members = data.get('query').get('categorymembers')
if members:
self._add_members(members)
if action == 'random':
rand = data['query']['random'][0]
data = {'pageid': rand.get('id'),
'title': rand.get('title')}
self.data.update(data)
self.params.update(data) |
def num_listeners(self, event=None):
"""Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
"""
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values()) | Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`. | Below is the instruction that describes the task:
### Input:
Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
### Response:
def num_listeners(self, event=None):
"""Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
"""
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values()) |
def prod_sum_var(A, B):
"""dot product and sum over axis 1 (var) equivalent to np.sum(A * B, 1)
"""
return A.multiply(B).sum(1).A1 if issparse(A) else np.einsum('ij, ij -> i', A, B) | dot product and sum over axis 1 (var) equivalent to np.sum(A * B, 1) | Below is the instruction that describes the task:
### Input:
dot product and sum over axis 1 (var) equivalent to np.sum(A * B, 1)
### Response:
def prod_sum_var(A, B):
"""dot product and sum over axis 1 (var) equivalent to np.sum(A * B, 1)
"""
return A.multiply(B).sum(1).A1 if issparse(A) else np.einsum('ij, ij -> i', A, B) |
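A small check of the dense branch (the sparse branch additionally needs scipy.sparse; this sketch assumes only NumPy):

import numpy as np

A = np.arange(6.0).reshape(2, 3)
B = np.ones((2, 3))
row_dots = np.einsum('ij, ij -> i', A, B)
print(row_dots)                                  # [ 3. 12.]
print(np.allclose(row_dots, np.sum(A * B, 1)))   # True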
def add_text(self, text, label=None):
"""stub"""
if label is None:
label = self._label_metadata['default_string_values'][0]
else:
if not self.my_osid_object_form._is_valid_string(
label, self.get_label_metadata()) or '.' in label:
raise InvalidArgument('label')
if text is None:
raise NullArgument('text cannot be none')
if not (self.my_osid_object_form._is_valid_string(
text, self.get_text_metadata()) or isinstance(text, DisplayText)):
raise InvalidArgument('text')
if utilities.is_string(text):
self.my_osid_object_form._my_map['texts'][label] = {
'text': text,
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE)
}
else:
self.my_osid_object_form._my_map['texts'][label] = {
'text': text.text,
'languageTypeId': str(text.language_type),
'scriptTypeId': str(text.script_type),
'formatTypeId': str(text.format_type)
} | stub | Below is the instruction that describes the task:
### Input:
stub
### Response:
def add_text(self, text, label=None):
"""stub"""
if label is None:
label = self._label_metadata['default_string_values'][0]
else:
if not self.my_osid_object_form._is_valid_string(
label, self.get_label_metadata()) or '.' in label:
raise InvalidArgument('label')
if text is None:
raise NullArgument('text cannot be none')
if not (self.my_osid_object_form._is_valid_string(
text, self.get_text_metadata()) or isinstance(text, DisplayText)):
raise InvalidArgument('text')
if utilities.is_string(text):
self.my_osid_object_form._my_map['texts'][label] = {
'text': text,
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE)
}
else:
self.my_osid_object_form._my_map['texts'][label] = {
'text': text.text,
'languageTypeId': str(text.language_type),
'scriptTypeId': str(text.script_type),
'formatTypeId': str(text.format_type)
} |
def bass(self):
"""int: The speaker's bass EQ.
An integer between -10 and 10.
"""
response = self.renderingControl.GetBass([
('InstanceID', 0),
('Channel', 'Master'),
])
bass = response['CurrentBass']
return int(bass) | int: The speaker's bass EQ.
An integer between -10 and 10. | Below is the instruction that describes the task:
### Input:
int: The speaker's bass EQ.
An integer between -10 and 10.
### Response:
def bass(self):
"""int: The speaker's bass EQ.
An integer between -10 and 10.
"""
response = self.renderingControl.GetBass([
('InstanceID', 0),
('Channel', 'Master'),
])
bass = response['CurrentBass']
return int(bass) |
def _subprocess_method(self, command):
"""Use the subprocess module to execute ipmitool commands
and set status
"""
p = subprocess.Popen([self._ipmitool_path] + self.args + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.output, self.error = p.communicate()
self.status = p.returncode | Use the subprocess module to execute ipmitool commands
and set status | Below is the instruction that describes the task:
### Input:
Use the subprocess module to execute ipmitool commands
and set status
### Response:
def _subprocess_method(self, command):
"""Use the subprocess module to execute ipmitool commands
and set status
"""
p = subprocess.Popen([self._ipmitool_path] + self.args + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.output, self.error = p.communicate()
self.status = p.returncode |
def enrich(self, column1, column2):
""" This method calculates the difference in seconds between
the 2 columns (column2 - column1)
The final result may provide negative values depending on the values
from column1 and column2.
:param column1: first column. Values in column1 must be datetime type
:param column2: second column. Values in column2 must be datetime type
:type column1: string
:type column2: string
:return: original dataframe with a new column with the difference
between column2 - column1
:rtype: pandas.DataFrame
"""
if column1 not in self.data.columns or \
column2 not in self.data.columns:
return self.data
self.data["timedifference"] = (self.data[column2] - self.data[column1]) / np.timedelta64(1, 's')
return self.data | This method calculates the difference in seconds between
the 2 columns (column2 - column1)
The final result may provide negative values depending on the values
from column1 and column2.
:param column1: first column. Values in column1 must be datetime type
:param column2: second column. Values in column2 must be datetime type
:type column1: string
:type column2: string
:return: original dataframe with a new column with the difference
between column2 - column1
:rtype: pandas.DataFrame | Below is the instruction that describes the task:
### Input:
This method calculates the difference in seconds between
the 2 columns (column2 - column1)
The final result may provide negative values depending on the values
from column1 and column2.
:param column1: first column. Values in column1 must be datetime type
:param column2: second column. Values in column2 must be datetime type
:type column1: string
:type column2: string
:return: original dataframe with a new column with the difference
between column2 - column1
:rtype: pandas.DataFrame
### Response:
def enrich(self, column1, column2):
""" This method calculates the difference in seconds between
the 2 columns (column2 - column1)
The final result may provide negative values depending on the values
from column1 and column2.
:param column1: first column. Values in column1 must be datetime type
:param column2: second column. Values in column2 must be datetime type
:type column1: string
:type column2: string
:return: original dataframe with a new column with the difference
between column2 - column1
:rtype: pandas.DataFrame
"""
if column1 not in self.data.columns or \
column2 not in self.data.columns:
return self.data
self.data["timedifference"] = (self.data[column2] - self.data[column1]) / np.timedelta64(1, 's')
return self.data |
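The core of the method is a vectorised timedelta division; a minimal pandas sketch with made-up column names:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "opened": pd.to_datetime(["2019-01-01 10:00:00", "2019-01-01 12:00:00"]),
    "closed": pd.to_datetime(["2019-01-01 10:30:00", "2019-01-01 11:00:00"]),
})
df["timedifference"] = (df["closed"] - df["opened"]) / np.timedelta64(1, 's')
print(df["timedifference"].tolist())  # [1800.0, -3600.0] -- negative when closed < opened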
def debug(f, *args, **kwargs):
"""Automatically log progress on function entry and exit. Default logging
value: debug.
*Logging with values contained in the parameters of the decorated function*
Message (args[0]) may be a string to be formatted with parameters passed to
the decorated function. Each '{varname}' will be replaced by the value of
the parameter of the same name.
*Keyword parameters*
- log :: integer
- Specifies a custom level of logging to pass to the active logger.
- Default: DEBUG
*Exceptions:*
- IndexError and ValueError
- will be returned if *args contains a string that does not correspond to
a parameter name of the decorated function, or if there are more '{}'s
than there are *args.
"""
kwargs.update({'log': logging.DEBUG})
return _stump(f, *args, **kwargs) | Automatically log progress on function entry and exit. Default logging
value: debug.
*Logging with values contained in the parameters of the decorated function*
Message (args[0]) may be a string to be formatted with parameters passed to
the decorated function. Each '{varname}' will be replaced by the value of
the parameter of the same name.
*Keyword parameters*
- log :: integer
- Specifies a custom level of logging to pass to the active logger.
- Default: DEBUG
*Exceptions:*
- IndexError and ValueError
- will be returned if *args contains a string that does not correspond to
a parameter name of the decorated function, or if there are more '{}'s
than there are *args. | Below is the instruction that describes the task:
### Input:
Automatically log progress on function entry and exit. Default logging
value: debug.
*Logging with values contained in the parameters of the decorated function*
Message (args[0]) may be a string to be formatted with parameters passed to
the decorated function. Each '{varname}' will be replaced by the value of
the parameter of the same name.
*Keyword parameters*
- log :: integer
- Specifies a custom level of logging to pass to the active logger.
- Default: DEBUG
*Exceptions:*
- IndexError and ValueError
- will be returned if *args contains a string that does not correspond to
a parameter name of the decorated function, or if there are more '{}'s
than there are *args.
### Response:
def debug(f, *args, **kwargs):
"""Automatically log progress on function entry and exit. Default logging
value: debug.
*Logging with values contained in the parameters of the decorated function*
Message (args[0]) may be a string to be formatted with parameters passed to
the decorated function. Each '{varname}' will be replaced by the value of
the parameter of the same name.
*Keyword parameters*
- log :: integer
- Specifies a custom level of logging to pass to the active logger.
- Default: DEBUG
*Exceptions:*
- IndexError and ValueError
- will be returned if *args contains a string that does not correspond to
a parameter name of the decorated function, or if there are more '{}'s
than there are *args.
"""
kwargs.update({'log': logging.DEBUG})
return _stump(f, *args, **kwargs) |
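_stump itself is not shown in this entry; the sketch below is an illustrative stand-in for the described behaviour (log on entry and exit, with {varname} placeholders filled from the call's arguments), not the library's actual implementation:

import functools
import inspect
import logging

def log_calls(message, level=logging.DEBUG):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Resolve {varname} placeholders from the bound call arguments.
            bound = inspect.signature(func).bind(*args, **kwargs)
            bound.apply_defaults()
            logging.log(level, "entering %s: %s", func.__name__, message.format(**bound.arguments))
            try:
                return func(*args, **kwargs)
            finally:
                logging.log(level, "exiting %s", func.__name__)
        return wrapper
    return decorator

@log_calls("copying {src} -> {dst}")
def copy_file(src, dst):
    pass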
async def remove_albums(self, *albums):
"""Remove one or more albums from the current user’s ‘Your Music’ library.
Parameters
----------
albums : Sequence[Union[Album, str]]
A sequence of album objects or spotify IDs
"""
_albums = [(obj if isinstance(obj, str) else obj.id) for obj in albums]
await self.user.http.delete_saved_albums(','.join(_albums)) | Remove one or more albums from the current user’s ‘Your Music’ library.
Parameters
----------
albums : Sequence[Union[Album, str]]
A sequence of album objects or spotify IDs | Below is the instruction that describes the task:
### Input:
Remove one or more albums from the current user’s ‘Your Music’ library.
Parameters
----------
albums : Sequence[Union[Album, str]]
A sequence of album objects or spotify IDs
### Response:
async def remove_albums(self, *albums):
"""Remove one or more albums from the current user’s ‘Your Music’ library.
Parameters
----------
albums : Sequence[Union[Album, str]]
A sequence of album objects or spotify IDs
"""
_albums = [(obj if isinstance(obj, str) else obj.id) for obj in albums]
await self.user.http.delete_saved_albums(','.join(_albums)) |
def dimension_values(self, dimension, expanded=True, flat=True):
"""Return the values along the requested dimension.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
Whether to return the expanded values, behavior depends
on the type of data:
* Columnar: If false returns unique values
* Geometry: If false returns scalar values per geometry
* Gridded: If false returns 1D coordinates
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
"""
val = self._cached_constants.get(dimension, None)
if val:
return np.array([val])
else:
raise Exception("Dimension %s not found in %s." %
(dimension, self.__class__.__name__)) | Return the values along the requested dimension.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
Whether to return the expanded values, behavior depends
on the type of data:
* Columnar: If false returns unique values
* Geometry: If false returns scalar values per geometry
* Gridded: If false returns 1D coordinates
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension | Below is the instruction that describes the task:
### Input:
Return the values along the requested dimension.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
Whether to return the expanded values, behavior depends
on the type of data:
* Columnar: If false returns unique values
* Geometry: If false returns scalar values per geometry
* Gridded: If false returns 1D coordinates
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
### Response:
def dimension_values(self, dimension, expanded=True, flat=True):
"""Return the values along the requested dimension.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
Whether to return the expanded values, behavior depends
on the type of data:
* Columnar: If false returns unique values
* Geometry: If false returns scalar values per geometry
* Gridded: If false returns 1D coordinates
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
"""
val = self._cached_constants.get(dimension, None)
if val:
return np.array([val])
else:
raise Exception("Dimension %s not found in %s." %
(dimension, self.__class__.__name__)) |
def get_num_image_channels(module_or_spec, signature=None, input_name=None):
"""Returns expected num_channels dimensions of an image input.
This is for advanced users only who expect to handle modules with
image inputs that might not have the 3 usual RGB channels.
Args:
module_or_spec: a Module or ModuleSpec that accepts image inputs.
signature: a string with the key of the signature in question.
If None, the default signature is used.
input_name: a string with the input name for images. If None, the
conventional input name `images` for the default signature is used.
Returns:
An integer with the number of input channels to the module.
Raises:
ValueError: If the channel information is missing or malformed.
"""
if input_name is None:
input_name = "images"
input_info_dict = module_or_spec.get_input_info_dict(signature)
try:
shape = input_info_dict[input_name].get_shape()
except KeyError:
raise ValueError("Module is missing input '%s' in signature '%s'." %
(input_name, signature or "default"))
try:
_, _, _, num_channels = shape.as_list()
if num_channels is None:
raise ValueError
except ValueError:
raise ValueError(
"Shape of module input is %s, "
"expected [batch_size, height, width, num_channels] "
"with known num_channels" % shape)
return num_channels | Returns expected num_channels dimensions of an image input.
This is for advanced users only who expect to handle modules with
image inputs that might not have the 3 usual RGB channels.
Args:
module_or_spec: a Module or ModuleSpec that accepts image inputs.
signature: a string with the key of the signature in question.
If None, the default signature is used.
input_name: a string with the input name for images. If None, the
conventional input name `images` for the default signature is used.
Returns:
An integer with the number of input channels to the module.
Raises:
ValueError: If the channel information is missing or malformed. | Below is the instruction that describes the task:
### Input:
Returns expected num_channels dimensions of an image input.
This is for advanced users only who expect to handle modules with
image inputs that might not have the 3 usual RGB channels.
Args:
module_or_spec: a Module or ModuleSpec that accepts image inputs.
signature: a string with the key of the signature in question.
If None, the default signature is used.
input_name: a string with the input name for images. If None, the
conventional input name `images` for the default signature is used.
Returns:
An integer with the number of input channels to the module.
Raises:
ValueError: If the channel information is missing or malformed.
### Response:
def get_num_image_channels(module_or_spec, signature=None, input_name=None):
"""Returns expected num_channels dimensions of an image input.
This is for advanced users only who expect to handle modules with
image inputs that might not have the 3 usual RGB channels.
Args:
module_or_spec: a Module or ModuleSpec that accepts image inputs.
signature: a string with the key of the signature in question.
If None, the default signature is used.
input_name: a string with the input name for images. If None, the
conventional input name `images` for the default signature is used.
Returns:
An integer with the number of input channels to the module.
Raises:
ValueError: If the channel information is missing or malformed.
"""
if input_name is None:
input_name = "images"
input_info_dict = module_or_spec.get_input_info_dict(signature)
try:
shape = input_info_dict[input_name].get_shape()
except KeyError:
raise ValueError("Module is missing input '%s' in signature '%s'." %
(input_name, signature or "default"))
try:
_, _, _, num_channels = shape.as_list()
if num_channels is None:
raise ValueError
except ValueError:
raise ValueError(
"Shape of module input is %s, "
"expected [batch_size, height, width, num_channels] "
"with known num_channels" % shape)
return num_channels |
def list_all_refund_operations(cls, **kwargs):
"""List RefundOperations
Return a list of RefundOperations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_refund_operations(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[RefundOperation]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_refund_operations_with_http_info(**kwargs)
else:
(data) = cls._list_all_refund_operations_with_http_info(**kwargs)
return data | List RefundOperations
Return a list of RefundOperations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_refund_operations(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[RefundOperation]
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
List RefundOperations
Return a list of RefundOperations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_refund_operations(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[RefundOperation]
If the method is called asynchronously,
returns the request thread.
### Response:
def list_all_refund_operations(cls, **kwargs):
"""List RefundOperations
Return a list of RefundOperations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_refund_operations(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[RefundOperation]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_refund_operations_with_http_info(**kwargs)
else:
(data) = cls._list_all_refund_operations_with_http_info(**kwargs)
return data |
def filter(self, run_counts, criteria):
"""
Return run counts only for examples that are still correctly classified
"""
correctness = criteria['correctness']
assert correctness.dtype == np.bool
filtered_counts = deep_copy(run_counts)
for key in filtered_counts:
filtered_counts[key] = filtered_counts[key][correctness]
return filtered_counts | Return run counts only for examples that are still correctly classified | Below is the instruction that describes the task:
### Input:
Return run counts only for examples that are still correctly classified
### Response:
def filter(self, run_counts, criteria):
"""
Return run counts only for examples that are still correctly classified
"""
correctness = criteria['correctness']
assert correctness.dtype == np.bool
filtered_counts = deep_copy(run_counts)
for key in filtered_counts:
filtered_counts[key] = filtered_counts[key][correctness]
return filtered_counts |
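The filtering itself is ordinary NumPy boolean indexing; a small sketch:

import numpy as np

run_counts = {"attack_a": np.array([3, 5, 2, 7]), "attack_b": np.array([1, 0, 4, 9])}
correctness = np.array([True, False, True, True])
filtered = {key: counts[correctness] for key, counts in run_counts.items()}
print(filtered["attack_a"])  # [3 2 7] -- rows for misclassified examples are dropped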
def rebalance(self):
"""The genetic rebalancing algorithm runs for a fixed number of
generations. Each generation has two phases: exploration and pruning.
In exploration, a large set of possible states are found by randomly
applying assignment changes to the existing states. In pruning, each
state is given a score based on the balance of the cluster and the
states with the highest scores are chosen as the starting states for
the next generation.
"""
if self.args.num_gens < self.args.max_partition_movements:
self.log.warning(
"num-gens ({num_gens}) is less than max-partition-movements"
" ({max_partition_movements}). max-partition-movements will"
" never be reached.".format(
num_gens=self.args.num_gens,
max_partition_movements=self.args.max_partition_movements,
)
)
if self.args.replication_groups:
self.log.info("Rebalancing replicas across replication groups...")
rg_movement_count, rg_movement_size = self.rebalance_replicas(
max_movement_count=self.args.max_partition_movements,
max_movement_size=self.args.max_movement_size,
)
self.log.info(
"Done rebalancing replicas. %d partitions moved.",
rg_movement_count,
)
else:
rg_movement_size = 0
rg_movement_count = 0
# Use a fixed random seed to make results reproducible.
random.seed(RANDOM_SEED)
# NOTE: only active brokers are considered when rebalancing
state = _State(
self.cluster_topology,
brokers=self.cluster_topology.active_brokers
)
state.movement_size = rg_movement_size
pop = {state}
do_rebalance = self.args.brokers or self.args.leaders
# Cannot rebalance when all partitions have zero weight because the
# score function is undefined.
if do_rebalance and not state.total_weight:
self.log.error(
"Rebalance impossible. All partitions have zero weight.",
)
do_rebalance = False
if do_rebalance:
self.log.info("Rebalancing with genetic algorithm.")
# Run the genetic algorithm for a fixed number of generations.
for i in range(self.args.num_gens):
start = time.time()
pop_candidates = self._explore(pop)
pop = self._prune(pop_candidates)
end = time.time()
self.log.debug(
"Generation %d: keeping %d of %d assignment(s) in %f seconds",
i,
len(pop),
len(pop_candidates),
end - start,
)
# Choose the state with the greatest score.
state = sorted(pop, key=self._score, reverse=True)[0]
self.log.info(
"Done rebalancing. %d partitions moved.",
state.movement_count,
)
self.log.info("Total movement size: %f", state.movement_size)
assignment = state.assignment
# Since only active brokers are considered when rebalancing, inactive
# brokers need to be added back to the new assignment.
all_brokers = set(self.cluster_topology.brokers.values())
inactive_brokers = all_brokers - set(state.brokers)
for partition_name, replicas in assignment:
for broker in inactive_brokers:
if broker in self.cluster_topology.partitions[partition_name].replicas:
replicas.append(broker.id)
self.cluster_topology.update_cluster_topology(assignment) | The genetic rebalancing algorithm runs for a fixed number of
generations. Each generation has two phases: exploration and pruning.
In exploration, a large set of possible states are found by randomly
applying assignment changes to the existing states. In pruning, each
state is given a score based on the balance of the cluster and the
states with the highest scores are chosen as the starting states for
the next generation. | Below is the instruction that describes the task:
### Input:
The genetic rebalancing algorithm runs for a fixed number of
generations. Each generation has two phases: exploration and pruning.
In exploration, a large set of possible states are found by randomly
applying assignment changes to the existing states. In pruning, each
state is given a score based on the balance of the cluster and the
states with the highest scores are chosen as the starting states for
the next generation.
### Response:
def rebalance(self):
"""The genetic rebalancing algorithm runs for a fixed number of
generations. Each generation has two phases: exploration and pruning.
In exploration, a large set of possible states are found by randomly
applying assignment changes to the existing states. In pruning, each
state is given a score based on the balance of the cluster and the
states with the highest scores are chosen as the starting states for
the next generation.
"""
if self.args.num_gens < self.args.max_partition_movements:
self.log.warning(
"num-gens ({num_gens}) is less than max-partition-movements"
" ({max_partition_movements}). max-partition-movements will"
" never be reached.".format(
num_gens=self.args.num_gens,
max_partition_movements=self.args.max_partition_movements,
)
)
if self.args.replication_groups:
self.log.info("Rebalancing replicas across replication groups...")
rg_movement_count, rg_movement_size = self.rebalance_replicas(
max_movement_count=self.args.max_partition_movements,
max_movement_size=self.args.max_movement_size,
)
self.log.info(
"Done rebalancing replicas. %d partitions moved.",
rg_movement_count,
)
else:
rg_movement_size = 0
rg_movement_count = 0
# Use a fixed random seed to make results reproducible.
random.seed(RANDOM_SEED)
# NOTE: only active brokers are considered when rebalancing
state = _State(
self.cluster_topology,
brokers=self.cluster_topology.active_brokers
)
state.movement_size = rg_movement_size
pop = {state}
do_rebalance = self.args.brokers or self.args.leaders
# Cannot rebalance when all partitions have zero weight because the
# score function is undefined.
if do_rebalance and not state.total_weight:
self.log.error(
"Rebalance impossible. All partitions have zero weight.",
)
do_rebalance = False
if do_rebalance:
self.log.info("Rebalancing with genetic algorithm.")
# Run the genetic algorithm for a fixed number of generations.
for i in range(self.args.num_gens):
start = time.time()
pop_candidates = self._explore(pop)
pop = self._prune(pop_candidates)
end = time.time()
self.log.debug(
"Generation %d: keeping %d of %d assignment(s) in %f seconds",
i,
len(pop),
len(pop_candidates),
end - start,
)
# Choose the state with the greatest score.
state = sorted(pop, key=self._score, reverse=True)[0]
self.log.info(
"Done rebalancing. %d partitions moved.",
state.movement_count,
)
self.log.info("Total movement size: %f", state.movement_size)
assignment = state.assignment
# Since only active brokers are considered when rebalancing, inactive
# brokers need to be added back to the new assignment.
all_brokers = set(self.cluster_topology.brokers.values())
inactive_brokers = all_brokers - set(state.brokers)
for partition_name, replicas in assignment:
for broker in inactive_brokers:
if broker in self.cluster_topology.partitions[partition_name].replicas:
replicas.append(broker.id)
self.cluster_topology.update_cluster_topology(assignment) |
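The generation loop described above follows a generic explore-then-prune shape; a hedged, library-agnostic sketch (the real implementation mutates partition assignments and scores cluster balance):

import random

def genetic_search(initial_state, explore, score, num_gens=10, pool_size=20, children=5, seed=0):
    # explore(state) returns a randomly mutated copy; score(state) returns a number to maximise.
    random.seed(seed)
    population = [initial_state]
    for _ in range(num_gens):
        candidates = population + [explore(s) for s in population for _ in range(children)]
        population = sorted(candidates, key=score, reverse=True)[:pool_size]
    return max(population, key=score)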
def stop(self):
"""Permanently stop sending heartbeats."""
if not self.stopped:
self.stopped = True
if self.pendingHeartbeat is not None:
self.pendingHeartbeat.cancel()
self.pendingHeartbeat = None | Permanently stop sending heartbeats. | Below is the instruction that describes the task:
### Input:
Permanently stop sending heartbeats.
### Response:
def stop(self):
"""Permanently stop sending heartbeats."""
if not self.stopped:
self.stopped = True
if self.pendingHeartbeat is not None:
self.pendingHeartbeat.cancel()
self.pendingHeartbeat = None |
def verify_dataset(X, y):
"""Verifies if a dataset is valid for use i.e. scikit-learn format
Used to verify a dataset by returning shape and basic statistics of
returned data. This will also provide a quick and dirty check on
the capability of the host machine to process the data.
Args:
X (array-like): Features array
y (array-like): Label array
Returns:
X_shape (2-tuple of int): Shape of X returned
y_shape (1-tuple of int): Shape of y returned
Raises:
AssertionError: `X_shape` must be of length 2 and `y_shape` must be of
length 1. `X` must have the same number of elements as `y`
i.e. X_shape[0] == y_shape[0]. If any of these conditions are not met,
an AssertionError is raised.
"""
X_shape, y_shape = np.array(X).shape, np.array(y).shape
if len(X_shape) != 2:
raise exceptions.UserError("X must be 2-dimensional array")
if len(y_shape) != 1:
raise exceptions.UserError("y must be 1-dimensional array")
if X_shape[0] != y_shape[0]:
raise exceptions.UserError("X must have same number of elements as y")
return dict(
features_shape=X_shape,
labels_shape=y_shape
) | Verifies if a dataset is valid for use i.e. scikit-learn format
Used to verify a dataset by returning shape and basic statistics of
returned data. This will also provide a quick and dirty check on
the capability of the host machine to process the data.
Args:
X (array-like): Features array
y (array-like): Label array
Returns:
X_shape (2-tuple of int): Shape of X returned
y_shape (1-tuple of int): Shape of y returned
Raises:
AssertionError: `X_shape` must be of length 2 and `y_shape` must be of
length 1. `X` must have the same number of elements as `y`
i.e. X_shape[0] == y_shape[0]. If any of these conditions are not met,
an AssertionError is raised. | Below is the instruction that describes the task:
### Input:
Verifies if a dataset is valid for use i.e. scikit-learn format
Used to verify a dataset by returning shape and basic statistics of
returned data. This will also provide a quick and dirty check on
the capability of the host machine to process the data.
Args:
X (array-like): Features array
y (array-like): Label array
Returns:
X_shape (2-tuple of int): Shape of X returned
y_shape (1-tuple of int): Shape of y returned
Raises:
AssertionError: `X_shape` must be of length 2 and `y_shape` must be of
length 1. `X` must have the same number of elements as `y`
i.e. X_shape[0] == y_shape[0]. If any of these conditions are not met,
an AssertionError is raised.
### Response:
def verify_dataset(X, y):
"""Verifies if a dataset is valid for use i.e. scikit-learn format
Used to verify a dataset by returning shape and basic statistics of
returned data. This will also provide a quick and dirty check on
the capability of the host machine to process the data.
Args:
X (array-like): Features array
y (array-like): Label array
Returns:
X_shape (2-tuple of int): Shape of X returned
y_shape (1-tuple of int): Shape of y returned
Raises:
AssertionError: `X_shape` must be of length 2 and `y_shape` must be of
length 1. `X` must have the same number of elements as `y`
i.e. X_shape[0] == y_shape[0]. If any of these conditions are not met,
an AssertionError is raised.
"""
X_shape, y_shape = np.array(X).shape, np.array(y).shape
if len(X_shape) != 2:
raise exceptions.UserError("X must be 2-dimensional array")
if len(y_shape) != 1:
raise exceptions.UserError("y must be 1-dimensional array")
if X_shape[0] != y_shape[0]:
raise exceptions.UserError("X must have same number of elements as y")
return dict(
features_shape=X_shape,
labels_shape=y_shape
) |
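Quick usage check with NumPy arrays (assumes the function above is importable; only shapes are validated and returned):

import numpy as np

X = np.random.rand(150, 4)
y = np.random.randint(0, 3, size=150)
print(verify_dataset(X, y))  # {'features_shape': (150, 4), 'labels_shape': (150,)}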
def _q_to_dcm(self, q):
"""
Create DCM (Matrix3) from q
:param q: array q which represents a quaternion [w, x, y, z]
:returns: Matrix3
"""
assert(len(q) == 4)
arr = super(Quaternion, self)._q_to_dcm(q)
return self._dcm_array_to_matrix3(arr) | Create DCM (Matrix3) from q
:param q: array q which represents a quaternion [w, x, y, z]
:returns: Matrix3 | Below is the instruction that describes the task:
### Input:
Create DCM (Matrix3) from q
:param q: array q which represents a quaternion [w, x, y, z]
:returns: Matrix3
### Response:
def _q_to_dcm(self, q):
"""
Create DCM (Matrix3) from q
:param q: array q which represents a quaternion [w, x, y, z]
:returns: Matrix3
"""
assert(len(q) == 4)
arr = super(Quaternion, self)._q_to_dcm(q)
return self._dcm_array_to_matrix3(arr) |
def get_flow_by_id(self, flow_id):
"""
Gets an edge (flow) with requested ID.
Returns a tuple, where first value is node ID, second - a dictionary of all node attributes.
:param flow_id: string with edge ID.
"""
tmp_flows = self.diagram_graph.edges(data=True)
for flow in tmp_flows:
if flow[2][consts.Consts.id] == flow_id:
return flow | Gets an edge (flow) with requested ID.
Returns a tuple, where first value is node ID, second - a dictionary of all node attributes.
:param flow_id: string with edge ID. | Below is the the instruction that describes the task:
### Input:
Gets an edge (flow) with requested ID.
Returns a tuple, where first value is node ID, second - a dictionary of all node attributes.
:param flow_id: string with edge ID.
### Response:
def get_flow_by_id(self, flow_id):
"""
Gets an edge (flow) with requested ID.
Returns a tuple, where first value is node ID, second - a dictionary of all node attributes.
:param flow_id: string with edge ID.
"""
tmp_flows = self.diagram_graph.edges(data=True)
for flow in tmp_flows:
if flow[2][consts.Consts.id] == flow_id:
return flow |
def get_event_from_name(self, event_name):
"""
Return an event from a name
Args:
event_name (str): name of the event
Returns:
Event
"""
return next((e for e in self.events if e.name == event_name), None) | Return an event from a name
Args:
event_name (str): name of the event
Returns:
Event | Below is the the instruction that describes the task:
### Input:
Return an event from a name
Args:
event_name (str): name of the event
Returns:
Event
### Response:
def get_event_from_name(self, event_name):
"""
Return an event from a name
Args:
event_name (str): name of the event
Returns:
Event
"""
return next((e for e in self.events if e.name == event_name), None) |
def write_text(self, text, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the `append=True` keyword argument.
There are two differences between :meth:`write_text` and
:meth:`write_bytes`: newline handling and Unicode handling.
See below.
Parameters:
`text` - str/unicode - The text to be written.
`encoding` - str - The Unicode encoding that will be used.
This is ignored if `text` isn't a Unicode string.
`errors` - str - How to handle Unicode encoding errors.
Default is ``'strict'``. See ``help(unicode.encode)`` for the
options. This is ignored if `text` isn't a Unicode
string.
`linesep` - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
:data:`os.linesep`. You can also specify ``None`` to
leave all newlines as they are in `text`.
`append` - keyword argument - bool - Specifies what to do if
the file already exists (``True``: append to the end of it;
``False``: overwrite it.) The default is ``False``.
--- Newline handling.
``write_text()`` converts all standard end-of-line sequences
(``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default
end-of-line sequence (see :data:`os.linesep`; on Windows, for example,
the end-of-line marker is ``'\r\n'``).
If you don't like your platform's default, you can override it
using the `linesep=` keyword argument. If you specifically want
``write_text()`` to preserve the newlines as-is, use ``linesep=None``.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``.
(This is slightly different from when you open a file for
writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')``
in Python.)
--- Unicode
If `text` isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The `encoding` and
`errors` arguments are not used and must be omitted.
If `text` is Unicode, it is first converted to :func:`bytes` using the
specified `encoding` (or the default encoding if `encoding`
isn't specified). The `errors` argument applies only to this
conversion.
"""
if isinstance(text, text_type):
if linesep is not None:
text = U_NEWLINE.sub(linesep, text)
text = text.encode(encoding or sys.getdefaultencoding(), errors)
else:
assert encoding is None
text = NEWLINE.sub(linesep, text)
self.write_bytes(text, append=append) | r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the `append=True` keyword argument.
There are two differences between :meth:`write_text` and
:meth:`write_bytes`: newline handling and Unicode handling.
See below.
Parameters:
`text` - str/unicode - The text to be written.
`encoding` - str - The Unicode encoding that will be used.
This is ignored if `text` isn't a Unicode string.
`errors` - str - How to handle Unicode encoding errors.
Default is ``'strict'``. See ``help(unicode.encode)`` for the
options. This is ignored if `text` isn't a Unicode
string.
`linesep` - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
:data:`os.linesep`. You can also specify ``None`` to
leave all newlines as they are in `text`.
`append` - keyword argument - bool - Specifies what to do if
the file already exists (``True``: append to the end of it;
``False``: overwrite it.) The default is ``False``.
--- Newline handling.
``write_text()`` converts all standard end-of-line sequences
(``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default
end-of-line sequence (see :data:`os.linesep`; on Windows, for example,
the end-of-line marker is ``'\r\n'``).
If you don't like your platform's default, you can override it
using the `linesep=` keyword argument. If you specifically want
``write_text()`` to preserve the newlines as-is, use ``linesep=None``.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``.
(This is slightly different from when you open a file for
writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')``
in Python.)
--- Unicode
If `text` isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The `encoding` and
`errors` arguments are not used and must be omitted.
If `text` is Unicode, it is first converted to :func:`bytes` using the
specified `encoding` (or the default encoding if `encoding`
isn't specified). The `errors` argument applies only to this
conversion. | Below is the the instruction that describes the task:
### Input:
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the `append=True` keyword argument.
There are two differences between :meth:`write_text` and
:meth:`write_bytes`: newline handling and Unicode handling.
See below.
Parameters:
`text` - str/unicode - The text to be written.
`encoding` - str - The Unicode encoding that will be used.
This is ignored if `text` isn't a Unicode string.
`errors` - str - How to handle Unicode encoding errors.
Default is ``'strict'``. See ``help(unicode.encode)`` for the
options. This is ignored if `text` isn't a Unicode
string.
`linesep` - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
:data:`os.linesep`. You can also specify ``None`` to
leave all newlines as they are in `text`.
`append` - keyword argument - bool - Specifies what to do if
the file already exists (``True``: append to the end of it;
``False``: overwrite it.) The default is ``False``.
--- Newline handling.
``write_text()`` converts all standard end-of-line sequences
(``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default
end-of-line sequence (see :data:`os.linesep`; on Windows, for example,
the end-of-line marker is ``'\r\n'``).
If you don't like your platform's default, you can override it
using the `linesep=` keyword argument. If you specifically want
``write_text()`` to preserve the newlines as-is, use ``linesep=None``.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``.
(This is slightly different from when you open a file for
writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')``
in Python.)
--- Unicode
If `text` isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The `encoding` and
`errors` arguments are not used and must be omitted.
If `text` is Unicode, it is first converted to :func:`bytes` using the
specified `encoding` (or the default encoding if `encoding`
isn't specified). The `errors` argument applies only to this
conversion.
### Response:
def write_text(self, text, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the `append=True` keyword argument.
There are two differences between :meth:`write_text` and
:meth:`write_bytes`: newline handling and Unicode handling.
See below.
Parameters:
`text` - str/unicode - The text to be written.
`encoding` - str - The Unicode encoding that will be used.
This is ignored if `text` isn't a Unicode string.
`errors` - str - How to handle Unicode encoding errors.
Default is ``'strict'``. See ``help(unicode.encode)`` for the
options. This is ignored if `text` isn't a Unicode
string.
`linesep` - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
:data:`os.linesep`. You can also specify ``None`` to
leave all newlines as they are in `text`.
`append` - keyword argument - bool - Specifies what to do if
the file already exists (``True``: append to the end of it;
``False``: overwrite it.) The default is ``False``.
--- Newline handling.
``write_text()`` converts all standard end-of-line sequences
(``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default
end-of-line sequence (see :data:`os.linesep`; on Windows, for example,
the end-of-line marker is ``'\r\n'``).
If you don't like your platform's default, you can override it
using the `linesep=` keyword argument. If you specifically want
``write_text()`` to preserve the newlines as-is, use ``linesep=None``.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``.
(This is slightly different from when you open a file for
writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')``
in Python.)
--- Unicode
If `text` isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The `encoding` and
`errors` arguments are not used and must be omitted.
If `text` is Unicode, it is first converted to :func:`bytes` using the
specified `encoding` (or the default encoding if `encoding`
isn't specified). The `errors` argument applies only to this
conversion.
"""
if isinstance(text, text_type):
if linesep is not None:
text = U_NEWLINE.sub(linesep, text)
text = text.encode(encoding or sys.getdefaultencoding(), errors)
else:
assert encoding is None
text = NEWLINE.sub(linesep, text)
self.write_bytes(text, append=append) |
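A short, hedged example of the newline handling documented above (p stands for a path object from this library; the specific call is illustrative, not taken from the source):

# Normalize every end-of-line sequence (including u'\u2028') to '\n',
# encode the text as UTF-8, and append to the existing file.
p.write_text(u'line one\r\nline two\u2028line three', encoding='utf-8',
             linesep='\n', append=True)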
def normalize_address(self, hostname):
"""Ensure that address returned is an IP address (i.e. not fqdn)"""
if config_get('prefer-ipv6'):
# TODO: add support for ipv6 dns
return hostname
if hostname != unit_get('private-address'):
return get_host_ip(hostname, fallback=hostname)
# Otherwise assume localhost
return '127.0.0.1' | Ensure that address returned is an IP address (i.e. not fqdn) | Below is the the instruction that describes the task:
### Input:
Ensure that address returned is an IP address (i.e. not fqdn)
### Response:
def normalize_address(self, hostname):
"""Ensure that address returned is an IP address (i.e. not fqdn)"""
if config_get('prefer-ipv6'):
# TODO: add support for ipv6 dns
return hostname
if hostname != unit_get('private-address'):
return get_host_ip(hostname, fallback=hostname)
# Otherwise assume localhost
return '127.0.0.1' |
def allocate_sync_ensembles(self, tolerance = 0.1):
"""!
@brief Allocate clusters in line with ensembles of synchronous oscillators where each synchronous ensemble corresponds to only one cluster.
@param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators.
@return (list) Groups of indexes of synchronous oscillators, for example, [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ].
"""
if (self.__ccore_legion_dynamic_pointer is not None):
self.__output = wrapper.legion_dynamic_get_output(self.__ccore_legion_dynamic_pointer);
return allocate_sync_ensembles(self.__output, tolerance); | !
@brief Allocate clusters in line with ensembles of synchronous oscillators where each synchronous ensemble corresponds to only one cluster.
@param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators.
@return (list) Groups of indexes of synchronous oscillators, for example, [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ]. | Below is the the instruction that describes the task:
### Input:
!
@brief Allocate clusters in line with ensembles of synchronous oscillators where each synchronous ensemble corresponds to only one cluster.
@param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators.
@return (list) Groups of indexes of synchronous oscillators, for example, [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ].
### Response:
def allocate_sync_ensembles(self, tolerance = 0.1):
"""!
@brief Allocate clusters in line with ensembles of synchronous oscillators where each synchronous ensemble corresponds to only one cluster.
@param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators.
@return (list) Groups of indexes of synchronous oscillators, for example, [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ].
"""
if (self.__ccore_legion_dynamic_pointer is not None):
self.__output = wrapper.legion_dynamic_get_output(self.__ccore_legion_dynamic_pointer);
return allocate_sync_ensembles(self.__output, tolerance); |
async def stop(self):
"""Stop playback?"""
state = await self.state()
res = await self.call("X_Stop", MasterSessionID=state.MasterSessionID)
return res | Stop playback? | Below is the the instruction that describes the task:
### Input:
Stop playback?
### Response:
async def stop(self):
"""Stop playback?"""
state = await self.state()
res = await self.call("X_Stop", MasterSessionID=state.MasterSessionID)
return res |
def read(self, visibility_timeout=None, callback=None):
"""
Read a single message from the queue.
:type visibility_timeout: int
:param visibility_timeout: The timeout for this message in seconds
:rtype: :class:`boto.sqs.message.Message`
:return: A single message or None if queue is empty
"""
def _read(rs):
if callable(callback):
callback(rs[0] if len(rs) == 1 else None)
self.get_messages(1, visibility_timeout, callback=callback) | Read a single message from the queue.
:type visibility_timeout: int
:param visibility_timeout: The timeout for this message in seconds
:rtype: :class:`boto.sqs.message.Message`
:return: A single message or None if queue is empty | Below is the the instruction that describes the task:
### Input:
Read a single message from the queue.
:type visibility_timeout: int
:param visibility_timeout: The timeout for this message in seconds
:rtype: :class:`boto.sqs.message.Message`
:return: A single message or None if queue is empty
### Response:
def read(self, visibility_timeout=None, callback=None):
"""
Read a single message from the queue.
:type visibility_timeout: int
:param visibility_timeout: The timeout for this message in seconds
:rtype: :class:`boto.sqs.message.Message`
:return: A single message or None if queue is empty
"""
def _read(rs):
if callable(callback):
callback(rs[0] if len(rs) == 1 else None)
self.get_messages(1, visibility_timeout, callback=callback) |
def upload_file(self, **kwargs):
"""
Upload a file to the Gett service. Takes keyword arguments.
Input:
* ``filename`` the filename to use in the Gett service (required)
* ``data`` the file contents to store in the Gett service (required) - must be a string
* ``sharename`` the name of the share in which to store the data (optional); if not given, a new share will be created.
* ``title`` the share title to use if a new share is created (optional)
Output:
* A :py:mod:`pygett.files.GettFile` object
Example::
file = client.upload_file(filename="foo", data=open("foo.txt").read())
"""
params = None
if 'filename' not in kwargs:
raise AttributeError("Parameter 'filename' must be given")
else:
params = {
"filename": kwargs['filename']
}
if 'data' not in kwargs:
raise AttributeError("Parameter 'data' must be given")
sharename = None
if 'sharename' not in kwargs:
share = None
if 'title' in kwargs:
share = self.create_share(title=kwargs['title'])
else:
share = self.create_share()
sharename = share.sharename
else:
sharename = kwargs['sharename']
response = GettRequest().post("/files/%s/create?accesstoken=%s" % (sharename, self.user.access_token()), params)
f = None
if response.http_status == 200:
if 'sharename' not in response.response:
response.response['sharename'] = sharename
f = GettFile(self.user, **response.response)
if f.send_data(data=kwargs['data']):
return f | Upload a file to the Gett service. Takes keyword arguments.
Input:
* ``filename`` the filename to use in the Gett service (required)
* ``data`` the file contents to store in the Gett service (required) - must be a string
* ``sharename`` the name of the share in which to store the data (optional); if not given, a new share will be created.
* ``title`` the share title to use if a new share is created (optional)
Output:
* A :py:mod:`pygett.files.GettFile` object
Example::
file = client.upload_file(filename="foo", data=open("foo.txt").read()) | Below is the the instruction that describes the task:
### Input:
Upload a file to the Gett service. Takes keyword arguments.
Input:
* ``filename`` the filename to use in the Gett service (required)
* ``data`` the file contents to store in the Gett service (required) - must be a string
* ``sharename`` the name of the share in which to store the data (optional); if not given, a new share will be created.
* ``title`` the share title to use if a new share is created (optional)
Output:
* A :py:mod:`pygett.files.GettFile` object
Example::
file = client.upload_file(filename="foo", data=open("foo.txt").read())
### Response:
def upload_file(self, **kwargs):
"""
Upload a file to the Gett service. Takes keyword arguments.
Input:
* ``filename`` the filename to use in the Gett service (required)
* ``data`` the file contents to store in the Gett service (required) - must be a string
* ``sharename`` the name of the share in which to store the data (optional); if not given, a new share will be created.
* ``title`` the share title to use if a new share is created (optional)
Output:
* A :py:mod:`pygett.files.GettFile` object
Example::
file = client.upload_file(filename="foo", data=open("foo.txt").read())
"""
params = None
if 'filename' not in kwargs:
raise AttributeError("Parameter 'filename' must be given")
else:
params = {
"filename": kwargs['filename']
}
if 'data' not in kwargs:
raise AttributeError("Parameter 'data' must be given")
sharename = None
if 'sharename' not in kwargs:
share = None
if 'title' in kwargs:
share = self.create_share(title=kwargs['title'])
else:
share = self.create_share()
sharename = share.sharename
else:
sharename = kwargs['sharename']
response = GettRequest().post("/files/%s/create?accesstoken=%s" % (sharename, self.user.access_token()), params)
f = None
if response.http_status == 200:
if 'sharename' not in response.response:
response.response['sharename'] = sharename
f = GettFile(self.user, **response.response)
if f.send_data(data=kwargs['data']):
return f |
def compose (composite_property_s, component_properties_s):
""" Sets the components of the given composite property.
All parameters are <feature>value strings
"""
from . import property
component_properties_s = to_seq (component_properties_s)
composite_property = property.create_from_string(composite_property_s)
f = composite_property.feature
if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property):
component_properties = component_properties_s
else:
component_properties = [property.create_from_string(p) for p in component_properties_s]
if not f.composite:
raise BaseException ("'%s' is not a composite feature" % f)
if property in __composite_properties:
raise BaseException ('components of "%s" already set: %s' % (composite_property, str (__composite_properties[composite_property])))
if composite_property in component_properties:
raise BaseException ('composite property "%s" cannot have itself as a component' % composite_property)
__composite_properties[composite_property] = component_properties | Sets the components of the given composite property.
All parameters are <feature>value strings | Below is the the instruction that describes the task:
### Input:
Sets the components of the given composite property.
All parameters are <feature>value strings
### Response:
def compose (composite_property_s, component_properties_s):
""" Sets the components of the given composite property.
All parameters are <feature>value strings
"""
from . import property
component_properties_s = to_seq (component_properties_s)
composite_property = property.create_from_string(composite_property_s)
f = composite_property.feature
if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property):
component_properties = component_properties_s
else:
component_properties = [property.create_from_string(p) for p in component_properties_s]
if not f.composite:
raise BaseException ("'%s' is not a composite feature" % f)
if property in __composite_properties:
raise BaseException ('components of "%s" already set: %s' % (composite_property, str (__composite_properties[composite_property])))
if composite_property in component_properties:
raise BaseException ('composite property "%s" cannot have itself as a component' % composite_property)
__composite_properties[composite_property] = component_properties |
def webify_file(srcfilename: str, destfilename: str) -> None:
"""
Rewrites a file from ``srcfilename`` to ``destfilename``, HTML-escaping it
in the process.
"""
with open(srcfilename) as infile, open(destfilename, 'w') as ofile:
for line_ in infile:
ofile.write(escape(line_)) | Rewrites a file from ``srcfilename`` to ``destfilename``, HTML-escaping it
in the process. | Below is the the instruction that describes the task:
### Input:
Rewrites a file from ``srcfilename`` to ``destfilename``, HTML-escaping it
in the process.
### Response:
def webify_file(srcfilename: str, destfilename: str) -> None:
"""
Rewrites a file from ``srcfilename`` to ``destfilename``, HTML-escaping it
in the process.
"""
with open(srcfilename) as infile, open(destfilename, 'w') as ofile:
for line_ in infile:
ofile.write(escape(line_)) |
def _replace_with_new_dims( # type: ignore
self: T,
variables: 'OrderedDict[Any, Variable]' = None,
coord_names: set = None,
attrs: 'Optional[OrderedDict]' = __default,
indexes: 'Optional[OrderedDict[Any, pd.Index]]' = __default,
inplace: bool = False,
) -> T:
"""Replace variables with recalculated dimensions."""
dims = dict(calculate_dimensions(variables))
return self._replace(
variables, coord_names, dims, attrs, indexes, inplace=inplace) | Replace variables with recalculated dimensions. | Below is the the instruction that describes the task:
### Input:
Replace variables with recalculated dimensions.
### Response:
def _replace_with_new_dims( # type: ignore
self: T,
variables: 'OrderedDict[Any, Variable]' = None,
coord_names: set = None,
attrs: 'Optional[OrderedDict]' = __default,
indexes: 'Optional[OrderedDict[Any, pd.Index]]' = __default,
inplace: bool = False,
) -> T:
"""Replace variables with recalculated dimensions."""
dims = dict(calculate_dimensions(variables))
return self._replace(
variables, coord_names, dims, attrs, indexes, inplace=inplace) |
def get_url_from_entry(entry):
"""Get a usable URL from a pybtex entry.
Parameters
----------
entry : `pybtex.database.Entry`
A pybtex bibliography entry.
Returns
-------
url : `str`
Best available URL from the ``entry``.
Raises
------
NoEntryUrlError
Raised when no URL can be made from the bibliography entry.
Notes
-----
The order of priority is:
1. ``url`` field
2. ``ls.st`` URL from the handle for ``@docushare`` entries.
3. ``adsurl``
4. DOI
"""
if 'url' in entry.fields:
return entry.fields['url']
elif entry.type.lower() == 'docushare':
return 'https://ls.st/' + entry.fields['handle']
elif 'adsurl' in entry.fields:
return entry.fields['adsurl']
elif 'doi' in entry.fields:
return 'https://doi.org/' + entry.fields['doi']
else:
raise NoEntryUrlError() | Get a usable URL from a pybtex entry.
Parameters
----------
entry : `pybtex.database.Entry`
A pybtex bibliography entry.
Returns
-------
url : `str`
Best available URL from the ``entry``.
Raises
------
NoEntryUrlError
Raised when no URL can be made from the bibliography entry.
Notes
-----
The order of priority is:
1. ``url`` field
2. ``ls.st`` URL from the handle for ``@docushare`` entries.
3. ``adsurl``
4. DOI | Below is the the instruction that describes the task:
### Input:
Get a usable URL from a pybtex entry.
Parameters
----------
entry : `pybtex.database.Entry`
A pybtex bibliography entry.
Returns
-------
url : `str`
Best available URL from the ``entry``.
Raises
------
NoEntryUrlError
Raised when no URL can be made from the bibliography entry.
Notes
-----
The order of priority is:
1. ``url`` field
2. ``ls.st`` URL from the handle for ``@docushare`` entries.
3. ``adsurl``
4. DOI
### Response:
def get_url_from_entry(entry):
"""Get a usable URL from a pybtex entry.
Parameters
----------
entry : `pybtex.database.Entry`
A pybtex bibliography entry.
Returns
-------
url : `str`
Best available URL from the ``entry``.
Raises
------
NoEntryUrlError
Raised when no URL can be made from the bibliography entry.
Notes
-----
The order of priority is:
1. ``url`` field
2. ``ls.st`` URL from the handle for ``@docushare`` entries.
3. ``adsurl``
4. DOI
"""
if 'url' in entry.fields:
return entry.fields['url']
elif entry.type.lower() == 'docushare':
return 'https://ls.st/' + entry.fields['handle']
elif 'adsurl' in entry.fields:
return entry.fields['adsurl']
elif 'doi' in entry.fields:
return 'https://doi.org/' + entry.fields['doi']
else:
raise NoEntryUrlError() |
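A hedged illustration of the priority order (constructing the entry directly via pybtex.database.Entry is an assumption about that API; the field names follow the docstring):

from pybtex.database import Entry

entry = Entry('article', fields={'adsurl': 'https://ui.adsabs.harvard.edu/abs/2020example',
                                 'doi': '10.1000/182'})
print(get_url_from_entry(entry))   # the adsurl wins, since it outranks the DOI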
def dump(
self, stream, progress=None, lower=None, upper=None,
incremental=False, deltas=False
):
"""Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments.
"""
cmd = [SVNADMIN, 'dump', '.']
if progress is None:
cmd.append('-q')
if lower is not None:
cmd.append('-r')
if upper is None:
cmd.append(str(int(lower)))
else:
cmd.append('%d:%d' % (int(lower), int(upper)))
if incremental:
cmd.append('--incremental')
if deltas:
cmd.append('--deltas')
p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress)
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd) | Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments. | Below is the the instruction that describes the task:
### Input:
Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments.
### Response:
def dump(
self, stream, progress=None, lower=None, upper=None,
incremental=False, deltas=False
):
"""Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments.
"""
cmd = [SVNADMIN, 'dump', '.']
if progress is None:
cmd.append('-q')
if lower is not None:
cmd.append('-r')
if upper is None:
cmd.append(str(int(lower)))
else:
cmd.append('%d:%d' % (int(lower), int(upper)))
if incremental:
cmd.append('--incremental')
if deltas:
cmd.append('--deltas')
p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress)
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd) |
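A brief usage sketch (repo names a hypothetical instance of this repository wrapper; the open file handle receives the raw dumpfile bytes):

with open('backup.dump', 'wb') as out:
    repo.dump(out, lower=0, upper=100, incremental=True)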
def getMaturedSwarmGenerations(self):
"""Return a list of swarm generations that have completed and the
best (minimal) errScore seen for each of them.
Parameters:
---------------------------------------------------------------------
retval: list of tuples. Each tuple is of the form:
(swarmId, genIdx, bestErrScore)
"""
# Return results go in this list
result = []
# For each of the swarm generations which have had model result updates
# since the last time we were called, see which have completed.
modifiedSwarmGens = sorted(self._modifiedSwarmGens)
# Walk through them in order from lowest to highest generation index
for key in modifiedSwarmGens:
(swarmId, genIdx) = key
# Skip it if we've already reported on it. This should happen rarely, if
# ever. It means that some worker has started and completed a model in
# this generation after we've determined that the generation has ended.
if key in self._maturedSwarmGens:
self._modifiedSwarmGens.remove(key)
continue
# If the previous generation for this swarm is not complete yet, don't
# bother evaluating this one.
if (genIdx >= 1) and not (swarmId, genIdx-1) in self._maturedSwarmGens:
continue
# We found a swarm generation that had some results reported since last
# time, see if it's complete or not
(_, _, errScores, completedFlags, maturedFlags) = \
self.getParticleInfos(swarmId, genIdx)
maturedFlags = numpy.array(maturedFlags)
numMatured = maturedFlags.sum()
if numMatured >= self._hsObj._minParticlesPerSwarm \
and numMatured == len(maturedFlags):
errScores = numpy.array(errScores)
bestScore = errScores.min()
self._maturedSwarmGens.add(key)
self._modifiedSwarmGens.remove(key)
result.append((swarmId, genIdx, bestScore))
# Return results
return result | Return a list of swarm generations that have completed and the
best (minimal) errScore seen for each of them.
Parameters:
---------------------------------------------------------------------
retval: list of tuples. Each tuple is of the form:
(swarmId, genIdx, bestErrScore) | Below is the the instruction that describes the task:
### Input:
Return a list of swarm generations that have completed and the
best (minimal) errScore seen for each of them.
Parameters:
---------------------------------------------------------------------
retval: list of tuples. Each tuple is of the form:
(swarmId, genIdx, bestErrScore)
### Response:
def getMaturedSwarmGenerations(self):
"""Return a list of swarm generations that have completed and the
best (minimal) errScore seen for each of them.
Parameters:
---------------------------------------------------------------------
retval: list of tuples. Each tuple is of the form:
(swarmId, genIdx, bestErrScore)
"""
# Return results go in this list
result = []
# For each of the swarm generations which have had model result updates
# since the last time we were called, see which have completed.
modifiedSwarmGens = sorted(self._modifiedSwarmGens)
# Walk through them in order from lowest to highest generation index
for key in modifiedSwarmGens:
(swarmId, genIdx) = key
# Skip it if we've already reported on it. This should happen rarely, if
# ever. It means that some worker has started and completed a model in
# this generation after we've determined that the generation has ended.
if key in self._maturedSwarmGens:
self._modifiedSwarmGens.remove(key)
continue
# If the previous generation for this swarm is not complete yet, don't
# bother evaluating this one.
if (genIdx >= 1) and not (swarmId, genIdx-1) in self._maturedSwarmGens:
continue
# We found a swarm generation that had some results reported since last
# time, see if it's complete or not
(_, _, errScores, completedFlags, maturedFlags) = \
self.getParticleInfos(swarmId, genIdx)
maturedFlags = numpy.array(maturedFlags)
numMatured = maturedFlags.sum()
if numMatured >= self._hsObj._minParticlesPerSwarm \
and numMatured == len(maturedFlags):
errScores = numpy.array(errScores)
bestScore = errScores.min()
self._maturedSwarmGens.add(key)
self._modifiedSwarmGens.remove(key)
result.append((swarmId, genIdx, bestScore))
# Return results
return result |
def _generate_filename(cls, writer_spec, name, job_id, num,
attempt=None, seg_index=None):
"""Generates a filename for a particular output.
Args:
writer_spec: specification dictionary for the output writer.
name: name of the job.
job_id: the ID number assigned to the job.
num: shard number.
attempt: the shard attempt number.
seg_index: index of the seg. None means the final output.
Returns:
a string containing the filename.
Raises:
BadWriterParamsError: if the template contains any errors such as invalid
syntax or contains unknown substitution placeholders.
"""
naming_format = cls._TMP_FILE_NAMING_FORMAT
if seg_index is None:
naming_format = writer_spec.get(cls.NAMING_FORMAT_PARAM,
cls._DEFAULT_NAMING_FORMAT)
template = string.Template(naming_format)
try:
# Check that template doesn't use undefined mappings and is formatted well
if seg_index is None:
return template.substitute(name=name, id=job_id, num=num)
else:
return template.substitute(name=name, id=job_id, num=num,
attempt=attempt,
seg=seg_index)
except ValueError, error:
raise errors.BadWriterParamsError("Naming template is bad, %s" % (error))
except KeyError, error:
raise errors.BadWriterParamsError("Naming template '%s' has extra "
"mappings, %s" % (naming_format, error)) | Generates a filename for a particular output.
Args:
writer_spec: specification dictionary for the output writer.
name: name of the job.
job_id: the ID number assigned to the job.
num: shard number.
attempt: the shard attempt number.
seg_index: index of the seg. None means the final output.
Returns:
a string containing the filename.
Raises:
BadWriterParamsError: if the template contains any errors such as invalid
syntax or contains unknown substitution placeholders. | Below is the the instruction that describes the task:
### Input:
Generates a filename for a particular output.
Args:
writer_spec: specification dictionary for the output writer.
name: name of the job.
job_id: the ID number assigned to the job.
num: shard number.
attempt: the shard attempt number.
seg_index: index of the seg. None means the final output.
Returns:
a string containing the filename.
Raises:
BadWriterParamsError: if the template contains any errors such as invalid
syntax or contains unknown substitution placeholders.
### Response:
def _generate_filename(cls, writer_spec, name, job_id, num,
attempt=None, seg_index=None):
"""Generates a filename for a particular output.
Args:
writer_spec: specification dictionary for the output writer.
name: name of the job.
job_id: the ID number assigned to the job.
num: shard number.
attempt: the shard attempt number.
seg_index: index of the seg. None means the final output.
Returns:
a string containing the filename.
Raises:
BadWriterParamsError: if the template contains any errors such as invalid
syntax or contains unknown substitution placeholders.
"""
naming_format = cls._TMP_FILE_NAMING_FORMAT
if seg_index is None:
naming_format = writer_spec.get(cls.NAMING_FORMAT_PARAM,
cls._DEFAULT_NAMING_FORMAT)
template = string.Template(naming_format)
try:
# Check that template doesn't use undefined mappings and is formatted well
if seg_index is None:
return template.substitute(name=name, id=job_id, num=num)
else:
return template.substitute(name=name, id=job_id, num=num,
attempt=attempt,
seg=seg_index)
except ValueError, error:
raise errors.BadWriterParamsError("Naming template is bad, %s" % (error))
except KeyError, error:
raise errors.BadWriterParamsError("Naming template '%s' has extra "
"mappings, %s" % (naming_format, error)) |
def login_required(function=None, required=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that, if required, checks that the user is logged in and redirect
to the log-in page if necessary.
"""
if required:
if django.VERSION < (1, 11):
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
redirect_field_name=redirect_field_name
)
else:
actual_decorator = user_passes_test(
lambda u: u.is_authenticated,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
# login not required
def decorator(view_func):
def _wrapper(request, *args, **kwargs):
return function(request, *args, **kwargs)
return wraps(function)(_wrapper)
return method_decorator(decorator) | Decorator for views that, if required, checks that the user is logged in and redirect
to the log-in page if necessary. | Below is the the instruction that describes the task:
### Input:
Decorator for views that, if required, checks that the user is logged in and redirect
to the log-in page if necessary.
### Response:
def login_required(function=None, required=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that, if required, checks that the user is logged in and redirect
to the log-in page if necessary.
"""
if required:
if django.VERSION < (1, 11):
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
redirect_field_name=redirect_field_name
)
else:
actual_decorator = user_passes_test(
lambda u: u.is_authenticated,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
# login not required
def decorator(view_func):
def _wrapper(request, *args, **kwargs):
return function(request, *args, **kwargs)
return wraps(function)(_wrapper)
return method_decorator(decorator) |
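A hedged usage sketch of the factory form shown above (the view name and template are made up for illustration):

from django.shortcuts import render

@login_required(required=True)
def dashboard(request):
    return render(request, 'dashboard.html')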
def close(self) -> None:
"""Closes the HTTPClient, freeing any resources used."""
if not self._closed:
self._async_client.close()
self._io_loop.close()
self._closed = True | Closes the HTTPClient, freeing any resources used. | Below is the the instruction that describes the task:
### Input:
Closes the HTTPClient, freeing any resources used.
### Response:
def close(self) -> None:
"""Closes the HTTPClient, freeing any resources used."""
if not self._closed:
self._async_client.close()
self._io_loop.close()
self._closed = True |
def _set_item_querytime(self, item, type_check=True):
"""
Sets the time for which the query was made on the resulting item
:param item: an item of type Versionable
:param type_check: Check the item to be a Versionable
:return: Returns the item itself with the time set
"""
if isinstance(item, Versionable):
item._querytime = self.querytime
elif isinstance(item, VersionedQuerySet):
item.querytime = self.querytime
else:
if type_check:
raise TypeError(
"This item is not a Versionable, it's a " + str(
type(item)))
return item | Sets the time for which the query was made on the resulting item
:param item: an item of type Versionable
:param type_check: Check the item to be a Versionable
:return: Returns the item itself with the time set | Below is the the instruction that describes the task:
### Input:
Sets the time for which the query was made on the resulting item
:param item: an item of type Versionable
:param type_check: Check the item to be a Versionable
:return: Returns the item itself with the time set
### Response:
def _set_item_querytime(self, item, type_check=True):
"""
Sets the time for which the query was made on the resulting item
:param item: an item of type Versionable
:param type_check: Check the item to be a Versionable
:return: Returns the item itself with the time set
"""
if isinstance(item, Versionable):
item._querytime = self.querytime
elif isinstance(item, VersionedQuerySet):
item.querytime = self.querytime
else:
if type_check:
raise TypeError(
"This item is not a Versionable, it's a " + str(
type(item)))
return item |
def tree_sph(polar, azimuthal, n, standardization, symbolic=False):
"""Evaluate all spherical harmonics of degree at most `n` at angles `polar`,
`azimuthal`.
"""
cos = numpy.vectorize(sympy.cos) if symbolic else numpy.cos
# Conventions from
# <https://en.wikipedia.org/wiki/Spherical_harmonics#Orthogonality_and_normalization>.
config = {
"acoustic": ("complex spherical", False),
"quantum mechanic": ("complex spherical", True),
"geodetic": ("complex spherical 1", False),
"schmidt": ("schmidt", False),
}
standard, cs_phase = config[standardization]
return tree_alp(
cos(polar),
n,
phi=azimuthal,
standardization=standard,
with_condon_shortley_phase=cs_phase,
symbolic=symbolic,
) | Evaluate all spherical harmonics of degree at most `n` at angles `polar`,
`azimuthal`. | Below is the the instruction that describes the task:
### Input:
Evaluate all spherical harmonics of degree at most `n` at angles `polar`,
`azimuthal`.
### Response:
def tree_sph(polar, azimuthal, n, standardization, symbolic=False):
"""Evaluate all spherical harmonics of degree at most `n` at angles `polar`,
`azimuthal`.
"""
cos = numpy.vectorize(sympy.cos) if symbolic else numpy.cos
# Conventions from
# <https://en.wikipedia.org/wiki/Spherical_harmonics#Orthogonality_and_normalization>.
config = {
"acoustic": ("complex spherical", False),
"quantum mechanic": ("complex spherical", True),
"geodetic": ("complex spherical 1", False),
"schmidt": ("schmidt", False),
}
standard, cs_phase = config[standardization]
return tree_alp(
cos(polar),
n,
phi=azimuthal,
standardization=standard,
with_condon_shortley_phase=cs_phase,
symbolic=symbolic,
) |
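A small, hedged call example (the angle values are arbitrary, and the comment about the returned structure is an assumption about what tree_alp produces in this package):

import numpy

polar, azimuthal = numpy.pi / 4, numpy.pi / 3
sph = tree_sph(polar, azimuthal, 3, 'quantum mechanic')
# sph is expected to hold the harmonics up to degree 3, grouped by degree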
def save_keywords(filename, xml):
"""Save keyword XML to filename."""
tmp_dir = os.path.dirname(filename)
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
file_desc = open(filename, "w")
file_desc.write(xml)
file_desc.close() | Save keyword XML to filename. | Below is the the instruction that describes the task:
### Input:
Save keyword XML to filename.
### Response:
def save_keywords(filename, xml):
"""Save keyword XML to filename."""
tmp_dir = os.path.dirname(filename)
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
file_desc = open(filename, "w")
file_desc.write(xml)
file_desc.close() |
def identifier_md5(self):
"""
Return an MD5 of the identifier
"""
as_int = (self.identifier * 1e4).astype(np.int64)
hashed = util.md5_object(as_int.tostring(order='C'))
return hashed | Return an MD5 of the identifier | Below is the the instruction that describes the task:
### Input:
Return an MD5 of the identifier
### Response:
def identifier_md5(self):
"""
Return an MD5 of the identifier
"""
as_int = (self.identifier * 1e4).astype(np.int64)
hashed = util.md5_object(as_int.tostring(order='C'))
return hashed |
def search_service_code(self, service_index):
"""Search for a service code that corresponds to an index.
The Search Service Code command provides access to the
iterable list of services and areas within the activated
system. The *service_index* argument may be any value from 0
to 0xffff. As long as there is a service or area found for a
given *service_index*, the information returned is a tuple
with either one or two 16-bit integer elements. Two integers
are returned for an area definition, the first is the area
code and the second is the largest possible service index for
the area. One integer, the service code, is returned for a
service definition. The return value is :const:`None` if the
*service_index* was not found.
For example, to print all services and areas of the active
system: ::
for i in xrange(0x10000):
area_or_service = tag.search_service_code(i)
if area_or_service is None:
break
elif len(area_or_service) == 1:
sc = area_or_service[0]
print(nfc.tag.tt3.ServiceCode(sc >> 6, sc & 0x3f))
elif len(area_or_service) == 2:
area_code, area_last = area_or_service
print("Area {0:04x}--{0:04x}".format(area_code, area_last))
Command execution errors raise :exc:`~nfc.tag.TagCommandError`.
"""
log.debug("search service code index {0}".format(service_index))
# The maximum response time is given by the value of PMM[3].
# Some cards (like RC-S860 with IC RC-S915) encode a value
# that is too short, thus we use at least 2 ms.
a, e = self.pmm[3] & 7, self.pmm[3] >> 6
timeout = max(302E-6 * (a + 1) * 4**e, 0.002)
data = pack("<H", service_index)
data = self.send_cmd_recv_rsp(0x0A, data, timeout, check_status=False)
if data != "\xFF\xFF":
unpack_format = "<H" if len(data) == 2 else "<HH"
return unpack(unpack_format, data) | Search for a service code that corresponds to an index.
The Search Service Code command provides access to the
iterable list of services and areas within the activated
system. The *service_index* argument may be any value from 0
to 0xffff. As long as there is a service or area found for a
given *service_index*, the information returned is a tuple
with either one or two 16-bit integer elements. Two integers
are returned for an area definition, the first is the area
code and the second is the largest possible service index for
the area. One integer, the service code, is returned for a
service definition. The return value is :const:`None` if the
*service_index* was not found.
For example, to print all services and areas of the active
system: ::
for i in xrange(0x10000):
area_or_service = tag.search_service_code(i)
if area_or_service is None:
break
elif len(area_or_service) == 1:
sc = area_or_service[0]
print(nfc.tag.tt3.ServiceCode(sc >> 6, sc & 0x3f))
elif len(area_or_service) == 2:
area_code, area_last = area_or_service
print("Area {0:04x}--{0:04x}".format(area_code, area_last))
Command execution errors raise :exc:`~nfc.tag.TagCommandError`. | Below is the the instruction that describes the task:
### Input:
Search for a service code that corresponds to an index.
The Search Service Code command provides access to the
iterable list of services and areas within the activated
system. The *service_index* argument may be any value from 0
to 0xffff. As long as there is a service or area found for a
given *service_index*, the information returned is a tuple
with either one or two 16-bit integer elements. Two integers
are returned for an area definition, the first is the area
code and the second is the largest possible service index for
the area. One integer, the service code, is returned for a
service definition. The return value is :const:`None` if the
*service_index* was not found.
For example, to print all services and areas of the active
system: ::
for i in xrange(0x10000):
area_or_service = tag.search_service_code(i)
if area_or_service is None:
break
elif len(area_or_service) == 1:
sc = area_or_service[0]
print(nfc.tag.tt3.ServiceCode(sc >> 6, sc & 0x3f))
elif len(area_or_service) == 2:
area_code, area_last = area_or_service
print("Area {0:04x}--{0:04x}".format(area_code, area_last))
Command execution errors raise :exc:`~nfc.tag.TagCommandError`.
### Response:
def search_service_code(self, service_index):
"""Search for a service code that corresponds to an index.
The Search Service Code command provides access to the
iterable list of services and areas within the activated
system. The *service_index* argument may be any value from 0
to 0xffff. As long as there is a service or area found for a
given *service_index*, the information returned is a tuple
with either one or two 16-bit integer elements. Two integers
are returned for an area definition, the first is the area
code and the second is the largest possible service index for
the area. One integer, the service code, is returned for a
service definition. The return value is :const:`None` if the
*service_index* was not found.
For example, to print all services and areas of the active
system: ::
for i in xrange(0x10000):
area_or_service = tag.search_service_code(i)
if area_or_service is None:
break
elif len(area_or_service) == 1:
sc = area_or_service[0]
print(nfc.tag.tt3.ServiceCode(sc >> 6, sc & 0x3f))
elif len(area_or_service) == 2:
area_code, area_last = area_or_service
print("Area {0:04x}--{0:04x}".format(area_code, area_last))
Command execution errors raise :exc:`~nfc.tag.TagCommandError`.
"""
log.debug("search service code index {0}".format(service_index))
# The maximum response time is given by the value of PMM[3].
# Some cards (like RC-S860 with IC RC-S915) encode a value
# that is too short, thus we use at lest 2 ms.
a, e = self.pmm[3] & 7, self.pmm[3] >> 6
timeout = max(302E-6 * (a + 1) * 4**e, 0.002)
data = pack("<H", service_index)
data = self.send_cmd_recv_rsp(0x0A, data, timeout, check_status=False)
if data != "\xFF\xFF":
unpack_format = "<H" if len(data) == 2 else "<HH"
return unpack(unpack_format, data) |
def parse_html(html, cleanup=True):
"""
Parses an HTML fragment, returning an lxml element. Note that the HTML will be
wrapped in a <div> tag that was not in the original document.
If cleanup is true, make sure there's no <head> or <body>, and get
rid of any <ins> and <del> tags.
"""
if cleanup:
# This removes any extra markup or structure like <head>:
html = cleanup_html(html)
return fragment_fromstring(html, create_parent=True) | Parses an HTML fragment, returning an lxml element. Note that the HTML will be
wrapped in a <div> tag that was not in the original document.
If cleanup is true, make sure there's no <head> or <body>, and get
rid of any <ins> and <del> tags. | Below is the the instruction that describes the task:
### Input:
Parses an HTML fragment, returning an lxml element. Note that the HTML will be
wrapped in a <div> tag that was not in the original document.
If cleanup is true, make sure there's no <head> or <body>, and get
rid of any <ins> and <del> tags.
### Response:
def parse_html(html, cleanup=True):
"""
Parses an HTML fragment, returning an lxml element. Note that the HTML will be
wrapped in a <div> tag that was not in the original document.
If cleanup is true, make sure there's no <head> or <body>, and get
rid of any <ins> and <del> tags.
"""
if cleanup:
# This removes any extra markup or structure like <head>:
html = cleanup_html(html)
return fragment_fromstring(html, create_parent=True) |
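A quick, hedged example of the wrapping behaviour (assumes lxml and the module's own cleanup_html helper are importable):

el = parse_html('<html><body><p>Hello world</p></body></html>')
print(el.tag)       # 'div', the wrapper added by create_parent=True
print(el[0].tag)    # the original 'p' element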
def fast_corr(x, y=None, destination=None):
"""calculate the pearson correlation matrix for the columns of x (with dimensions MxN), or optionally, the pearson correlaton matrix
between x and y (with dimensions OxP). If destination is provided, put the results there.
In the language of statistics the columns are the variables and the rows are the observations.
Args:
x (numpy array-like) MxN in shape
y (optional, numpy array-like) OxP in shape. M (# rows in x) must equal O (# rows in y)
destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy
memmap of a file)
returns (numpy array-like) array of the correlation values
for defaults (y=None), shape is NxN
if y is provided, shape is NxP
"""
if y is None:
y = x
r = fast_cov.fast_cov(x, y, destination)
std_x = numpy.std(x, axis=0, ddof=1)
std_y = numpy.std(y, axis=0, ddof=1)
numpy.divide(r, std_x[:, numpy.newaxis], out=r)
numpy.divide(r, std_y[numpy.newaxis, :], out=r)
return r | calculate the pearson correlation matrix for the columns of x (with dimensions MxN), or optionally, the pearson correlation matrix
between x and y (with dimensions OxP). If destination is provided, put the results there.
In the language of statistics the columns are the variables and the rows are the observations.
Args:
x (numpy array-like) MxN in shape
y (optional, numpy array-like) OxP in shape. M (# rows in x) must equal O (# rows in y)
destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy
memmap of a file)
returns (numpy array-like) array of the correlation values
for defaults (y=None), shape is NxN
if y is provided, shape is NxP | Below is the the instruction that describes the task:
### Input:
calculate the pearson correlation matrix for the columns of x (with dimensions MxN), or optionally, the pearson correlation matrix
between x and y (with dimensions OxP). If destination is provided, put the results there.
In the language of statistics the columns are the variables and the rows are the observations.
Args:
x (numpy array-like) MxN in shape
y (optional, numpy array-like) OxP in shape. M (# rows in x) must equal O (# rows in y)
destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy
memmap of a file)
returns (numpy array-like) array of the correlation values
for defaults (y=None), shape is NxN
if y is provided, shape is NxP
### Response:
def fast_corr(x, y=None, destination=None):
"""calculate the pearson correlation matrix for the columns of x (with dimensions MxN), or optionally, the pearson correlaton matrix
between x and y (with dimensions OxP). If destination is provided, put the results there.
In the language of statistics the columns are the variables and the rows are the observations.
Args:
x (numpy array-like) MxN in shape
y (optional, numpy array-like) OxP in shape. M (# rows in x) must equal O (# rows in y)
destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy
memmap of a file)
returns (numpy array-like) array of the correlation values
for defaults (y=None), shape is NxN
if y is provided, shape is NxP
"""
if y is None:
y = x
r = fast_cov.fast_cov(x, y, destination)
std_x = numpy.std(x, axis=0, ddof=1)
std_y = numpy.std(y, axis=0, ddof=1)
numpy.divide(r, std_x[:, numpy.newaxis], out=r)
numpy.divide(r, std_y[numpy.newaxis, :], out=r)
return r |
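A minimal usage sketch (assumes numpy and the companion fast_cov module used above are importable):

import numpy

x = numpy.random.randn(50, 3)   # 50 observations of 3 variables
y = numpy.random.randn(50, 5)   # 50 observations of 5 variables
r_xy = fast_corr(x, y)          # 3 x 5 Pearson correlation matrix
r_xx = fast_corr(x)             # 3 x 3, diagonal of ones up to rounding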
def get_transformer_encoder(config: transformer.TransformerConfig, prefix: str) -> 'Encoder':
"""
Returns a Transformer encoder, consisting of an embedding layer with
positional encodings and a TransformerEncoder instance.
:param config: Configuration for transformer encoder.
:param prefix: Prefix for variable names.
:return: Encoder instance.
"""
encoder_seq = EncoderSequence([], dtype=config.dtype)
cls, encoder_params = _get_positional_embedding_params(config.positional_embedding_type,
config.model_size,
config.max_seq_len_source,
fixed_pos_embed_scale_up_input=True,
fixed_pos_embed_scale_down_positions=False,
prefix=prefix + C.SOURCE_POSITIONAL_EMBEDDING_PREFIX)
encoder_seq.append(cls, **encoder_params)
if config.conv_config is not None:
encoder_seq.append(ConvolutionalEmbeddingEncoder, config=config.conv_config,
prefix=prefix + C.CHAR_SEQ_ENCODER_PREFIX)
encoder_seq.append(TransformerEncoder, config=config, prefix=prefix + C.TRANSFORMER_ENCODER_PREFIX)
return encoder_seq | Returns a Transformer encoder, consisting of an embedding layer with
positional encodings and a TransformerEncoder instance.
:param config: Configuration for transformer encoder.
:param prefix: Prefix for variable names.
:return: Encoder instance. | Below is the the instruction that describes the task:
### Input:
Returns a Transformer encoder, consisting of an embedding layer with
positional encodings and a TransformerEncoder instance.
:param config: Configuration for transformer encoder.
:param prefix: Prefix for variable names.
:return: Encoder instance.
### Response:
def get_transformer_encoder(config: transformer.TransformerConfig, prefix: str) -> 'Encoder':
"""
Returns a Transformer encoder, consisting of an embedding layer with
positional encodings and a TransformerEncoder instance.
:param config: Configuration for transformer encoder.
:param prefix: Prefix for variable names.
:return: Encoder instance.
"""
encoder_seq = EncoderSequence([], dtype=config.dtype)
cls, encoder_params = _get_positional_embedding_params(config.positional_embedding_type,
config.model_size,
config.max_seq_len_source,
fixed_pos_embed_scale_up_input=True,
fixed_pos_embed_scale_down_positions=False,
prefix=prefix + C.SOURCE_POSITIONAL_EMBEDDING_PREFIX)
encoder_seq.append(cls, **encoder_params)
if config.conv_config is not None:
encoder_seq.append(ConvolutionalEmbeddingEncoder, config=config.conv_config,
prefix=prefix + C.CHAR_SEQ_ENCODER_PREFIX)
encoder_seq.append(TransformerEncoder, config=config, prefix=prefix + C.TRANSFORMER_ENCODER_PREFIX)
return encoder_seq |
def classifymetagenome(self):
"""Run the classify metagenome of the CLARK package on the samples"""
logging.info('Classifying metagenomes')
# Define the system call
self.classifycall = 'cd {} && ./classify_metagenome.sh -O {} -R {} -n {} --light'\
.format(self.clarkpath,
self.filelist,
self.reportlist,
self.cpus)
# Variable to store classification state
classify = True
for sample in self.runmetadata.samples:
try:
# Define the name of the .csv classification file
sample.general.classification = sample.general.combined.split('.')[0] + '.csv'
# If the file exists, then set classify to False
if os.path.isfile(sample.general.classification):
classify = False
except KeyError:
pass
# Run the system call if the samples have not been classified
if classify:
# Run the call
            subprocess.call(self.classifycall, shell=True, stdout=self.devnull, stderr=self.devnull) | Run the classify metagenome of the CLARK package on the samples | Below is the instruction that describes the task:
### Input:
Run the classify metagenome of the CLARK package on the samples
### Response:
def classifymetagenome(self):
"""Run the classify metagenome of the CLARK package on the samples"""
logging.info('Classifying metagenomes')
# Define the system call
self.classifycall = 'cd {} && ./classify_metagenome.sh -O {} -R {} -n {} --light'\
.format(self.clarkpath,
self.filelist,
self.reportlist,
self.cpus)
# Variable to store classification state
classify = True
for sample in self.runmetadata.samples:
try:
# Define the name of the .csv classification file
sample.general.classification = sample.general.combined.split('.')[0] + '.csv'
# If the file exists, then set classify to False
if os.path.isfile(sample.general.classification):
classify = False
except KeyError:
pass
# Run the system call if the samples have not been classified
if classify:
# Run the call
subprocess.call(self.classifycall, shell=True, stdout=self.devnull, stderr=self.devnull) |
def replace(self, target):
"""
Rename this path to the given path, clobbering the existing
destination if it exists.
"""
if sys.version_info < (3, 3):
raise NotImplementedError("replace() is only available "
"with Python 3.3 and later")
self._accessor.replace(self, target) | Rename this path to the given path, clobbering the existing
        destination if it exists. | Below is the instruction that describes the task:
### Input:
Rename this path to the given path, clobbering the existing
destination if it exists.
### Response:
def replace(self, target):
"""
Rename this path to the given path, clobbering the existing
destination if it exists.
"""
if sys.version_info < (3, 3):
raise NotImplementedError("replace() is only available "
"with Python 3.3 and later")
self._accessor.replace(self, target) |
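For reference, a small runnable sketch of the behaviour the backported method above exposes, using the standard-library pathlib (file names are made up):
import pathlib
import tempfile

d = pathlib.Path(tempfile.mkdtemp())
src = d / "a.txt"
dst = d / "b.txt"
src.write_text("new")
dst.write_text("old")
src.replace(dst)          # clobbers b.txt with a.txt's contents
print(dst.read_text())    # new
print(src.exists())       # False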
def download_videos(self, path, since=None, camera='all', stop=10):
"""
Download all videos from server since specified time.
:param path: Path to write files. /path/<cameraname>_<recorddate>.mp4
:param since: Date and time to get videos from.
Ex: "2018/07/28 12:33:00" to retrieve videos since
July 28th 2018 at 12:33:00
:param camera: Camera name to retrieve. Defaults to "all".
Use a list for multiple cameras.
:param stop: Page to stop on (~25 items per page. Default page 10).
"""
if since is None:
since_epochs = self.last_refresh
else:
parsed_datetime = parse(since, fuzzy=True)
since_epochs = parsed_datetime.timestamp()
formatted_date = get_time(time_to_convert=since_epochs)
_LOGGER.info("Retrieving videos since %s", formatted_date)
if not isinstance(camera, list):
camera = [camera]
for page in range(1, stop):
response = api.request_videos(self, time=since_epochs, page=page)
_LOGGER.debug("Processing page %s", page)
try:
result = response['videos']
if not result:
raise IndexError
except (KeyError, IndexError):
_LOGGER.info("No videos found on page %s. Exiting.", page)
break
self._parse_downloaded_items(result, camera, path) | Download all videos from server since specified time.
:param path: Path to write files. /path/<cameraname>_<recorddate>.mp4
:param since: Date and time to get videos from.
Ex: "2018/07/28 12:33:00" to retrieve videos since
July 28th 2018 at 12:33:00
:param camera: Camera name to retrieve. Defaults to "all".
Use a list for multiple cameras.
        :param stop: Page to stop on (~25 items per page. Default page 10). | Below is the instruction that describes the task:
### Input:
Download all videos from server since specified time.
:param path: Path to write files. /path/<cameraname>_<recorddate>.mp4
:param since: Date and time to get videos from.
Ex: "2018/07/28 12:33:00" to retrieve videos since
July 28th 2018 at 12:33:00
:param camera: Camera name to retrieve. Defaults to "all".
Use a list for multiple cameras.
:param stop: Page to stop on (~25 items per page. Default page 10).
### Response:
def download_videos(self, path, since=None, camera='all', stop=10):
"""
Download all videos from server since specified time.
:param path: Path to write files. /path/<cameraname>_<recorddate>.mp4
:param since: Date and time to get videos from.
Ex: "2018/07/28 12:33:00" to retrieve videos since
July 28th 2018 at 12:33:00
:param camera: Camera name to retrieve. Defaults to "all".
Use a list for multiple cameras.
:param stop: Page to stop on (~25 items per page. Default page 10).
"""
if since is None:
since_epochs = self.last_refresh
else:
parsed_datetime = parse(since, fuzzy=True)
since_epochs = parsed_datetime.timestamp()
formatted_date = get_time(time_to_convert=since_epochs)
_LOGGER.info("Retrieving videos since %s", formatted_date)
if not isinstance(camera, list):
camera = [camera]
for page in range(1, stop):
response = api.request_videos(self, time=since_epochs, page=page)
_LOGGER.debug("Processing page %s", page)
try:
result = response['videos']
if not result:
raise IndexError
except (KeyError, IndexError):
_LOGGER.info("No videos found on page %s. Exiting.", page)
break
self._parse_downloaded_items(result, camera, path) |
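A minimal sketch of the `since` handling above, assuming only that python-dateutil is installed; the date string is an example value, not real data:
from dateutil.parser import parse

since = "2018/07/28 12:33:00"
parsed_datetime = parse(since, fuzzy=True)   # tolerant, fuzzy parsing
since_epochs = parsed_datetime.timestamp()   # epoch seconds used for the video query
print(since_epochs)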
def _check_and_handle_includes(self, from_file):
"""Look for an optional INCLUDE section in the given file path. If
the parser set `paths`, it is cleared so that they do not keep
showing up when additional files are parsed.
"""
logger.debug("Check/handle includes from %s", from_file)
try:
paths = self._parser.get("INCLUDE", "paths")
except (config_parser.NoSectionError,
config_parser.NoOptionError) as exc:
logger.debug("_check_and_handle_includes: EXCEPTION: %s", exc)
return
paths_lines = [p.strip() for p in paths.split("\n")]
logger.debug("paths = %s (wanted just once; CLEARING)", paths_lines)
self._parser.remove_option("INCLUDE", "paths")
for f in paths_lines:
abspath = (f if os.path.isabs(f) else
os.path.abspath(
os.path.join(os.path.dirname(from_file), f)))
use_path = os.path.normpath(abspath)
if use_path in self._parsed_files:
raise RecursionInConfigFile("In %s: %s already read",
from_file, use_path)
self._parsed_files.append(use_path)
self._handle_rc_file(use_path) | Look for an optional INCLUDE section in the given file path. If
the parser set `paths`, it is cleared so that they do not keep
    showing up when additional files are parsed. | Below is the instruction that describes the task:
### Input:
Look for an optional INCLUDE section in the given file path. If
the parser set `paths`, it is cleared so that they do not keep
showing up when additional files are parsed.
### Response:
def _check_and_handle_includes(self, from_file):
"""Look for an optional INCLUDE section in the given file path. If
the parser set `paths`, it is cleared so that they do not keep
showing up when additional files are parsed.
"""
logger.debug("Check/handle includes from %s", from_file)
try:
paths = self._parser.get("INCLUDE", "paths")
except (config_parser.NoSectionError,
config_parser.NoOptionError) as exc:
logger.debug("_check_and_handle_includes: EXCEPTION: %s", exc)
return
paths_lines = [p.strip() for p in paths.split("\n")]
logger.debug("paths = %s (wanted just once; CLEARING)", paths_lines)
self._parser.remove_option("INCLUDE", "paths")
for f in paths_lines:
abspath = (f if os.path.isabs(f) else
os.path.abspath(
os.path.join(os.path.dirname(from_file), f)))
use_path = os.path.normpath(abspath)
if use_path in self._parsed_files:
raise RecursionInConfigFile("In %s: %s already read",
from_file, use_path)
self._parsed_files.append(use_path)
self._handle_rc_file(use_path) |
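A self-contained sketch of the [INCLUDE] handling above, with hypothetical file names: the multi-line `paths` option is read, split into one entry per line, then removed so it is only processed once.
import configparser

parser = configparser.ConfigParser()
parser.read_string("""
[INCLUDE]
paths = extra_one.cfg
    extra_two.cfg
""")
paths = parser.get("INCLUDE", "paths")
paths_lines = [p.strip() for p in paths.split("\n") if p.strip()]
print(paths_lines)                        # ['extra_one.cfg', 'extra_two.cfg']
parser.remove_option("INCLUDE", "paths")  # cleared so it is not handled twice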
def get_arcs(analysis):
"""
Hit stats for each branch.
Returns a flat list where every four values represent a branch:
1. line-number
2. block-number (not used)
3. branch-number
4. hits (we only get 1/0 from coverage.py)
"""
if not analysis.has_arcs():
return None
branch_lines = analysis.branch_lines()
branches = []
for l1, l2 in analysis.arcs_executed():
if l1 in branch_lines:
branches.extend((l1, 0, abs(l2), 1))
for l1, l2 in analysis.arcs_missing():
if l1 in branch_lines:
branches.extend((l1, 0, abs(l2), 0))
return branches | Hit stats for each branch.
Returns a flat list where every four values represent a branch:
1. line-number
2. block-number (not used)
3. branch-number
    4. hits (we only get 1/0 from coverage.py) | Below is the instruction that describes the task:
### Input:
Hit stats for each branch.
Returns a flat list where every four values represent a branch:
1. line-number
2. block-number (not used)
3. branch-number
4. hits (we only get 1/0 from coverage.py)
### Response:
def get_arcs(analysis):
"""
Hit stats for each branch.
Returns a flat list where every four values represent a branch:
1. line-number
2. block-number (not used)
3. branch-number
4. hits (we only get 1/0 from coverage.py)
"""
if not analysis.has_arcs():
return None
branch_lines = analysis.branch_lines()
branches = []
for l1, l2 in analysis.arcs_executed():
if l1 in branch_lines:
branches.extend((l1, 0, abs(l2), 1))
for l1, l2 in analysis.arcs_missing():
if l1 in branch_lines:
branches.extend((l1, 0, abs(l2), 0))
return branches |
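The same flattening, shown with plain data instead of a coverage.py analysis object (the arc values here are invented):
branch_lines = {10, 20}
arcs_executed = [(10, 11), (10, 14), (5, 6)]
arcs_missing = [(20, -21)]

branches = []
for l1, l2 in arcs_executed:
    if l1 in branch_lines:
        branches.extend((l1, 0, abs(l2), 1))
for l1, l2 in arcs_missing:
    if l1 in branch_lines:
        branches.extend((l1, 0, abs(l2), 0))
print(branches)   # [10, 0, 11, 1, 10, 0, 14, 1, 20, 0, 21, 0]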
def get_file_checksums(url, ftp=None):
"""Download and parse an Ensembl CHECKSUMS file and obtain checksums.
Parameters
----------
url : str
The URL of the CHECKSUM file.
ftp : `ftplib.FTP` or `None`, optional
An FTP connection.
Returns
-------
`collections.OrderedDict`
An ordered dictionary containing file names as keys and checksums as
values.
Notes
-----
    The checksums contained in Ensembl CHECKSUMS files are obtained with the
UNIX `sum` command.
"""
assert isinstance(url, (str, _oldstr))
if ftp is not None:
assert isinstance(ftp, ftplib.FTP)
# open FTP connection if necessary
close_connection = False
ftp_server = 'ftp.ensembl.org'
ftp_user = 'anonymous'
if ftp is None:
ftp = ftplib.FTP(ftp_server)
ftp.login(ftp_user)
close_connection = True
# download and parse CHECKSUM file
data = []
ftp.retrbinary('RETR %s' % url, data.append)
data = ''.join(d.decode('utf-8') for d in data).split('\n')[:-1]
file_checksums = OrderedDict()
for d in data:
file_name = d[(d.rindex(' ') + 1):]
sum_ = int(d[:d.index(' ')])
file_checksums[file_name] = sum_
logger.debug('Obtained checksums for %d files', len(file_checksums))
# close FTP connection if we opened it
if close_connection:
ftp.close()
return file_checksums | Download and parse an Ensembl CHECKSUMS file and obtain checksums.
Parameters
----------
url : str
The URL of the CHECKSUM file.
ftp : `ftplib.FTP` or `None`, optional
An FTP connection.
Returns
-------
`collections.OrderedDict`
An ordered dictionary containing file names as keys and checksums as
values.
Notes
-----
    The checksums contained in Ensembl CHECKSUMS files are obtained with the
    UNIX `sum` command. | Below is the instruction that describes the task:
### Input:
Download and parse an Ensembl CHECKSUMS file and obtain checksums.
Parameters
----------
url : str
The URL of the CHECKSUM file.
ftp : `ftplib.FTP` or `None`, optional
An FTP connection.
Returns
-------
`collections.OrderedDict`
An ordered dictionary containing file names as keys and checksums as
values.
Notes
-----
    The checksums contained in Ensembl CHECKSUMS files are obtained with the
UNIX `sum` command.
### Response:
def get_file_checksums(url, ftp=None):
"""Download and parse an Ensembl CHECKSUMS file and obtain checksums.
Parameters
----------
url : str
The URL of the CHECKSUM file.
ftp : `ftplib.FTP` or `None`, optional
An FTP connection.
Returns
-------
`collections.OrderedDict`
An ordered dictionary containing file names as keys and checksums as
values.
Notes
-----
    The checksums contained in Ensembl CHECKSUMS files are obtained with the
UNIX `sum` command.
"""
assert isinstance(url, (str, _oldstr))
if ftp is not None:
assert isinstance(ftp, ftplib.FTP)
# open FTP connection if necessary
close_connection = False
ftp_server = 'ftp.ensembl.org'
ftp_user = 'anonymous'
if ftp is None:
ftp = ftplib.FTP(ftp_server)
ftp.login(ftp_user)
close_connection = True
# download and parse CHECKSUM file
data = []
ftp.retrbinary('RETR %s' % url, data.append)
data = ''.join(d.decode('utf-8') for d in data).split('\n')[:-1]
file_checksums = OrderedDict()
for d in data:
file_name = d[(d.rindex(' ') + 1):]
sum_ = int(d[:d.index(' ')])
file_checksums[file_name] = sum_
logger.debug('Obtained checksums for %d files', len(file_checksums))
# close FTP connection if we opened it
if close_connection:
ftp.close()
return file_checksums |
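The parsing step above, isolated into a runnable sketch; the two lines mimic the assumed `sum`-style format "<checksum> <blocks> <file name>" and are not real Ensembl data:
from collections import OrderedDict

data = ["12345 678 Homo_sapiens.dna.toplevel.fa.gz",
        "54321 910 README"]
file_checksums = OrderedDict()
for d in data:
    file_name = d[(d.rindex(' ') + 1):]   # everything after the last space
    sum_ = int(d[:d.index(' ')])          # everything before the first space
    file_checksums[file_name] = sum_
print(file_checksums)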
def grant_jsapi_ticket(self):
"""
        Fetch the jsapi ticket and update the current configuration
        :return: the returned JSON payload (returns None when a jsapi_ticket_refreshfunc was supplied)
"""
self._check_appid_appsecret()
if callable(self.__jsapi_ticket_refreshfunc):
self.__jsapi_ticket, self.__jsapi_ticket_expires_at = self.__jsapi_ticket_refreshfunc()
return
response_json = self.__request.get(
url="https://api.weixin.qq.com/cgi-bin/ticket/getticket",
params={
"type": "jsapi",
},
access_token=self.access_token,
)
self.__jsapi_ticket = response_json['ticket']
self.__jsapi_ticket_expires_at = int(time.time()) + response_json['expires_in']
if callable(self.__jsapi_ticket_setfunc):
self.__jsapi_ticket_setfunc(self.__jsapi_ticket, self.__jsapi_ticket_expires_at)
        return response_json | Fetch the jsapi ticket and update the current configuration
        :return: the returned JSON payload (returns None when a jsapi_ticket_refreshfunc was supplied) | Below is the instruction that describes the task:
### Input:
Fetch the jsapi ticket and update the current configuration
:return: the returned JSON payload (returns None when a jsapi_ticket_refreshfunc was supplied)
### Response:
def grant_jsapi_ticket(self):
"""
        Fetch the jsapi ticket and update the current configuration
        :return: the returned JSON payload (returns None when a jsapi_ticket_refreshfunc was supplied)
"""
self._check_appid_appsecret()
if callable(self.__jsapi_ticket_refreshfunc):
self.__jsapi_ticket, self.__jsapi_ticket_expires_at = self.__jsapi_ticket_refreshfunc()
return
response_json = self.__request.get(
url="https://api.weixin.qq.com/cgi-bin/ticket/getticket",
params={
"type": "jsapi",
},
access_token=self.access_token,
)
self.__jsapi_ticket = response_json['ticket']
self.__jsapi_ticket_expires_at = int(time.time()) + response_json['expires_in']
if callable(self.__jsapi_ticket_setfunc):
self.__jsapi_ticket_setfunc(self.__jsapi_ticket, self.__jsapi_ticket_expires_at)
return response_json |
def cleanup(self):
"""Remove expired associations.
@return: tuple of (removed associations, remaining associations)
"""
remove = []
for handle, assoc in self.assocs.items():
if assoc.expiresIn == 0:
remove.append(handle)
for handle in remove:
del self.assocs[handle]
return len(remove), len(self.assocs) | Remove expired associations.
    @return: tuple of (removed associations, remaining associations) | Below is the instruction that describes the task:
### Input:
Remove expired associations.
@return: tuple of (removed associations, remaining associations)
### Response:
def cleanup(self):
"""Remove expired associations.
@return: tuple of (removed associations, remaining associations)
"""
remove = []
for handle, assoc in self.assocs.items():
if assoc.expiresIn == 0:
remove.append(handle)
for handle in remove:
del self.assocs[handle]
return len(remove), len(self.assocs) |
def cli(ctx, email, first_name, last_name, password, metadata={}):
"""Update an existing user
Output:
a dictionary containing user information
"""
return ctx.gi.users.update_user(email, first_name, last_name, password, metadata=metadata) | Update an existing user
Output:
    a dictionary containing user information | Below is the instruction that describes the task:
### Input:
Update an existing user
Output:
a dictionary containing user information
### Response:
def cli(ctx, email, first_name, last_name, password, metadata={}):
"""Update an existing user
Output:
a dictionary containing user information
"""
return ctx.gi.users.update_user(email, first_name, last_name, password, metadata=metadata) |
def get_partial_text(fulltext):
"""Return a short version of the fulltext used with partial matching mode.
The version is composed of 20% in the beginning and 20% in the middle of
the text.
"""
def _get_index(x):
return int(float(x) / 100 * len(fulltext))
partial_text = [
fulltext[_get_index(start):_get_index(end)]
for start, end in current_app.config[
"CLASSIFIER_PARTIAL_TEXT_PERCENTAGES"
]
]
return "\n".join(partial_text) | Return a short version of the fulltext used with partial matching mode.
The version is composed of 20% in the beginning and 20% in the middle of
    the text. | Below is the instruction that describes the task:
### Input:
Return a short version of the fulltext used with partial matching mode.
The version is composed of 20% in the beginning and 20% in the middle of
the text.
### Response:
def get_partial_text(fulltext):
"""Return a short version of the fulltext used with partial matching mode.
The version is composed of 20% in the beginning and 20% in the middle of
the text.
"""
def _get_index(x):
return int(float(x) / 100 * len(fulltext))
partial_text = [
fulltext[_get_index(start):_get_index(end)]
for start, end in current_app.config[
"CLASSIFIER_PARTIAL_TEXT_PERCENTAGES"
]
]
return "\n".join(partial_text) |
def _file_size(file_path, uncompressed=False):
"""Return size of a single file, compressed or uncompressed"""
_, ext = os.path.splitext(file_path)
if uncompressed:
if ext in {".gz", ".gzip"}:
with gzip.GzipFile(file_path, mode="rb") as fp:
try:
fp.seek(0, os.SEEK_END)
return fp.tell()
except ValueError:
# on python2, cannot seek from end and must instead read to end
fp.seek(0)
while len(fp.read(8192)) != 0:
pass
return fp.tell()
elif ext in {".bz", ".bz2", ".bzip", ".bzip2"}:
with bz2.BZ2File(file_path, mode="rb") as fp:
fp.seek(0, os.SEEK_END)
return fp.tell()
    return os.path.getsize(file_path) | Return size of a single file, compressed or uncompressed | Below is the instruction that describes the task:
### Input:
Return size of a single file, compressed or uncompressed
### Response:
def _file_size(file_path, uncompressed=False):
"""Return size of a single file, compressed or uncompressed"""
_, ext = os.path.splitext(file_path)
if uncompressed:
if ext in {".gz", ".gzip"}:
with gzip.GzipFile(file_path, mode="rb") as fp:
try:
fp.seek(0, os.SEEK_END)
return fp.tell()
except ValueError:
# on python2, cannot seek from end and must instead read to end
fp.seek(0)
while len(fp.read(8192)) != 0:
pass
return fp.tell()
elif ext in {".bz", ".bz2", ".bzip", ".bzip2"}:
with bz2.BZ2File(file_path, mode="rb") as fp:
fp.seek(0, os.SEEK_END)
return fp.tell()
return os.path.getsize(file_path) |
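A usage-style sketch of the gzip branch above: write a small compressed file, then recover its uncompressed size by seeking to the end of the decompressed stream (Python 3 behaviour, which the function itself relies on):
import gzip
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "data.txt.gz")
with gzip.open(path, "wb") as fp:
    fp.write(b"hello world\n" * 1000)

with gzip.GzipFile(path, mode="rb") as fp:
    fp.seek(0, os.SEEK_END)
    uncompressed = fp.tell()
print(os.path.getsize(path), uncompressed)   # small compressed size vs 12000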
def push_rule_nodes(self) -> bool:
"""Push context variable to store rule nodes."""
if self.rule_nodes is None:
self.rule_nodes = collections.ChainMap()
self.tag_cache = collections.ChainMap()
self.id_cache = collections.ChainMap()
else:
self.rule_nodes = self.rule_nodes.new_child()
self.tag_cache = self.tag_cache.new_child()
self.id_cache = self.id_cache.new_child()
        return True | Push context variable to store rule nodes. | Below is the instruction that describes the task:
### Input:
Push context variable to store rule nodes.
### Response:
def push_rule_nodes(self) -> bool:
"""Push context variable to store rule nodes."""
if self.rule_nodes is None:
self.rule_nodes = collections.ChainMap()
self.tag_cache = collections.ChainMap()
self.id_cache = collections.ChainMap()
else:
self.rule_nodes = self.rule_nodes.new_child()
self.tag_cache = self.tag_cache.new_child()
self.id_cache = self.id_cache.new_child()
return True |
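The ChainMap scoping used above, in isolation: new_child() pushes a scope whose bindings shadow the parent without mutating it.
import collections

rule_nodes = collections.ChainMap({"root": "R"})
child = rule_nodes.new_child()     # what push_rule_nodes does on later calls
child["local"] = "L"
print("local" in child)            # True
print("local" in rule_nodes)       # False -- parent scope untouched
print(child["root"])               # "R" -- still visible through the chain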
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a text file-like object using a pyparsing definition.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
if not self.LINE_STRUCTURES:
raise errors.UnableToParseFile('Missing line structures.')
encoding = self._ENCODING or parser_mediator.codepage
text_reader = EncodedTextReader(
encoding, buffer_size=self.BUFFER_SIZE)
text_reader.Reset()
try:
text_reader.ReadLines(file_object)
except UnicodeDecodeError as exception:
raise errors.UnableToParseFile(
'Not a text file, with error: {0!s}'.format(exception))
if not self.VerifyStructure(parser_mediator, text_reader.lines):
raise errors.UnableToParseFile('Wrong file structure.')
# Using parseWithTabs() overrides Pyparsing's default replacement of tabs
# with spaces to SkipAhead() the correct number of bytes after a match.
for key, structure in self.LINE_STRUCTURES:
structure.parseWithTabs()
consecutive_line_failures = 0
# Read every line in the text file.
while text_reader.lines:
if parser_mediator.abort:
break
# Initialize pyparsing objects.
tokens = None
start = 0
end = 0
key = None
index = None
# Try to parse the line using all the line structures.
for index, (key, structure) in enumerate(self._line_structures):
try:
structure_generator = structure.scanString(
text_reader.lines, maxMatches=1)
parsed_structure = next(structure_generator, None)
except pyparsing.ParseException:
parsed_structure = None
if not parsed_structure:
continue
tokens, start, end = parsed_structure
# Only want to parse the structure if it starts
# at the beginning of the buffer.
if start == 0:
break
if tokens and start == 0:
# Move matching key, structure pair to the front of the list, so that
# structures that are more likely to match are tried first.
if index is not None and index != 0:
key_structure = self._line_structures.pop(index)
self._line_structures.insert(0, key_structure)
try:
self.ParseRecord(parser_mediator, key, tokens)
consecutive_line_failures = 0
except (errors.ParseError, errors.TimestampError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse record: {0:s} with error: {1!s}'.format(
key, exception))
text_reader.SkipAhead(file_object, end)
else:
odd_line = text_reader.ReadLine(file_object)
if odd_line:
if len(odd_line) > 80:
odd_line = '{0:s}...'.format(odd_line[:77])
parser_mediator.ProduceExtractionWarning(
'unable to parse log line: {0:s}'.format(repr(odd_line)))
consecutive_line_failures += 1
if (consecutive_line_failures >
self.MAXIMUM_CONSECUTIVE_LINE_FAILURES):
raise errors.UnableToParseFile(
'more than {0:d} consecutive failures to parse lines.'.format(
self.MAXIMUM_CONSECUTIVE_LINE_FAILURES))
try:
text_reader.ReadLines(file_object)
except UnicodeDecodeError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to read lines with error: {0!s}'.format(exception)) | Parses a text file-like object using a pyparsing definition.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
      UnableToParseFile: when the file cannot be parsed. | Below is the instruction that describes the task:
### Input:
Parses a text file-like object using a pyparsing definition.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
### Response:
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a text file-like object using a pyparsing definition.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
if not self.LINE_STRUCTURES:
raise errors.UnableToParseFile('Missing line structures.')
encoding = self._ENCODING or parser_mediator.codepage
text_reader = EncodedTextReader(
encoding, buffer_size=self.BUFFER_SIZE)
text_reader.Reset()
try:
text_reader.ReadLines(file_object)
except UnicodeDecodeError as exception:
raise errors.UnableToParseFile(
'Not a text file, with error: {0!s}'.format(exception))
if not self.VerifyStructure(parser_mediator, text_reader.lines):
raise errors.UnableToParseFile('Wrong file structure.')
# Using parseWithTabs() overrides Pyparsing's default replacement of tabs
# with spaces to SkipAhead() the correct number of bytes after a match.
for key, structure in self.LINE_STRUCTURES:
structure.parseWithTabs()
consecutive_line_failures = 0
# Read every line in the text file.
while text_reader.lines:
if parser_mediator.abort:
break
# Initialize pyparsing objects.
tokens = None
start = 0
end = 0
key = None
index = None
# Try to parse the line using all the line structures.
for index, (key, structure) in enumerate(self._line_structures):
try:
structure_generator = structure.scanString(
text_reader.lines, maxMatches=1)
parsed_structure = next(structure_generator, None)
except pyparsing.ParseException:
parsed_structure = None
if not parsed_structure:
continue
tokens, start, end = parsed_structure
# Only want to parse the structure if it starts
# at the beginning of the buffer.
if start == 0:
break
if tokens and start == 0:
# Move matching key, structure pair to the front of the list, so that
# structures that are more likely to match are tried first.
if index is not None and index != 0:
key_structure = self._line_structures.pop(index)
self._line_structures.insert(0, key_structure)
try:
self.ParseRecord(parser_mediator, key, tokens)
consecutive_line_failures = 0
except (errors.ParseError, errors.TimestampError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse record: {0:s} with error: {1!s}'.format(
key, exception))
text_reader.SkipAhead(file_object, end)
else:
odd_line = text_reader.ReadLine(file_object)
if odd_line:
if len(odd_line) > 80:
odd_line = '{0:s}...'.format(odd_line[:77])
parser_mediator.ProduceExtractionWarning(
'unable to parse log line: {0:s}'.format(repr(odd_line)))
consecutive_line_failures += 1
if (consecutive_line_failures >
self.MAXIMUM_CONSECUTIVE_LINE_FAILURES):
raise errors.UnableToParseFile(
'more than {0:d} consecutive failures to parse lines.'.format(
self.MAXIMUM_CONSECUTIVE_LINE_FAILURES))
try:
text_reader.ReadLines(file_object)
except UnicodeDecodeError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to read lines with error: {0!s}'.format(exception)) |
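A toy pyparsing sketch (grammar and buffer invented, nothing plaso-specific) of the matching step above: scanString with maxMatches=1 yields (tokens, start, end), and only a match starting at offset 0 of the buffer is accepted.
import pyparsing

structure = pyparsing.Word(pyparsing.nums)("code") + pyparsing.Word(pyparsing.alphas)("word")
buffer_text = "404 notfound and trailing text"
parsed_structure = next(structure.scanString(buffer_text, maxMatches=1), None)
if parsed_structure:
    tokens, start, end = parsed_structure
    if start == 0:                    # same start-of-buffer check as above
        print(tokens["code"], tokens["word"], end)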
def encrypt_password(**kwargs):
""" Crypt :new_value: if it's not crypted yet. """
new_value = kwargs['new_value']
field = kwargs['field']
min_length = field.params['min_length']
if len(new_value) < min_length:
raise ValueError(
'`{}`: Value length must be more than {}'.format(
field.name, field.params['min_length']))
if new_value and not crypt.match(new_value):
new_value = str(crypt.encode(new_value))
    return new_value | Crypt :new_value: if it's not crypted yet. | Below is the instruction that describes the task:
### Input:
Crypt :new_value: if it's not crypted yet.
### Response:
def encrypt_password(**kwargs):
""" Crypt :new_value: if it's not crypted yet. """
new_value = kwargs['new_value']
field = kwargs['field']
min_length = field.params['min_length']
if len(new_value) < min_length:
raise ValueError(
'`{}`: Value length must be more than {}'.format(
field.name, field.params['min_length']))
if new_value and not crypt.match(new_value):
new_value = str(crypt.encode(new_value))
return new_value |
def get_orientation_radians(self):
"""
Returns a dictionary object to represent the current orientation in
radians using the aircraft principal axes of pitch, roll and yaw
"""
raw = self._get_raw_data('fusionPoseValid', 'fusionPose')
if raw is not None:
raw['roll'] = raw.pop('x')
raw['pitch'] = raw.pop('y')
raw['yaw'] = raw.pop('z')
self._last_orientation = raw
return deepcopy(self._last_orientation) | Returns a dictionary object to represent the current orientation in
        radians using the aircraft principal axes of pitch, roll and yaw | Below is the instruction that describes the task:
### Input:
Returns a dictionary object to represent the current orientation in
radians using the aircraft principal axes of pitch, roll and yaw
### Response:
def get_orientation_radians(self):
"""
Returns a dictionary object to represent the current orientation in
radians using the aircraft principal axes of pitch, roll and yaw
"""
raw = self._get_raw_data('fusionPoseValid', 'fusionPose')
if raw is not None:
raw['roll'] = raw.pop('x')
raw['pitch'] = raw.pop('y')
raw['yaw'] = raw.pop('z')
self._last_orientation = raw
return deepcopy(self._last_orientation) |
def apply(self,word,ctx=None):
""" ignore ctx information right now """
chars = get_letters(word)
flag = True #no error assumed
reason = None #no reason
prev_letter = None
for char in chars:
if prev_letter == char:
flag = False
break
prev_letter = char # continue loop
if not flag:
reason = RepeatedLetters.reason
        return flag,reason | ignore ctx information right now | Below is the instruction that describes the task:
### Input:
ignore ctx information right now
### Response:
def apply(self,word,ctx=None):
""" ignore ctx information right now """
chars = get_letters(word)
flag = True #no error assumed
reason = None #no reason
prev_letter = None
for char in chars:
if prev_letter == char:
flag = False
break
prev_letter = char # continue loop
if not flag:
reason = RepeatedLetters.reason
return flag,reason |
def _set_proto_vrrpv3(self, v, load=False):
"""
Setter method for proto_vrrpv3, mapped from YANG variable /rbridge_id/ipv6/proto_vrrpv3 (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_proto_vrrpv3 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_proto_vrrpv3() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=proto_vrrpv3.proto_vrrpv3, is_container='container', presence=False, yang_name="proto-vrrpv3", rest_name="protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'vrrpv3GlobalConf', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'alt-name': u'protocol'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """proto_vrrpv3 must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=proto_vrrpv3.proto_vrrpv3, is_container='container', presence=False, yang_name="proto-vrrpv3", rest_name="protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'vrrpv3GlobalConf', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'alt-name': u'protocol'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='container', is_config=True)""",
})
self.__proto_vrrpv3 = t
if hasattr(self, '_set'):
self._set() | Setter method for proto_vrrpv3, mapped from YANG variable /rbridge_id/ipv6/proto_vrrpv3 (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_proto_vrrpv3 is considered as a private
method. Backends looking to populate this variable should
    do so via calling thisObj._set_proto_vrrpv3() directly. | Below is the instruction that describes the task:
### Input:
Setter method for proto_vrrpv3, mapped from YANG variable /rbridge_id/ipv6/proto_vrrpv3 (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_proto_vrrpv3 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_proto_vrrpv3() directly.
### Response:
def _set_proto_vrrpv3(self, v, load=False):
"""
Setter method for proto_vrrpv3, mapped from YANG variable /rbridge_id/ipv6/proto_vrrpv3 (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_proto_vrrpv3 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_proto_vrrpv3() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=proto_vrrpv3.proto_vrrpv3, is_container='container', presence=False, yang_name="proto-vrrpv3", rest_name="protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'vrrpv3GlobalConf', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'alt-name': u'protocol'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """proto_vrrpv3 must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=proto_vrrpv3.proto_vrrpv3, is_container='container', presence=False, yang_name="proto-vrrpv3", rest_name="protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'vrrpv3GlobalConf', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'alt-name': u'protocol'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='container', is_config=True)""",
})
self.__proto_vrrpv3 = t
if hasattr(self, '_set'):
self._set() |
def _delay_for_ratelimits(cls, start):
"""If request was shorter than max request time, delay"""
stop = datetime.now()
duration_microseconds = (stop - start).microseconds
if duration_microseconds < cls.REQUEST_TIME_MICROSECONDS:
time.sleep((cls.REQUEST_TIME_MICROSECONDS - duration_microseconds)
                    / MICROSECONDS_PER_SECOND) | If request was shorter than max request time, delay | Below is the instruction that describes the task:
### Input:
If request was shorter than max request time, delay
### Response:
def _delay_for_ratelimits(cls, start):
"""If request was shorter than max request time, delay"""
stop = datetime.now()
duration_microseconds = (stop - start).microseconds
if duration_microseconds < cls.REQUEST_TIME_MICROSECONDS:
time.sleep((cls.REQUEST_TIME_MICROSECONDS - duration_microseconds)
/ MICROSECONDS_PER_SECOND) |
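The pacing logic above as a standalone sketch (budget values invented). It mirrors the original's use of `.microseconds`, which only carries the sub-second part of the elapsed time, so it is meant for budgets well under one second.
import time
from datetime import datetime

REQUEST_TIME_MICROSECONDS = 200000       # 0.2 s budget per request
MICROSECONDS_PER_SECOND = 1000000

start = datetime.now()
# ... perform the request here ...
stop = datetime.now()
elapsed = (stop - start).microseconds
if elapsed < REQUEST_TIME_MICROSECONDS:
    time.sleep((REQUEST_TIME_MICROSECONDS - elapsed) / MICROSECONDS_PER_SECOND)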
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node) | Check for expressions like type(x) == Y. | Below is the the instruction that describes the task:
### Input:
Check for expressions like type(x) == Y.
### Response:
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node) |
def add_to_object(self, target: object, override: bool = False) -> int:
"""
Add the bindings to the target object
:param target: target to add to
:param override: override existing bindings if they are of type Namespace
:return: number of items actually added
"""
nret = 0
for k, v in self:
key = k.upper()
exists = hasattr(target, key)
if not exists or (override and isinstance(getattr(target, k), (Namespace, _RDFNamespace))):
setattr(target, k, v)
nret += 1
else:
print(f"Warning: {key} is already defined in namespace {target}. Not overridden")
return nret | Add the bindings to the target object
:param target: target to add to
:param override: override existing bindings if they are of type Namespace
    :return: number of items actually added | Below is the instruction that describes the task:
### Input:
Add the bindings to the target object
:param target: target to add to
:param override: override existing bindings if they are of type Namespace
:return: number of items actually added
### Response:
def add_to_object(self, target: object, override: bool = False) -> int:
"""
Add the bindings to the target object
:param target: target to add to
:param override: override existing bindings if they are of type Namespace
:return: number of items actually added
"""
nret = 0
for k, v in self:
key = k.upper()
exists = hasattr(target, key)
if not exists or (override and isinstance(getattr(target, k), (Namespace, _RDFNamespace))):
setattr(target, k, v)
nret += 1
else:
print(f"Warning: {key} is already defined in namespace {target}. Not overridden")
return nret |
def merge_tracks(self, track_indices=None, mode='sum', program=0,
is_drum=False, name='merged', remove_merged=False):
"""
Merge pianorolls of the tracks specified by `track_indices`. The merged
track will have program number as given by `program` and drum indicator
as given by `is_drum`. The merged track will be appended at the end of
the track list.
Parameters
----------
track_indices : list
The indices of tracks to be merged. Defaults to all the tracks.
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
track axis. Default to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of the collected
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among the
collected pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the collected pianorolls has nonzero value at that
pixel; False if all the collected pianorolls are inactive
(zero-valued) at that pixel.
program: int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
is_drum : bool
        A boolean value that indicates whether it is a percussion track.
Defaults to False.
name : str
A name to be assigned to the merged track. Defaults to 'merged'.
remove_merged : bool
True to remove the source tracks from the track list. False to keep
them. Defaults to False.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set
"""
if mode not in ('max', 'sum', 'any'):
raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.")
merged = self[track_indices].get_merged_pianoroll(mode)
merged_track = Track(merged, program, is_drum, name)
self.append_track(merged_track)
if remove_merged:
self.remove_tracks(track_indices) | Merge pianorolls of the tracks specified by `track_indices`. The merged
track will have program number as given by `program` and drum indicator
as given by `is_drum`. The merged track will be appended at the end of
the track list.
Parameters
----------
track_indices : list
The indices of tracks to be merged. Defaults to all the tracks.
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
        track axis. Defaults to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of the collected
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among the
collected pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the collected pianorolls has nonzero value at that
pixel; False if all the collected pianorolls are inactive
(zero-valued) at that pixel.
program: int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
is_drum : bool
        A boolean value that indicates whether it is a percussion track.
Defaults to False.
name : str
A name to be assigned to the merged track. Defaults to 'merged'.
remove_merged : bool
True to remove the source tracks from the track list. False to keep
them. Defaults to False.
References
----------
    [1] https://www.midi.org/specifications/item/gm-level-1-sound-set | Below is the instruction that describes the task:
### Input:
Merge pianorolls of the tracks specified by `track_indices`. The merged
track will have program number as given by `program` and drum indicator
as given by `is_drum`. The merged track will be appended at the end of
the track list.
Parameters
----------
track_indices : list
The indices of tracks to be merged. Defaults to all the tracks.
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
        track axis. Defaults to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of the collected
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among the
collected pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the collected pianorolls has nonzero value at that
pixel; False if all the collected pianorolls are inactive
(zero-valued) at that pixel.
program: int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
is_drum : bool
        A boolean value that indicates whether it is a percussion track.
Defaults to False.
name : str
A name to be assigned to the merged track. Defaults to 'merged'.
remove_merged : bool
True to remove the source tracks from the track list. False to keep
them. Defaults to False.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set
### Response:
def merge_tracks(self, track_indices=None, mode='sum', program=0,
is_drum=False, name='merged', remove_merged=False):
"""
Merge pianorolls of the tracks specified by `track_indices`. The merged
track will have program number as given by `program` and drum indicator
as given by `is_drum`. The merged track will be appended at the end of
the track list.
Parameters
----------
track_indices : list
The indices of tracks to be merged. Defaults to all the tracks.
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
track axis. Default to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of the collected
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among the
collected pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the collected pianorolls has nonzero value at that
pixel; False if all the collected pianorolls are inactive
(zero-valued) at that pixel.
program: int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
is_drum : bool
        A boolean value that indicates whether it is a percussion track.
Defaults to False.
name : str
A name to be assigned to the merged track. Defaults to 'merged'.
remove_merged : bool
True to remove the source tracks from the track list. False to keep
them. Defaults to False.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set
"""
if mode not in ('max', 'sum', 'any'):
raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.")
merged = self[track_indices].get_merged_pianoroll(mode)
merged_track = Track(merged, program, is_drum, name)
self.append_track(merged_track)
if remove_merged:
self.remove_tracks(track_indices) |
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(EntropyStatCollector, self).get_default_config()
config.update({
'path': 'entropy'
})
        return config | Returns the default collector settings | Below is the instruction that describes the task:
### Input:
Returns the default collector settings
### Response:
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(EntropyStatCollector, self).get_default_config()
config.update({
'path': 'entropy'
})
return config |