code | docstring | text
---|---|---|
def remove_comments_and_docstrings(source):
"""
Returns *source* minus comments and docstrings.
.. note:: Uses Python's built-in tokenize module to great effect.
Example::
def noop(): # This is a comment
'''
Does nothing.
'''
pass # Don't do anything
Will become::
def noop():
pass
"""
io_obj = io.StringIO(source)
out = ""
prev_toktype = tokenize.INDENT
last_lineno = -1
last_col = 0
for tok in tokenize.generate_tokens(io_obj.readline):
token_type = tok[0]
token_string = tok[1]
start_line, start_col = tok[2]
end_line, end_col = tok[3]
if start_line > last_lineno:
last_col = 0
if start_col > last_col:
out += (" " * (start_col - last_col))
# Remove comments:
if token_type == tokenize.COMMENT:
pass
# This series of conditionals removes docstrings:
elif token_type == tokenize.STRING:
if prev_toktype != tokenize.INDENT:
# This is likely a docstring; double-check we're not inside an operator:
if prev_toktype != tokenize.NEWLINE:
# Note regarding NEWLINE vs NL: The tokenize module
# differentiates between newlines that start a new statement
# and newlines inside of operators such as parens, brackets,
# and curly braces. Newlines inside of operators are
# NL and newlines that start new code are NEWLINE.
# Catch whole-module docstrings:
if start_col > 0:
# Unlabelled indentation means we're inside an operator
out += token_string
# Note regarding the INDENT token: The tokenize module does
# not label indentation inside of an operator (parens,
# brackets, and curly braces) as actual indentation.
# For example:
# def foo():
# "The spaces before this docstring are tokenize.INDENT"
# test = [
# "The spaces before this string do not get a token"
# ]
else:
out += token_string
prev_toktype = token_type
last_col = end_col
last_lineno = end_line
return out | Returns *source* minus comments and docstrings.
.. note:: Uses Python's built-in tokenize module to great effect.
Example::
def noop(): # This is a comment
'''
Does nothing.
'''
pass # Don't do anything
Will become::
def noop():
pass | Below is the instruction that describes the task:
### Input:
Returns *source* minus comments and docstrings.
.. note:: Uses Python's built-in tokenize module to great effect.
Example::
def noop(): # This is a comment
'''
Does nothing.
'''
pass # Don't do anything
Will become::
def noop():
pass
### Response:
def remove_comments_and_docstrings(source):
"""
Returns *source* minus comments and docstrings.
.. note:: Uses Python's built-in tokenize module to great effect.
Example::
def noop(): # This is a comment
'''
Does nothing.
'''
pass # Don't do anything
Will become::
def noop():
pass
"""
io_obj = io.StringIO(source)
out = ""
prev_toktype = tokenize.INDENT
last_lineno = -1
last_col = 0
for tok in tokenize.generate_tokens(io_obj.readline):
token_type = tok[0]
token_string = tok[1]
start_line, start_col = tok[2]
end_line, end_col = tok[3]
if start_line > last_lineno:
last_col = 0
if start_col > last_col:
out += (" " * (start_col - last_col))
# Remove comments:
if token_type == tokenize.COMMENT:
pass
# This series of conditionals removes docstrings:
elif token_type == tokenize.STRING:
if prev_toktype != tokenize.INDENT:
# This is likely a docstring; double-check we're not inside an operator:
if prev_toktype != tokenize.NEWLINE:
# Note regarding NEWLINE vs NL: The tokenize module
# differentiates between newlines that start a new statement
# and newlines inside of operators such as parens, brackets,
# and curly braces. Newlines inside of operators are
# NL and newlines that start new code are NEWLINE.
# Catch whole-module docstrings:
if start_col > 0:
# Unlabelled indentation means we're inside an operator
out += token_string
# Note regarding the INDENT token: The tokenize module does
# not label indentation inside of an operator (parens,
# brackets, and curly braces) as actual indentation.
# For example:
# def foo():
# "The spaces before this docstring are tokenize.INDENT"
# test = [
# "The spaces before this string do not get a token"
# ]
else:
out += token_string
prev_toktype = token_type
last_col = end_col
last_lineno = end_line
return out |
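A minimal usage sketch for `remove_comments_and_docstrings` above, assuming it is defined in a module alongside the `io` and `tokenize` imports it relies on:
import io
import tokenize

sample = (
    "def noop():  # This is a comment\n"
    "    '''\n"
    "    Does nothing.\n"
    "    '''\n"
    "    pass  # Don't do anything\n"
)

# Comments and the docstring are dropped; only the bare function remains.
print(remove_comments_and_docstrings(sample))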
def _ref(self, param, base_name=None):
"""
Store a parameter schema and return a reference to it.
:param schema:
Swagger parameter definition.
:param base_name:
Name that should be used for the reference.
:rtype: dict
:returns: JSON pointer to the original parameter definition.
"""
name = base_name or param.get('title', '') or param.get('name', '')
pointer = self.json_pointer + name
self.parameter_registry[name] = param
return {'$ref': pointer} | Store a parameter schema and return a reference to it.
:param schema:
Swagger parameter definition.
:param base_name:
Name that should be used for the reference.
:rtype: dict
:returns: JSON pointer to the original parameter definition. | Below is the instruction that describes the task:
### Input:
Store a parameter schema and return a reference to it.
:param schema:
Swagger parameter definition.
:param base_name:
Name that should be used for the reference.
:rtype: dict
:returns: JSON pointer to the original parameter definition.
### Response:
def _ref(self, param, base_name=None):
"""
Store a parameter schema and return a reference to it.
:param schema:
Swagger parameter definition.
:param base_name:
Name that should be used for the reference.
:rtype: dict
:returns: JSON pointer to the original parameter definition.
"""
name = base_name or param.get('title', '') or param.get('name', '')
pointer = self.json_pointer + name
self.parameter_registry[name] = param
return {'$ref': pointer} |
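A small sketch showing how `_ref` behaves when attached to a hypothetical converter object; the class name and attribute values below are stand-ins, not part of the original code:
class DummyConverter:
    def __init__(self):
        self.json_pointer = '#/parameters/'
        self.parameter_registry = {}

# Attach the function above as a method of the stand-in class.
DummyConverter._ref = _ref

conv = DummyConverter()
print(conv._ref({'name': 'page', 'in': 'query', 'type': 'integer'}))
# -> {'$ref': '#/parameters/page'}
print(conv.parameter_registry['page']['in'])
# -> 'query'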
def byte_to_bitstring(byte):
"""Convert one byte to a list of bits"""
assert 0 <= byte <= 0xff
bits = [int(x) for x in list(bin(byte + 0x100)[3:])]
return bits | Convert one byte to a list of bits | Below is the instruction that describes the task:
### Input:
Convert one byte to a list of bits
### Response:
def byte_to_bitstring(byte):
"""Convert one byte to a list of bits"""
assert 0 <= byte <= 0xff
bits = [int(x) for x in list(bin(byte + 0x100)[3:])]
return bits |
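A quick check of `byte_to_bitstring`, relying only on the function above:
# 0xA5 is 0b10100101, so eight bits come back most-significant first.
assert byte_to_bitstring(0xA5) == [1, 0, 1, 0, 0, 1, 0, 1]
assert byte_to_bitstring(0x00) == [0] * 8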
def select_star_cb(self, widget, res_dict):
"""This method is called when the user selects a star from the table.
"""
keys = list(res_dict.keys())
if len(keys) == 0:
self.selected = []
self.replot_stars()
else:
idx = int(keys[0])
star = self.starlist[idx]
if not self._select_flag:
self.mark_selection(star, fromtable=True)
return True | This method is called when the user selects a star from the table. | Below is the instruction that describes the task:
### Input:
This method is called when the user selects a star from the table.
### Response:
def select_star_cb(self, widget, res_dict):
"""This method is called when the user selects a star from the table.
"""
keys = list(res_dict.keys())
if len(keys) == 0:
self.selected = []
self.replot_stars()
else:
idx = int(keys[0])
star = self.starlist[idx]
if not self._select_flag:
self.mark_selection(star, fromtable=True)
return True |
def get_factory_bundle(self, name):
# type: (str) -> Bundle
"""
Retrieves the Pelix Bundle object that registered the given factory
:param name: The name of a factory
:return: The Bundle that registered the given factory
:raise ValueError: Invalid factory
"""
with self.__factories_lock:
try:
factory = self.__factories[name]
except KeyError:
raise ValueError("Unknown factory '{0}'".format(name))
else:
# Bundle Context is stored in the Factory Context
factory_context = getattr(
factory, constants.IPOPO_FACTORY_CONTEXT
)
return factory_context.bundle_context.get_bundle() | Retrieves the Pelix Bundle object that registered the given factory
:param name: The name of a factory
:return: The Bundle that registered the given factory
:raise ValueError: Invalid factory | Below is the instruction that describes the task:
### Input:
Retrieves the Pelix Bundle object that registered the given factory
:param name: The name of a factory
:return: The Bundle that registered the given factory
:raise ValueError: Invalid factory
### Response:
def get_factory_bundle(self, name):
# type: (str) -> Bundle
"""
Retrieves the Pelix Bundle object that registered the given factory
:param name: The name of a factory
:return: The Bundle that registered the given factory
:raise ValueError: Invalid factory
"""
with self.__factories_lock:
try:
factory = self.__factories[name]
except KeyError:
raise ValueError("Unknown factory '{0}'".format(name))
else:
# Bundle Context is stored in the Factory Context
factory_context = getattr(
factory, constants.IPOPO_FACTORY_CONTEXT
)
return factory_context.bundle_context.get_bundle() |
def _combine_variants(in_vcfs, out_file, ref_file, config):
"""Combine variant files, writing the header from the first non-empty input.
in_vcfs is a list with each item starting with the chromosome regions,
and ending with the input file.
We sort by these regions to ensure the output file is in the expected order.
"""
in_vcfs.sort()
wrote_header = False
with open(out_file, "w") as out_handle:
for in_vcf in (x[-1] for x in in_vcfs):
with open(in_vcf) as in_handle:
header = list(itertools.takewhile(lambda x: x.startswith("#"),
in_handle))
if not header[0].startswith("##fileformat=VCFv4"):
raise ValueError("Unexpected VCF file: %s" % in_vcf)
for line in in_handle:
if not wrote_header:
wrote_header = True
out_handle.write("".join(header))
out_handle.write(line)
if not wrote_header:
out_handle.write("".join(header))
return out_file | Combine variant files, writing the header from the first non-empty input.
in_vcfs is a list with each item starting with the chromosome regions,
and ending with the input file.
We sort by these regions to ensure the output file is in the expected order. | Below is the instruction that describes the task:
### Input:
Combine variant files, writing the header from the first non-empty input.
in_vcfs is a list with each item starting with the chromosome regions,
and ending with the input file.
We sort by these regions to ensure the output file is in the expected order.
### Response:
def _combine_variants(in_vcfs, out_file, ref_file, config):
"""Combine variant files, writing the header from the first non-empty input.
in_vcfs is a list with each item starting with the chromosome regions,
and ending with the input file.
We sort by these regions to ensure the output file is in the expected order.
"""
in_vcfs.sort()
wrote_header = False
with open(out_file, "w") as out_handle:
for in_vcf in (x[-1] for x in in_vcfs):
with open(in_vcf) as in_handle:
header = list(itertools.takewhile(lambda x: x.startswith("#"),
in_handle))
if not header[0].startswith("##fileformat=VCFv4"):
raise ValueError("Unexpected VCF file: %s" % in_vcf)
for line in in_handle:
if not wrote_header:
wrote_header = True
out_handle.write("".join(header))
out_handle.write(line)
if not wrote_header:
out_handle.write("".join(header))
return out_file |
def print_treemap(self, format=None, output=sys.stdout, **kwargs):
"""
Print the matrix for self's nodes.
Args:
format (str): output format (csv, json or text).
output (file): file descriptor on which to write.
"""
treemap = self.as_treemap()
treemap.print(format=format, output=output, **kwargs) | Print the matrix for self's nodes.
Args:
format (str): output format (csv, json or text).
output (file): file descriptor on which to write. | Below is the instruction that describes the task:
### Input:
Print the matrix for self's nodes.
Args:
format (str): output format (csv, json or text).
output (file): file descriptor on which to write.
### Response:
def print_treemap(self, format=None, output=sys.stdout, **kwargs):
"""
Print the matrix for self's nodes.
Args:
format (str): output format (csv, json or text).
output (file): file descriptor on which to write.
"""
treemap = self.as_treemap()
treemap.print(format=format, output=output, **kwargs) |
def build(self, X, Y, w=None, edges=None):
""" Assigns data to this object and builds the Morse-Smale
Complex
@ In, X, an m-by-n array of values specifying m
n-dimensional samples
@ In, Y, a m vector of values specifying the output
responses corresponding to the m samples specified by X
@ In, w, an optional m vector of values specifying the
weights associated to each of the m samples used. Default of
None means all points will be equally weighted
@ In, edges, an optional list of custom edges to use as a
starting point for pruning, or in place of a computed graph.
"""
super(ContourTree, self).build(X, Y, w, edges)
# Build the join and split trees that we will merge into the
# contour tree
joinTree = MergeTree(debug=self.debug)
splitTree = MergeTree(debug=self.debug)
joinTree.build_for_contour_tree(self, True)
splitTree.build_for_contour_tree(self, False)
self.augmentedEdges = dict(joinTree.augmentedEdges)
self.augmentedEdges.update(dict(splitTree.augmentedEdges))
if self.short_circuit:
jt = self._construct_nx_tree(joinTree, splitTree)
st = self._construct_nx_tree(splitTree, joinTree)
else:
jt = self._construct_nx_tree(joinTree)
st = self._construct_nx_tree(splitTree)
self._process_tree(jt, st)
self._process_tree(st, jt)
# Now we have a fully augmented contour tree stored in nodes and
# edges The rest is some convenience stuff for querying later
self._identifyBranches()
self._identifySuperGraph()
if self.debug:
sys.stdout.write("Sorting Nodes: ")
start = time.clock()
self.sortedNodes = sorted(enumerate(self.Y),
key=operator.itemgetter(1))
if self.debug:
end = time.clock()
sys.stdout.write("%f s\n" % (end - start)) | Assigns data to this object and builds the Morse-Smale
Complex
@ In, X, an m-by-n array of values specifying m
n-dimensional samples
@ In, Y, a m vector of values specifying the output
responses corresponding to the m samples specified by X
@ In, w, an optional m vector of values specifying the
weights associated to each of the m samples used. Default of
None means all points will be equally weighted
@ In, edges, an optional list of custom edges to use as a
starting point for pruning, or in place of a computed graph. | Below is the instruction that describes the task:
### Input:
Assigns data to this object and builds the Morse-Smale
Complex
@ In, X, an m-by-n array of values specifying m
n-dimensional samples
@ In, Y, a m vector of values specifying the output
responses corresponding to the m samples specified by X
@ In, w, an optional m vector of values specifying the
weights associated to each of the m samples used. Default of
None means all points will be equally weighted
@ In, edges, an optional list of custom edges to use as a
starting point for pruning, or in place of a computed graph.
### Response:
def build(self, X, Y, w=None, edges=None):
""" Assigns data to this object and builds the Morse-Smale
Complex
@ In, X, an m-by-n array of values specifying m
n-dimensional samples
@ In, Y, a m vector of values specifying the output
responses corresponding to the m samples specified by X
@ In, w, an optional m vector of values specifying the
weights associated to each of the m samples used. Default of
None means all points will be equally weighted
@ In, edges, an optional list of custom edges to use as a
starting point for pruning, or in place of a computed graph.
"""
super(ContourTree, self).build(X, Y, w, edges)
# Build the join and split trees that we will merge into the
# contour tree
joinTree = MergeTree(debug=self.debug)
splitTree = MergeTree(debug=self.debug)
joinTree.build_for_contour_tree(self, True)
splitTree.build_for_contour_tree(self, False)
self.augmentedEdges = dict(joinTree.augmentedEdges)
self.augmentedEdges.update(dict(splitTree.augmentedEdges))
if self.short_circuit:
jt = self._construct_nx_tree(joinTree, splitTree)
st = self._construct_nx_tree(splitTree, joinTree)
else:
jt = self._construct_nx_tree(joinTree)
st = self._construct_nx_tree(splitTree)
self._process_tree(jt, st)
self._process_tree(st, jt)
# Now we have a fully augmented contour tree stored in nodes and
# edges The rest is some convenience stuff for querying later
self._identifyBranches()
self._identifySuperGraph()
if self.debug:
sys.stdout.write("Sorting Nodes: ")
start = time.clock()
self.sortedNodes = sorted(enumerate(self.Y),
key=operator.itemgetter(1))
if self.debug:
end = time.clock()
sys.stdout.write("%f s\n" % (end - start)) |
def set_policy(name, table='filter', family='ipv4', **kwargs):
'''
.. versionadded:: 2014.1.0
Sets the default policy for iptables firewall tables
table
The table that owns the chain that should be modified
family
Networking family, either ipv4 or ipv6
policy
The requested table policy
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
if __salt__['iptables.get_policy'](
table,
kwargs['chain'],
family) == kwargs['policy']:
ret['result'] = True
ret['comment'] = ('iptables default policy for chain {0} on table {1} for {2} already set to {3}'
.format(kwargs['chain'], table, family, kwargs['policy']))
return ret
if __opts__['test']:
ret['comment'] = 'iptables default policy for chain {0} on table {1} for {2} needs to be set to {3}'.format(
kwargs['chain'],
table,
family,
kwargs['policy']
)
return ret
if not __salt__['iptables.set_policy'](
table,
kwargs['chain'],
kwargs['policy'],
family):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Set default policy for {0} to {1} family {2}'.format(
kwargs['chain'],
kwargs['policy'],
family
)
if 'save' in kwargs:
if kwargs['save']:
__salt__['iptables.save'](filename=None, family=family)
ret['comment'] = 'Set and saved default policy for {0} to {1} family {2}'.format(
kwargs['chain'],
kwargs['policy'],
family
)
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to set iptables default policy'
return ret | .. versionadded:: 2014.1.0
Sets the default policy for iptables firewall tables
table
The table that owns the chain that should be modified
family
Networking family, either ipv4 or ipv6
policy
The requested table policy | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2014.1.0
Sets the default policy for iptables firewall tables
table
The table that owns the chain that should be modified
family
Networking family, either ipv4 or ipv6
policy
The requested table policy
### Response:
def set_policy(name, table='filter', family='ipv4', **kwargs):
'''
.. versionadded:: 2014.1.0
Sets the default policy for iptables firewall tables
table
The table that owns the chain that should be modified
family
Networking family, either ipv4 or ipv6
policy
The requested table policy
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
if __salt__['iptables.get_policy'](
table,
kwargs['chain'],
family) == kwargs['policy']:
ret['result'] = True
ret['comment'] = ('iptables default policy for chain {0} on table {1} for {2} already set to {3}'
.format(kwargs['chain'], table, family, kwargs['policy']))
return ret
if __opts__['test']:
ret['comment'] = 'iptables default policy for chain {0} on table {1} for {2} needs to be set to {3}'.format(
kwargs['chain'],
table,
family,
kwargs['policy']
)
return ret
if not __salt__['iptables.set_policy'](
table,
kwargs['chain'],
kwargs['policy'],
family):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Set default policy for {0} to {1} family {2}'.format(
kwargs['chain'],
kwargs['policy'],
family
)
if 'save' in kwargs:
if kwargs['save']:
__salt__['iptables.save'](filename=None, family=family)
ret['comment'] = 'Set and saved default policy for {0} to {1} family {2}'.format(
kwargs['chain'],
kwargs['policy'],
family
)
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to set iptables default policy'
return ret |
def get_children_to_delete(self):
"""Return all children that are not referenced
:returns: list or :class:`Reftrack`
:rtype: list
:raises: None
"""
refobjinter = self.get_refobjinter()
children = self.get_all_children()
todelete = []
for c in children:
if c.status() is None:
# if child is not in scene we do not have to delete it
continue
rby = refobjinter.referenced_by(c.get_refobj())
if rby is None:
# child is not part of another reference.
# we have to delete it for sure
todelete.append(c)
continue
# check if child is referenced by any parent up to self
# if it is not referenced by any refrence of a parent, then we
# can assume it is referenced by a parent of a greater scope,
# e.g. the parent of self. because we do not delete anything above self
# we would have to delete the child manually
parent = c.get_parent()
while parent != self.get_parent():
if refobjinter.get_reference(parent.get_refobj()) == rby:
# is referenced by a parent so it will get deleted when the parent is deleted.
break
parent = parent.get_parent()
else:
todelete.append(c)
return todelete | Return all children that are not referenced
:returns: list or :class:`Reftrack`
:rtype: list
:raises: None | Below is the instruction that describes the task:
### Input:
Return all children that are not referenced
:returns: list or :class:`Reftrack`
:rtype: list
:raises: None
### Response:
def get_children_to_delete(self):
"""Return all children that are not referenced
:returns: list or :class:`Reftrack`
:rtype: list
:raises: None
"""
refobjinter = self.get_refobjinter()
children = self.get_all_children()
todelete = []
for c in children:
if c.status() is None:
# if child is not in scene we do not have to delete it
continue
rby = refobjinter.referenced_by(c.get_refobj())
if rby is None:
# child is not part of another reference.
# we have to delete it for sure
todelete.append(c)
continue
# check if child is referenced by any parent up to self
# if it is not referenced by any refrence of a parent, then we
# can assume it is referenced by a parent of a greater scope,
# e.g. the parent of self. because we do not delete anything above self
# we would have to delete the child manually
parent = c.get_parent()
while parent != self.get_parent():
if refobjinter.get_reference(parent.get_refobj()) == rby:
# is referenced by a parent so it will get deleted when the parent is deleted.
break
parent = parent.get_parent()
else:
todelete.append(c)
return todelete |
def get_path(self, tile):
"""
Determine target file path.
Parameters
----------
tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
path : string
"""
return os.path.join(*[
self.path,
str(tile.zoom),
str(tile.row),
str(tile.col) + self.file_extension
]) | Determine target file path.
Parameters
----------
tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
path : string | Below is the instruction that describes the task:
### Input:
Determine target file path.
Parameters
----------
tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
path : string
### Response:
def get_path(self, tile):
"""
Determine target file path.
Parameters
----------
tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
path : string
"""
return os.path.join(*[
self.path,
str(tile.zoom),
str(tile.row),
str(tile.col) + self.file_extension
]) |
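A sketch of the path layout `get_path` produces, calling the function above as a plain function on hypothetical stand-in objects (the attribute names are assumptions that mirror what the method expects):
import os
from types import SimpleNamespace

# Stand-ins for the output object (self) and a tile.
output = SimpleNamespace(path="/tmp/tiles", file_extension=".png")
tile = SimpleNamespace(zoom=5, row=12, col=7)

print(get_path(output, tile))
# -> /tmp/tiles/5/12/7.png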
def flipcheck(content):
"""Checks a string for anger and soothes said anger
Args:
content (str): The message to be flipchecked
Returns:
putitback (str): The righted table or text
"""
# Prevent tampering with flip
punct = """!"#$%&'*+,-./:;<=>?@[\]^_`{|}~ ━─"""
tamperdict = str.maketrans('', '', punct)
tamperproof = content.translate(tamperdict)
# Unflip
if "(╯°□°)╯︵" in tamperproof:
# For tables
if "┻┻" in tamperproof:
# Calculate table length
length = 0
for letter in content:
if letter == "━":
length += 1.36
elif letter == "─":
length += 1
elif letter == "-":
length += 0.50
# Construct table
putitback = "┬"
for i in range(int(length)):
putitback += "─"
putitback += "┬ ノ( ゜-゜ノ)"
return putitback
# For text
else:
# Create dictionary for flipping text
flipdict = str.maketrans(
'abcdefghijklmnopqrstuvwxyzɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎz😅🙃😞😟😠😡☹🙁😱😨😰😦😧😢😓😥😭',
'ɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎzabcdefghijklmnopqrstuvwxyz😄🙂🙂🙂🙂🙂🙂😀😀🙂😄🙂🙂😄😄😄😁'
)
# Construct flipped text
flipstart = content.index('︵')
flipped = content[flipstart+1:]
flipped = str.lower(flipped).translate(flipdict)
putitback = ''.join(list(reversed(list(flipped))))
putitback += "ノ( ゜-゜ノ)"
return putitback
else:
return False | Checks a string for anger and soothes said anger
Args:
content (str): The message to be flipchecked
Returns:
putitback (str): The righted table or text | Below is the instruction that describes the task:
### Input:
Checks a string for anger and soothes said anger
Args:
content (str): The message to be flipchecked
Returns:
putitback (str): The righted table or text
### Response:
def flipcheck(content):
"""Checks a string for anger and soothes said anger
Args:
content (str): The message to be flipchecked
Returns:
putitback (str): The righted table or text
"""
# Prevent tampering with flip
punct = """!"#$%&'*+,-./:;<=>?@[\]^_`{|}~ ━─"""
tamperdict = str.maketrans('', '', punct)
tamperproof = content.translate(tamperdict)
# Unflip
if "(╯°□°)╯︵" in tamperproof:
# For tables
if "┻┻" in tamperproof:
# Calculate table length
length = 0
for letter in content:
if letter == "━":
length += 1.36
elif letter == "─":
length += 1
elif letter == "-":
length += 0.50
# Construct table
putitback = "┬"
for i in range(int(length)):
putitback += "─"
putitback += "┬ ノ( ゜-゜ノ)"
return putitback
# For text
else:
# Create dictionary for flipping text
flipdict = str.maketrans(
'abcdefghijklmnopqrstuvwxyzɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎz😅🙃😞😟😠😡☹🙁😱😨😰😦😧😢😓😥😭',
'ɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎzabcdefghijklmnopqrstuvwxyz😄🙂🙂🙂🙂🙂🙂😀😀🙂😄🙂🙂😄😄😄😁'
)
# Construct flipped text
flipstart = content.index('︵')
flipped = content[flipstart+1:]
flipped = str.lower(flipped).translate(flipdict)
putitback = ''.join(list(reversed(list(flipped))))
putitback += "ノ( ゜-゜ノ)"
return putitback
else:
return False |
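Two quick calls to `flipcheck` above, showing the table-righting branch and the no-op case:
print(flipcheck("(╯°□°)╯︵ ┻━┻"))
# -> ┬─┬ ノ( ゜-゜ノ)   (the flipped table is set back upright)
print(flipcheck("all calm here"))
# -> False              (nothing to unflip)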
def setup_sort_column(widget, column=0, attribute=None, model=None):
"""
*model* is the :class:`TreeModelSort` to act on. Defaults to what is
displayed. Pass this if you sort before filtering.
*widget* is a clickable :class:`TreeViewColumn`.
*column* is an integer addressing the column in *model* that holds your
objects.
*attribute* is a string naming an object attribute to display. Defaults
to the name of *widget*.
"""
if not attribute:
attribute = widget.get_name()
if attribute is None:
raise TypeError("Column not named")
widget.connect('clicked', _clicked, column, attribute, model) | *model* is the :class:`TreeModelSort` to act on. Defaults to what is
displayed. Pass this if you sort before filtering.
*widget* is a clickable :class:`TreeViewColumn`.
*column* is an integer addressing the column in *model* that holds your
objects.
*attribute* is a string naming an object attribute to display. Defaults
to the name of *widget*. | Below is the instruction that describes the task:
### Input:
*model* is the :class:`TreeModelSort` to act on. Defaults to what is
displayed. Pass this if you sort before filtering.
*widget* is a clickable :class:`TreeViewColumn`.
*column* is an integer addressing the column in *model* that holds your
objects.
*attribute* is a string naming an object attribute to display. Defaults
to the name of *widget*.
### Response:
def setup_sort_column(widget, column=0, attribute=None, model=None):
"""
*model* is the :class:`TreeModelSort` to act on. Defaults to what is
displayed. Pass this if you sort before filtering.
*widget* is a clickable :class:`TreeViewColumn`.
*column* is an integer addressing the column in *model* that holds your
objects.
*attribute* is a string naming an object attribute to display. Defaults
to the name of *widget*.
"""
if not attribute:
attribute = widget.get_name()
if attribute is None:
raise TypeError("Column not named")
widget.connect('clicked', _clicked, column, attribute, model) |
def hist2array(hist, include_overflow=False, copy=True, return_edges=False):
"""Convert a ROOT histogram into a NumPy array
Parameters
----------
hist : ROOT TH1, TH2, TH3, THn, or THnSparse
The ROOT histogram to convert into an array
include_overflow : bool, optional (default=False)
If True, the over- and underflow bins will be included in the
output numpy array. These bins are excluded by default.
copy : bool, optional (default=True)
If True (the default) then copy the underlying array, otherwise the
NumPy array will view (and not own) the same memory as the ROOT
histogram's array.
return_edges : bool, optional (default=False)
If True, also return the bin edges along each axis.
Returns
-------
array : numpy array
A NumPy array containing the histogram bin values
edges : list of numpy arrays
A list of numpy arrays where each array contains the bin edges along
the corresponding axis of ``hist``. Overflow and underflow bins are not
included.
Raises
------
TypeError
If hist is not a ROOT histogram.
See Also
--------
array2hist
"""
import ROOT
# Determine dimensionality and shape
simple_hist = True
if isinstance(hist, ROOT.TH3):
shape = (hist.GetNbinsZ() + 2,
hist.GetNbinsY() + 2,
hist.GetNbinsX() + 2)
elif isinstance(hist, ROOT.TH2):
shape = (hist.GetNbinsY() + 2, hist.GetNbinsX() + 2)
elif isinstance(hist, ROOT.TH1):
shape = (hist.GetNbinsX() + 2,)
elif isinstance(hist, ROOT.THnBase):
shape = tuple([hist.GetAxis(i).GetNbins() + 2
for i in range(hist.GetNdimensions())])
simple_hist = False
else:
raise TypeError(
"hist must be an instance of ROOT.TH1, "
"ROOT.TH2, ROOT.TH3, or ROOT.THnBase")
# Determine the corresponding numpy dtype
if simple_hist:
for hist_type in 'DFISC':
if isinstance(hist, getattr(ROOT, 'TArray{0}'.format(hist_type))):
break
else:
raise AssertionError(
"hist is somehow an instance of TH[1|2|3] "
"but not TArray[D|F|I|S|C]")
else: # THn, THnSparse
if isinstance(hist, ROOT.THnSparse):
cls_string = 'THnSparse{0}'
else:
cls_string = 'THn{0}'
for hist_type in 'CSILFD':
if isinstance(hist, getattr(ROOT, cls_string.format(hist_type))):
break
else:
raise AssertionError(
"unsupported THn or THnSparse bin type")
if simple_hist:
# Construct a NumPy array viewing the underlying histogram array
if hist_type == 'C':
array_func = getattr(_librootnumpy,
'array_h{0}c'.format(len(shape)))
array = array_func(ROOT.AsCObject(hist))
array.shape = shape
else:
dtype = np.dtype(DTYPE_ROOT2NUMPY[hist_type])
array = np.ndarray(shape=shape, dtype=dtype,
buffer=hist.GetArray())
else: # THn THnSparse
dtype = np.dtype(DTYPE_ROOT2NUMPY[hist_type])
if isinstance(hist, ROOT.THnSparse):
array = _librootnumpy.thnsparse2array(ROOT.AsCObject(hist),
shape, dtype)
else:
array = _librootnumpy.thn2array(ROOT.AsCObject(hist),
shape, dtype)
if return_edges:
if simple_hist:
ndims = hist.GetDimension()
axis_getters = ['GetXaxis', 'GetYaxis', 'GetZaxis'][:ndims]
else:
ndims = hist.GetNdimensions()
axis_getters = ['GetAxis'] * ndims
edges = []
for idim, axis_getter in zip(range(ndims), axis_getters):
# GetXaxis expects 0 parameters while we need the axis in GetAxis
ax = getattr(hist, axis_getter)(*(() if simple_hist else (idim,)))
# `edges` is Nbins + 1 in order to have the last bin's upper edge as well
edges.append(np.empty(ax.GetNbins() + 1, dtype=np.double))
# load the lower edges into `edges`
ax.GetLowEdge(edges[-1])
# Get the upper edge of the last bin
edges[-1][-1] = ax.GetBinUpEdge(ax.GetNbins())
if not include_overflow:
# Remove overflow and underflow bins
array = array[tuple([slice(1, -1) for idim in range(array.ndim)])]
if simple_hist:
# Preserve x, y, z -> axis 0, 1, 2 order
array = np.transpose(array)
if copy:
array = np.copy(array)
if return_edges:
return array, edges
return array | Convert a ROOT histogram into a NumPy array
Parameters
----------
hist : ROOT TH1, TH2, TH3, THn, or THnSparse
The ROOT histogram to convert into an array
include_overflow : bool, optional (default=False)
If True, the over- and underflow bins will be included in the
output numpy array. These bins are excluded by default.
copy : bool, optional (default=True)
If True (the default) then copy the underlying array, otherwise the
NumPy array will view (and not own) the same memory as the ROOT
histogram's array.
return_edges : bool, optional (default=False)
If True, also return the bin edges along each axis.
Returns
-------
array : numpy array
A NumPy array containing the histogram bin values
edges : list of numpy arrays
A list of numpy arrays where each array contains the bin edges along
the corresponding axis of ``hist``. Overflow and underflow bins are not
included.
Raises
------
TypeError
If hist is not a ROOT histogram.
See Also
--------
array2hist | Below is the instruction that describes the task:
### Input:
Convert a ROOT histogram into a NumPy array
Parameters
----------
hist : ROOT TH1, TH2, TH3, THn, or THnSparse
The ROOT histogram to convert into an array
include_overflow : bool, optional (default=False)
If True, the over- and underflow bins will be included in the
output numpy array. These bins are excluded by default.
copy : bool, optional (default=True)
If True (the default) then copy the underlying array, otherwise the
NumPy array will view (and not own) the same memory as the ROOT
histogram's array.
return_edges : bool, optional (default=False)
If True, also return the bin edges along each axis.
Returns
-------
array : numpy array
A NumPy array containing the histogram bin values
edges : list of numpy arrays
A list of numpy arrays where each array contains the bin edges along
the corresponding axis of ``hist``. Overflow and underflow bins are not
included.
Raises
------
TypeError
If hist is not a ROOT histogram.
See Also
--------
array2hist
### Response:
def hist2array(hist, include_overflow=False, copy=True, return_edges=False):
"""Convert a ROOT histogram into a NumPy array
Parameters
----------
hist : ROOT TH1, TH2, TH3, THn, or THnSparse
The ROOT histogram to convert into an array
include_overflow : bool, optional (default=False)
If True, the over- and underflow bins will be included in the
output numpy array. These bins are excluded by default.
copy : bool, optional (default=True)
If True (the default) then copy the underlying array, otherwise the
NumPy array will view (and not own) the same memory as the ROOT
histogram's array.
return_edges : bool, optional (default=False)
If True, also return the bin edges along each axis.
Returns
-------
array : numpy array
A NumPy array containing the histogram bin values
edges : list of numpy arrays
A list of numpy arrays where each array contains the bin edges along
the corresponding axis of ``hist``. Overflow and underflow bins are not
included.
Raises
------
TypeError
If hist is not a ROOT histogram.
See Also
--------
array2hist
"""
import ROOT
# Determine dimensionality and shape
simple_hist = True
if isinstance(hist, ROOT.TH3):
shape = (hist.GetNbinsZ() + 2,
hist.GetNbinsY() + 2,
hist.GetNbinsX() + 2)
elif isinstance(hist, ROOT.TH2):
shape = (hist.GetNbinsY() + 2, hist.GetNbinsX() + 2)
elif isinstance(hist, ROOT.TH1):
shape = (hist.GetNbinsX() + 2,)
elif isinstance(hist, ROOT.THnBase):
shape = tuple([hist.GetAxis(i).GetNbins() + 2
for i in range(hist.GetNdimensions())])
simple_hist = False
else:
raise TypeError(
"hist must be an instance of ROOT.TH1, "
"ROOT.TH2, ROOT.TH3, or ROOT.THnBase")
# Determine the corresponding numpy dtype
if simple_hist:
for hist_type in 'DFISC':
if isinstance(hist, getattr(ROOT, 'TArray{0}'.format(hist_type))):
break
else:
raise AssertionError(
"hist is somehow an instance of TH[1|2|3] "
"but not TArray[D|F|I|S|C]")
else: # THn, THnSparse
if isinstance(hist, ROOT.THnSparse):
cls_string = 'THnSparse{0}'
else:
cls_string = 'THn{0}'
for hist_type in 'CSILFD':
if isinstance(hist, getattr(ROOT, cls_string.format(hist_type))):
break
else:
raise AssertionError(
"unsupported THn or THnSparse bin type")
if simple_hist:
# Construct a NumPy array viewing the underlying histogram array
if hist_type == 'C':
array_func = getattr(_librootnumpy,
'array_h{0}c'.format(len(shape)))
array = array_func(ROOT.AsCObject(hist))
array.shape = shape
else:
dtype = np.dtype(DTYPE_ROOT2NUMPY[hist_type])
array = np.ndarray(shape=shape, dtype=dtype,
buffer=hist.GetArray())
else: # THn THnSparse
dtype = np.dtype(DTYPE_ROOT2NUMPY[hist_type])
if isinstance(hist, ROOT.THnSparse):
array = _librootnumpy.thnsparse2array(ROOT.AsCObject(hist),
shape, dtype)
else:
array = _librootnumpy.thn2array(ROOT.AsCObject(hist),
shape, dtype)
if return_edges:
if simple_hist:
ndims = hist.GetDimension()
axis_getters = ['GetXaxis', 'GetYaxis', 'GetZaxis'][:ndims]
else:
ndims = hist.GetNdimensions()
axis_getters = ['GetAxis'] * ndims
edges = []
for idim, axis_getter in zip(range(ndims), axis_getters):
# GetXaxis expects 0 parameters while we need the axis in GetAxis
ax = getattr(hist, axis_getter)(*(() if simple_hist else (idim,)))
# `edges` is Nbins + 1 in order to have the last bin's upper edge as well
edges.append(np.empty(ax.GetNbins() + 1, dtype=np.double))
# load the lower edges into `edges`
ax.GetLowEdge(edges[-1])
# Get the upper edge of the last bin
edges[-1][-1] = ax.GetBinUpEdge(ax.GetNbins())
if not include_overflow:
# Remove overflow and underflow bins
array = array[tuple([slice(1, -1) for idim in range(array.ndim)])]
if simple_hist:
# Preserve x, y, z -> axis 0, 1, 2 order
array = np.transpose(array)
if copy:
array = np.copy(array)
if return_edges:
return array, edges
return array |
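A minimal sketch of calling `hist2array` above, assuming PyROOT is installed and the function is importable in the same environment:
import ROOT

h = ROOT.TH1D("h", "demo", 10, 0.0, 1.0)
h.FillRandom("gaus", 1000)

counts, (xedges,) = hist2array(h, return_edges=True)
print(counts.shape)   # (10,)  overflow/underflow excluded by default
print(xedges.shape)   # (11,)  includes the upper edge of the last bin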
def load_header_chain( cls, chain_path ):
"""
Load the header chain from disk.
Each chain element will be a dictionary with:
*
"""
header_parser = BlockHeaderSerializer()
chain = []
height = 0
with open(chain_path, "rb") as f:
h = SPVClient.read_header_at( f )
h['block_height'] = height
height += 1
chain.append(h)
return chain | Load the header chain from disk.
Each chain element will be a dictionary with:
* | Below is the instruction that describes the task:
### Input:
Load the header chain from disk.
Each chain element will be a dictionary with:
*
### Response:
def load_header_chain( cls, chain_path ):
"""
Load the header chain from disk.
Each chain element will be a dictionary with:
*
"""
header_parser = BlockHeaderSerializer()
chain = []
height = 0
with open(chain_path, "rb") as f:
h = SPVClient.read_header_at( f )
h['block_height'] = height
height += 1
chain.append(h)
return chain |
def validate_linux_host_name(namespace):
"""Validates a string as a legal host name component.
This validation will also occur server-side in the ARM API, but that may take
a minute or two before the user sees it. So it's more user-friendly to validate
in the CLI pre-flight.
"""
# https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address
rfc1123_regex = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$') # pylint:disable=line-too-long
found = rfc1123_regex.findall(namespace.name)
if not found:
raise CLIError('--name cannot exceed 63 characters and can only contain '
'letters, numbers, or dashes (-).') | Validates a string as a legal host name component.
This validation will also occur server-side in the ARM API, but that may take
a minute or two before the user sees it. So it's more user-friendly to validate
in the CLI pre-flight. | Below is the instruction that describes the task:
### Input:
Validates a string as a legal host name component.
This validation will also occur server-side in the ARM API, but that may take
a minute or two before the user sees it. So it's more user-friendly to validate
in the CLI pre-flight.
### Response:
def validate_linux_host_name(namespace):
"""Validates a string as a legal host name component.
This validation will also occur server-side in the ARM API, but that may take
a minute or two before the user sees it. So it's more user-friendly to validate
in the CLI pre-flight.
"""
# https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address
rfc1123_regex = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$') # pylint:disable=line-too-long
found = rfc1123_regex.findall(namespace.name)
if not found:
raise CLIError('--name cannot exceed 63 characters and can only contain '
'letters, numbers, or dashes (-).') |
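The RFC 1123 pattern used above can be exercised on its own; a small sketch:
import re

rfc1123_regex = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$')

print(bool(rfc1123_regex.findall("my-cluster-01")))   # True
print(bool(rfc1123_regex.findall("bad_name!")))       # False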
def Logs(loggername, echo=True, debug=False, chatty=False, loglevel=logging.INFO, logfile=None, logpath=None, fileHandler=None):
"""Initialize logging
"""
log = logging.getLogger(loggername)
if fileHandler is None:
if logfile is None:
logFilename = _ourName
else:
logFilename = logfile
if '.log' not in logFilename:
logFilename = '%s.log' % logFilename
if logpath is not None:
logFilename = os.path.join(logpath, logFilename)
_handler = logging.FileHandler(logFilename)
_formatter = logging.Formatter('%(asctime)s %(levelname)-7s %(message)s')
_handler.setFormatter(_formatter)
log.addHandler(_handler)
# logging.fileHandler = _handler
else:
log.addHandler(fileHandler)
# logging.fileHandler = fileHandler
if echo:
echoHandler = logging.StreamHandler()
if chatty:
echoFormatter = logging.Formatter('%(asctime)s %(levelname)-7s %(processName)s[%(process)d]: %(message)s')
else:
echoFormatter = logging.Formatter('%(asctime)s %(levelname)-7s %(message)s')
echoHandler.setFormatter(echoFormatter)
log.addHandler(echoHandler)
if debug:
log.setLevel(logging.DEBUG)
else:
log.setLevel(loglevel)
atexit.register(shutdownLogging) | Initialize logging | Below is the instruction that describes the task:
### Input:
Initialize logging
### Response:
def Logs(loggername, echo=True, debug=False, chatty=False, loglevel=logging.INFO, logfile=None, logpath=None, fileHandler=None):
"""Initialize logging
"""
log = logging.getLogger(loggername)
if fileHandler is None:
if logfile is None:
logFilename = _ourName
else:
logFilename = logfile
if '.log' not in logFilename:
logFilename = '%s.log' % logFilename
if logpath is not None:
logFilename = os.path.join(logpath, logFilename)
_handler = logging.FileHandler(logFilename)
_formatter = logging.Formatter('%(asctime)s %(levelname)-7s %(message)s')
_handler.setFormatter(_formatter)
log.addHandler(_handler)
# logging.fileHandler = _handler
else:
log.addHandler(fileHandler)
# logging.fileHandler = fileHandler
if echo:
echoHandler = logging.StreamHandler()
if chatty:
echoFormatter = logging.Formatter('%(asctime)s %(levelname)-7s %(processName)s[%(process)d]: %(message)s')
else:
echoFormatter = logging.Formatter('%(asctime)s %(levelname)-7s %(message)s')
echoHandler.setFormatter(echoFormatter)
log.addHandler(echoHandler)
if debug:
log.setLevel(logging.DEBUG)
else:
log.setLevel(loglevel)
atexit.register(shutdownLogging) |
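A rough usage sketch for `Logs` above; `_ourName` and `shutdownLogging` are module-level names it references but which are not shown here, so the stand-ins below are assumptions:
import atexit
import logging
import os

# Hypothetical stand-ins for module-level names assumed by Logs.
_ourName = "myapp"
def shutdownLogging():
    logging.shutdown()

Logs("myapp", echo=True, debug=True, logpath="/tmp")
logging.getLogger("myapp").debug("logging initialized")  # goes to console and /tmp/myapp.log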
def find_spelling(n):
"""
Finds d, r s.t. n-1 = 2^r * d
"""
r = 0
d = n - 1
# divmod used for large numbers
quotient, remainder = divmod(d, 2)
# while we can still divide 2's into n-1...
while remainder != 1:
r += 1
d = quotient # previous quotient before we overwrite it
quotient, remainder = divmod(d, 2)
return r, d | Finds d, r s.t. n-1 = 2^r * d | Below is the instruction that describes the task:
### Input:
Finds d, r s.t. n-1 = 2^r * d
### Response:
def find_spelling(n):
"""
Finds d, r s.t. n-1 = 2^r * d
"""
r = 0
d = n - 1
# divmod used for large numbers
quotient, remainder = divmod(d, 2)
# while we can still divide 2's into n-1...
while remainder != 1:
r += 1
d = quotient # previous quotient before we overwrite it
quotient, remainder = divmod(d, 2)
return r, d |
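A quick check of the decomposition `find_spelling` above computes (the form used in Miller-Rabin-style primality tests):
r, d = find_spelling(561)
print(r, d)                          # 4 35, since 560 == 2**4 * 35
assert 561 - 1 == 2**r * d and d % 2 == 1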
def removeJob(self, jobBatchSystemID):
"""Removes a job from the system."""
assert jobBatchSystemID in self.jobBatchSystemIDToIssuedJob
jobNode = self.jobBatchSystemIDToIssuedJob[jobBatchSystemID]
if jobNode.preemptable:
# len(jobBatchSystemIDToIssuedJob) should always be greater than or equal to preemptableJobsIssued,
# so decrement this value before removing the job from the issuedJob map
assert self.preemptableJobsIssued > 0
self.preemptableJobsIssued -= 1
del self.jobBatchSystemIDToIssuedJob[jobBatchSystemID]
# If service job
if jobNode.jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob:
# Decrement the number of services
if jobNode.preemptable:
self.preemptableServiceJobsIssued -= 1
else:
self.serviceJobsIssued -= 1
return jobNode | Removes a job from the system. | Below is the instruction that describes the task:
### Input:
Removes a job from the system.
### Response:
def removeJob(self, jobBatchSystemID):
"""Removes a job from the system."""
assert jobBatchSystemID in self.jobBatchSystemIDToIssuedJob
jobNode = self.jobBatchSystemIDToIssuedJob[jobBatchSystemID]
if jobNode.preemptable:
# len(jobBatchSystemIDToIssuedJob) should always be greater than or equal to preemptableJobsIssued,
# so decrement this value before removing the job from the issuedJob map
assert self.preemptableJobsIssued > 0
self.preemptableJobsIssued -= 1
del self.jobBatchSystemIDToIssuedJob[jobBatchSystemID]
# If service job
if jobNode.jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob:
# Decrement the number of services
if jobNode.preemptable:
self.preemptableServiceJobsIssued -= 1
else:
self.serviceJobsIssued -= 1
return jobNode |
def analysis(self):
"""The list of analysis of ``words`` layer elements."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return [word[ANALYSIS] for word in self.words] | The list of analysis of ``words`` layer elements. | Below is the instruction that describes the task:
### Input:
The list of analysis of ``words`` layer elements.
### Response:
def analysis(self):
"""The list of analysis of ``words`` layer elements."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return [word[ANALYSIS] for word in self.words] |
def chromiumContext(self, url, extra_tid=None):
'''
Return an active chromium context, usable for manual operations directly against
chromium.
The WebRequest user agent and other context is synchronized into the chromium
instance at startup, and changes are flushed back to the webrequest instance
from chromium at completion.
'''
assert url is not None, "You need to pass a URL to the contextmanager, so it can dispatch to the correct tab!"
if extra_tid is True:
extra_tid = threading.get_ident()
return self._chrome_context(url, extra_tid=extra_tid) | Return an active chromium context, usable for manual operations directly against
chromium.
The WebRequest user agent and other context is synchronized into the chromium
instance at startup, and changes are flushed back to the webrequest instance
from chromium at completion. | Below is the instruction that describes the task:
### Input:
Return an active chromium context, usable for manual operations directly against
chromium.
The WebRequest user agent and other context is synchronized into the chromium
instance at startup, and changes are flushed back to the webrequest instance
from chromium at completion.
### Response:
def chromiumContext(self, url, extra_tid=None):
'''
Return an active chromium context, usable for manual operations directly against
chromium.
The WebRequest user agent and other context is synchronized into the chromium
instance at startup, and changes are flushed back to the webrequest instance
from chromium at completion.
'''
assert url is not None, "You need to pass a URL to the contextmanager, so it can dispatch to the correct tab!"
if extra_tid is True:
extra_tid = threading.get_ident()
return self._chrome_context(url, extra_tid=extra_tid) |
def parse_analyzer_arguments(arguments):
"""
Parse string in format `function_1:param1=value:param2 function_2:param` into array of FunctionArguments
"""
rets = []
for argument in arguments:
args = argument.split(argument_splitter)
# The first one is the function name
func_name = args[0]
# The rest is the args
func_args = {}
for arg in args[1:]:
key, value = parse_arg(arg)
func_args[key] = value
rets.append(FunctionArguments(function=func_name, arguments=func_args))
return rets | Parse string in format `function_1:param1=value:param2 function_2:param` into array of FunctionArguments | Below is the instruction that describes the task:
### Input:
Parse string in format `function_1:param1=value:param2 function_2:param` into array of FunctionArguments
### Response:
def parse_analyzer_arguments(arguments):
"""
Parse string in format `function_1:param1=value:param2 function_2:param` into array of FunctionArguments
"""
rets = []
for argument in arguments:
args = argument.split(argument_splitter)
# The first one is the function name
func_name = args[0]
# The rest is the args
func_args = {}
for arg in args[1:]:
key, value = parse_arg(arg)
func_args[key] = value
rets.append(FunctionArguments(function=func_name, arguments=func_args))
return rets |
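A self-contained sketch of `parse_analyzer_arguments` above; `argument_splitter`, `parse_arg`, and `FunctionArguments` are referenced but not shown, so the definitions below are illustrative stand-ins:
from collections import namedtuple

# Hypothetical stand-ins for names assumed by the function above.
argument_splitter = ':'
FunctionArguments = namedtuple('FunctionArguments', 'function arguments')

def parse_arg(arg):
    key, _, value = arg.partition('=')
    return key, value or None

print(parse_analyzer_arguments(['tokenize:lang=en:lower', 'count_words']))
# -> [FunctionArguments(function='tokenize', arguments={'lang': 'en', 'lower': None}),
#     FunctionArguments(function='count_words', arguments={})]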
def regenerate_models(self, propnames=None, exclude=[], deep=False):
r"""
Re-runs the specified model or models.
Parameters
----------
propnames : string or list of strings
The list of property names to be regenerated. If None are given
then ALL models are re-run (except for those whose ``regen_mode``
is 'constant').
exclude : list of strings
Since the default behavior is to run ALL models, this can be used
to exclude specific models. It may be more convenient to supply
as list of 2 models to exclude than to specify 8 models to include.
deep : boolean
Specifies whether or not to regenerate models on all associated
objects. For instance, if ``True``, then all Physics models will
be regenerated when method is called on the corresponding Phase.
The default is ``False``. The method does not work in reverse,
so regenerating models on a Physics will not update a Phase.
"""
# If empty list of propnames was given, do nothing and return
if type(propnames) is list and len(propnames) == 0:
return
if type(propnames) is str: # Convert string to list if necessary
propnames = [propnames]
if propnames is None: # If no props given, then regenerate them all
propnames = self.models.dependency_list()
# If some props are to be excluded, remove them from list
if len(exclude) > 0:
propnames = [i for i in propnames if i not in exclude]
# Re-order given propnames according to dependency tree
self_models = self.models.dependency_list()
propnames = [i for i in self_models if i in propnames]
if deep:
other_models = None # Will trigger regen of ALL models
else:
# Make list of given propnames that are not in self
other_models = list(set(propnames).difference(set(self_models)))
# The following has some redundant lines, but is easier to understand
if self._isa('phase'):
# Start by regenerating models on self
for item in propnames:
self._regen(item)
# Then regen models on associated objects, if any in other_models
for phys in self.project.find_physics(phase=self):
phys.regenerate_models(propnames=other_models, deep=False)
elif self._isa('network'): # Repeat for other object types
for item in propnames:
self._regen(item)
for geom in self.project.geometries().values():
geom.regenerate_models(propnames=other_models, deep=False)
else:
for item in propnames:
self._regen(item) | r"""
Re-runs the specified model or models.
Parameters
----------
propnames : string or list of strings
The list of property names to be regenerated. If None are given
then ALL models are re-run (except for those whose ``regen_mode``
is 'constant').
exclude : list of strings
Since the default behavior is to run ALL models, this can be used
to exclude specific models. It may be more convenient to supply
as list of 2 models to exclude than to specify 8 models to include.
deep : boolean
Specifies whether or not to regenerate models on all associated
objects. For instance, if ``True``, then all Physics models will
be regenerated when method is called on the corresponding Phase.
The default is ``False``. The method does not work in reverse,
so regenerating models on a Physics will not update a Phase. | Below is the instruction that describes the task:
### Input:
r"""
Re-runs the specified model or models.
Parameters
----------
propnames : string or list of strings
The list of property names to be regenerated. If None are given
then ALL models are re-run (except for those whose ``regen_mode``
is 'constant').
exclude : list of strings
Since the default behavior is to run ALL models, this can be used
to exclude specific models. It may be more convenient to supply
as list of 2 models to exclude than to specify 8 models to include.
deep : boolean
Specifies whether or not to regenerate models on all associated
objects. For instance, if ``True``, then all Physics models will
be regenerated when method is called on the corresponding Phase.
The default is ``False``. The method does not work in reverse,
so regenerating models on a Physics will not update a Phase.
### Response:
def regenerate_models(self, propnames=None, exclude=[], deep=False):
r"""
Re-runs the specified model or models.
Parameters
----------
propnames : string or list of strings
The list of property names to be regenerated. If None are given
then ALL models are re-run (except for those whose ``regen_mode``
is 'constant').
exclude : list of strings
Since the default behavior is to run ALL models, this can be used
to exclude specific models. It may be more convenient to supply
as list of 2 models to exclude than to specify 8 models to include.
deep : boolean
Specifies whether or not to regenerate models on all associated
objects. For instance, if ``True``, then all Physics models will
be regenerated when method is called on the corresponding Phase.
The default is ``False``. The method does not work in reverse,
so regenerating models on a Physics will not update a Phase.
"""
# If empty list of propnames was given, do nothing and return
if type(propnames) is list and len(propnames) == 0:
return
if type(propnames) is str: # Convert string to list if necessary
propnames = [propnames]
if propnames is None: # If no props given, then regenerate them all
propnames = self.models.dependency_list()
# If some props are to be excluded, remove them from list
if len(exclude) > 0:
propnames = [i for i in propnames if i not in exclude]
# Re-order given propnames according to dependency tree
self_models = self.models.dependency_list()
propnames = [i for i in self_models if i in propnames]
if deep:
other_models = None # Will trigger regen of ALL models
else:
# Make list of given propnames that are not in self
other_models = list(set(propnames).difference(set(self_models)))
# The following has some redundant lines, but is easier to understand
if self._isa('phase'):
# Start by regenerating models on self
for item in propnames:
self._regen(item)
# Then regen models on associated objects, if any in other_models
for phys in self.project.find_physics(phase=self):
phys.regenerate_models(propnames=other_models, deep=False)
elif self._isa('network'): # Repeat for other object types
for item in propnames:
self._regen(item)
for geom in self.project.geometries().values():
geom.regenerate_models(propnames=other_models, deep=False)
else:
for item in propnames:
self._regen(item) |
def _update_similarity_view(self):
"""Update the similarity view with matches for the specified
clusters."""
if not self.similarity:
return
selection = self.cluster_view.selected
if not len(selection):
return
cluster_id = selection[0]
cluster_ids = self.clustering.cluster_ids
self._best = cluster_id
logger.log(5, "Update the similarity view.")
# This is a list of pairs (closest_cluster, similarity).
similarities = self.similarity(cluster_id)
# We save the similarity values wrt the currently-selected clusters.
# Note that we keep the order of the output of the self.similarity()
# function.
clusters_sim = OrderedDict([(int(cl), s) for (cl, s) in similarities])
# List of similar clusters, remove non-existing ones.
clusters = [c for c in clusters_sim.keys()
if c in cluster_ids]
# The similarity view will use these values.
self._current_similarity_values = clusters_sim
# Set the rows of the similarity view.
# TODO: instead of the self._current_similarity_values hack,
# give the possibility to specify the values here (?).
self.similarity_view.set_rows([c for c in clusters
if c not in selection]) | Update the similarity view with matches for the specified
clusters. | Below is the instruction that describes the task:
### Input:
Update the similarity view with matches for the specified
clusters.
### Response:
def _update_similarity_view(self):
"""Update the similarity view with matches for the specified
clusters."""
if not self.similarity:
return
selection = self.cluster_view.selected
if not len(selection):
return
cluster_id = selection[0]
cluster_ids = self.clustering.cluster_ids
self._best = cluster_id
logger.log(5, "Update the similarity view.")
# This is a list of pairs (closest_cluster, similarity).
similarities = self.similarity(cluster_id)
# We save the similarity values wrt the currently-selected clusters.
# Note that we keep the order of the output of the self.similarity()
# function.
clusters_sim = OrderedDict([(int(cl), s) for (cl, s) in similarities])
# List of similar clusters, remove non-existing ones.
clusters = [c for c in clusters_sim.keys()
if c in cluster_ids]
# The similarity view will use these values.
self._current_similarity_values = clusters_sim
# Set the rows of the similarity view.
# TODO: instead of the self._current_similarity_values hack,
# give the possibility to specify the values here (?).
self.similarity_view.set_rows([c for c in clusters
if c not in selection]) |
def _update_lock_icon(self):
"""Update locked state icon"""
icon = ima.icon('lock') if self.locked else ima.icon('lock_open')
self.locked_button.setIcon(icon)
tip = _("Unlock") if self.locked else _("Lock")
self.locked_button.setToolTip(tip) | Update locked state icon | Below is the the instruction that describes the task:
### Input:
Update locked state icon
### Response:
def _update_lock_icon(self):
"""Update locked state icon"""
icon = ima.icon('lock') if self.locked else ima.icon('lock_open')
self.locked_button.setIcon(icon)
tip = _("Unlock") if self.locked else _("Lock")
self.locked_button.setToolTip(tip) |
def bind_events(self, events):
'''Register all known events found in ``events`` key-valued parameters.
'''
evs = self._events
if evs and events:
for event in evs.values():
if event.name in events:
event.bind(events[event.name]) | Register all known events found in ``events`` key-valued parameters. | Below is the the instruction that describes the task:
### Input:
Register all known events found in ``events`` key-valued parameters.
### Response:
def bind_events(self, events):
'''Register all known events found in ``events`` key-valued parameters.
'''
evs = self._events
if evs and events:
for event in evs.values():
if event.name in events:
event.bind(events[event.name]) |
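A brief, hypothetical usage sketch for the method above: the 'request_started' event name and the handler signature are illustrative assumptions, not taken from the library.
# `producer` is an assumed object whose internal _events registry already
# contains an event named 'request_started'; the handler is made up too.
def on_request_started(event, **kwargs):
    print('event fired:', event)

producer.bind_events({'request_started': on_request_started,
                      'unknown_event': print})   # keys not present in _events are simply ignored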
def generate_mediation_matrix(dsm):
"""
Generate the mediation matrix of the given matrix.
Rules for mediation matrix generation:
Set -1 for items NOT to be considered
Set 0 for items which MUST NOT be present
Set 1 for items which MUST be present
Each module has optional dependencies to itself.
- Framework has optional dependency to all framework items (-1),
and to nothing else.
- Core libraries have dependencies to framework.
Dependencies to other core libraries are tolerated.
- Application libraries have dependencies to framework.
Dependencies to other core or application libraries are tolerated.
No dependencies to application modules.
- Application modules have dependencies to framework and libraries.
Dependencies to other application modules
should be mediated over a broker.
Dependencies to data are tolerated.
- Data have no dependencies at all
(but framework/libraries would be tolerated).
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to generate
the mediation matrix for.
"""
cat = dsm.categories
ent = dsm.entities
size = dsm.size[0]
if not cat:
cat = ['appmodule'] * size
packages = [e.split('.')[0] for e in ent]
# define and initialize the mediation matrix
mediation_matrix = [[0 for _ in range(size)]
for _ in range(size)]
for i in range(0, size):
for j in range(0, size):
if cat[i] == 'framework':
if cat[j] == 'framework':
mediation_matrix[i][j] = -1
else:
mediation_matrix[i][j] = 0
elif cat[i] == 'corelib':
if (cat[j] in ('framework', 'corelib') or
ent[i].startswith(packages[j] + '.') or
i == j):
mediation_matrix[i][j] = -1
else:
mediation_matrix[i][j] = 0
elif cat[i] == 'applib':
if (cat[j] in ('framework', 'corelib', 'applib') or
ent[i].startswith(packages[j] + '.') or
i == j):
mediation_matrix[i][j] = -1
else:
mediation_matrix[i][j] = 0
elif cat[i] == 'appmodule':
# we cannot force an app module to import things from
# the broker if the broker itself did not import anything
if (cat[j] in ('framework', 'corelib',
'applib', 'broker', 'data') or
ent[i].startswith(packages[j] + '.') or
i == j):
mediation_matrix[i][j] = -1
else:
mediation_matrix[i][j] = 0
elif cat[i] == 'broker':
# we cannot force the broker to import things from
# app modules if there is nothing to be imported.
# also broker should be authorized to use third apps
if (cat[j] in (
'appmodule', 'corelib', 'framework') or
ent[i].startswith(packages[j] + '.') or
i == j):
mediation_matrix[i][j] = -1
else:
mediation_matrix[i][j] = 0
elif cat[i] == 'data':
if (cat[j] == 'framework' or
i == j):
mediation_matrix[i][j] = -1
else:
mediation_matrix[i][j] = 0
else:
# mediation_matrix[i][j] = -2 # errors in the generation
raise DesignStructureMatrixError(
'Mediation matrix value NOT generated for %s:%s' % (
i, j))
return mediation_matrix | Generate the mediation matrix of the given matrix.
Rules for mediation matrix generation:
Set -1 for items NOT to be considered
Set 0 for items which MUST NOT be present
Set 1 for items which MUST be present
Each module has optional dependencies to itself.
- Framework has optional dependency to all framework items (-1),
and to nothing else.
- Core libraries have dependencies to framework.
Dependencies to other core libraries are tolerated.
- Application libraries have dependencies to framework.
Dependencies to other core or application libraries are tolerated.
No dependencies to application modules.
- Application modules have dependencies to framework and libraries.
Dependencies to other application modules
should be mediated over a broker.
Dependencies to data are tolerated.
- Data have no dependencies at all
(but framework/libraries would be tolerated).
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to generate
the mediation matrix for. | Below is the the instruction that describes the task:
### Input:
Generate the mediation matrix of the given matrix.
Rules for mediation matrix generation:
Set -1 for items NOT to be considered
Set 0 for items which MUST NOT be present
Set 1 for items which MUST be present
Each module has optional dependencies to itself.
- Framework has optional dependency to all framework items (-1),
and to nothing else.
- Core libraries have dependencies to framework.
Dependencies to other core libraries are tolerated.
- Application libraries have dependencies to framework.
Dependencies to other core or application libraries are tolerated.
No dependencies to application modules.
- Application modules have dependencies to framework and libraries.
Dependencies to other application modules
should be mediated over a broker.
Dependencies to data are tolerated.
- Data have no dependencies at all
(but framework/libraries would be tolerated).
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to generate
the mediation matrix for.
### Response:
def generate_mediation_matrix(dsm):
"""
Generate the mediation matrix of the given matrix.
Rules for mediation matrix generation:
Set -1 for items NOT to be considered
Set 0 for items which MUST NOT be present
Set 1 for items which MUST be present
Each module has optional dependencies to itself.
- Framework has optional dependency to all framework items (-1),
and to nothing else.
- Core libraries have dependencies to framework.
Dependencies to other core libraries are tolerated.
- Application libraries have dependencies to framework.
Dependencies to other core or application libraries are tolerated.
No dependencies to application modules.
- Application modules have dependencies to framework and libraries.
Dependencies to other application modules
should be mediated over a broker.
Dependencies to data are tolerated.
- Data have no dependencies at all
(but framework/libraries would be tolerated).
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to generate
the mediation matrix for.
"""
cat = dsm.categories
ent = dsm.entities
size = dsm.size[0]
if not cat:
cat = ['appmodule'] * size
packages = [e.split('.')[0] for e in ent]
# define and initialize the mediation matrix
mediation_matrix = [[0 for _ in range(size)]
for _ in range(size)]
for i in range(0, size):
for j in range(0, size):
if cat[i] == 'framework':
if cat[j] == 'framework':
mediation_matrix[i][j] = -1
else:
mediation_matrix[i][j] = 0
elif cat[i] == 'corelib':
if (cat[j] in ('framework', 'corelib') or
ent[i].startswith(packages[j] + '.') or
i == j):
mediation_matrix[i][j] = -1
else:
mediation_matrix[i][j] = 0
elif cat[i] == 'applib':
if (cat[j] in ('framework', 'corelib', 'applib') or
ent[i].startswith(packages[j] + '.') or
i == j):
mediation_matrix[i][j] = -1
else:
mediation_matrix[i][j] = 0
elif cat[i] == 'appmodule':
# we cannot force an app module to import things from
# the broker if the broker itself did not import anything
if (cat[j] in ('framework', 'corelib',
'applib', 'broker', 'data') or
ent[i].startswith(packages[j] + '.') or
i == j):
mediation_matrix[i][j] = -1
else:
mediation_matrix[i][j] = 0
elif cat[i] == 'broker':
# we cannot force the broker to import things from
# app modules if there is nothing to be imported.
# also broker should be authorized to use third apps
if (cat[j] in (
'appmodule', 'corelib', 'framework') or
ent[i].startswith(packages[j] + '.') or
i == j):
mediation_matrix[i][j] = -1
else:
mediation_matrix[i][j] = 0
elif cat[i] == 'data':
if (cat[j] == 'framework' or
i == j):
mediation_matrix[i][j] = -1
else:
mediation_matrix[i][j] = 0
else:
# mediation_matrix[i][j] = -2 # errors in the generation
raise DesignStructureMatrixError(
'Mediation matrix value NOT generated for %s:%s' % (
i, j))
return mediation_matrix |
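A hedged sketch of calling the function above; it assumes `dsm` is an already-constructed DesignStructureMatrix whose categories, entities and size attributes match what the function reads, and the checks only restate what the code itself guarantees.
# `dsm` is assumed to exist; building one is library-specific and not shown here.
mediation = generate_mediation_matrix(dsm)
n = dsm.size[0]
assert len(mediation) == n and all(len(row) == n for row in mediation)  # square, one row per entity
# Despite the "Set 1" rule in the docstring, this implementation only ever
# emits -1 (tolerated) and 0 (forbidden) for the supported categories.
assert all(value in (-1, 0) for row in mediation for value in row)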
def remove_capability(capability, image=None, restart=False):
'''
Uninstall a capability
Args:
capability(str): The capability to be removed
image (Optional[str]): The path to the root directory of an offline
Windows image. If `None` is passed, the running operating system is
targeted. Default is None.
restart (Optional[bool]): Reboot the machine if required by the install
Raises:
NotImplementedError: For all versions of Windows that are not Windows 10
and later. Server editions of Windows use ServerManager instead.
Returns:
dict: A dictionary containing the results of the command
CLI Example:
.. code-block:: bash
salt '*' dism.remove_capability Tools.Graphics.DirectX~~~~0.0.1.0
'''
if salt.utils.versions.version_cmp(__grains__['osversion'], '10') == -1:
raise NotImplementedError(
'`uninstall_capability` is not available on this version of '
'Windows: {0}'.format(__grains__['osversion']))
cmd = ['DISM',
'/Quiet',
'/Image:{0}'.format(image) if image else '/Online',
'/Remove-Capability',
'/CapabilityName:{0}'.format(capability)]
if not restart:
cmd.append('/NoRestart')
return __salt__['cmd.run_all'](cmd) | Uninstall a capability
Args:
capability(str): The capability to be removed
image (Optional[str]): The path to the root directory of an offline
Windows image. If `None` is passed, the running operating system is
targeted. Default is None.
restart (Optional[bool]): Reboot the machine if required by the install
Raises:
NotImplementedError: For all versions of Windows that are not Windows 10
and later. Server editions of Windows use ServerManager instead.
Returns:
dict: A dictionary containing the results of the command
CLI Example:
.. code-block:: bash
salt '*' dism.remove_capability Tools.Graphics.DirectX~~~~0.0.1.0 | Below is the the instruction that describes the task:
### Input:
Uninstall a capability
Args:
capability(str): The capability to be removed
image (Optional[str]): The path to the root directory of an offline
Windows image. If `None` is passed, the running operating system is
targeted. Default is None.
restart (Optional[bool]): Reboot the machine if required by the install
Raises:
NotImplementedError: For all versions of Windows that are not Windows 10
and later. Server editions of Windows use ServerManager instead.
Returns:
dict: A dictionary containing the results of the command
CLI Example:
.. code-block:: bash
salt '*' dism.remove_capability Tools.Graphics.DirectX~~~~0.0.1.0
### Response:
def remove_capability(capability, image=None, restart=False):
'''
Uninstall a capability
Args:
capability(str): The capability to be removed
image (Optional[str]): The path to the root directory of an offline
Windows image. If `None` is passed, the running operating system is
targeted. Default is None.
restart (Optional[bool]): Reboot the machine if required by the install
Raises:
NotImplementedError: For all versions of Windows that are not Windows 10
and later. Server editions of Windows use ServerManager instead.
Returns:
dict: A dictionary containing the results of the command
CLI Example:
.. code-block:: bash
salt '*' dism.remove_capability Tools.Graphics.DirectX~~~~0.0.1.0
'''
if salt.utils.versions.version_cmp(__grains__['osversion'], '10') == -1:
raise NotImplementedError(
'`uninstall_capability` is not available on this version of '
'Windows: {0}'.format(__grains__['osversion']))
cmd = ['DISM',
'/Quiet',
'/Image:{0}'.format(image) if image else '/Online',
'/Remove-Capability',
'/CapabilityName:{0}'.format(capability)]
if not restart:
cmd.append('/NoRestart')
return __salt__['cmd.run_all'](cmd) |
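Besides the CLI example in the docstring, the same execution-module function can be reached from Python through Salt's local client. This is a hedged sketch: it assumes a configured master, a Windows 10+ minion with the placeholder id 'win-minion-1', and sufficient privileges.
import salt.client

# 'win-minion-1' is a placeholder minion id.
local = salt.client.LocalClient()
# Returns a dict keyed by minion id; each value is the cmd.run_all result dict.
result = local.cmd('win-minion-1', 'dism.remove_capability',
                   ['Tools.Graphics.DirectX~~~~0.0.1.0'])
print(result)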
def xf2rav(xform):
"""
This routine determines the rotation matrix and angular velocity
of the rotation from a state transformation matrix.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xf2rav_c.html
:param xform: state transformation matrix
:type xform: list[6][6]
:return:
rotation associated with xform,
angular velocity associated with xform.
:rtype: tuple
"""
xform = stypes.toDoubleMatrix(xform)
rot = stypes.emptyDoubleMatrix()
av = stypes.emptyDoubleVector(3)
libspice.xf2rav_c(xform, rot, av)
return stypes.cMatrixToNumpy(rot), stypes.cVectorToPython(av) | This routine determines the rotation matrix and angular velocity
of the rotation from a state transformation matrix.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xf2rav_c.html
:param xform: state transformation matrix
:type xform: list[6][6]
:return:
rotation associated with xform,
angular velocity associated with xform.
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
This routine determines the rotation matrix and angular velocity
of the rotation from a state transformation matrix.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xf2rav_c.html
:param xform: state transformation matrix
:type xform: list[6][6]
:return:
rotation associated with xform,
angular velocity associated with xform.
:rtype: tuple
### Response:
def xf2rav(xform):
"""
This routine determines the rotation matrix and angular velocity
of the rotation from a state transformation matrix.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xf2rav_c.html
:param xform: state transformation matrix
:type xform: list[6][6]
:return:
rotation associated with xform,
angular velocity associated with xform.
:rtype: tuple
"""
xform = stypes.toDoubleMatrix(xform)
rot = stypes.emptyDoubleMatrix()
av = stypes.emptyDoubleVector(3)
libspice.xf2rav_c(xform, rot, av)
return stypes.cMatrixToNumpy(rot), stypes.cVectorToPython(av) |
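A short usage sketch in the SpiceyPy style; it assumes the required SPICE kernels have already been furnished and that `et` holds an ephemeris time, and the frame names are just examples.
# Build a 6x6 state transformation matrix, then split it into the 3x3
# rotation matrix and the angular-velocity vector with xf2rav.
xform = sxform('J2000', 'IAU_EARTH', et)   # `et` assumed to be defined elsewhere; frames illustrative
rot, av = xf2rav(xform)
# rot: 3x3 numpy array; av: length-3 angular velocity (rad/s) of the rotation.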
def deserialize(cls, cls_target, obj_raw):
"""
:type cls_target: T|type
:type obj_raw: int|str|bool|float|list|dict|None
:rtype: T
"""
cls._initialize()
deserializer = cls._get_deserializer(cls_target)
if deserializer == cls:
return cls._deserialize_default(cls_target, obj_raw)
else:
return deserializer.deserialize(cls_target, obj_raw) | :type cls_target: T|type
:type obj_raw: int|str|bool|float|list|dict|None
:rtype: T | Below is the the instruction that describes the task:
### Input:
:type cls_target: T|type
:type obj_raw: int|str|bool|float|list|dict|None
:rtype: T
### Response:
def deserialize(cls, cls_target, obj_raw):
"""
:type cls_target: T|type
:type obj_raw: int|str|bool|float|list|dict|None
:rtype: T
"""
cls._initialize()
deserializer = cls._get_deserializer(cls_target)
if deserializer == cls:
return cls._deserialize_default(cls_target, obj_raw)
else:
return deserializer.deserialize(cls_target, obj_raw) |
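A hedged illustration of the intended call pattern: a target class plus a JSON-decoded value go in, a typed object comes out. The adapter class name `JsonAdapter` and the `Payment` model are assumptions for the sketch, not confirmed names.
import json

# JsonAdapter and Payment are assumed names for the adapter class and target model.
raw = json.loads('{"id": 42, "amount": "10.00"}')
# Dispatches to a registered deserializer for Payment, or falls back to the
# default deserialization path implemented by the adapter itself.
payment = JsonAdapter.deserialize(Payment, raw)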
def construct_exc_class(cls):
"""Constructs proxy class for the exception."""
class ProxyException(cls, BaseException):
__pep3134__ = True
@property
def __traceback__(self):
if self.__fixed_traceback__:
return self.__fixed_traceback__
current_exc, current_tb = sys.exc_info()[1:]
if current_exc is self:
return current_tb
def __init__(self, instance=None): # pylint: disable=W0231
self.__original_exception__ = instance
self.__fixed_traceback__ = None
def __getattr__(self, item):
return getattr(self.__original_exception__, item)
def __repr__(self):
return repr(self.__original_exception__)
def __str__(self):
return str(self.__original_exception__)
def with_traceback(self, traceback):
instance = copy.copy(self)
instance.__fixed_traceback__ = traceback
return instance
ProxyException.__name__ = cls.__name__
return ProxyException | Constructs proxy class for the exception. | Below is the the instruction that describes the task:
### Input:
Constructs proxy class for the exception.
### Response:
def construct_exc_class(cls):
"""Constructs proxy class for the exception."""
class ProxyException(cls, BaseException):
__pep3134__ = True
@property
def __traceback__(self):
if self.__fixed_traceback__:
return self.__fixed_traceback__
current_exc, current_tb = sys.exc_info()[1:]
if current_exc is self:
return current_tb
def __init__(self, instance=None): # pylint: disable=W0231
self.__original_exception__ = instance
self.__fixed_traceback__ = None
def __getattr__(self, item):
return getattr(self.__original_exception__, item)
def __repr__(self):
return repr(self.__original_exception__)
def __str__(self):
return str(self.__original_exception__)
def with_traceback(self, traceback):
instance = copy.copy(self)
instance.__fixed_traceback__ = traceback
return instance
ProxyException.__name__ = cls.__name__
return ProxyException |
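A self-contained sketch, based only on the function above, showing what the proxy buys you: attribute access is forwarded to the wrapped exception instance, and with_traceback() returns a copy with a pinned traceback.
class MyError(Exception):
    pass

ProxyMyError = construct_exc_class(MyError)

original = MyError('bad input')
original.detail = 'extra context'

proxy = ProxyMyError(original)
print(str(proxy))      # 'bad input'      -- __str__ delegates to the original
print(proxy.detail)    # 'extra context'  -- __getattr__ forwards unknown attributes
pinned = proxy.with_traceback(None)       # copy of the proxy with __fixed_traceback__ set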
def ColorfullyWrite(log: str, consoleColor: int = -1, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None) -> None:
"""
log: str.
consoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.
writeToFile: bool.
printToStdout: bool.
logFile: str, log file path.
ColorfullyWrite('Hello <Color=Green>Green</Color> !!!'), color name must be in Logger.ColorNames.
"""
text = []
start = 0
while True:
index1 = log.find('<Color=', start)
if index1 >= 0:
if index1 > start:
text.append((log[start:index1], consoleColor))
index2 = log.find('>', index1)
colorName = log[index1+7:index2]
index3 = log.find('</Color>', index2 + 1)
text.append((log[index2 + 1:index3], Logger.ColorNames[colorName]))
start = index3 + 8
else:
if start < len(log):
text.append((log[start:], consoleColor))
break
for t, c in text:
Logger.Write(t, c, writeToFile, printToStdout, logFile) | log: str.
consoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.
writeToFile: bool.
printToStdout: bool.
logFile: str, log file path.
ColorfullyWrite('Hello <Color=Green>Green</Color> !!!'), color name must be in Logger.ColorNames. | Below is the the instruction that describes the task:
### Input:
log: str.
consoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.
writeToFile: bool.
printToStdout: bool.
logFile: str, log file path.
ColorfullyWrite('Hello <Color=Green>Green</Color> !!!'), color name must be in Logger.ColorNames.
### Response:
def ColorfullyWrite(log: str, consoleColor: int = -1, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None) -> None:
"""
log: str.
consoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.
writeToFile: bool.
printToStdout: bool.
logFile: str, log file path.
ColorfullyWrite('Hello <Color=Green>Green</Color> !!!'), color name must be in Logger.ColorNames.
"""
text = []
start = 0
while True:
index1 = log.find('<Color=', start)
if index1 >= 0:
if index1 > start:
text.append((log[start:index1], consoleColor))
index2 = log.find('>', index1)
colorName = log[index1+7:index2]
index3 = log.find('</Color>', index2 + 1)
text.append((log[index2 + 1:index3], Logger.ColorNames[colorName]))
start = index3 + 8
else:
if start < len(log):
text.append((log[start:], consoleColor))
break
for t, c in text:
Logger.Write(t, c, writeToFile, printToStdout, logFile) |
def _post(self, url, data={}):
"""Wrapper around request.post() to use the API prefix. Returns a JSON response."""
r = requests.post(self._api_prefix + url,
data=json.dumps(data),
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r) | Wrapper around requests.post() to use the API prefix. Returns a JSON response. | Below is the the instruction that describes the task:
### Input:
Wrapper around requests.post() to use the API prefix. Returns a JSON response.
### Response:
def _post(self, url, data={}):
"""Wrapper around request.post() to use the API prefix. Returns a JSON response."""
r = requests.post(self._api_prefix + url,
data=json.dumps(data),
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r) |
def Audio(self, run, tag):
"""Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Audio(tag) | Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`. | Below is the the instruction that describes the task:
### Input:
Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
### Response:
def Audio(self, run, tag):
"""Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Audio(tag) |
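A hedged usage sketch; the run and tag names are placeholders, `multiplexer` is assumed to be an already-populated instance of the multiplexer class that owns the method above, and the event field names are assumed from event_accumulator.AudioEvent.
# `multiplexer` is an assumed, already-populated multiplexer instance.
try:
    audio_events = multiplexer.Audio('train', 'audio/sample_0')
except KeyError:
    audio_events = []   # unknown run, or tag not present for that run
for event in audio_events:
    print(event.step, event.wall_time)   # fields assumed from event_accumulator.AudioEvent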
def send_notification(self, user, sender=None, **kwargs):
"""
An intermediary function for sending a notification email informing
a pre-existing, active user that they have been added to a new
organization.
"""
if not user.is_active:
return False
self.email_message(
user, self.notification_subject, self.notification_body, sender, **kwargs
).send()
return True | An intermediary function for sending a notification email informing
a pre-existing, active user that they have been added to a new
organization. | Below is the the instruction that describes the task:
### Input:
An intermediary function for sending a notification email informing
a pre-existing, active user that they have been added to a new
organization.
### Response:
def send_notification(self, user, sender=None, **kwargs):
"""
An intermediary function for sending a notification email informing
a pre-existing, active user that they have been added to a new
organization.
"""
if not user.is_active:
return False
self.email_message(
user, self.notification_subject, self.notification_body, sender, **kwargs
).send()
return True |
def run(ctx, service, args, show_args, daemon, editable, integration):
"""Load and run a specific service."""
home = ctx.obj["HOME"]
service_path = plugin_utils.get_plugin_path(home, SERVICES, service, editable)
service_log_path = os.path.join(service_path, LOGS_DIR)
logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
extra={"command": ctx.command.name, "params": ctx.params})
logger.debug("loading {} ({})".format(service, service_path))
service = register_service(service_path)
if show_args:
return plugin_utils.print_plugin_args(service_path)
# get our service class instance
service_module = get_service_module(service_path)
service_args = plugin_utils.parse_plugin_args(args, config_utils.get_config_parameters(service_path))
service_obj = service_module.service_class(alert_types=service.alert_types, service_args=service_args)
if not os.path.exists(service_log_path):
os.mkdir(service_log_path)
# prepare runner
if daemon:
runner = myRunner(service_obj,
pidfile=service_path + ".pid",
stdout=open(os.path.join(service_log_path, STDOUTLOG), "ab"),
stderr=open(os.path.join(service_log_path, STDERRLOG), "ab"))
files_preserve = []
for handler in logging.getLogger().handlers:
if hasattr(handler, "stream"):
if hasattr(handler.stream, "fileno"):
files_preserve.append(handler.stream.fileno())
if hasattr(handler, "socket"):
files_preserve.append(handler.socket.fileno())
runner.daemon_context.files_preserve = files_preserve
runner.daemon_context.signal_map.update({
signal.SIGTERM: service_obj._on_server_shutdown,
signal.SIGINT: service_obj._on_server_shutdown,
})
logger.debug("daemon_context", extra={"daemon_context": vars(runner.daemon_context)})
for integration_name in integration:
integration_path = plugin_utils.get_plugin_path(home, INTEGRATIONS, integration_name, editable)
configure_integration(integration_path)
click.secho("[+] Launching {} {}".format(service.name, "in daemon mode" if daemon else ""))
try:
# save service_args for external reference (see test)
with open(os.path.join(service_path, ARGS_JSON), "w") as f:
f.write(json.dumps(service_args))
runner._start() if daemon else service_obj.run()
except KeyboardInterrupt:
service_obj._on_server_shutdown()
click.secho("[*] {} has stopped".format(service.name)) | Load and run a specific service. | Below is the the instruction that describes the task:
### Input:
Load and run a specific service.
### Response:
def run(ctx, service, args, show_args, daemon, editable, integration):
"""Load and run a specific service."""
home = ctx.obj["HOME"]
service_path = plugin_utils.get_plugin_path(home, SERVICES, service, editable)
service_log_path = os.path.join(service_path, LOGS_DIR)
logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
extra={"command": ctx.command.name, "params": ctx.params})
logger.debug("loading {} ({})".format(service, service_path))
service = register_service(service_path)
if show_args:
return plugin_utils.print_plugin_args(service_path)
# get our service class instance
service_module = get_service_module(service_path)
service_args = plugin_utils.parse_plugin_args(args, config_utils.get_config_parameters(service_path))
service_obj = service_module.service_class(alert_types=service.alert_types, service_args=service_args)
if not os.path.exists(service_log_path):
os.mkdir(service_log_path)
# prepare runner
if daemon:
runner = myRunner(service_obj,
pidfile=service_path + ".pid",
stdout=open(os.path.join(service_log_path, STDOUTLOG), "ab"),
stderr=open(os.path.join(service_log_path, STDERRLOG), "ab"))
files_preserve = []
for handler in logging.getLogger().handlers:
if hasattr(handler, "stream"):
if hasattr(handler.stream, "fileno"):
files_preserve.append(handler.stream.fileno())
if hasattr(handler, "socket"):
files_preserve.append(handler.socket.fileno())
runner.daemon_context.files_preserve = files_preserve
runner.daemon_context.signal_map.update({
signal.SIGTERM: service_obj._on_server_shutdown,
signal.SIGINT: service_obj._on_server_shutdown,
})
logger.debug("daemon_context", extra={"daemon_context": vars(runner.daemon_context)})
for integration_name in integration:
integration_path = plugin_utils.get_plugin_path(home, INTEGRATIONS, integration_name, editable)
configure_integration(integration_path)
click.secho("[+] Launching {} {}".format(service.name, "in daemon mode" if daemon else ""))
try:
# save service_args for external reference (see test)
with open(os.path.join(service_path, ARGS_JSON), "w") as f:
f.write(json.dumps(service_args))
runner._start() if daemon else service_obj.run()
except KeyboardInterrupt:
service_obj._on_server_shutdown()
click.secho("[*] {} has stopped".format(service.name)) |
def nacm_rule_list_rule_rule_type_notification_notification_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm")
rule_list = ET.SubElement(nacm, "rule-list")
name_key = ET.SubElement(rule_list, "name")
name_key.text = kwargs.pop('name')
rule = ET.SubElement(rule_list, "rule")
name_key = ET.SubElement(rule, "name")
name_key.text = kwargs.pop('name')
rule_type = ET.SubElement(rule, "rule-type")
notification = ET.SubElement(rule_type, "notification")
notification_name = ET.SubElement(notification, "notification-name")
notification_name.text = kwargs.pop('notification_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def nacm_rule_list_rule_rule_type_notification_notification_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm")
rule_list = ET.SubElement(nacm, "rule-list")
name_key = ET.SubElement(rule_list, "name")
name_key.text = kwargs.pop('name')
rule = ET.SubElement(rule_list, "rule")
name_key = ET.SubElement(rule, "name")
name_key.text = kwargs.pop('name')
rule_type = ET.SubElement(rule, "rule-type")
notification = ET.SubElement(rule_type, "notification")
notification_name = ET.SubElement(notification, "notification-name")
notification_name.text = kwargs.pop('notification_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def RV_timeseries(self,ts,recalc=False):
"""
Radial Velocity time series for star 1 at given times ts.
:param ts:
Times. If not ``Quantity``, assumed to be in days.
:type ts:
array-like or ``Quantity``
:param recalc: (optional)
If ``False``, then if called with the exact same ``ts``
as last call, it will return cached calculation.
"""
if type(ts) != Quantity:
ts *= u.day
if not recalc and hasattr(self,'_RV_measurements'):
if (ts == self.ts).all():
return self._RV_measurements
else:
pass
RVs = Quantity(np.zeros((len(ts),self.N)),unit='km/s')
for i,t in enumerate(ts):
RVs[i,:] = self.dRV(t,com=True)
self._RV_measurements = RVs
self.ts = ts
return RVs | Radial Velocity time series for star 1 at given times ts.
:param ts:
Times. If not ``Quantity``, assumed to be in days.
:type ts:
array-like or ``Quantity``
:param recalc: (optional)
If ``False``, then if called with the exact same ``ts``
as last call, it will return cached calculation. | Below is the the instruction that describes the task:
### Input:
Radial Velocity time series for star 1 at given times ts.
:param ts:
Times. If not ``Quantity``, assumed to be in days.
:type ts:
array-like or ``Quantity``
:param recalc: (optional)
If ``False``, then if called with the exact same ``ts``
as last call, it will return cached calculation.
### Response:
def RV_timeseries(self,ts,recalc=False):
"""
Radial Velocity time series for star 1 at given times ts.
:param ts:
Times. If not ``Quantity``, assumed to be in days.
:type ts:
array-like or ``Quantity``
:param recalc: (optional)
If ``False``, then if called with the exact same ``ts``
as last call, it will return cached calculation.
"""
if type(ts) != Quantity:
ts *= u.day
if not recalc and hasattr(self,'_RV_measurements'):
if (ts == self.ts).all():
return self._RV_measurements
else:
pass
RVs = Quantity(np.zeros((len(ts),self.N)),unit='km/s')
for i,t in enumerate(ts):
RVs[i,:] = self.dRV(t,com=True)
self._RV_measurements = RVs
self.ts = ts
return RVs |
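A hedged call sketch: plain arrays are interpreted as days, astropy Quantities are passed through, and a repeated call with the identical `ts` returns the cached result. `pop` is an assumed population object exposing dRV() and N as used above.
import numpy as np
from astropy import units as u

# `pop` is an assumed binary-population object providing dRV() and N.
ts = np.linspace(0.0, 365.0, 100) * u.day   # explicit day units
rvs = pop.RV_timeseries(ts)                  # Quantity of shape (len(ts), pop.N) in km/s
rvs_again = pop.RV_timeseries(ts)            # identical ts -> served from the cached result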
def blurring_grid_from_mask_and_psf_shape(cls, mask, psf_shape):
"""Setup a blurring-grid from a mask, where a blurring grid consists of all pixels that are masked, but they \
are close enough to the unmasked pixels that a fraction of their light will be blurred into those pixels \
via PSF convolution. For example, if our mask is as follows:
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x| This is a ccd.Mask, where:
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x| x = True (Pixel is masked and excluded from lens)
|x|x|x|o|o|o|x|x|x|x| o = False (Pixel is not masked and included in lens)
|x|x|x|o|o|o|x|x|x|x|
|x|x|x|o|o|o|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x|
For a PSF of shape (3,3), the following blurring mask is computed (noting that only pixels that are direct \
neighbors of the unmasked pixels above will blur light into an unmasked pixel):
|x|x|x|x|x|x|x|x|x| This is an example regular.Mask, where:
|x|x|x|x|x|x|x|x|x|
|x|x|o|o|o|o|o|x|x| x = True (Pixel is masked and excluded from lens)
|x|x|o|x|x|x|o|x|x| o = False (Pixel is not masked and included in lens)
|x|x|o|x|x|x|o|x|x|
|x|x|o|x|x|x|o|x|x|
|x|x|o|o|o|o|o|x|x|
|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|
Thus, the blurring grid coordinates and indexes will be as follows:
pixel_scale = 1.0"
<--- -ve x +ve -->
y x
|x|x|x |x |x |x |x |x|x| | blurring_grid[0] = [2.0, -2.0] blurring_grid[9] = [-1.0, -2.0]
|x|x|x |x |x |x |x |x|x| | blurring_grid[1] = [2.0, -1.0] blurring_grid[10] = [-1.0, 2.0]
|x|x|0 |1 |2 |3 |4 |x|x| +ve blurring_grid[2] = [2.0, 0.0] blurring_grid[11] = [-2.0, -2.0]
|x|x|5 |x |x |x |6 |x|x| y blurring_grid[3] = [2.0, 1.0] blurring_grid[12] = [-2.0, -1.0]
|x|x|7 |x |x |x |8 |x|x| -ve blurring_grid[4] = [2.0, 2.0] blurring_grid[13] = [-2.0, 0.0]
|x|x|9 |x |x |x |10|x|x| | blurring_grid[5] = [1.0, -2.0] blurring_grid[14] = [-2.0, 1.0]
|x|x|11|12|13|14|15|x|x| | blurring_grid[6] = [1.0, 2.0] blurring_grid[15] = [-2.0, 2.0]
|x|x|x |x |x |x |x |x|x| \/ blurring_grid[7] = [0.0, -2.0]
|x|x|x |x |x |x |x |x|x| blurring_grid[8] = [0.0, 2.0]
For a PSF of shape (5,5), the following blurring mask is computed (noting that pixels that are 2 pixels from a
direct unmasked pixel now blur light into an unmasked pixel):
|x|x|x|x|x|x|x|x|x| This is an example regular.Mask, where:
|x|o|o|o|o|o|o|o|x|
|x|o|o|o|o|o|o|o|x| x = True (Pixel is masked and excluded from lens)
|x|o|o|x|x|x|o|o|x| o = False (Pixel is not masked and included in lens)
|x|o|o|x|x|x|o|o|x|
|x|o|o|x|x|x|o|o|x|
|x|o|o|o|o|o|o|o|x|
|x|o|o|o|o|o|o|o|x|
|x|x|x|x|x|x|x|x|x|
"""
blurring_mask = mask.blurring_mask_for_psf_shape(psf_shape)
return RegularGrid.from_mask(blurring_mask) | Setup a blurring-grid from a mask, where a blurring grid consists of all pixels that are masked, but they \
are close enough to the unmasked pixels that a fraction of their light will be blurred into those pixels \
via PSF convolution. For example, if our mask is as follows:
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x| This is a ccd.Mask, where:
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x| x = True (Pixel is masked and excluded from lens)
|x|x|x|o|o|o|x|x|x|x| o = False (Pixel is not masked and included in lens)
|x|x|x|o|o|o|x|x|x|x|
|x|x|x|o|o|o|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x|
For a PSF of shape (3,3), the following blurring mask is computed (noting that only pixels that are direct \
neighbors of the unmasked pixels above will blur light into an unmasked pixel):
|x|x|x|x|x|x|x|x|x| This is an example regular.Mask, where:
|x|x|x|x|x|x|x|x|x|
|x|x|o|o|o|o|o|x|x| x = True (Pixel is masked and excluded from lens)
|x|x|o|x|x|x|o|x|x| o = False (Pixel is not masked and included in lens)
|x|x|o|x|x|x|o|x|x|
|x|x|o|x|x|x|o|x|x|
|x|x|o|o|o|o|o|x|x|
|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|
Thus, the blurring grid coordinates and indexes will be as follows:
pixel_scale = 1.0"
<--- -ve x +ve -->
y x
|x|x|x |x |x |x |x |x|x| | blurring_grid[0] = [2.0, -2.0] blurring_grid[9] = [-1.0, -2.0]
|x|x|x |x |x |x |x |x|x| | blurring_grid[1] = [2.0, -1.0] blurring_grid[10] = [-1.0, 2.0]
|x|x|0 |1 |2 |3 |4 |x|x| +ve blurring_grid[2] = [2.0, 0.0] blurring_grid[11] = [-2.0, -2.0]
|x|x|5 |x |x |x |6 |x|x| y blurring_grid[3] = [2.0, 1.0] blurring_grid[12] = [-2.0, -1.0]
|x|x|7 |x |x |x |8 |x|x| -ve blurring_grid[4] = [2.0, 2.0] blurring_grid[13] = [-2.0, 0.0]
|x|x|9 |x |x |x |10|x|x| | blurring_grid[5] = [1.0, -2.0] blurring_grid[14] = [-2.0, 1.0]
|x|x|11|12|13|14|15|x|x| | blurring_grid[6] = [1.0, 2.0] blurring_grid[15] = [-2.0, 2.0]
|x|x|x |x |x |x |x |x|x| \/ blurring_grid[7] = [0.0, -2.0]
|x|x|x |x |x |x |x |x|x| blurring_grid[8] = [0.0, 2.0]
For a PSF of shape (5,5), the following blurring mask is computed (noting that pixels that are 2 pixels from a
direct unmasked pixel now blur light into an unmasked pixel):
|x|x|x|x|x|x|x|x|x| This is an example regular.Mask, where:
|x|o|o|o|o|o|o|o|x|
|x|o|o|o|o|o|o|o|x| x = True (Pixel is masked and excluded from lens)
|x|o|o|x|x|x|o|o|x| o = False (Pixel is not masked and included in lens)
|x|o|o|x|x|x|o|o|x|
|x|o|o|x|x|x|o|o|x|
|x|o|o|o|o|o|o|o|x|
|x|o|o|o|o|o|o|o|x|
|x|x|x|x|x|x|x|x|x| | Below is the the instruction that describes the task:
### Input:
Setup a blurring-grid from a mask, where a blurring grid consists of all pixels that are masked, but they \
are close enough to the unmasked pixels that a fraction of their light will be blurred into those pixels \
via PSF convolution. For example, if our mask is as follows:
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x| This is an ccd.Mask, where:
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x| x = True (Pixel is masked and excluded from lens)
|x|x|x|o|o|o|x|x|x|x| o = False (Pixel is not masked and included in lens)
|x|x|x|o|o|o|x|x|x|x|
|x|x|x|o|o|o|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x|
For a PSF of shape (3,3), the following blurring mask is computed (noting that only pixels that are direct \
neighbors of the unmasked pixels above will blur light into an unmasked pixel):
|x|x|x|x|x|x|x|x|x| This is an example regular.Mask, where:
|x|x|x|x|x|x|x|x|x|
|x|x|o|o|o|o|o|x|x| x = True (Pixel is masked and excluded from lens)
|x|x|o|x|x|x|o|x|x| o = False (Pixel is not masked and included in lens)
|x|x|o|x|x|x|o|x|x|
|x|x|o|x|x|x|o|x|x|
|x|x|o|o|o|o|o|x|x|
|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|
Thus, the blurring grid coordinates and indexes will be as follows:
pixel_scale = 1.0"
<--- -ve x +ve -->
y x
|x|x|x |x |x |x |x |x|x| | blurring_grid[0] = [2.0, -2.0] blurring_grid[9] = [-1.0, -2.0]
|x|x|x |x |x |x |x |x|x| | blurring_grid[1] = [2.0, -1.0] blurring_grid[10] = [-1.0, 2.0]
|x|x|0 |1 |2 |3 |4 |x|x| +ve blurring_grid[2] = [2.0, 0.0] blurring_grid[11] = [-2.0, -2.0]
|x|x|5 |x |x |x |6 |x|x| y blurring_grid[3] = [2.0, 1.0] blurring_grid[12] = [-2.0, -1.0]
|x|x|7 |x |x |x |8 |x|x| -ve blurring_grid[4] = [2.0, 2.0] blurring_grid[13] = [-2.0, 0.0]
|x|x|9 |x |x |x |10|x|x| | blurring_grid[5] = [1.0, -2.0] blurring_grid[14] = [-2.0, 1.0]
|x|x|11|12|13|14|15|x|x| | blurring_grid[6] = [1.0, 2.0] blurring_grid[15] = [-2.0, 2.0]
|x|x|x |x |x |x |x |x|x| \/ blurring_grid[7] = [0.0, -2.0]
|x|x|x |x |x |x |x |x|x| blurring_grid[8] = [0.0, 2.0]
For a PSF of shape (5,5), the following blurring mask is computed (noting that pixels that are 2 pixels from a
direct unmasked pixel now blur light into an unmasked pixel):
|x|x|x|x|x|x|x|x|x| This is an example regular.Mask, where:
|x|o|o|o|o|o|o|o|x|
|x|o|o|o|o|o|o|o|x| x = True (Pixel is masked and excluded from lens)
|x|o|o|x|x|x|o|o|x| o = False (Pixel is not masked and included in lens)
|x|o|o|x|x|x|o|o|x|
|x|o|o|x|x|x|o|o|x|
|x|o|o|o|o|o|o|o|x|
|x|o|o|o|o|o|o|o|x|
|x|x|x|x|x|x|x|x|x|
### Response:
def blurring_grid_from_mask_and_psf_shape(cls, mask, psf_shape):
"""Setup a blurring-grid from a mask, where a blurring grid consists of all pixels that are masked, but they \
are close enough to the unmasked pixels that a fraction of their light will be blurred into those pixels \
via PSF convolution. For example, if our mask is as follows:
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x| This is a ccd.Mask, where:
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x| x = True (Pixel is masked and excluded from lens)
|x|x|x|o|o|o|x|x|x|x| o = False (Pixel is not masked and included in lens)
|x|x|x|o|o|o|x|x|x|x|
|x|x|x|o|o|o|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|x|
For a PSF of shape (3,3), the following blurring mask is computed (noting that only pixels that are direct \
neighbors of the unmasked pixels above will blur light into an unmasked pixel):
|x|x|x|x|x|x|x|x|x| This is an example regular.Mask, where:
|x|x|x|x|x|x|x|x|x|
|x|x|o|o|o|o|o|x|x| x = True (Pixel is masked and excluded from lens)
|x|x|o|x|x|x|o|x|x| o = False (Pixel is not masked and included in lens)
|x|x|o|x|x|x|o|x|x|
|x|x|o|x|x|x|o|x|x|
|x|x|o|o|o|o|o|x|x|
|x|x|x|x|x|x|x|x|x|
|x|x|x|x|x|x|x|x|x|
Thus, the blurring grid coordinates and indexes will be as follows:
pixel_scale = 1.0"
<--- -ve x +ve -->
y x
|x|x|x |x |x |x |x |x|x| | blurring_grid[0] = [2.0, -2.0] blurring_grid[9] = [-1.0, -2.0]
|x|x|x |x |x |x |x |x|x| | blurring_grid[1] = [2.0, -1.0] blurring_grid[10] = [-1.0, 2.0]
|x|x|0 |1 |2 |3 |4 |x|x| +ve blurring_grid[2] = [2.0, 0.0] blurring_grid[11] = [-2.0, -2.0]
|x|x|5 |x |x |x |6 |x|x| y blurring_grid[3] = [2.0, 1.0] blurring_grid[12] = [-2.0, -1.0]
|x|x|7 |x |x |x |8 |x|x| -ve blurring_grid[4] = [2.0, 2.0] blurring_grid[13] = [-2.0, 0.0]
|x|x|9 |x |x |x |10|x|x| | blurring_grid[5] = [1.0, -2.0] blurring_grid[14] = [-2.0, 1.0]
|x|x|11|12|13|14|15|x|x| | blurring_grid[6] = [1.0, 2.0] blurring_grid[15] = [-2.0, 2.0]
|x|x|x |x |x |x |x |x|x| \/ blurring_grid[7] = [0.0, -2.0]
|x|x|x |x |x |x |x |x|x| blurring_grid[8] = [0.0, 2.0]
For a PSF of shape (5,5), the following blurring mask is computed (noting that pixels that are 2 pixels from a
direct unmasked pixel now blur light into an unmasked pixel):
|x|x|x|x|x|x|x|x|x| This is an example regular.Mask, where:
|x|o|o|o|o|o|o|o|x|
|x|o|o|o|o|o|o|o|x| x = True (Pixel is masked and excluded from lens)
|x|o|o|x|x|x|o|o|x| o = False (Pixel is not masked and included in lens)
|x|o|o|x|x|x|o|o|x|
|x|o|o|x|x|x|o|o|x|
|x|o|o|o|o|o|o|o|x|
|x|o|o|o|o|o|o|o|x|
|x|x|x|x|x|x|x|x|x|
"""
blurring_mask = mask.blurring_mask_for_psf_shape(psf_shape)
return RegularGrid.from_mask(blurring_mask) |
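A hedged call sketch; `mask` is assumed to be an existing mask object that implements blurring_mask_for_psf_shape(), which is the only interface the classmethod above relies on.
# `mask` is an assumed, already-built mask object. With a (3, 3) PSF only the
# direct neighbours of unmasked pixels enter the blurring grid, matching the
# first diagram in the docstring; a (5, 5) PSF widens that border to two pixels.
blurring_grid = RegularGrid.blurring_grid_from_mask_and_psf_shape(mask, psf_shape=(3, 3))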
def trial_end(self, trial_job_id, success):
"""trial_end
Parameters
----------
trial_job_id: int
trial job id
success: bool
True if the experiment finished successfully, False otherwise
"""
if trial_job_id in self.running_history:
if success:
cnt = 0
history_sum = 0
self.completed_avg_history[trial_job_id] = []
for each in self.running_history[trial_job_id]:
cnt += 1
history_sum += each
self.completed_avg_history[trial_job_id].append(history_sum / cnt)
self.running_history.pop(trial_job_id)
else:
logger.warning('trial_end: trial_job_id is not in running_history') | trial_end
Parameters
----------
trial_job_id: int
trial job id
success: bool
True if the experiment finished successfully, False otherwise | Below is the the instruction that describes the task:
### Input:
trial_end
Parameters
----------
trial_job_id: int
trial job id
success: bool
True if the experiment finished successfully, False otherwise
### Response:
def trial_end(self, trial_job_id, success):
"""trial_end
Parameters
----------
trial_job_id: int
trial job id
success: bool
True if the experiment finished successfully, False otherwise
"""
if trial_job_id in self.running_history:
if success:
cnt = 0
history_sum = 0
self.completed_avg_history[trial_job_id] = []
for each in self.running_history[trial_job_id]:
cnt += 1
history_sum += each
self.completed_avg_history[trial_job_id].append(history_sum / cnt)
self.running_history.pop(trial_job_id)
else:
logger.warning('trial_end: trial_job_id is not in running_history')
def load(self):
"""Load from a file and return an x509 object"""
private = self.is_private()
with open_tls_file(self.file_path, 'r', private=private) as fh:
if private:
self.x509 = crypto.load_privatekey(self.encoding, fh.read())
else:
self.x509 = crypto.load_certificate(self.encoding, fh.read())
return self.x509 | Load from a file and return an x509 object | Below is the the instruction that describes the task:
### Input:
Load from a file and return an x509 object
### Response:
def load(self):
"""Load from a file and return an x509 object"""
private = self.is_private()
with open_tls_file(self.file_path, 'r', private=private) as fh:
if private:
self.x509 = crypto.load_privatekey(self.encoding, fh.read())
else:
self.x509 = crypto.load_certificate(self.encoding, fh.read())
return self.x509 |
def update_editor(self):
"""
updates the logger and plot on the interpretation editor window
"""
self.fit_list = []
self.search_choices = []
for specimen in self.specimens_list:
if specimen not in self.parent.pmag_results_data['specimens']: continue
self.fit_list += [(fit,specimen) for fit in self.parent.pmag_results_data['specimens'][specimen]]
self.logger.DeleteAllItems()
offset = 0
for i in range(len(self.fit_list)):
i -= offset
v = self.update_logger_entry(i)
if v == "s": offset += 1 | updates the logger and plot on the interpretation editor window | Below is the the instruction that describes the task:
### Input:
updates the logger and plot on the interpretation editor window
### Response:
def update_editor(self):
"""
updates the logger and plot on the interpretation editor window
"""
self.fit_list = []
self.search_choices = []
for specimen in self.specimens_list:
if specimen not in self.parent.pmag_results_data['specimens']: continue
self.fit_list += [(fit,specimen) for fit in self.parent.pmag_results_data['specimens'][specimen]]
self.logger.DeleteAllItems()
offset = 0
for i in range(len(self.fit_list)):
i -= offset
v = self.update_logger_entry(i)
if v == "s": offset += 1 |
def getWorkingCollisionBoundsInfo(self):
"""
Returns the number of Quads if the buffer points to null. Otherwise it returns Quads
into the buffer up to the max specified from the working copy.
"""
fn = self.function_table.getWorkingCollisionBoundsInfo
pQuadsBuffer = HmdQuad_t()
punQuadsCount = c_uint32()
result = fn(byref(pQuadsBuffer), byref(punQuadsCount))
return result, pQuadsBuffer, punQuadsCount.value | Returns the number of Quads if the buffer points to null. Otherwise it returns Quads
into the buffer up to the max specified from the working copy. | Below is the the instruction that describes the task:
### Input:
Returns the number of Quads if the buffer points to null. Otherwise it returns Quads
into the buffer up to the max specified from the working copy.
### Response:
def getWorkingCollisionBoundsInfo(self):
"""
Returns the number of Quads if the buffer points to null. Otherwise it returns Quads
into the buffer up to the max specified from the working copy.
"""
fn = self.function_table.getWorkingCollisionBoundsInfo
pQuadsBuffer = HmdQuad_t()
punQuadsCount = c_uint32()
result = fn(byref(pQuadsBuffer), byref(punQuadsCount))
return result, pQuadsBuffer, punQuadsCount.value |
def action(self):
"""
This class overrides this method
"""
commandline = "{0} {1}".format(self.command, " ".join(self.arguments))
try:
completed_process = subprocess.run(commandline, shell=True)
self.exit_status = completed_process.returncode
except AttributeError:
self.exit_status = subprocess.call(commandline, shell=True) | This class overrides this method | Below is the the instruction that describes the task:
### Input:
This class overrides this method
### Response:
def action(self):
"""
This class overrides this method
"""
commandline = "{0} {1}".format(self.command, " ".join(self.arguments))
try:
completed_process = subprocess.run(commandline, shell=True)
self.exit_status = completed_process.returncode
except AttributeError:
self.exit_status = subprocess.call(commandline, shell=True) |
def confirmations(self, txn_or_pmt):
"""
Returns the number of confirmations for given
:class:`Transaction <monero.transaction.Transaction>` or
:class:`Payment <monero.transaction.Payment>` object.
:rtype: int
"""
if isinstance(txn_or_pmt, Payment):
txn = txn_or_pmt.transaction
else:
txn = txn_or_pmt
try:
return max(0, self.height() - txn.height)
except TypeError:
return 0 | Returns the number of confirmations for given
:class:`Transaction <monero.transaction.Transaction>` or
:class:`Payment <monero.transaction.Payment>` object.
:rtype: int | Below is the the instruction that describes the task:
### Input:
Returns the number of confirmations for given
:class:`Transaction <monero.transaction.Transaction>` or
:class:`Payment <monero.transaction.Payment>` object.
:rtype: int
### Response:
def confirmations(self, txn_or_pmt):
"""
Returns the number of confirmations for given
:class:`Transaction <monero.transaction.Transaction>` or
:class:`Payment <monero.transaction.Payment>` object.
:rtype: int
"""
if isinstance(txn_or_pmt, Payment):
txn = txn_or_pmt.transaction
else:
txn = txn_or_pmt
try:
return max(0, self.height() - txn.height)
except TypeError:
return 0 |
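A hedged sketch: `wallet` is assumed to be the object exposing both height() and the method above, and `incoming` a Payment (or Transaction) obtained from the same library.
# `wallet` and `incoming` are assumed objects from the same library.
confirmations = wallet.confirmations(incoming)   # Payment and Transaction are both accepted
if confirmations >= 10:
    print('payment can be treated as final')
else:
    print('still waiting, {} confirmation(s) so far'.format(confirmations))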
def unstage_signature(vcs, signature):
"""Remove `signature` from the list of staged signatures
Args:
vcs (easyci.vcs.base.Vcs)
signature (basestring)
Raises:
NotStagedError
"""
evidence_path = _get_staged_history_path(vcs)
staged = get_staged_signatures(vcs)
if signature not in staged:
raise NotStagedError
staged.remove(signature)
string = '\n'.join(staged)
with open(evidence_path, 'w') as f:
f.write(string) | Remove `signature` from the list of staged signatures
Args:
vcs (easyci.vcs.base.Vcs)
signature (basestring)
Raises:
NotStagedError | Below is the the instruction that describes the task:
### Input:
Remove `signature` from the list of staged signatures
Args:
vcs (easyci.vcs.base.Vcs)
signature (basestring)
Raises:
NotStagedError
### Response:
def unstage_signature(vcs, signature):
"""Remove `signature` from the list of staged signatures
Args:
vcs (easyci.vcs.base.Vcs)
signature (basestring)
Raises:
NotStagedError
"""
evidence_path = _get_staged_history_path(vcs)
staged = get_staged_signatures(vcs)
if signature not in staged:
raise NotStagedError
staged.remove(signature)
string = '\n'.join(staged)
with open(evidence_path, 'w') as f:
f.write(string) |
def _precheck(self, curtailment_timeseries, feedin_df, curtailment_key):
"""
Raises an error if the curtailment at any time step exceeds the
total feed-in of all generators curtailment can be distributed among
at that time.
Parameters
-----------
curtailment_timeseries : :pandas:`pandas.Series<series>`
Curtailment time series in kW for the technology (and weather
cell) specified in `curtailment_key`.
feedin_df : :pandas:`pandas.Series<series>`
Feed-in time series in kW for all generators of type (and in
weather cell) specified in `curtailment_key`.
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
Technology (and weather cell) curtailment is given for.
"""
if not feedin_df.empty:
feedin_selected_sum = feedin_df.sum(axis=1)
diff = feedin_selected_sum - curtailment_timeseries
# add tolerance (set small negative values to zero)
diff[diff.between(-1, 0)] = 0
if not (diff >= 0).all():
bad_time_steps = [_ for _ in diff.index
if diff[_] < 0]
message = 'Curtailment demand exceeds total feed-in in time ' \
'steps {}.'.format(bad_time_steps)
logging.error(message)
raise ValueError(message)
else:
bad_time_steps = [_ for _ in curtailment_timeseries.index
if curtailment_timeseries[_] > 0]
if bad_time_steps:
message = 'Curtailment given for time steps {} but there ' \
'are no generators to meet the curtailment target ' \
'for {}.'.format(bad_time_steps, curtailment_key)
logging.error(message)
raise ValueError(message) | Raises an error if the curtailment at any time step exceeds the
total feed-in of all generators curtailment can be distributed among
at that time.
Parameters
-----------
curtailment_timeseries : :pandas:`pandas.Series<series>`
Curtailment time series in kW for the technology (and weather
cell) specified in `curtailment_key`.
feedin_df : :pandas:`pandas.Series<series>`
Feed-in time series in kW for all generators of type (and in
weather cell) specified in `curtailment_key`.
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
Technology (and weather cell) curtailment is given for. | Below is the the instruction that describes the task:
### Input:
Raises an error if the curtailment at any time step exceeds the
total feed-in of all generators curtailment can be distributed among
at that time.
Parameters
-----------
curtailment_timeseries : :pandas:`pandas.Series<series>`
Curtailment time series in kW for the technology (and weather
cell) specified in `curtailment_key`.
feedin_df : :pandas:`pandas.Series<series>`
Feed-in time series in kW for all generators of type (and in
weather cell) specified in `curtailment_key`.
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
Technology (and weather cell) curtailment is given for.
### Response:
def _precheck(self, curtailment_timeseries, feedin_df, curtailment_key):
"""
Raises an error if the curtailment at any time step exceeds the
total feed-in of all generators curtailment can be distributed among
at that time.
Parameters
-----------
curtailment_timeseries : :pandas:`pandas.Series<series>`
Curtailment time series in kW for the technology (and weather
cell) specified in `curtailment_key`.
feedin_df : :pandas:`pandas.Series<series>`
Feed-in time series in kW for all generators of type (and in
weather cell) specified in `curtailment_key`.
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
Technology (and weather cell) curtailment is given for.
"""
if not feedin_df.empty:
feedin_selected_sum = feedin_df.sum(axis=1)
diff = feedin_selected_sum - curtailment_timeseries
# add tolerance (set small negative values to zero)
diff[diff.between(-1, 0)] = 0
if not (diff >= 0).all():
bad_time_steps = [_ for _ in diff.index
if diff[_] < 0]
message = 'Curtailment demand exceeds total feed-in in time ' \
'steps {}.'.format(bad_time_steps)
logging.error(message)
raise ValueError(message)
else:
bad_time_steps = [_ for _ in curtailment_timeseries.index
if curtailment_timeseries[_] > 0]
if bad_time_steps:
message = 'Curtailment given for time steps {} but there ' \
'are no generators to meet the curtailment target ' \
'for {}.'.format(bad_time_steps, curtailment_key)
logging.error(message)
raise ValueError(message) |
def parse(text: str) -> Docstring:
"""
Parse the Google-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
# Clean according to PEP-0257
text = inspect.cleandoc(text)
# Find first title and split on its position
match = _titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
# Break description into short and long parts
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith("\n")
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
# Split by sections determined by titles
matches = list(_titles_re.finditer(meta_chunk))
if not matches:
return ret
splits = []
for j in range(len(matches) - 1):
splits.append((matches[j].end(), matches[j + 1].start()))
splits.append((matches[-1].end(), len(meta_chunk)))
chunks = {}
for j, (start, end) in enumerate(splits):
title = matches[j].group(1)
if title not in _valid:
continue
chunks[title] = meta_chunk[start:end].strip("\n")
if not chunks:
return ret
# Add elements from each chunk
for title, chunk in chunks.items():
# Determine indent
indent_match = re.search(r"^\s+", chunk)
if not indent_match:
raise ParseError(f'Can\'t infer indent from "{chunk}"')
indent = indent_match.group()
# Check for returns/yields (only one element)
if _sections[title] in ("returns", "yields"):
part = inspect.cleandoc(chunk)
ret.meta.append(_build_meta(part, title))
continue
# Split based on lines which have exactly that indent
_re = "^" + indent + r"(?=\S)"
c_matches = list(re.finditer(_re, chunk, flags=re.M))
if not c_matches:
raise ParseError(f'No specification for "{title}": "{chunk}"')
c_splits = []
for j in range(len(c_matches) - 1):
c_splits.append((c_matches[j].end(), c_matches[j + 1].start()))
c_splits.append((c_matches[-1].end(), len(chunk)))
for j, (start, end) in enumerate(c_splits):
part = chunk[start:end].strip("\n")
ret.meta.append(_build_meta(part, title))
return ret | Parse the Google-style docstring into its components.
:returns: parsed docstring | Below is the the instruction that describes the task:
### Input:
Parse the Google-style docstring into its components.
:returns: parsed docstring
### Response:
def parse(text: str) -> Docstring:
"""
Parse the Google-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
# Clean according to PEP-0257
text = inspect.cleandoc(text)
# Find first title and split on its position
match = _titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
# Break description into short and long parts
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith("\n")
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
# Split by sections determined by titles
matches = list(_titles_re.finditer(meta_chunk))
if not matches:
return ret
splits = []
for j in range(len(matches) - 1):
splits.append((matches[j].end(), matches[j + 1].start()))
splits.append((matches[-1].end(), len(meta_chunk)))
chunks = {}
for j, (start, end) in enumerate(splits):
title = matches[j].group(1)
if title not in _valid:
continue
chunks[title] = meta_chunk[start:end].strip("\n")
if not chunks:
return ret
# Add elements from each chunk
for title, chunk in chunks.items():
# Determine indent
indent_match = re.search(r"^\s+", chunk)
if not indent_match:
raise ParseError(f'Can\'t infer indent from "{chunk}"')
indent = indent_match.group()
        # Check for returns/yields (only one element)
if _sections[title] in ("returns", "yields"):
part = inspect.cleandoc(chunk)
ret.meta.append(_build_meta(part, title))
continue
# Split based on lines which have exactly that indent
_re = "^" + indent + r"(?=\S)"
c_matches = list(re.finditer(_re, chunk, flags=re.M))
if not c_matches:
raise ParseError(f'No specification for "{title}": "{chunk}"')
c_splits = []
for j in range(len(c_matches) - 1):
c_splits.append((c_matches[j].end(), c_matches[j + 1].start()))
c_splits.append((c_matches[-1].end(), len(chunk)))
for j, (start, end) in enumerate(c_splits):
part = chunk[start:end].strip("\n")
ret.meta.append(_build_meta(part, title))
return ret |
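The core of the parser above is a split-on-title-matches idiom: locate every section heading with a regex, then pair consecutive match boundaries into spans and slice out each chunk. A self-contained illustration of just that idiom; the regex here is a simplified stand-in for the module-level `_titles_re`, which is not shown in this entry:

```python
import re

_titles_re = re.compile(r"^(Args|Returns|Raises|Yields|Examples)\s*:\s*$", flags=re.M)

text = """Args:
    x: first input
    y: second input
Returns:
    Sum of x and y.
"""

matches = list(_titles_re.finditer(text))
# Pair consecutive match boundaries into (start, end) spans; the last span runs to the end.
splits = [(matches[i].end(), matches[i + 1].start()) for i in range(len(matches) - 1)]
splits.append((matches[-1].end(), len(text)))

for match, (start, end) in zip(matches, splits):
    print(match.group(1), "->", repr(text[start:end].strip("\n")))
# Args -> '    x: first input\n    y: second input'
# Returns -> '    Sum of x and y.'
```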
def create_token(self,
token_name,
project_name,
dataset_name,
is_public):
"""
Creates a token with the given parameters.
Arguments:
project_name (str): Project name
dataset_name (str): Dataset name project is based on
token_name (str): Token name
is_public (int): 1 is public. 0 is not public
Returns:
bool: True if project created, false if not created.
"""
return self.resources.create_token(token_name,
project_name,
dataset_name,
is_public) | Creates a token with the given parameters.
Arguments:
project_name (str): Project name
dataset_name (str): Dataset name project is based on
token_name (str): Token name
is_public (int): 1 is public. 0 is not public
Returns:
bool: True if project created, false if not created. | Below is the the instruction that describes the task:
### Input:
Creates a token with the given parameters.
Arguments:
project_name (str): Project name
dataset_name (str): Dataset name project is based on
token_name (str): Token name
is_public (int): 1 is public. 0 is not public
Returns:
bool: True if project created, false if not created.
### Response:
def create_token(self,
token_name,
project_name,
dataset_name,
is_public):
"""
Creates a token with the given parameters.
Arguments:
project_name (str): Project name
dataset_name (str): Dataset name project is based on
token_name (str): Token name
is_public (int): 1 is public. 0 is not public
Returns:
bool: True if project created, false if not created.
"""
return self.resources.create_token(token_name,
project_name,
dataset_name,
is_public) |
def K(self):
"""Normalizing constant for wishart CDF."""
K1 = np.float_power(pi, 0.5 * self.n_min * self.n_min)
K1 /= (
np.float_power(2, 0.5 * self.n_min * self._n_max)
* self._mgamma(0.5 * self._n_max, self.n_min)
* self._mgamma(0.5 * self.n_min, self.n_min)
)
K2 = np.float_power(
2, self.alpha * self.size + 0.5 * self.size * (self.size + 1)
)
for i in xrange(self.size):
K2 *= gamma(self.alpha + i + 1)
return K1 * K2 | Normalizing constant for wishart CDF. | Below is the the instruction that describes the task:
### Input:
Normalizing constant for wishart CDF.
### Response:
def K(self):
"""Normalizing constant for wishart CDF."""
K1 = np.float_power(pi, 0.5 * self.n_min * self.n_min)
K1 /= (
np.float_power(2, 0.5 * self.n_min * self._n_max)
* self._mgamma(0.5 * self._n_max, self.n_min)
* self._mgamma(0.5 * self.n_min, self.n_min)
)
K2 = np.float_power(
2, self.alpha * self.size + 0.5 * self.size * (self.size + 1)
)
for i in xrange(self.size):
K2 *= gamma(self.alpha + i + 1)
return K1 * K2 |
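Reading the constant straight off the code above, and assuming `_mgamma(a, m)` implements the multivariate gamma function Γ_m(a), the quantity computed is (with m = size):

```latex
K = K_1 K_2, \qquad
K_1 = \frac{\pi^{n_{\min}^{2}/2}}
           {2^{\,n_{\min} n_{\max}/2}\,
            \Gamma_{n_{\min}}\!\left(\tfrac{n_{\max}}{2}\right)\,
            \Gamma_{n_{\min}}\!\left(\tfrac{n_{\min}}{2}\right)}, \qquad
K_2 = 2^{\,\alpha m + m(m+1)/2} \prod_{i=1}^{m} \Gamma(\alpha + i)
```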
def to_pfull_from_phalf(arr, pfull_coord):
"""Compute data at full pressure levels from values at half levels."""
phalf_top = arr.isel(**{internal_names.PHALF_STR: slice(1, None)})
phalf_top = replace_coord(phalf_top, internal_names.PHALF_STR,
internal_names.PFULL_STR, pfull_coord)
phalf_bot = arr.isel(**{internal_names.PHALF_STR: slice(None, -1)})
phalf_bot = replace_coord(phalf_bot, internal_names.PHALF_STR,
internal_names.PFULL_STR, pfull_coord)
return 0.5*(phalf_bot + phalf_top) | Compute data at full pressure levels from values at half levels. | Below is the the instruction that describes the task:
### Input:
Compute data at full pressure levels from values at half levels.
### Response:
def to_pfull_from_phalf(arr, pfull_coord):
"""Compute data at full pressure levels from values at half levels."""
phalf_top = arr.isel(**{internal_names.PHALF_STR: slice(1, None)})
phalf_top = replace_coord(phalf_top, internal_names.PHALF_STR,
internal_names.PFULL_STR, pfull_coord)
phalf_bot = arr.isel(**{internal_names.PHALF_STR: slice(None, -1)})
phalf_bot = replace_coord(phalf_bot, internal_names.PHALF_STR,
internal_names.PFULL_STR, pfull_coord)
return 0.5*(phalf_bot + phalf_top) |
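Stripped of the xarray bookkeeping above (the two `isel` slices and the `replace_coord` relabeling from PHALF to PFULL so the shifted slices align), the arithmetic is an average of adjacent half-level values; a toy NumPy version:

```python
import numpy as np

# N+1 half-level values -> N full-level values by averaging adjacent pairs.
phalf = np.array([1000.0, 850.0, 700.0, 500.0])
pfull = 0.5 * (phalf[:-1] + phalf[1:])
print(pfull)  # [925. 775. 600.]
```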
def compose_view(bg_svgs, fg_svgs, ref=0, out_file='report.svg'):
"""
Composes the input svgs into one standalone svg and inserts
the CSS code for the flickering animation
"""
import svgutils.transform as svgt
if fg_svgs is None:
fg_svgs = []
# Merge SVGs and get roots
svgs = bg_svgs + fg_svgs
roots = [f.getroot() for f in svgs]
# Query the size of each
sizes = []
for f in svgs:
viewbox = [float(v) for v in f.root.get("viewBox").split(" ")]
width = int(viewbox[2])
height = int(viewbox[3])
sizes.append((width, height))
nsvgs = len(bg_svgs)
sizes = np.array(sizes)
# Calculate the scale to fit all widths
width = sizes[ref, 0]
scales = width / sizes[:, 0]
heights = sizes[:, 1] * scales
# Compose the views panel: total size is the width of
# any element (used the first here) and the sum of heights
fig = svgt.SVGFigure(width, heights[:nsvgs].sum())
yoffset = 0
for i, r in enumerate(roots):
r.moveto(0, yoffset, scale=scales[i])
if i == (nsvgs - 1):
yoffset = 0
else:
yoffset += heights[i]
# Group background and foreground panels in two groups
if fg_svgs:
newroots = [
svgt.GroupElement(roots[:nsvgs], {'class': 'background-svg'}),
svgt.GroupElement(roots[nsvgs:], {'class': 'foreground-svg'})
]
else:
newroots = roots
fig.append(newroots)
fig.root.attrib.pop("width")
fig.root.attrib.pop("height")
fig.root.set("preserveAspectRatio", "xMidYMid meet")
out_file = op.abspath(out_file)
fig.save(out_file)
# Post processing
with open(out_file, 'r' if PY3 else 'rb') as f:
svg = f.read().split('\n')
# Remove <?xml... line
if svg[0].startswith("<?xml"):
svg = svg[1:]
# Add styles for the flicker animation
if fg_svgs:
svg.insert(2, """\
<style type="text/css">
@keyframes flickerAnimation%s { 0%% {opacity: 1;} 100%% { opacity: 0; }}
.foreground-svg { animation: 1s ease-in-out 0s alternate none infinite paused flickerAnimation%s;}
.foreground-svg:hover { animation-play-state: running;}
</style>""" % tuple([uuid4()] * 2))
with open(out_file, 'w' if PY3 else 'wb') as f:
f.write('\n'.join(svg))
return out_file | Composes the input svgs into one standalone svg and inserts
the CSS code for the flickering animation | Below is the the instruction that describes the task:
### Input:
Composes the input svgs into one standalone svg and inserts
the CSS code for the flickering animation
### Response:
def compose_view(bg_svgs, fg_svgs, ref=0, out_file='report.svg'):
"""
Composes the input svgs into one standalone svg and inserts
the CSS code for the flickering animation
"""
import svgutils.transform as svgt
if fg_svgs is None:
fg_svgs = []
# Merge SVGs and get roots
svgs = bg_svgs + fg_svgs
roots = [f.getroot() for f in svgs]
# Query the size of each
sizes = []
for f in svgs:
viewbox = [float(v) for v in f.root.get("viewBox").split(" ")]
width = int(viewbox[2])
height = int(viewbox[3])
sizes.append((width, height))
nsvgs = len(bg_svgs)
sizes = np.array(sizes)
# Calculate the scale to fit all widths
width = sizes[ref, 0]
scales = width / sizes[:, 0]
heights = sizes[:, 1] * scales
# Compose the views panel: total size is the width of
# any element (used the first here) and the sum of heights
fig = svgt.SVGFigure(width, heights[:nsvgs].sum())
yoffset = 0
for i, r in enumerate(roots):
r.moveto(0, yoffset, scale=scales[i])
if i == (nsvgs - 1):
yoffset = 0
else:
yoffset += heights[i]
# Group background and foreground panels in two groups
if fg_svgs:
newroots = [
svgt.GroupElement(roots[:nsvgs], {'class': 'background-svg'}),
svgt.GroupElement(roots[nsvgs:], {'class': 'foreground-svg'})
]
else:
newroots = roots
fig.append(newroots)
fig.root.attrib.pop("width")
fig.root.attrib.pop("height")
fig.root.set("preserveAspectRatio", "xMidYMid meet")
out_file = op.abspath(out_file)
fig.save(out_file)
# Post processing
with open(out_file, 'r' if PY3 else 'rb') as f:
svg = f.read().split('\n')
# Remove <?xml... line
if svg[0].startswith("<?xml"):
svg = svg[1:]
# Add styles for the flicker animation
if fg_svgs:
svg.insert(2, """\
<style type="text/css">
@keyframes flickerAnimation%s { 0%% {opacity: 1;} 100%% { opacity: 0; }}
.foreground-svg { animation: 1s ease-in-out 0s alternate none infinite paused flickerAnimation%s;}
.foreground-svg:hover { animation-play-state: running;}
</style>""" % tuple([uuid4()] * 2))
with open(out_file, 'w' if PY3 else 'wb') as f:
f.write('\n'.join(svg))
return out_file |
def api_version(created_ver, last_changed_ver, return_value_ver):
"""Version check decorator. Currently only checks Bigger Than."""
def api_min_version_decorator(function):
def wrapper(function, self, *args, **kwargs):
if not self.version_check_mode == "none":
if self.version_check_mode == "created":
version = created_ver
else:
version = bigger_version(last_changed_ver, return_value_ver)
major, minor, patch = parse_version_string(version)
if major > self.mastodon_major:
raise MastodonVersionError("Version check failed (Need version " + version + ")")
elif major == self.mastodon_major and minor > self.mastodon_minor:
print(self.mastodon_minor)
raise MastodonVersionError("Version check failed (Need version " + version + ")")
elif major == self.mastodon_major and minor == self.mastodon_minor and patch > self.mastodon_patch:
raise MastodonVersionError("Version check failed (Need version " + version + ", patch is " + str(self.mastodon_patch) + ")")
return function(self, *args, **kwargs)
function.__doc__ = function.__doc__ + "\n\n *Added: Mastodon v" + created_ver + ", last changed: Mastodon v" + last_changed_ver + "*"
return decorate(function, wrapper)
return api_min_version_decorator | Version check decorator. Currently only checks Bigger Than. | Below is the the instruction that describes the task:
### Input:
Version check decorator. Currently only checks Bigger Than.
### Response:
def api_version(created_ver, last_changed_ver, return_value_ver):
"""Version check decorator. Currently only checks Bigger Than."""
def api_min_version_decorator(function):
def wrapper(function, self, *args, **kwargs):
if not self.version_check_mode == "none":
if self.version_check_mode == "created":
version = created_ver
else:
version = bigger_version(last_changed_ver, return_value_ver)
major, minor, patch = parse_version_string(version)
if major > self.mastodon_major:
raise MastodonVersionError("Version check failed (Need version " + version + ")")
elif major == self.mastodon_major and minor > self.mastodon_minor:
print(self.mastodon_minor)
raise MastodonVersionError("Version check failed (Need version " + version + ")")
elif major == self.mastodon_major and minor == self.mastodon_minor and patch > self.mastodon_patch:
raise MastodonVersionError("Version check failed (Need version " + version + ", patch is " + str(self.mastodon_patch) + ")")
return function(self, *args, **kwargs)
function.__doc__ = function.__doc__ + "\n\n *Added: Mastodon v" + created_ver + ", last changed: Mastodon v" + last_changed_ver + "*"
return decorate(function, wrapper)
return api_min_version_decorator |
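The chained major/minor/patch checks in the decorator above amount to a lexicographic comparison of version triples. A self-contained sketch of that comparison; `parse_version_string` here is a simplified stand-in for the helper used above, which is not shown in this entry:

```python
def parse_version_string(version):
    # "2.4.3" -> (2, 4, 3); assumes a well-formed "major.minor.patch" string.
    return tuple(int(part) for part in version.split("."))

def meets_requirement(required, available):
    # Equivalent to the chained major/minor/patch checks in the decorator above.
    return parse_version_string(available) >= parse_version_string(required)

print(meets_requirement("2.4.3", "2.5.0"))  # True
print(meets_requirement("2.4.3", "2.4.1"))  # False
```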
def BARzero(w_F, w_R, DeltaF):
"""A function that when zeroed is equivalent to the solution of
the Bennett acceptance ratio.
from http://journals.aps.org/prl/pdf/10.1103/PhysRevLett.91.140601
D_F = M + w_F - Delta F
D_R = M + w_R - Delta F
we want:
\sum_N_F (1+exp(D_F))^-1 = \sum N_R N_R <(1+exp(-D_R))^-1>
ln \sum N_F (1+exp(D_F))^-1>_F = \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R
ln \sum N_F (1+exp(D_F))^-1>_F - \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R = 0
Parameters
----------
w_F : np.ndarray
w_F[t] is the forward work value from snapshot t.
t = 0...(T_F-1) Length T_F is deduced from vector.
w_R : np.ndarray
w_R[t] is the reverse work value from snapshot t.
t = 0...(T_R-1) Length T_R is deduced from vector.
DeltaF : float
Our current guess
Returns
-------
fzero : float
a variable that is zeroed when DeltaF satisfies BAR.
Examples
--------
Compute free energy difference between two specified samples of work values.
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> DeltaF = BARzero(w_F, w_R, 0.0)
"""
np.seterr(over='raise') # raise exceptions to overflows
w_F = np.array(w_F, np.float64)
w_R = np.array(w_R, np.float64)
DeltaF = float(DeltaF)
# Recommended stable implementation of BAR.
# Determine number of forward and reverse work values provided.
T_F = float(w_F.size) # number of forward work values
T_R = float(w_R.size) # number of reverse work values
# Compute log ratio of forward and reverse counts.
M = np.log(T_F / T_R)
# Compute log numerator. We have to watch out for overflows. We
# do this by making sure that 1+exp(x) doesn't overflow, choosing
# to always exponentiate a negative number.
# log f(W) = - log [1 + exp((M + W - DeltaF))]
# = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )
# = - maxarg - log(exp[-maxarg] + exp[(M + W - DeltaF) - maxarg])
# where maxarg = max((M + W - DeltaF), 0)
exp_arg_F = (M + w_F - DeltaF)
# use boolean logic to zero out the ones that are less than 0, but not if greater than zero.
max_arg_F = np.choose(np.less(0.0, exp_arg_F), (0.0, exp_arg_F))
try:
log_f_F = - max_arg_F - np.log(np.exp(-max_arg_F) + np.exp(exp_arg_F - max_arg_F))
except:
        # give up; if there's overflow, return NaN
print("The input data results in overflow in BAR")
return np.nan
log_numer = logsumexp(log_f_F)
# Compute log_denominator.
# log f(R) = - log [1 + exp(-(M + W - DeltaF))]
# = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )
# = - maxarg - log[exp[-maxarg] + (T_F/T_R) exp[(M + W - DeltaF) - maxarg]]
# where maxarg = max( -(M + W - DeltaF), 0)
exp_arg_R = -(M - w_R - DeltaF)
# use boolean logic to zero out the ones that are less than 0, but not if greater than zero.
max_arg_R = np.choose(np.less(0.0, exp_arg_R), (0.0, exp_arg_R))
try:
log_f_R = - max_arg_R - np.log(np.exp(-max_arg_R) + np.exp(exp_arg_R - max_arg_R))
except:
print("The input data results in overflow in BAR")
return np.nan
log_denom = logsumexp(log_f_R)
# This function must be zeroed to find a root
fzero = log_numer - log_denom
np.seterr(over='warn') # return options to standard settings so we don't disturb other functionality.
return fzero | A function that when zeroed is equivalent to the solution of
the Bennett acceptance ratio.
from http://journals.aps.org/prl/pdf/10.1103/PhysRevLett.91.140601
D_F = M + w_F - Delta F
D_R = M + w_R - Delta F
we want:
\sum_N_F (1+exp(D_F))^-1 = \sum N_R N_R <(1+exp(-D_R))^-1>
ln \sum N_F (1+exp(D_F))^-1>_F = \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R
ln \sum N_F (1+exp(D_F))^-1>_F - \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R = 0
Parameters
----------
w_F : np.ndarray
w_F[t] is the forward work value from snapshot t.
t = 0...(T_F-1) Length T_F is deduced from vector.
w_R : np.ndarray
w_R[t] is the reverse work value from snapshot t.
t = 0...(T_R-1) Length T_R is deduced from vector.
DeltaF : float
Our current guess
Returns
-------
fzero : float
a variable that is zeroed when DeltaF satisfies BAR.
Examples
--------
Compute free energy difference between two specified samples of work values.
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> DeltaF = BARzero(w_F, w_R, 0.0) | Below is the the instruction that describes the task:
### Input:
A function that when zeroed is equivalent to the solution of
the Bennett acceptance ratio.
from http://journals.aps.org/prl/pdf/10.1103/PhysRevLett.91.140601
D_F = M + w_F - Delta F
D_R = M + w_R - Delta F
we want:
\sum_N_F (1+exp(D_F))^-1 = \sum N_R N_R <(1+exp(-D_R))^-1>
ln \sum N_F (1+exp(D_F))^-1>_F = \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R
ln \sum N_F (1+exp(D_F))^-1>_F - \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R = 0
Parameters
----------
w_F : np.ndarray
w_F[t] is the forward work value from snapshot t.
t = 0...(T_F-1) Length T_F is deduced from vector.
w_R : np.ndarray
w_R[t] is the reverse work value from snapshot t.
t = 0...(T_R-1) Length T_R is deduced from vector.
DeltaF : float
Our current guess
Returns
-------
fzero : float
a variable that is zeroed when DeltaF satisfies BAR.
Examples
--------
Compute free energy difference between two specified samples of work values.
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> DeltaF = BARzero(w_F, w_R, 0.0)
### Response:
def BARzero(w_F, w_R, DeltaF):
"""A function that when zeroed is equivalent to the solution of
the Bennett acceptance ratio.
from http://journals.aps.org/prl/pdf/10.1103/PhysRevLett.91.140601
D_F = M + w_F - Delta F
D_R = M + w_R - Delta F
we want:
\sum_N_F (1+exp(D_F))^-1 = \sum N_R N_R <(1+exp(-D_R))^-1>
ln \sum N_F (1+exp(D_F))^-1>_F = \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R
ln \sum N_F (1+exp(D_F))^-1>_F - \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R = 0
Parameters
----------
w_F : np.ndarray
w_F[t] is the forward work value from snapshot t.
t = 0...(T_F-1) Length T_F is deduced from vector.
w_R : np.ndarray
w_R[t] is the reverse work value from snapshot t.
t = 0...(T_R-1) Length T_R is deduced from vector.
DeltaF : float
Our current guess
Returns
-------
fzero : float
a variable that is zeroed when DeltaF satisfies BAR.
Examples
--------
Compute free energy difference between two specified samples of work values.
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> DeltaF = BARzero(w_F, w_R, 0.0)
"""
np.seterr(over='raise') # raise exceptions to overflows
w_F = np.array(w_F, np.float64)
w_R = np.array(w_R, np.float64)
DeltaF = float(DeltaF)
# Recommended stable implementation of BAR.
# Determine number of forward and reverse work values provided.
T_F = float(w_F.size) # number of forward work values
T_R = float(w_R.size) # number of reverse work values
# Compute log ratio of forward and reverse counts.
M = np.log(T_F / T_R)
# Compute log numerator. We have to watch out for overflows. We
# do this by making sure that 1+exp(x) doesn't overflow, choosing
# to always exponentiate a negative number.
# log f(W) = - log [1 + exp((M + W - DeltaF))]
# = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )
# = - maxarg - log(exp[-maxarg] + exp[(M + W - DeltaF) - maxarg])
# where maxarg = max((M + W - DeltaF), 0)
exp_arg_F = (M + w_F - DeltaF)
# use boolean logic to zero out the ones that are less than 0, but not if greater than zero.
max_arg_F = np.choose(np.less(0.0, exp_arg_F), (0.0, exp_arg_F))
try:
log_f_F = - max_arg_F - np.log(np.exp(-max_arg_F) + np.exp(exp_arg_F - max_arg_F))
except:
        # give up; if there's overflow, return NaN
print("The input data results in overflow in BAR")
return np.nan
log_numer = logsumexp(log_f_F)
# Compute log_denominator.
# log f(R) = - log [1 + exp(-(M + W - DeltaF))]
# = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )
# = - maxarg - log[exp[-maxarg] + (T_F/T_R) exp[(M + W - DeltaF) - maxarg]]
# where maxarg = max( -(M + W - DeltaF), 0)
exp_arg_R = -(M - w_R - DeltaF)
# use boolean logic to zero out the ones that are less than 0, but not if greater than zero.
max_arg_R = np.choose(np.less(0.0, exp_arg_R), (0.0, exp_arg_R))
try:
log_f_R = - max_arg_R - np.log(np.exp(-max_arg_R) + np.exp(exp_arg_R - max_arg_R))
except:
print("The input data results in overflow in BAR")
return np.nan
log_denom = logsumexp(log_f_R)
# This function must be zeroed to find a root
fzero = log_numer - log_denom
np.seterr(over='warn') # return options to standard settings so we don't disturb other functionality.
return fzero |
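`BARzero` is zero exactly at the BAR estimate of the free energy difference, so one way to use it is to hand it to a scalar root finder. A sketch that reuses the test-system call from the docstring; the choice of `brentq` and the bracket [-10, 10] are illustrative, not necessarily how the surrounding library solves it:

```python
# Recover DeltaF as the root of BARzero(w_F, w_R, x) in x,
# assuming BARzero from above is in scope.
from scipy.optimize import brentq
from pymbar import testsystems

w_F, w_R = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
delta_f = brentq(lambda x: BARzero(w_F, w_R, x), -10.0, 10.0)
print(delta_f)  # should land close to the true DeltaF of 1.0
```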
def _inv_cls(cls):
"""The inverse of this bidict type, i.e. one with *_fwdm_cls* and *_invm_cls* swapped."""
if cls._fwdm_cls is cls._invm_cls:
return cls
if not getattr(cls, '_inv_cls_', None):
class _Inv(cls):
_fwdm_cls = cls._invm_cls
_invm_cls = cls._fwdm_cls
_inv_cls_ = cls
_Inv.__name__ = cls.__name__ + 'Inv'
cls._inv_cls_ = _Inv
return cls._inv_cls_ | The inverse of this bidict type, i.e. one with *_fwdm_cls* and *_invm_cls* swapped. | Below is the the instruction that describes the task:
### Input:
The inverse of this bidict type, i.e. one with *_fwdm_cls* and *_invm_cls* swapped.
### Response:
def _inv_cls(cls):
"""The inverse of this bidict type, i.e. one with *_fwdm_cls* and *_invm_cls* swapped."""
if cls._fwdm_cls is cls._invm_cls:
return cls
if not getattr(cls, '_inv_cls_', None):
class _Inv(cls):
_fwdm_cls = cls._invm_cls
_invm_cls = cls._fwdm_cls
_inv_cls_ = cls
_Inv.__name__ = cls.__name__ + 'Inv'
cls._inv_cls_ = _Inv
return cls._inv_cls_ |
def stop(self, timeout: int = 5) -> None:
"""
Try to stop the transaction store in the given timeout or raise an
exception.
"""
self.running = False
start = time.perf_counter()
while True:
if self.getsCounter == 0:
return True
elif time.perf_counter() <= start + timeout:
time.sleep(.1)
else:
raise StopTimeout("Stop timed out waiting for {} gets to "
"complete.".format(self.getsCounter)) | Try to stop the transaction store in the given timeout or raise an
exception. | Below is the the instruction that describes the task:
### Input:
Try to stop the transaction store in the given timeout or raise an
exception.
### Response:
def stop(self, timeout: int = 5) -> None:
"""
Try to stop the transaction store in the given timeout or raise an
exception.
"""
self.running = False
start = time.perf_counter()
while True:
if self.getsCounter == 0:
return True
elif time.perf_counter() <= start + timeout:
time.sleep(.1)
else:
raise StopTimeout("Stop timed out waiting for {} gets to "
"complete.".format(self.getsCounter)) |
def is_nsphere(points):
"""
Check if a list of points is an nsphere.
Parameters
-----------
points : (n, dimension) float
Points in space
Returns
-----------
check : bool
True if input points are on an nsphere
"""
center, radius, error = fit_nsphere(points)
check = error < tol.merge
return check | Check if a list of points is an nsphere.
Parameters
-----------
points : (n, dimension) float
Points in space
Returns
-----------
check : bool
True if input points are on an nsphere | Below is the the instruction that describes the task:
### Input:
Check if a list of points is an nsphere.
Parameters
-----------
points : (n, dimension) float
Points in space
Returns
-----------
check : bool
True if input points are on an nsphere
### Response:
def is_nsphere(points):
"""
Check if a list of points is an nsphere.
Parameters
-----------
points : (n, dimension) float
Points in space
Returns
-----------
check : bool
True if input points are on an nsphere
"""
center, radius, error = fit_nsphere(points)
check = error < tol.merge
return check |
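`fit_nsphere` itself is not shown in this entry; a common way to implement such a fit is to linearize |p - c|^2 = r^2 into 2 p·c + (r^2 - |c|^2) = |p|^2 and solve the resulting least-squares system. The sketch below follows that route; the residual definition is illustrative and may differ from what `fit_nsphere` actually returns:

```python
import numpy as np

def fit_nsphere_sketch(points):
    points = np.asarray(points, dtype=np.float64)
    # Rows [2*p, 1] applied to unknowns [c, r^2 - |c|^2] should equal |p|^2.
    A = np.column_stack([2.0 * points, np.ones(len(points))])
    b = (points ** 2).sum(axis=1)
    x, *_ = np.linalg.lstsq(A, b, rcond=None)
    center = x[:-1]
    radius = np.sqrt(x[-1] + center.dot(center))
    error = np.abs(np.linalg.norm(points - center, axis=1) - radius).max()
    return center, radius, error

theta = np.linspace(0, 2 * np.pi, 50, endpoint=False)
circle = np.column_stack([np.cos(theta), np.sin(theta)])
print(fit_nsphere_sketch(circle))  # center ~ (0, 0), radius ~ 1, error ~ 0
```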
def spawn_program(self, name, arguments=[], timeout=30, exclusive=False):
"""Spawns a program in the working directory.
This method allows the interaction with the running program,
based on the returned RunningProgram object.
Args:
name (str): The name of the program to be executed.
arguments (tuple): Command-line arguments for the program.
timeout (int): The timeout for execution.
exclusive (bool): Prevent parallel validation runs on the
test machines, e.g. when doing performance
measurements for submitted code.
Returns:
RunningProgram: An object representing the running program.
"""
logger.debug("Spawning program for interaction ...")
if exclusive:
kill_longrunning(self.config)
return RunningProgram(self, name, arguments, timeout) | Spawns a program in the working directory.
This method allows the interaction with the running program,
based on the returned RunningProgram object.
Args:
name (str): The name of the program to be executed.
arguments (tuple): Command-line arguments for the program.
timeout (int): The timeout for execution.
exclusive (bool): Prevent parallel validation runs on the
test machines, e.g. when doing performance
measurements for submitted code.
Returns:
RunningProgram: An object representing the running program. | Below is the the instruction that describes the task:
### Input:
Spawns a program in the working directory.
This method allows the interaction with the running program,
based on the returned RunningProgram object.
Args:
name (str): The name of the program to be executed.
arguments (tuple): Command-line arguments for the program.
timeout (int): The timeout for execution.
exclusive (bool): Prevent parallel validation runs on the
test machines, e.g. when doing performance
measurements for submitted code.
Returns:
RunningProgram: An object representing the running program.
### Response:
def spawn_program(self, name, arguments=[], timeout=30, exclusive=False):
"""Spawns a program in the working directory.
This method allows the interaction with the running program,
based on the returned RunningProgram object.
Args:
name (str): The name of the program to be executed.
arguments (tuple): Command-line arguments for the program.
timeout (int): The timeout for execution.
exclusive (bool): Prevent parallel validation runs on the
test machines, e.g. when doing performance
measurements for submitted code.
Returns:
RunningProgram: An object representing the running program.
"""
logger.debug("Spawning program for interaction ...")
if exclusive:
kill_longrunning(self.config)
return RunningProgram(self, name, arguments, timeout) |
def posterior_mode(self, observations, name=None):
"""Compute maximum likelihood sequence of hidden states.
When this function is provided with a sequence of observations
`x[0], ..., x[num_steps - 1]`, it returns the sequence of hidden
states `z[0], ..., z[num_steps - 1]`, drawn from the underlying
Markov chain, that is most likely to yield those observations.
It uses the [Viterbi algorithm](
https://en.wikipedia.org/wiki/Viterbi_algorithm).
Note: the behavior of this function is undefined if the
`observations` argument represents impossible observations
from the model.
Note: if there isn't a unique most likely sequence then one
of the equally most likely sequences is chosen.
Args:
observations: A tensor representing a batch of observations made on the
hidden Markov model. The rightmost dimensions of this tensor correspond
to the dimensions of the observation distributions of the underlying
Markov chain. The next dimension from the right indexes the steps in a
sequence of observations from a single sample from the hidden Markov
model. The size of this dimension should match the `num_steps`
parameter of the hidden Markov model object. The other dimensions are
the dimensions of the batch and these are broadcast with the hidden
Markov model's parameters.
name: Python `str` name prefixed to Ops created by this class.
Default value: "HiddenMarkovModel".
Returns:
posterior_mode: A `Tensor` representing the most likely sequence of hidden
states. The rightmost dimension of this tensor will equal the
`num_steps` parameter providing one hidden state for each step. The
other dimensions are those of the batch.
Raises:
ValueError: if the `observations` tensor does not consist of
sequences of `num_steps` observations.
#### Examples
```python
tfd = tfp.distributions
# A simple weather model.
# Represent a cold day with 0 and a hot day with 1.
# Suppose the first day of a sequence has a 0.8 chance of being cold.
initial_distribution = tfd.Categorical(probs=[0.8, 0.2])
# Suppose a cold day has a 30% chance of being followed by a hot day
# and a hot day has a 20% chance of being followed by a cold day.
transition_distribution = tfd.Categorical(probs=[[0.7, 0.3],
[0.2, 0.8]])
# Suppose additionally that on each day the temperature is
# normally distributed with mean and standard deviation 0 and 5 on
# a cold day and mean and standard deviation 15 and 10 on a hot day.
observation_distribution = tfd.Normal(loc=[0., 15.], scale=[5., 10.])
# This gives the hidden Markov model:
model = tfd.HiddenMarkovModel(
initial_distribution=initial_distribution,
transition_distribution=transition_distribution,
observation_distribution=observation_distribution,
num_steps=7)
# Suppose we observe gradually rising temperatures over a week:
temps = [-2., 0., 2., 4., 6., 8., 10.]
# We can now compute the most probable sequence of hidden states:
model.posterior_mode(temps)
# The result is [0 0 0 0 0 1 1] telling us that the transition
# from "cold" to "hot" most likely happened between the
# 5th and 6th days.
```
"""
with tf.name_scope(name or "posterior_mode"):
with tf.control_dependencies(self._runtime_assertions):
observation_tensor_shape = tf.shape(input=observations)
with self._observation_shape_preconditions(observation_tensor_shape):
observation_batch_shape = observation_tensor_shape[
:-1 - self._underlying_event_rank]
observation_event_shape = observation_tensor_shape[
-1 - self._underlying_event_rank:]
batch_shape = tf.broadcast_dynamic_shape(observation_batch_shape,
self.batch_shape_tensor())
log_init = tf.broadcast_to(self._log_init,
tf.concat([batch_shape,
[self._num_states]],
axis=0))
observations = tf.broadcast_to(observations,
tf.concat([batch_shape,
observation_event_shape],
axis=0))
observation_rank = tf.rank(observations)
underlying_event_rank = self._underlying_event_rank
observations = distribution_util.move_dimension(
observations, observation_rank - underlying_event_rank - 1, 0)
# We need to compute the probability of each observation for
# each possible state.
# This requires inserting an extra index just before the
# observation event indices that will be broadcast with the
# last batch index in `observation_distribution`.
observations = tf.expand_dims(
observations,
observation_rank - underlying_event_rank)
observation_log_probs = self._observation_distribution.log_prob(
observations)
log_prob = log_init + observation_log_probs[0]
if self._num_steps == 1:
most_likely_end = tf.argmax(input=log_prob, axis=-1)
return most_likely_end[..., tf.newaxis]
def forward_step(previous_step_pair, log_prob_observation):
log_prob_previous = previous_step_pair[0]
log_prob = (log_prob_previous[..., tf.newaxis] +
self._log_trans +
log_prob_observation[..., tf.newaxis, :])
most_likely_given_successor = tf.argmax(input=log_prob, axis=-2)
max_log_p_given_successor = tf.reduce_max(input_tensor=log_prob,
axis=-2)
return (max_log_p_given_successor, most_likely_given_successor)
forward_log_probs, all_most_likely_given_successor = tf.scan(
forward_step,
observation_log_probs[1:],
initializer=(log_prob,
tf.zeros(tf.shape(input=log_init), dtype=tf.int64)),
name="forward_log_probs")
most_likely_end = tf.argmax(input=forward_log_probs[-1], axis=-1)
# We require the operation that gives C from A and B where
# C[i...j] = A[i...j, B[i...j]]
# and A = most_likely_given_successor
# B = most_likely_successor.
# tf.gather requires indices of known shape so instead we use
# reduction with tf.one_hot(B) to pick out elements from B
def backward_step(most_likely_successor, most_likely_given_successor):
return tf.reduce_sum(
input_tensor=(most_likely_given_successor *
tf.one_hot(most_likely_successor,
self._num_states,
dtype=tf.int64)),
axis=-1)
backward_scan = tf.scan(
backward_step,
all_most_likely_given_successor,
most_likely_end,
reverse=True)
most_likely_sequences = tf.concat([backward_scan, [most_likely_end]],
axis=0)
return distribution_util.move_dimension(most_likely_sequences, 0, -1) | Compute maximum likelihood sequence of hidden states.
When this function is provided with a sequence of observations
`x[0], ..., x[num_steps - 1]`, it returns the sequence of hidden
states `z[0], ..., z[num_steps - 1]`, drawn from the underlying
Markov chain, that is most likely to yield those observations.
It uses the [Viterbi algorithm](
https://en.wikipedia.org/wiki/Viterbi_algorithm).
Note: the behavior of this function is undefined if the
`observations` argument represents impossible observations
from the model.
Note: if there isn't a unique most likely sequence then one
of the equally most likely sequences is chosen.
Args:
observations: A tensor representing a batch of observations made on the
hidden Markov model. The rightmost dimensions of this tensor correspond
to the dimensions of the observation distributions of the underlying
Markov chain. The next dimension from the right indexes the steps in a
sequence of observations from a single sample from the hidden Markov
model. The size of this dimension should match the `num_steps`
parameter of the hidden Markov model object. The other dimensions are
the dimensions of the batch and these are broadcast with the hidden
Markov model's parameters.
name: Python `str` name prefixed to Ops created by this class.
Default value: "HiddenMarkovModel".
Returns:
posterior_mode: A `Tensor` representing the most likely sequence of hidden
states. The rightmost dimension of this tensor will equal the
`num_steps` parameter providing one hidden state for each step. The
other dimensions are those of the batch.
Raises:
ValueError: if the `observations` tensor does not consist of
sequences of `num_steps` observations.
#### Examples
```python
tfd = tfp.distributions
# A simple weather model.
# Represent a cold day with 0 and a hot day with 1.
# Suppose the first day of a sequence has a 0.8 chance of being cold.
initial_distribution = tfd.Categorical(probs=[0.8, 0.2])
# Suppose a cold day has a 30% chance of being followed by a hot day
# and a hot day has a 20% chance of being followed by a cold day.
transition_distribution = tfd.Categorical(probs=[[0.7, 0.3],
[0.2, 0.8]])
# Suppose additionally that on each day the temperature is
# normally distributed with mean and standard deviation 0 and 5 on
# a cold day and mean and standard deviation 15 and 10 on a hot day.
observation_distribution = tfd.Normal(loc=[0., 15.], scale=[5., 10.])
# This gives the hidden Markov model:
model = tfd.HiddenMarkovModel(
initial_distribution=initial_distribution,
transition_distribution=transition_distribution,
observation_distribution=observation_distribution,
num_steps=7)
# Suppose we observe gradually rising temperatures over a week:
temps = [-2., 0., 2., 4., 6., 8., 10.]
# We can now compute the most probable sequence of hidden states:
model.posterior_mode(temps)
# The result is [0 0 0 0 0 1 1] telling us that the transition
# from "cold" to "hot" most likely happened between the
# 5th and 6th days.
``` | Below is the the instruction that describes the task:
### Input:
Compute maximum likelihood sequence of hidden states.
When this function is provided with a sequence of observations
`x[0], ..., x[num_steps - 1]`, it returns the sequence of hidden
states `z[0], ..., z[num_steps - 1]`, drawn from the underlying
Markov chain, that is most likely to yield those observations.
It uses the [Viterbi algorithm](
https://en.wikipedia.org/wiki/Viterbi_algorithm).
Note: the behavior of this function is undefined if the
`observations` argument represents impossible observations
from the model.
Note: if there isn't a unique most likely sequence then one
of the equally most likely sequences is chosen.
Args:
observations: A tensor representing a batch of observations made on the
hidden Markov model. The rightmost dimensions of this tensor correspond
to the dimensions of the observation distributions of the underlying
Markov chain. The next dimension from the right indexes the steps in a
sequence of observations from a single sample from the hidden Markov
model. The size of this dimension should match the `num_steps`
parameter of the hidden Markov model object. The other dimensions are
the dimensions of the batch and these are broadcast with the hidden
Markov model's parameters.
name: Python `str` name prefixed to Ops created by this class.
Default value: "HiddenMarkovModel".
Returns:
posterior_mode: A `Tensor` representing the most likely sequence of hidden
states. The rightmost dimension of this tensor will equal the
`num_steps` parameter providing one hidden state for each step. The
other dimensions are those of the batch.
Raises:
ValueError: if the `observations` tensor does not consist of
sequences of `num_steps` observations.
#### Examples
```python
tfd = tfp.distributions
# A simple weather model.
# Represent a cold day with 0 and a hot day with 1.
# Suppose the first day of a sequence has a 0.8 chance of being cold.
initial_distribution = tfd.Categorical(probs=[0.8, 0.2])
# Suppose a cold day has a 30% chance of being followed by a hot day
# and a hot day has a 20% chance of being followed by a cold day.
transition_distribution = tfd.Categorical(probs=[[0.7, 0.3],
[0.2, 0.8]])
# Suppose additionally that on each day the temperature is
# normally distributed with mean and standard deviation 0 and 5 on
# a cold day and mean and standard deviation 15 and 10 on a hot day.
observation_distribution = tfd.Normal(loc=[0., 15.], scale=[5., 10.])
# This gives the hidden Markov model:
model = tfd.HiddenMarkovModel(
initial_distribution=initial_distribution,
transition_distribution=transition_distribution,
observation_distribution=observation_distribution,
num_steps=7)
# Suppose we observe gradually rising temperatures over a week:
temps = [-2., 0., 2., 4., 6., 8., 10.]
# We can now compute the most probable sequence of hidden states:
model.posterior_mode(temps)
# The result is [0 0 0 0 0 1 1] telling us that the transition
# from "cold" to "hot" most likely happened between the
# 5th and 6th days.
```
### Response:
def posterior_mode(self, observations, name=None):
"""Compute maximum likelihood sequence of hidden states.
When this function is provided with a sequence of observations
`x[0], ..., x[num_steps - 1]`, it returns the sequence of hidden
states `z[0], ..., z[num_steps - 1]`, drawn from the underlying
Markov chain, that is most likely to yield those observations.
It uses the [Viterbi algorithm](
https://en.wikipedia.org/wiki/Viterbi_algorithm).
Note: the behavior of this function is undefined if the
`observations` argument represents impossible observations
from the model.
Note: if there isn't a unique most likely sequence then one
of the equally most likely sequences is chosen.
Args:
observations: A tensor representing a batch of observations made on the
hidden Markov model. The rightmost dimensions of this tensor correspond
to the dimensions of the observation distributions of the underlying
Markov chain. The next dimension from the right indexes the steps in a
sequence of observations from a single sample from the hidden Markov
model. The size of this dimension should match the `num_steps`
parameter of the hidden Markov model object. The other dimensions are
the dimensions of the batch and these are broadcast with the hidden
Markov model's parameters.
name: Python `str` name prefixed to Ops created by this class.
Default value: "HiddenMarkovModel".
Returns:
posterior_mode: A `Tensor` representing the most likely sequence of hidden
states. The rightmost dimension of this tensor will equal the
`num_steps` parameter providing one hidden state for each step. The
other dimensions are those of the batch.
Raises:
ValueError: if the `observations` tensor does not consist of
sequences of `num_steps` observations.
#### Examples
```python
tfd = tfp.distributions
# A simple weather model.
# Represent a cold day with 0 and a hot day with 1.
# Suppose the first day of a sequence has a 0.8 chance of being cold.
initial_distribution = tfd.Categorical(probs=[0.8, 0.2])
# Suppose a cold day has a 30% chance of being followed by a hot day
# and a hot day has a 20% chance of being followed by a cold day.
transition_distribution = tfd.Categorical(probs=[[0.7, 0.3],
[0.2, 0.8]])
# Suppose additionally that on each day the temperature is
# normally distributed with mean and standard deviation 0 and 5 on
# a cold day and mean and standard deviation 15 and 10 on a hot day.
observation_distribution = tfd.Normal(loc=[0., 15.], scale=[5., 10.])
# This gives the hidden Markov model:
model = tfd.HiddenMarkovModel(
initial_distribution=initial_distribution,
transition_distribution=transition_distribution,
observation_distribution=observation_distribution,
num_steps=7)
# Suppose we observe gradually rising temperatures over a week:
temps = [-2., 0., 2., 4., 6., 8., 10.]
# We can now compute the most probable sequence of hidden states:
model.posterior_mode(temps)
# The result is [0 0 0 0 0 1 1] telling us that the transition
# from "cold" to "hot" most likely happened between the
# 5th and 6th days.
```
"""
with tf.name_scope(name or "posterior_mode"):
with tf.control_dependencies(self._runtime_assertions):
observation_tensor_shape = tf.shape(input=observations)
with self._observation_shape_preconditions(observation_tensor_shape):
observation_batch_shape = observation_tensor_shape[
:-1 - self._underlying_event_rank]
observation_event_shape = observation_tensor_shape[
-1 - self._underlying_event_rank:]
batch_shape = tf.broadcast_dynamic_shape(observation_batch_shape,
self.batch_shape_tensor())
log_init = tf.broadcast_to(self._log_init,
tf.concat([batch_shape,
[self._num_states]],
axis=0))
observations = tf.broadcast_to(observations,
tf.concat([batch_shape,
observation_event_shape],
axis=0))
observation_rank = tf.rank(observations)
underlying_event_rank = self._underlying_event_rank
observations = distribution_util.move_dimension(
observations, observation_rank - underlying_event_rank - 1, 0)
# We need to compute the probability of each observation for
# each possible state.
# This requires inserting an extra index just before the
# observation event indices that will be broadcast with the
# last batch index in `observation_distribution`.
observations = tf.expand_dims(
observations,
observation_rank - underlying_event_rank)
observation_log_probs = self._observation_distribution.log_prob(
observations)
log_prob = log_init + observation_log_probs[0]
if self._num_steps == 1:
most_likely_end = tf.argmax(input=log_prob, axis=-1)
return most_likely_end[..., tf.newaxis]
def forward_step(previous_step_pair, log_prob_observation):
log_prob_previous = previous_step_pair[0]
log_prob = (log_prob_previous[..., tf.newaxis] +
self._log_trans +
log_prob_observation[..., tf.newaxis, :])
most_likely_given_successor = tf.argmax(input=log_prob, axis=-2)
max_log_p_given_successor = tf.reduce_max(input_tensor=log_prob,
axis=-2)
return (max_log_p_given_successor, most_likely_given_successor)
forward_log_probs, all_most_likely_given_successor = tf.scan(
forward_step,
observation_log_probs[1:],
initializer=(log_prob,
tf.zeros(tf.shape(input=log_init), dtype=tf.int64)),
name="forward_log_probs")
most_likely_end = tf.argmax(input=forward_log_probs[-1], axis=-1)
# We require the operation that gives C from A and B where
# C[i...j] = A[i...j, B[i...j]]
# and A = most_likely_given_successor
# B = most_likely_successor.
# tf.gather requires indices of known shape so instead we use
# reduction with tf.one_hot(B) to pick out elements from B
def backward_step(most_likely_successor, most_likely_given_successor):
return tf.reduce_sum(
input_tensor=(most_likely_given_successor *
tf.one_hot(most_likely_successor,
self._num_states,
dtype=tf.int64)),
axis=-1)
backward_scan = tf.scan(
backward_step,
all_most_likely_given_successor,
most_likely_end,
reverse=True)
most_likely_sequences = tf.concat([backward_scan, [most_likely_end]],
axis=0)
return distribution_util.move_dimension(most_likely_sequences, 0, -1) |
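A pure-NumPy/SciPy re-run of the weather example from the docstring makes the Viterbi recursion above concrete (illustrative only; with the same parameters it should reproduce the [0 0 0 0 0 1 1] path):

```python
import numpy as np
from scipy.stats import norm

log_init = np.log([0.8, 0.2])
log_trans = np.log([[0.7, 0.3], [0.2, 0.8]])
loc, scale = np.array([0.0, 15.0]), np.array([5.0, 10.0])
temps = np.array([-2.0, 0.0, 2.0, 4.0, 6.0, 8.0, 10.0])

obs_logp = norm.logpdf(temps[:, None], loc, scale)  # shape (num_steps, num_states)

log_prob = log_init + obs_logp[0]
backpointers = []
for step in range(1, len(temps)):
    scores = log_prob[:, None] + log_trans + obs_logp[step]
    backpointers.append(scores.argmax(axis=0))  # best predecessor per current state
    log_prob = scores.max(axis=0)

state = int(log_prob.argmax())
path = [state]
for bp in reversed(backpointers):
    state = int(bp[state])
    path.append(state)
print(path[::-1])  # expected: [0, 0, 0, 0, 0, 1, 1]
```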
def get_ga_client_id(self):
"""
Retrieve the client ID from the Google Analytics cookie, if available,
and save in the current session
"""
request = self.get_ga_request()
if not request or not hasattr(request, 'session'):
return super(GARequestErrorReportingMixin, self).get_ga_client_id()
if 'ga_client_id' not in request.session:
client_id = self.ga_cookie_re.match(request.COOKIES.get('_ga', ''))
client_id = client_id and client_id.group('cid') or str(uuid.uuid4())
request.session['ga_client_id'] = client_id
return request.session['ga_client_id'] | Retrieve the client ID from the Google Analytics cookie, if available,
and save in the current session | Below is the the instruction that describes the task:
### Input:
Retrieve the client ID from the Google Analytics cookie, if available,
and save in the current session
### Response:
def get_ga_client_id(self):
"""
Retrieve the client ID from the Google Analytics cookie, if available,
and save in the current session
"""
request = self.get_ga_request()
if not request or not hasattr(request, 'session'):
return super(GARequestErrorReportingMixin, self).get_ga_client_id()
if 'ga_client_id' not in request.session:
client_id = self.ga_cookie_re.match(request.COOKIES.get('_ga', ''))
client_id = client_id and client_id.group('cid') or str(uuid.uuid4())
request.session['ga_client_id'] = client_id
return request.session['ga_client_id'] |
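`ga_cookie_re` is not shown in this entry. For reference, the Universal Analytics `_ga` cookie typically looks like `GA1.2.<random>.<timestamp>`, with the client ID being the last two dot-separated fields; a stand-alone sketch of extracting it with a UUID fallback (the regex here is an assumption, not the mixin's actual pattern):

```python
import re
import uuid

ga_cookie_re = re.compile(r"^GA\d+\.\d+\.(?P<cid>\d+\.\d+)$")

def client_id_from_cookie(cookie_value):
    match = ga_cookie_re.match(cookie_value or "")
    return match.group("cid") if match else str(uuid.uuid4())

print(client_id_from_cookie("GA1.2.1234567890.1600000000"))  # 1234567890.1600000000
```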
def send_messages(self, messages):
"""Send one or more EmailMessage objects.
Returns:
int: Number of email messages sent.
"""
if not messages:
return
new_conn_created = self.open()
if not self.connection:
# We failed silently on open(). Trying to send would be pointless.
return
num_sent = 0
for message in messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent | Send one or more EmailMessage objects.
Returns:
int: Number of email messages sent. | Below is the the instruction that describes the task:
### Input:
Send one or more EmailMessage objects.
Returns:
int: Number of email messages sent.
### Response:
def send_messages(self, messages):
"""Send one or more EmailMessage objects.
Returns:
int: Number of email messages sent.
"""
if not messages:
return
new_conn_created = self.open()
if not self.connection:
# We failed silently on open(). Trying to send would be pointless.
return
num_sent = 0
for message in messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent |
def name_inner_event(cls):
"""Decorator to rename cls.Event 'Event' as 'cls.Event'"""
if hasattr(cls, 'Event'):
cls.Event._event_name = '{}.Event'.format(cls.__name__)
else:
        warnings.warn('Class {} does not have an inner Event'.format(cls))
return cls | Decorator to rename cls.Event 'Event' as 'cls.Event | Below is the the instruction that describes the task:
### Input:
Decorator to rename cls.Event 'Event' as 'cls.Event
### Response:
def name_inner_event(cls):
"""Decorator to rename cls.Event 'Event' as 'cls.Event'"""
if hasattr(cls, 'Event'):
cls.Event._event_name = '{}.Event'.format(cls.__name__)
else:
        warnings.warn('Class {} does not have an inner Event'.format(cls))
return cls |
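A minimal usage sketch of the decorator above (the class names are made up):

```python
@name_inner_event
class Door:
    class Event:
        pass

print(Door.Event._event_name)  # "Door.Event"
```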
def set(self, instance, value, **kwargs):
"""
Check if value is an actual date/time value. If not, attempt
to convert it to one; otherwise, set to None. Assign all
properties passed as kwargs to object.
"""
val = get_date(instance, value)
super(DateTimeField, self).set(instance, val, **kwargs) | Check if value is an actual date/time value. If not, attempt
to convert it to one; otherwise, set to None. Assign all
properties passed as kwargs to object. | Below is the the instruction that describes the task:
### Input:
Check if value is an actual date/time value. If not, attempt
to convert it to one; otherwise, set to None. Assign all
properties passed as kwargs to object.
### Response:
def set(self, instance, value, **kwargs):
"""
Check if value is an actual date/time value. If not, attempt
to convert it to one; otherwise, set to None. Assign all
properties passed as kwargs to object.
"""
val = get_date(instance, value)
super(DateTimeField, self).set(instance, val, **kwargs) |
def call(self, file_, mode='r', buffering=-1, encoding=None,
errors=None, newline=None, closefd=True, opener=None,
open_modes=None):
"""Return a file-like object with the contents of the target
file object.
Args:
file_: Path to target file or a file descriptor.
mode: Additional file modes (all modes in `open()` are supported).
buffering: ignored. (Used for signature compliance with
__builtin__.open)
encoding: The encoding used to encode unicode strings / decode
bytes.
errors: (str) Defines how encoding errors are handled.
newline: Controls universal newlines, passed to stream object.
closefd: If a file descriptor rather than file name is passed,
and this is set to `False`, then the file descriptor is kept
open when file is closed.
opener: not supported.
open_modes: Modes for opening files if called from low-level API.
Returns:
A file-like object containing the contents of the target file.
Raises:
IOError, OSError depending on Python version / call mode:
- if the target object is a directory
- on an invalid path
- if the file does not exist when it should
- if the file exists but should not
- if permission is denied
ValueError: for an invalid mode or mode combination
"""
binary = 'b' in mode
newline, open_modes = self._handle_file_mode(mode, newline, open_modes)
file_object, file_path, filedes, real_path = self._handle_file_arg(
file_)
if not filedes:
closefd = True
error_fct = (self.filesystem.raise_os_error if self.raw_io
else self.filesystem.raise_io_error)
if (open_modes.must_not_exist and
(file_object or self.filesystem.islink(file_path) and
not self.filesystem.is_windows_fs)):
error_fct(errno.EEXIST, file_path)
if file_object:
if (not is_root() and
((open_modes.can_read and
not file_object.st_mode & PERM_READ)
or (open_modes.can_write and
not file_object.st_mode & PERM_WRITE))):
error_fct(errno.EACCES, file_path)
if open_modes.can_write:
if open_modes.truncate:
file_object.set_contents('')
else:
if open_modes.must_exist:
error_fct(errno.ENOENT, file_path)
if self.filesystem.islink(file_path):
link_object = self.filesystem.resolve(file_path,
follow_symlinks=False)
target_path = link_object.contents
else:
target_path = file_path
if self.filesystem.ends_with_path_separator(target_path):
error = (errno.EINVAL if self.filesystem.is_windows_fs
else errno.ENOENT if self.filesystem.is_macos
else errno.EISDIR)
error_fct(error, file_path)
file_object = self.filesystem.create_file_internally(
real_path, create_missing_dirs=False,
apply_umask=True, raw_io=self.raw_io)
if S_ISDIR(file_object.st_mode):
if self.filesystem.is_windows_fs:
error_fct(errno.EACCES, file_path)
else:
error_fct(errno.EISDIR, file_path)
# If you print obj.name, the argument to open() must be printed.
# Not the abspath, not the filename, but the actual argument.
file_object.opened_as = file_path
if open_modes.truncate:
current_time = time.time()
file_object.st_mtime = current_time
if not self.filesystem.is_windows_fs:
file_object.st_ctime = current_time
fakefile = FakeFileWrapper(file_object,
file_path,
update=open_modes.can_write,
read=open_modes.can_read,
append=open_modes.append,
delete_on_close=self._delete_on_close,
filesystem=self.filesystem,
newline=newline,
binary=binary,
closefd=closefd,
encoding=encoding,
errors=errors,
raw_io=self.raw_io,
use_io=self._use_io)
if filedes is not None:
fakefile.filedes = filedes
# replace the file wrapper
self.filesystem.open_files[filedes].append(fakefile)
else:
fakefile.filedes = self.filesystem._add_open_file(fakefile)
return fakefile | Return a file-like object with the contents of the target
file object.
Args:
file_: Path to target file or a file descriptor.
mode: Additional file modes (all modes in `open()` are supported).
buffering: ignored. (Used for signature compliance with
__builtin__.open)
encoding: The encoding used to encode unicode strings / decode
bytes.
errors: (str) Defines how encoding errors are handled.
newline: Controls universal newlines, passed to stream object.
closefd: If a file descriptor rather than file name is passed,
and this is set to `False`, then the file descriptor is kept
open when file is closed.
opener: not supported.
open_modes: Modes for opening files if called from low-level API.
Returns:
A file-like object containing the contents of the target file.
Raises:
IOError, OSError depending on Python version / call mode:
- if the target object is a directory
- on an invalid path
- if the file does not exist when it should
- if the file exists but should not
- if permission is denied
ValueError: for an invalid mode or mode combination | Below is the the instruction that describes the task:
### Input:
Return a file-like object with the contents of the target
file object.
Args:
file_: Path to target file or a file descriptor.
mode: Additional file modes (all modes in `open()` are supported).
buffering: ignored. (Used for signature compliance with
__builtin__.open)
encoding: The encoding used to encode unicode strings / decode
bytes.
errors: (str) Defines how encoding errors are handled.
newline: Controls universal newlines, passed to stream object.
closefd: If a file descriptor rather than file name is passed,
and this is set to `False`, then the file descriptor is kept
open when file is closed.
opener: not supported.
open_modes: Modes for opening files if called from low-level API.
Returns:
A file-like object containing the contents of the target file.
Raises:
IOError, OSError depending on Python version / call mode:
- if the target object is a directory
- on an invalid path
- if the file does not exist when it should
- if the file exists but should not
- if permission is denied
ValueError: for an invalid mode or mode combination
### Response:
def call(self, file_, mode='r', buffering=-1, encoding=None,
errors=None, newline=None, closefd=True, opener=None,
open_modes=None):
"""Return a file-like object with the contents of the target
file object.
Args:
file_: Path to target file or a file descriptor.
mode: Additional file modes (all modes in `open()` are supported).
buffering: ignored. (Used for signature compliance with
__builtin__.open)
encoding: The encoding used to encode unicode strings / decode
bytes.
errors: (str) Defines how encoding errors are handled.
newline: Controls universal newlines, passed to stream object.
closefd: If a file descriptor rather than file name is passed,
and this is set to `False`, then the file descriptor is kept
open when file is closed.
opener: not supported.
open_modes: Modes for opening files if called from low-level API.
Returns:
A file-like object containing the contents of the target file.
Raises:
IOError, OSError depending on Python version / call mode:
- if the target object is a directory
- on an invalid path
- if the file does not exist when it should
- if the file exists but should not
- if permission is denied
ValueError: for an invalid mode or mode combination
"""
binary = 'b' in mode
newline, open_modes = self._handle_file_mode(mode, newline, open_modes)
file_object, file_path, filedes, real_path = self._handle_file_arg(
file_)
if not filedes:
closefd = True
error_fct = (self.filesystem.raise_os_error if self.raw_io
else self.filesystem.raise_io_error)
if (open_modes.must_not_exist and
(file_object or self.filesystem.islink(file_path) and
not self.filesystem.is_windows_fs)):
error_fct(errno.EEXIST, file_path)
if file_object:
if (not is_root() and
((open_modes.can_read and
not file_object.st_mode & PERM_READ)
or (open_modes.can_write and
not file_object.st_mode & PERM_WRITE))):
error_fct(errno.EACCES, file_path)
if open_modes.can_write:
if open_modes.truncate:
file_object.set_contents('')
else:
if open_modes.must_exist:
error_fct(errno.ENOENT, file_path)
if self.filesystem.islink(file_path):
link_object = self.filesystem.resolve(file_path,
follow_symlinks=False)
target_path = link_object.contents
else:
target_path = file_path
if self.filesystem.ends_with_path_separator(target_path):
error = (errno.EINVAL if self.filesystem.is_windows_fs
else errno.ENOENT if self.filesystem.is_macos
else errno.EISDIR)
error_fct(error, file_path)
file_object = self.filesystem.create_file_internally(
real_path, create_missing_dirs=False,
apply_umask=True, raw_io=self.raw_io)
if S_ISDIR(file_object.st_mode):
if self.filesystem.is_windows_fs:
error_fct(errno.EACCES, file_path)
else:
error_fct(errno.EISDIR, file_path)
# If you print obj.name, the argument to open() must be printed.
# Not the abspath, not the filename, but the actual argument.
file_object.opened_as = file_path
if open_modes.truncate:
current_time = time.time()
file_object.st_mtime = current_time
if not self.filesystem.is_windows_fs:
file_object.st_ctime = current_time
fakefile = FakeFileWrapper(file_object,
file_path,
update=open_modes.can_write,
read=open_modes.can_read,
append=open_modes.append,
delete_on_close=self._delete_on_close,
filesystem=self.filesystem,
newline=newline,
binary=binary,
closefd=closefd,
encoding=encoding,
errors=errors,
raw_io=self.raw_io,
use_io=self._use_io)
if filedes is not None:
fakefile.filedes = filedes
# replace the file wrapper
self.filesystem.open_files[filedes].append(fakefile)
else:
fakefile.filedes = self.filesystem._add_open_file(fakefile)
return fakefile |
def get_and_alter(self, function):
"""
Alters the currently stored value by applying a function on it and gets the old value.
:param function: (Function), A stateful serializable object which represents the Function defined on
server side.
This object must have a serializable Function counterpart registered on server side with the actual
``org.hazelcast.core.IFunction`` implementation.
:return: (long), the old value.
"""
check_not_none(function, "function can't be None")
return self._encode_invoke(atomic_long_get_and_alter_codec, function=self._to_data(function)) | Alters the currently stored value by applying a function on it and gets the old value.
:param function: (Function), A stateful serializable object which represents the Function defined on
server side.
This object must have a serializable Function counterpart registered on server side with the actual
``org.hazelcast.core.IFunction`` implementation.
:return: (long), the old value. | Below is the the instruction that describes the task:
### Input:
Alters the currently stored value by applying a function on it and gets the old value.
:param function: (Function), A stateful serializable object which represents the Function defined on
server side.
This object must have a serializable Function counterpart registered on server side with the actual
``org.hazelcast.core.IFunction`` implementation.
:return: (long), the old value.
### Response:
def get_and_alter(self, function):
"""
Alters the currently stored value by applying a function on it and gets the old value.
:param function: (Function), A stateful serializable object which represents the Function defined on
server side.
This object must have a serializable Function counterpart registered on server side with the actual
``org.hazelcast.core.IFunction`` implementation.
:return: (long), the old value.
"""
check_not_none(function, "function can't be None")
return self._encode_invoke(atomic_long_get_and_alter_codec, function=self._to_data(function)) |
def get_instance(self, payload):
"""
Build an instance of UserBindingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
"""
return UserBindingInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
user_sid=self._solution['user_sid'],
) | Build an instance of UserBindingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance | Below is the the instruction that describes the task:
### Input:
Build an instance of UserBindingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of UserBindingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
"""
return UserBindingInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
user_sid=self._solution['user_sid'],
) |
def login(self, username, password=None, email=None, registry=None,
reauth=False, dockercfg_path=None):
"""
Authenticate with a registry. Similar to the ``docker login`` command.
Args:
username (str): The registry username
password (str): The plaintext password
email (str): The email for the registry account
registry (str): URL to the registry. E.g.
``https://index.docker.io/v1/``
reauth (bool): Whether or not to refresh existing authentication on
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# If we don't have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
self._auth_configs = auth.load_config(
dockercfg_path, credstore_env=self.credstore_env
)
elif not self._auth_configs or self._auth_configs.is_empty:
self._auth_configs = auth.load_config(
credstore_env=self.credstore_env
)
authcfg = self._auth_configs.resolve_authconfig(registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
and not reauth:
return authcfg
req_data = {
'username': username,
'password': password,
'email': email,
'serveraddress': registry,
}
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
return self._result(response, json=True) | Authenticate with a registry. Similar to the ``docker login`` command.
Args:
username (str): The registry username
password (str): The plaintext password
email (str): The email for the registry account
registry (str): URL to the registry. E.g.
``https://index.docker.io/v1/``
reauth (bool): Whether or not to refresh existing authentication on
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | Below is the the instruction that describes the task:
### Input:
Authenticate with a registry. Similar to the ``docker login`` command.
Args:
username (str): The registry username
password (str): The plaintext password
email (str): The email for the registry account
registry (str): URL to the registry. E.g.
``https://index.docker.io/v1/``
reauth (bool): Whether or not to refresh existing authentication on
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
### Response:
def login(self, username, password=None, email=None, registry=None,
reauth=False, dockercfg_path=None):
"""
Authenticate with a registry. Similar to the ``docker login`` command.
Args:
username (str): The registry username
password (str): The plaintext password
email (str): The email for the registry account
registry (str): URL to the registry. E.g.
``https://index.docker.io/v1/``
reauth (bool): Whether or not to refresh existing authentication on
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# If we don't have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
self._auth_configs = auth.load_config(
dockercfg_path, credstore_env=self.credstore_env
)
elif not self._auth_configs or self._auth_configs.is_empty:
self._auth_configs = auth.load_config(
credstore_env=self.credstore_env
)
authcfg = self._auth_configs.resolve_authconfig(registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
and not reauth:
return authcfg
req_data = {
'username': username,
'password': password,
'email': email,
'serveraddress': registry,
}
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
return self._result(response, json=True) |
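For illustration, a minimal usage sketch of the login call above, assuming docker-py's low-level APIClient, a reachable Docker daemon, and placeholder credentials (the registry URL shown is Docker Hub's default index):

    import docker

    client = docker.APIClient(base_url='unix://var/run/docker.sock')
    # Credentials below are placeholders; the call returns the /auth response dict.
    result = client.login(username='someuser', password='secret',
                          registry='https://index.docker.io/v1/')
    print(result.get('Status'))  # typically 'Login Succeeded' on success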
def extract_exposure_metadata(dstore, what):
"""
Extract the loss categories and the tags of the exposure.
Use it as /extract/exposure_metadata
"""
dic = {}
dic1, dic2 = dstore['assetcol/tagcol'].__toh5__()
dic.update(dic1)
dic.update(dic2)
if 'asset_risk' in dstore:
dic['multi_risk'] = sorted(
set(dstore['asset_risk'].dtype.names) -
set(dstore['assetcol/array'].dtype.names))
names = [name for name in dstore['assetcol/array'].dtype.names
if name.startswith(('value-', 'number', 'occupants_'))
and not name.endswith('_None')]
return ArrayWrapper(numpy.array(names), dic) | Extract the loss categories and the tags of the exposure.
Use it as /extract/exposure_metadata | Below is the the instruction that describes the task:
### Input:
Extract the loss categories and the tags of the exposure.
Use it as /extract/exposure_metadata
### Response:
def extract_exposure_metadata(dstore, what):
"""
Extract the loss categories and the tags of the exposure.
Use it as /extract/exposure_metadata
"""
dic = {}
dic1, dic2 = dstore['assetcol/tagcol'].__toh5__()
dic.update(dic1)
dic.update(dic2)
if 'asset_risk' in dstore:
dic['multi_risk'] = sorted(
set(dstore['asset_risk'].dtype.names) -
set(dstore['assetcol/array'].dtype.names))
names = [name for name in dstore['assetcol/array'].dtype.names
if name.startswith(('value-', 'number', 'occupants_'))
and not name.endswith('_None')]
return ArrayWrapper(numpy.array(names), dic) |
def show_bare_metal_state_output_bare_metal_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_bare_metal_state = ET.Element("show_bare_metal_state")
config = show_bare_metal_state
output = ET.SubElement(show_bare_metal_state, "output")
bare_metal_state = ET.SubElement(output, "bare-metal-state")
bare_metal_state.text = kwargs.pop('bare_metal_state')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_bare_metal_state_output_bare_metal_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_bare_metal_state = ET.Element("show_bare_metal_state")
config = show_bare_metal_state
output = ET.SubElement(show_bare_metal_state, "output")
bare_metal_state = ET.SubElement(output, "bare-metal-state")
bare_metal_state.text = kwargs.pop('bare_metal_state')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def subject_areas(self):
"""List of tuples of author subject areas in the form
(area, frequency, abbreviation, code), where frequency is the
number of publications in this subject area.
"""
areas = self.xml.findall('subject-areas/subject-area')
freqs = self.xml.findall('author-profile/classificationgroup/'
'classifications[@type="ASJC"]/classification')
c = {int(cls.text): int(cls.attrib['frequency']) for cls in freqs}
cats = [(a.text, c[int(a.get("code"))], a.get("abbrev"), a.get("code"))
for a in areas]
cats.sort(reverse=True, key=itemgetter(1))
return cats | List of tuples of author subject areas in the form
(area, frequency, abbreviation, code), where frequency is the
number of publications in this subject area. | Below is the the instruction that describes the task:
### Input:
List of tuples of author subject areas in the form
(area, frequency, abbreviation, code), where frequency is the
number of publications in this subject area.
### Response:
def subject_areas(self):
"""List of tuples of author subject areas in the form
(area, frequency, abbreviation, code), where frequency is the
number of publications in this subject area.
"""
areas = self.xml.findall('subject-areas/subject-area')
freqs = self.xml.findall('author-profile/classificationgroup/'
'classifications[@type="ASJC"]/classification')
c = {int(cls.text): int(cls.attrib['frequency']) for cls in freqs}
cats = [(a.text, c[int(a.get("code"))], a.get("abbrev"), a.get("code"))
for a in areas]
cats.sort(reverse=True, key=itemgetter(1))
return cats |
def hcenter_blit(target, source, dest = (0, 0), area=None, special_flags=0):
'''
The same as center_blit(), but only centers horizontally.
'''
loc = lambda d, s: (_vec(d.get_width() / 2, 0) -
_vec(s.get_width() / 2, 0))
_blitter(loc, target, source, dest, area, special_flags) | The same as center_blit(), but only centers horizontally. | Below is the the instruction that describes the task:
### Input:
The same as center_blit(), but only centers horizontally.
### Response:
def hcenter_blit(target, source, dest = (0, 0), area=None, special_flags=0):
'''
The same as center_blit(), but only centers horizontally.
'''
loc = lambda d, s: (_vec(d.get_width() / 2, 0) -
_vec(s.get_width() / 2, 0))
_blitter(loc, target, source, dest, area, special_flags) |
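A short, hypothetical pygame usage sketch of the helper above (the display surface, font, and the assumption that hcenter_blit is importable from this module are all illustrative):

    import pygame

    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    label = pygame.font.SysFont(None, 36).render("Centered", True, (255, 255, 255))
    # Horizontally center the label on the screen; dest acts as an additional offset here.
    hcenter_blit(screen, label, dest=(0, 40))
    pygame.display.flip()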
def gmv(a, b):
"""Geometric mean variance
"""
return np.exp(np.square(np.log(a) - np.log(b)).mean()) | Geometric mean variance | Below is the the instruction that describes the task:
### Input:
Geometric mean variance
### Response:
def gmv(a, b):
"""Geometric mean variance
"""
return np.exp(np.square(np.log(a) - np.log(b)).mean()) |
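A quick worked example of the formula above, exp(mean((ln a - ln b)^2)), with small arrays:

    import numpy as np

    a = np.array([1.0, 2.0, 4.0])
    b = np.array([1.0, 1.0, 1.0])
    squared_log_ratios = np.square(np.log(a) - np.log(b))   # [0.0, 0.4805, 1.9218]
    print(np.exp(squared_log_ratios.mean()))                 # exp(0.8008) ~= 2.23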
def setrange(self, name, offset, value):
"""
Overwrite bytes in the value of ``name`` starting at ``offset`` with
``value``. If ``offset`` plus the length of ``value`` exceeds the
length of the original value, the new value will be larger
than before.
If ``offset`` exceeds the length of the original value, null bytes
will be used to pad between the end of the previous value and the start
of what's being injected.
Returns the length of the new string.
:param name: str the name of the redis key
:param offset: int
:param value: str
:return: Future()
"""
with self.pipe as pipe:
return pipe.setrange(self.redis_key(name), offset, value) | Overwrite bytes in the value of ``name`` starting at ``offset`` with
``value``. If ``offset`` plus the length of ``value`` exceeds the
length of the original value, the new value will be larger
than before.
If ``offset`` exceeds the length of the original value, null bytes
will be used to pad between the end of the previous value and the start
of what's being injected.
Returns the length of the new string.
:param name: str the name of the redis key
:param offset: int
:param value: str
:return: Future() | Below is the the instruction that describes the task:
### Input:
Overwrite bytes in the value of ``name`` starting at ``offset`` with
``value``. If ``offset`` plus the length of ``value`` exceeds the
length of the original value, the new value will be larger
than before.
If ``offset`` exceeds the length of the original value, null bytes
will be used to pad between the end of the previous value and the start
of what's being injected.
Returns the length of the new string.
:param name: str the name of the redis key
:param offset: int
:param value: str
:return: Future()
### Response:
def setrange(self, name, offset, value):
"""
Overwrite bytes in the value of ``name`` starting at ``offset`` with
``value``. If ``offset`` plus the length of ``value`` exceeds the
length of the original value, the new value will be larger
than before.
If ``offset`` exceeds the length of the original value, null bytes
will be used to pad between the end of the previous value and the start
of what's being injected.
Returns the length of the new string.
:param name: str the name of the redis key
:param offset: int
:param value: str
:return: Future()
"""
with self.pipe as pipe:
return pipe.setrange(self.redis_key(name), offset, value) |
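For context, an illustration of the underlying Redis SETRANGE semantics this wrapper delegates to, using plain redis-py and made-up key names:

    import redis

    r = redis.Redis()
    r.set('greeting', 'Hello World')
    r.setrange('greeting', 6, 'Redis')   # returns the new length, 11
    print(r.get('greeting'))             # b'Hello Redis'
    r.setrange('sparse', 5, 'x')         # missing key: padded with null bytes up to the offset
    print(r.get('sparse'))               # b'\x00\x00\x00\x00\x00x'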
def get_ceph_df(self, sentry_unit):
"""Return dict of ceph df json output, including ceph pool state.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:returns: Dict of ceph df output
"""
cmd = 'sudo ceph df --format=json'
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return json.loads(output) | Return dict of ceph df json output, including ceph pool state.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:returns: Dict of ceph df output | Below is the the instruction that describes the task:
### Input:
Return dict of ceph df json output, including ceph pool state.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:returns: Dict of ceph df output
### Response:
def get_ceph_df(self, sentry_unit):
"""Return dict of ceph df json output, including ceph pool state.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:returns: Dict of ceph df output
"""
cmd = 'sudo ceph df --format=json'
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return json.loads(output) |
def data(self):
"""return stored data
Returns:
unpickled data
"""
try:
bytestream = super(MimeData, self).data(self._mimeType).data()
return pickle.loads(bytestream)
except:
raise | return stored data
Returns:
unpickled data | Below is the the instruction that describes the task:
### Input:
return stored data
Returns:
unpickled data
### Response:
def data(self):
"""return stored data
Returns:
unpickled data
"""
try:
bytestream = super(MimeData, self).data(self._mimeType).data()
return pickle.loads(bytestream)
except:
raise |
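The essential pattern here is a pickle round-trip through a byte stream; a Qt-free sketch with a made-up payload:

    import pickle

    payload = {'rows': [1, 2, 3], 'label': 'selection'}   # hypothetical payload
    bytestream = pickle.dumps(payload)                    # what would be stored under the MIME type
    restored = pickle.loads(bytestream)                   # what data() hands back
    assert restored == payload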
def handle_exception (self):
"""
An exception occurred. Log it and set the cache flag.
"""
etype, evalue = sys.exc_info()[:2]
log.debug(LOG_CHECK, "Error in %s: %s %s", self.url, etype, evalue, exception=True)
# note: etype must be the exact class, not a subclass
if (etype in ExcNoCacheList) or \
(etype == socket.error and evalue.args[0]==errno.EBADF) or \
not evalue:
# EBADF occurs when operating on an already closed socket
self.caching = False
# format unicode message "<exception name>: <error message>"
errmsg = unicode(etype.__name__)
uvalue = strformat.unicode_safe(evalue)
if uvalue:
errmsg += u": %s" % uvalue
# limit length to 240
return strformat.limit(errmsg, length=240) | An exception occurred. Log it and set the cache flag. | Below is the the instruction that describes the task:
### Input:
An exception occurred. Log it and set the cache flag.
### Response:
def handle_exception (self):
"""
An exception occurred. Log it and set the cache flag.
"""
etype, evalue = sys.exc_info()[:2]
log.debug(LOG_CHECK, "Error in %s: %s %s", self.url, etype, evalue, exception=True)
# note: etype must be the exact class, not a subclass
if (etype in ExcNoCacheList) or \
(etype == socket.error and evalue.args[0]==errno.EBADF) or \
not evalue:
# EBADF occurs when operating on an already closed socket
self.caching = False
# format unicode message "<exception name>: <error message>"
errmsg = unicode(etype.__name__)
uvalue = strformat.unicode_safe(evalue)
if uvalue:
errmsg += u": %s" % uvalue
# limit length to 240
return strformat.limit(errmsg, length=240) |
def run(toolkit_name, options, verbose=True, show_progress=False):
"""
Internal function to execute toolkit on the turicreate server.
Parameters
----------
toolkit_name : string
The name of the toolkit.
options : dict
A map containing the required input for the toolkit function,
for example: {'graph': g, 'reset_prob': 0.15}.
verbose : bool
If true, enable progress log from server.
show_progress : bool
If true, display progress plot.
Returns
-------
out : dict
The toolkit specific model parameters.
Raises
------
RuntimeError
Raises RuntimeError if the server fails executing the toolkit.
"""
unity = glconnect.get_unity()
if (not verbose):
glconnect.get_server().set_log_progress(False)
(success, message, params) = unity.run_toolkit(toolkit_name, options)
if (len(message) > 0):
logging.getLogger(__name__).error("Toolkit error: " + message)
# set the verbose level back to default
glconnect.get_server().set_log_progress(True)
if success:
return params
else:
raise ToolkitError(str(message)) | Internal function to execute toolkit on the turicreate server.
Parameters
----------
toolkit_name : string
The name of the toolkit.
options : dict
A map containing the required input for the toolkit function,
for example: {'graph': g, 'reset_prob': 0.15}.
verbose : bool
If true, enable progress log from server.
show_progress : bool
If true, display progress plot.
Returns
-------
out : dict
The toolkit specific model parameters.
Raises
------
RuntimeError
Raises RuntimeError if the server fails executing the toolkit.
### Input:
Internal function to execute toolkit on the turicreate server.
Parameters
----------
toolkit_name : string
The name of the toolkit.
options : dict
A map containing the required input for the toolkit function,
for example: {'graph': g, 'reset_prob': 0.15}.
verbose : bool
If true, enable progress log from server.
show_progress : bool
If true, display progress plot.
Returns
-------
out : dict
The toolkit specific model parameters.
Raises
------
RuntimeError
Raises RuntimeError if the server fails executing the toolkit.
### Response:
def run(toolkit_name, options, verbose=True, show_progress=False):
"""
Internal function to execute toolkit on the turicreate server.
Parameters
----------
toolkit_name : string
The name of the toolkit.
options : dict
A map containing the required input for the toolkit function,
for example: {'graph': g, 'reset_prob': 0.15}.
verbose : bool
If true, enable progress log from server.
show_progress : bool
If true, display progress plot.
Returns
-------
out : dict
The toolkit specific model parameters.
Raises
------
RuntimeError
Raises RuntimeError if the server fails executing the toolkit.
"""
unity = glconnect.get_unity()
if (not verbose):
glconnect.get_server().set_log_progress(False)
(success, message, params) = unity.run_toolkit(toolkit_name, options)
if (len(message) > 0):
logging.getLogger(__name__).error("Toolkit error: " + message)
# set the verbose level back to default
glconnect.get_server().set_log_progress(True)
if success:
return params
else:
raise ToolkitError(str(message)) |
def simple_paths_by_name(self, start_name, end_name):
"""Return a list of paths between start and end functions.
"""
cfg_start = self.find_function_by_name(start_name)
cfg_end = self.find_function_by_name(end_name)
if not cfg_start or not cfg_end:
raise Exception("Start/End function not found.")
start_address = cfg_start.start_address
end_address = cfg_end.start_address
paths = networkx.all_simple_paths(self._graph, source=start_address, target=end_address)
return ([self._cfg_by_addr[addr] for addr in path] for path in paths) | Return a list of paths between start and end functions. | Below is the the instruction that describes the task:
### Input:
Return a list of paths between start and end functions.
### Response:
def simple_paths_by_name(self, start_name, end_name):
"""Return a list of paths between start and end functions.
"""
cfg_start = self.find_function_by_name(start_name)
cfg_end = self.find_function_by_name(end_name)
if not cfg_start or not cfg_end:
raise Exception("Start/End function not found.")
start_address = cfg_start.start_address
end_address = cfg_end.start_address
paths = networkx.all_simple_paths(self._graph, source=start_address, target=end_address)
return ([self._cfg_by_addr[addr] for addr in path] for path in paths) |
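A sketch of the networkx call this method builds on, using a toy directed graph of made-up start addresses:

    import networkx as nx

    g = nx.DiGraph()
    g.add_edges_from([(0x400000, 0x400100), (0x400100, 0x400200), (0x400000, 0x400200)])
    # Every simple path from the "start" address to the "end" address.
    for path in nx.all_simple_paths(g, source=0x400000, target=0x400200):
        print([hex(addr) for addr in path])
    # ['0x400000', '0x400100', '0x400200'] and ['0x400000', '0x400200']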
def validate_config(self, values, argv=None, strict=False):
"""Validate all config values through the command-line parser.
This takes all supplied options (which could have been retrieved from a
number of sources, such as CLI, env vars, etc.) and then validates
them by running them through argparse (and raises SystemExit on
failure).
:returns dict: key/values for all config values (from all sources)
:raises: SystemExit
"""
options = []
for option in self._options:
kwargs = option.kwargs.copy()
if option.name in values:
if 'default' in kwargs:
# Since we're overriding defaults, we need to
# preserve the default value for the help text:
help_text = kwargs.get('help')
if help_text:
if '(default: ' not in help_text:
kwargs['help'] = '%s (default: %s)' % (
help_text, kwargs['default']
)
kwargs['default'] = values[option.name]
kwargs['required'] = False # since we have a value
temp = Option(*option.args, **kwargs)
options.append(temp)
parser = self.build_parser(options,
formatter_class=argparse.HelpFormatter)
if argv:
parsed, extras = parser.parse_known_args(argv[1:])
if extras:
valid, _ = self.parse_passthru_args(argv[1:])
parsed, extras = parser.parse_known_args(valid)
if extras and strict: # still
self.build_parser(options)
parser.parse_args(argv[1:])
else:
parsed = parser.parse_args([])
results = vars(parsed)
raise_for_group = {}
for option in self._options:
if option.kwargs.get('required'):
if option.dest not in results or results[option.dest] is None:
if getattr(option, '_mutexgroup', None):
raise_for_group.setdefault(option._mutexgroup, [])
raise_for_group[option._mutexgroup].append(
option._action)
else:
raise SystemExit("'%s' is required. See --help "
"for more info." % option.name)
else:
if getattr(option, '_mutexgroup', None):
raise_for_group.pop(option._mutexgroup, None)
if raise_for_group:
optstrings = [str(k.option_strings)
for k in raise_for_group.values()[0]]
msg = "One of %s required. " % " ,".join(optstrings)
raise SystemExit(msg + "See --help for more info.")
return results | Validate all config values through the command-line parser.
This takes all supplied options (which could have been retrieved from a
number of sources, such as CLI, env vars, etc.) and then validates
them by running them through argparse (and raises SystemExit on
failure).
:returns dict: key/values for all config values (from all sources)
:raises: SystemExit | Below is the the instruction that describes the task:
### Input:
Validate all config values through the command-line parser.
This takes all supplied options (which could have been retrieved from a
number of sources, such as CLI, env vars, etc.) and then validates
them by running them through argparse (and raises SystemExit on
failure).
:returns dict: key/values for all config values (from all sources)
:raises: SystemExit
### Response:
def validate_config(self, values, argv=None, strict=False):
"""Validate all config values through the command-line parser.
This takes all supplied options (which could have been retrieved from a
number of sources, such as CLI, env vars, etc.) and then validates
them by running them through argparse (and raises SystemExit on
failure).
:returns dict: key/values for all config values (from all sources)
:raises: SystemExit
"""
options = []
for option in self._options:
kwargs = option.kwargs.copy()
if option.name in values:
if 'default' in kwargs:
# Since we're overriding defaults, we need to
# preserve the default value for the help text:
help_text = kwargs.get('help')
if help_text:
if '(default: ' not in help_text:
kwargs['help'] = '%s (default: %s)' % (
help_text, kwargs['default']
)
kwargs['default'] = values[option.name]
kwargs['required'] = False # since we have a value
temp = Option(*option.args, **kwargs)
options.append(temp)
parser = self.build_parser(options,
formatter_class=argparse.HelpFormatter)
if argv:
parsed, extras = parser.parse_known_args(argv[1:])
if extras:
valid, _ = self.parse_passthru_args(argv[1:])
parsed, extras = parser.parse_known_args(valid)
if extras and strict: # still
self.build_parser(options)
parser.parse_args(argv[1:])
else:
parsed = parser.parse_args([])
results = vars(parsed)
raise_for_group = {}
for option in self._options:
if option.kwargs.get('required'):
if option.dest not in results or results[option.dest] is None:
if getattr(option, '_mutexgroup', None):
raise_for_group.setdefault(option._mutexgroup, [])
raise_for_group[option._mutexgroup].append(
option._action)
else:
raise SystemExit("'%s' is required. See --help "
"for more info." % option.name)
else:
if getattr(option, '_mutexgroup', None):
raise_for_group.pop(option._mutexgroup, None)
if raise_for_group:
optstrings = [str(k.option_strings)
for k in raise_for_group.values()[0]]
msg = "One of %s required. " % " ,".join(optstrings)
raise SystemExit(msg + "See --help for more info.")
return results |
def write_text(_command, txt_file):
"""Dump SQL command to a text file."""
command = _command.strip()
with open(txt_file, 'w') as txt:
txt.writelines(command) | Dump SQL command to a text file. | Below is the the instruction that describes the task:
### Input:
Dump SQL command to a text file.
### Response:
def write_text(_command, txt_file):
"""Dump SQL command to a text file."""
command = _command.strip()
with open(txt_file, 'w') as txt:
txt.writelines(command) |
def allowed(self):
'''
Check to see if the pop request is allowed
@return: True means the maximum has not been reached for the current
time window, thus allowing whatever operation follows
'''
# Expire old keys (hits)
expires = time.time() - self.window
self.redis_conn.zremrangebyscore(self.window_key, '-inf', expires)
# check if we are hitting too fast for moderation
if self.moderation:
with self.redis_conn.pipeline() as pipe:
try:
pipe.watch(self.moderate_key) # ---- LOCK
# from this point onward if no errors are raised we
# successfully incremented the counter
curr_time = time.time()
if self.is_moderated(curr_time, pipe) and not \
self.check_elastic():
return False
# passed the moderation limit, now check time window
# If we have less keys than max, update out moderate key
if self.test_hits():
# this is a valid transaction, set the new time
pipe.multi()
pipe.set(name=self.moderate_key,
value=str(curr_time),
ex=int(self.window * 2))
pipe.execute()
return True
except WatchError:
# watch was changed, another thread just incremented
# the value
return False
# If we currently have more keys than max,
# then limit the action
else:
return self.test_hits()
return False | Check to see if the pop request is allowed
@return: True means the maximum has not been reached for the current
time window, thus allowing whatever operation follows | Below is the the instruction that describes the task:
### Input:
Check to see if the pop request is allowed
@return: True means the maximum has not been reached for the current
time window, thus allowing whatever operation follows
### Response:
def allowed(self):
'''
Check to see if the pop request is allowed
@return: True means the maximum has not been reached for the current
time window, thus allowing whatever operation follows
'''
# Expire old keys (hits)
expires = time.time() - self.window
self.redis_conn.zremrangebyscore(self.window_key, '-inf', expires)
# check if we are hitting too fast for moderation
if self.moderation:
with self.redis_conn.pipeline() as pipe:
try:
pipe.watch(self.moderate_key) # ---- LOCK
# from this point onward if no errors are raised we
# successfully incremented the counter
curr_time = time.time()
if self.is_moderated(curr_time, pipe) and not \
self.check_elastic():
return False
# passed the moderation limit, now check time window
# If we have less keys than max, update out moderate key
if self.test_hits():
# this is a valid transaction, set the new time
pipe.multi()
pipe.set(name=self.moderate_key,
value=str(curr_time),
ex=int(self.window * 2))
pipe.execute()
return True
except WatchError:
# watch was changed, another thread just incremented
# the value
return False
# If we currently have more keys than max,
# then limit the action
else:
return self.test_hits()
return False |
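A stripped-down sketch of the sliding-window check above, with the moderation and elastic logic omitted; the key name and limit are hypothetical and the redis-py 3.x zadd signature is assumed. Note this simplified version is not race-free, which is what the WATCH/MULTI block in the original guards against:

    import time
    import redis

    r = redis.Redis()
    WINDOW_KEY, WINDOW_SECONDS, MAX_HITS = 'throttle:example', 60, 10

    def allowed_hit():
        now = time.time()
        r.zremrangebyscore(WINDOW_KEY, '-inf', now - WINDOW_SECONDS)  # drop expired hits
        if r.zcard(WINDOW_KEY) < MAX_HITS:
            r.zadd(WINDOW_KEY, {str(now): now})                       # record this hit
            return True
        return False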
def build_event_handler(self, runnable, regime, event_handler):
"""
Build event handler code.
@param event_handler: Event handler object
@type event_handler: lems.model.dynamics.EventHandler
@return: Generated event handler code.
@rtype: list(string)
"""
if isinstance(event_handler, OnCondition):
return self.build_on_condition(runnable, regime, event_handler)
elif isinstance(event_handler, OnEvent):
return self.build_on_event(runnable, regime, event_handler)
elif isinstance(event_handler, OnStart):
return self.build_on_start(runnable, regime, event_handler)
elif isinstance(event_handler, OnEntry):
return self.build_on_entry(runnable, regime, event_handler)
else:
return [] | Build event handler code.
@param event_handler: Event handler object
@type event_handler: lems.model.dynamics.EventHandler
@return: Generated event handler code.
@rtype: list(string) | Below is the the instruction that describes the task:
### Input:
Build event handler code.
@param event_handler: Event handler object
@type event_handler: lems.model.dynamics.EventHandler
@return: Generated event handler code.
@rtype: list(string)
### Response:
def build_event_handler(self, runnable, regime, event_handler):
"""
Build event handler code.
@param event_handler: Event handler object
@type event_handler: lems.model.dynamics.EventHandler
@return: Generated event handler code.
@rtype: list(string)
"""
if isinstance(event_handler, OnCondition):
return self.build_on_condition(runnable, regime, event_handler)
elif isinstance(event_handler, OnEvent):
return self.build_on_event(runnable, regime, event_handler)
elif isinstance(event_handler, OnStart):
return self.build_on_start(runnable, regime, event_handler)
elif isinstance(event_handler, OnEntry):
return self.build_on_entry(runnable, regime, event_handler)
else:
return [] |
def _distance(self, x0, y0, x1, y1):
"""Utitlity function to compute distance between points."""
dx = x1-x0
dy = y1-y0
# roll displacements across the borders
if self.pix:
dx[ dx > self.Lx/2 ] -= self.Lx
dx[ dx < -self.Lx/2 ] += self.Lx
if self.piy:
dy[ dy > self.Ly/2 ] -= self.Ly
dy[ dy < -self.Ly/2 ] += self.Ly
return dx, dy | Utility function to compute distance between points. | Below is the the instruction that describes the task:
### Input:
Utility function to compute distance between points.
### Response:
def _distance(self, x0, y0, x1, y1):
"""Utitlity function to compute distance between points."""
dx = x1-x0
dy = y1-y0
# roll displacements across the borders
if self.pix:
dx[ dx > self.Lx/2 ] -= self.Lx
dx[ dx < -self.Lx/2 ] += self.Lx
if self.piy:
dy[ dy > self.Ly/2 ] -= self.Ly
dy[ dy < -self.Ly/2 ] += self.Ly
return dx, dy |
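A quick numpy illustration of the periodic wrap applied above (minimum-image style), with a box width of Lx = 10:

    import numpy as np

    Lx = 10.0
    dx = np.array([9.0, -8.5, 2.0])   # raw displacements across a periodic box
    dx[dx > Lx / 2] -= Lx
    dx[dx < -Lx / 2] += Lx
    print(dx)                          # [-1.   1.5  2. ]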
def predict_density(self, Xnew, Ynew):
"""
Compute the (log) density of the data Ynew at the points Xnew
Note that this computes the log density of the data individually,
ignoring correlations between them. The result is a matrix the same
shape as Ynew containing the log densities.
"""
pred_f_mean, pred_f_var = self._build_predict(Xnew)
return self.likelihood.predict_density(pred_f_mean, pred_f_var, Ynew) | Compute the (log) density of the data Ynew at the points Xnew
Note that this computes the log density of the data individually,
ignoring correlations between them. The result is a matrix the same
shape as Ynew containing the log densities. | Below is the the instruction that describes the task:
### Input:
Compute the (log) density of the data Ynew at the points Xnew
Note that this computes the log density of the data individually,
ignoring correlations between them. The result is a matrix the same
shape as Ynew containing the log densities.
### Response:
def predict_density(self, Xnew, Ynew):
"""
Compute the (log) density of the data Ynew at the points Xnew
Note that this computes the log density of the data individually,
ignoring correlations between them. The result is a matrix the same
shape as Ynew containing the log densities.
"""
pred_f_mean, pred_f_var = self._build_predict(Xnew)
return self.likelihood.predict_density(pred_f_mean, pred_f_var, Ynew) |
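For a Gaussian likelihood, the per-point quantity reduces to a normal log-density evaluated at the predictive mean and variance (with the noise variance folded into the variance); a small numpy illustration with made-up numbers:

    import numpy as np

    def gaussian_log_density(y, mean, var):
        return -0.5 * np.log(2 * np.pi * var) - 0.5 * (y - mean) ** 2 / var

    print(gaussian_log_density(y=1.2, mean=1.0, var=0.25))   # ~ -0.306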
def variant_case(store, case_obj, variant_obj):
"""Pre-process case for the variant view.
Adds information about files from case obj to variant
Args:
store(scout.adapter.MongoAdapter)
case_obj(scout.models.Case)
variant_obj(scout.models.Variant)
"""
case_obj['bam_files'] = []
case_obj['mt_bams'] = []
case_obj['bai_files'] = []
case_obj['mt_bais'] = []
case_obj['sample_names'] = []
for individual in case_obj['individuals']:
bam_path = individual.get('bam_file')
mt_bam = individual.get('mt_bam')
case_obj['sample_names'].append(individual.get('display_name'))
if bam_path and os.path.exists(bam_path):
case_obj['bam_files'].append(individual['bam_file'])
case_obj['bai_files'].append(find_bai_file(individual['bam_file']))
if mt_bam and os.path.exists(mt_bam):
case_obj['mt_bams'].append(individual['mt_bam'])
case_obj['mt_bais'].append(find_bai_file(individual['mt_bam']))
else:
LOG.debug("%s: no bam file found", individual['individual_id'])
try:
genes = variant_obj.get('genes', [])
if len(genes) == 1:
hgnc_gene_obj = store.hgnc_gene(variant_obj['genes'][0]['hgnc_id'])
if hgnc_gene_obj:
vcf_path = store.get_region_vcf(case_obj, gene_obj=hgnc_gene_obj)
case_obj['region_vcf_file'] = vcf_path
else:
case_obj['region_vcf_file'] = None
elif len(genes) > 1:
chrom = variant_obj['genes'][0]['common']['chromosome']
start = min(gene['common']['start'] for gene in variant_obj['genes'])
end = max(gene['common']['end'] for gene in variant_obj['genes'])
# Create a reduced VCF with variants in the region
vcf_path = store.get_region_vcf(case_obj, chrom=chrom, start=start, end=end)
case_obj['region_vcf_file'] = vcf_path
except (SyntaxError, Exception):
LOG.warning("skip VCF region for alignment view") | Pre-process case for the variant view.
Adds information about files from case obj to variant
Args:
store(scout.adapter.MongoAdapter)
case_obj(scout.models.Case)
variant_obj(scout.models.Variant) | Below is the the instruction that describes the task:
### Input:
Pre-process case for the variant view.
Adds information about files from case obj to variant
Args:
store(scout.adapter.MongoAdapter)
case_obj(scout.models.Case)
variant_obj(scout.models.Variant)
### Response:
def variant_case(store, case_obj, variant_obj):
"""Pre-process case for the variant view.
Adds information about files from case obj to variant
Args:
store(scout.adapter.MongoAdapter)
case_obj(scout.models.Case)
variant_obj(scout.models.Variant)
"""
case_obj['bam_files'] = []
case_obj['mt_bams'] = []
case_obj['bai_files'] = []
case_obj['mt_bais'] = []
case_obj['sample_names'] = []
for individual in case_obj['individuals']:
bam_path = individual.get('bam_file')
mt_bam = individual.get('mt_bam')
case_obj['sample_names'].append(individual.get('display_name'))
if bam_path and os.path.exists(bam_path):
case_obj['bam_files'].append(individual['bam_file'])
case_obj['bai_files'].append(find_bai_file(individual['bam_file']))
if mt_bam and os.path.exists(mt_bam):
case_obj['mt_bams'].append(individual['mt_bam'])
case_obj['mt_bais'].append(find_bai_file(individual['mt_bam']))
else:
LOG.debug("%s: no bam file found", individual['individual_id'])
try:
genes = variant_obj.get('genes', [])
if len(genes) == 1:
hgnc_gene_obj = store.hgnc_gene(variant_obj['genes'][0]['hgnc_id'])
if hgnc_gene_obj:
vcf_path = store.get_region_vcf(case_obj, gene_obj=hgnc_gene_obj)
case_obj['region_vcf_file'] = vcf_path
else:
case_obj['region_vcf_file'] = None
elif len(genes) > 1:
chrom = variant_obj['genes'][0]['common']['chromosome']
start = min(gene['common']['start'] for gene in variant_obj['genes'])
end = max(gene['common']['end'] for gene in variant_obj['genes'])
# Create a reduced VCF with variants in the region
vcf_path = store.get_region_vcf(case_obj, chrom=chrom, start=start, end=end)
case_obj['region_vcf_file'] = vcf_path
except (SyntaxError, Exception):
LOG.warning("skip VCF region for alignment view") |
def main(argv=None):
"""Run Tika from command line according to USAGE."""
global Verbose
global EncodeUtf8
global csvOutput
if argv is None:
argv = sys.argv
if (len(argv) < 3 and not (('-h' in argv) or ('--help' in argv))):
log.exception('Bad args')
raise TikaException('Bad args')
try:
opts, argv = getopt.getopt(argv[1:], 'hi:s:o:p:v:e:c',
['help', 'install=', 'server=', 'output=', 'port=', 'verbose', 'encode', 'csv'])
except getopt.GetoptError as opt_error:
msg, bad_opt = opt_error
log.exception("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
raise TikaException("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
tikaServerJar = TikaServerJar
serverHost = ServerHost
outDir = '.'
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('--install'): tikaServerJar = val
elif opt in ('--server'): serverHost = val
elif opt in ('-o', '--output'): outDir = val
elif opt in ('--port'): port = val
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-e', '--encode'): EncodeUtf8 = 1
elif opt in ('-c', '--csv'): csvOutput = 1
else:
raise TikaException(USAGE)
cmd = argv[0]
option = argv[1]
try:
paths = argv[2:]
except:
paths = None
return runCommand(cmd, option, paths, port, outDir, serverHost=serverHost, tikaServerJar=tikaServerJar, verbose=Verbose, encode=EncodeUtf8) | Run Tika from command line according to USAGE. | Below is the the instruction that describes the task:
### Input:
Run Tika from command line according to USAGE.
### Response:
def main(argv=None):
"""Run Tika from command line according to USAGE."""
global Verbose
global EncodeUtf8
global csvOutput
if argv is None:
argv = sys.argv
if (len(argv) < 3 and not (('-h' in argv) or ('--help' in argv))):
log.exception('Bad args')
raise TikaException('Bad args')
try:
opts, argv = getopt.getopt(argv[1:], 'hi:s:o:p:v:e:c',
['help', 'install=', 'server=', 'output=', 'port=', 'verbose', 'encode', 'csv'])
except getopt.GetoptError as opt_error:
msg, bad_opt = opt_error
log.exception("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
raise TikaException("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
tikaServerJar = TikaServerJar
serverHost = ServerHost
outDir = '.'
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('--install'): tikaServerJar = val
elif opt in ('--server'): serverHost = val
elif opt in ('-o', '--output'): outDir = val
elif opt in ('--port'): port = val
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-e', '--encode'): EncodeUtf8 = 1
elif opt in ('-c', '--csv'): csvOutput = 1
else:
raise TikaException(USAGE)
cmd = argv[0]
option = argv[1]
try:
paths = argv[2:]
except:
paths = None
return runCommand(cmd, option, paths, port, outDir, serverHost=serverHost, tikaServerJar=tikaServerJar, verbose=Verbose, encode=EncodeUtf8) |
def validate_profile_exists(self):
"""Validate the provided profiles name exists."""
if self.args.profile_name not in self.profiles:
self.handle_error('Could not find profile "{}"'.format(self.args.profile_name)) | Validate the provided profile name exists. | Below is the the instruction that describes the task:
### Input:
Validate the provided profile name exists.
### Response:
def validate_profile_exists(self):
"""Validate the provided profiles name exists."""
if self.args.profile_name not in self.profiles:
self.handle_error('Could not find profile "{}"'.format(self.args.profile_name)) |
def _async_call(self, uri, body=None, method="GET", error_class=None,
has_response=True, *args, **kwargs):
"""
Handles asynchronous call/responses for the DNS API.
Returns the response headers and body if the call was successful.
If an error status is returned, and the 'error_class' parameter is
specified, that class of error will be raised with the details from
the response. If no error class is specified, the response headers
and body will be returned to the calling method, which will have
to handle the result.
"""
api_methods = {
"GET": self._retry_get,
"POST": self.api.method_post,
"PUT": self.api.method_put,
"DELETE": self.api.method_delete,
}
api_method = api_methods[method]
try:
if body is None:
resp, resp_body = api_method(uri, *args, **kwargs)
else:
resp, resp_body = api_method(uri, body=body, *args, **kwargs)
except Exception as e:
if error_class:
raise error_class(e)
else:
raise
callbackURL = resp_body["callbackUrl"].split("/status/")[-1]
massagedURL = "/status/%s?showDetails=true" % callbackURL
start = time.time()
timed_out = False
while (resp_body["status"] == "RUNNING") and not timed_out:
resp_body = None
while resp_body is None and not timed_out:
resp, resp_body = self._retry_get(massagedURL)
if self._timeout:
timed_out = ((time.time() - start) > self._timeout)
time.sleep(self._delay)
if timed_out:
raise exc.DNSCallTimedOut("The API call to '%s' did not complete "
"after %s seconds." % (uri, self._timeout))
if error_class and (resp_body["status"] == "ERROR"):
# This call will handle raising the error.
self._process_async_error(resp_body, error_class)
if has_response:
ret = resp, resp_body["response"]
else:
ret = resp, resp_body
try:
resp_body = json.loads(resp_body)
except Exception:
pass
return ret | Handles asynchronous call/responses for the DNS API.
Returns the response headers and body if the call was successful.
If an error status is returned, and the 'error_class' parameter is
specified, that class of error will be raised with the details from
the response. If no error class is specified, the response headers
and body will be returned to the calling method, which will have
to handle the result. | Below is the the instruction that describes the task:
### Input:
Handles asynchronous call/responses for the DNS API.
Returns the response headers and body if the call was successful.
If an error status is returned, and the 'error_class' parameter is
specified, that class of error will be raised with the details from
the response. If no error class is specified, the response headers
and body will be returned to the calling method, which will have
to handle the result.
### Response:
def _async_call(self, uri, body=None, method="GET", error_class=None,
has_response=True, *args, **kwargs):
"""
Handles asynchronous call/responses for the DNS API.
Returns the response headers and body if the call was successful.
If an error status is returned, and the 'error_class' parameter is
specified, that class of error will be raised with the details from
the response. If no error class is specified, the response headers
and body will be returned to the calling method, which will have
to handle the result.
"""
api_methods = {
"GET": self._retry_get,
"POST": self.api.method_post,
"PUT": self.api.method_put,
"DELETE": self.api.method_delete,
}
api_method = api_methods[method]
try:
if body is None:
resp, resp_body = api_method(uri, *args, **kwargs)
else:
resp, resp_body = api_method(uri, body=body, *args, **kwargs)
except Exception as e:
if error_class:
raise error_class(e)
else:
raise
callbackURL = resp_body["callbackUrl"].split("/status/")[-1]
massagedURL = "/status/%s?showDetails=true" % callbackURL
start = time.time()
timed_out = False
while (resp_body["status"] == "RUNNING") and not timed_out:
resp_body = None
while resp_body is None and not timed_out:
resp, resp_body = self._retry_get(massagedURL)
if self._timeout:
timed_out = ((time.time() - start) > self._timeout)
time.sleep(self._delay)
if timed_out:
raise exc.DNSCallTimedOut("The API call to '%s' did not complete "
"after %s seconds." % (uri, self._timeout))
if error_class and (resp_body["status"] == "ERROR"):
# This call will handle raising the error.
self._process_async_error(resp_body, error_class)
if has_response:
ret = resp, resp_body["response"]
else:
ret = resp, resp_body
try:
resp_body = json.loads(resp_body)
except Exception:
pass
return ret |
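The core of the method above is a poll-until-done-or-timeout loop; a generic, standalone sketch of that pattern (fetch_status is a hypothetical callable returning the job status):

    import time

    def wait_for_completion(fetch_status, timeout=60, delay=2):
        start = time.time()
        status = fetch_status()
        while status == 'RUNNING':
            if timeout and time.time() - start > timeout:
                raise TimeoutError('async call did not complete after %s seconds' % timeout)
            time.sleep(delay)
            status = fetch_status()
        return status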
def activate_left(self, token):
"""Make a copy of the received token and call `_activate_left`."""
watchers.MATCHER.debug(
"Node <%s> activated left with token %r", self, token)
return self._activate_left(token.copy()) | Make a copy of the received token and call `_activate_left`. | Below is the the instruction that describes the task:
### Input:
Make a copy of the received token and call `_activate_left`.
### Response:
def activate_left(self, token):
"""Make a copy of the received token and call `_activate_left`."""
watchers.MATCHER.debug(
"Node <%s> activated left with token %r", self, token)
return self._activate_left(token.copy()) |
def _kwargs_from_dict(cls, a_dict: dict) -> dict:
"""Modify __init__ arguments from an external dictionary.
Template method for from_dict().
Override if necessary (like it's done in Histogram1D).
"""
from .binnings import BinningBase
kwargs = {
"binnings": [BinningBase.from_dict(binning_data) for binning_data in a_dict["binnings"]],
"dtype": np.dtype(a_dict["dtype"]),
"frequencies": a_dict.get("frequencies"),
"errors2": a_dict.get("errors2"),
}
if "missed" in a_dict:
kwargs["missed"] = a_dict["missed"]
kwargs.update(a_dict.get("meta_data", {}))
if len(kwargs["binnings"]) > 2:
kwargs["dimension"] = len(kwargs["binnings"])
return kwargs | Modify __init__ arguments from an external dictionary.
Template method for from_dict().
Override if necessary (like it's done in Histogram1D). | Below is the the instruction that describes the task:
### Input:
Modify __init__ arguments from an external dictionary.
Template method for from_dict().
Override if necessary (like it's done in Histogram1D).
### Response:
def _kwargs_from_dict(cls, a_dict: dict) -> dict:
"""Modify __init__ arguments from an external dictionary.
Template method for from_dict().
Override if necessary (like it's done in Histogram1D).
"""
from .binnings import BinningBase
kwargs = {
"binnings": [BinningBase.from_dict(binning_data) for binning_data in a_dict["binnings"]],
"dtype": np.dtype(a_dict["dtype"]),
"frequencies": a_dict.get("frequencies"),
"errors2": a_dict.get("errors2"),
}
if "missed" in a_dict:
kwargs["missed"] = a_dict["missed"]
kwargs.update(a_dict.get("meta_data", {}))
if len(kwargs["binnings"]) > 2:
kwargs["dimension"] = len(kwargs["binnings"])
return kwargs |
def main(host='localhost', port=8086, nb_day=15):
"""Instantiate a connection to the backend."""
nb_day = 15 # number of days to generate time series
timeinterval_min = 5 # create an event every x minutes
total_minutes = 1440 * nb_day
total_records = int(total_minutes / timeinterval_min)
now = datetime.datetime.today()
metric = "server_data.cpu_idle"
series = []
for i in range(0, total_records):
past_date = now - datetime.timedelta(minutes=i * timeinterval_min)
value = random.randint(0, 200)
hostName = "server-%d" % random.randint(1, 5)
# pointValues = [int(past_date.strftime('%s')), value, hostName]
pointValues = {
"time": int(past_date.strftime('%s')),
"measurement": metric,
"fields": {
"value": value,
},
"tags": {
"hostName": hostName,
},
}
series.append(pointValues)
print(series)
client = InfluxDBClient(host, port, USER, PASSWORD, DBNAME)
print("Create database: " + DBNAME)
try:
client.create_database(DBNAME)
except InfluxDBClientError:
# Drop and create
client.drop_database(DBNAME)
client.create_database(DBNAME)
print("Create a retention policy")
retention_policy = 'server_data'
client.create_retention_policy(retention_policy, '3d', 3, default=True)
print("Write points #: {0}".format(total_records))
client.write_points(series, retention_policy=retention_policy)
time.sleep(2)
query = "SELECT MEAN(value) FROM {} WHERE \
time > now() - 10d GROUP BY time(500m)".format(metric)
result = client.query(query, database=DBNAME)
print(result)
print("Result: {0}".format(result))
print("Drop database: {}".format(DBNAME))
client.drop_database(DBNAME) | Instantiate a connection to the backend. | Below is the the instruction that describes the task:
### Input:
Instantiate a connection to the backend.
### Response:
def main(host='localhost', port=8086, nb_day=15):
"""Instantiate a connection to the backend."""
nb_day = 15 # number of days to generate time series
timeinterval_min = 5 # create an event every x minutes
total_minutes = 1440 * nb_day
total_records = int(total_minutes / timeinterval_min)
now = datetime.datetime.today()
metric = "server_data.cpu_idle"
series = []
for i in range(0, total_records):
past_date = now - datetime.timedelta(minutes=i * timeinterval_min)
value = random.randint(0, 200)
hostName = "server-%d" % random.randint(1, 5)
# pointValues = [int(past_date.strftime('%s')), value, hostName]
pointValues = {
"time": int(past_date.strftime('%s')),
"measurement": metric,
"fields": {
"value": value,
},
"tags": {
"hostName": hostName,
},
}
series.append(pointValues)
print(series)
client = InfluxDBClient(host, port, USER, PASSWORD, DBNAME)
print("Create database: " + DBNAME)
try:
client.create_database(DBNAME)
except InfluxDBClientError:
# Drop and create
client.drop_database(DBNAME)
client.create_database(DBNAME)
print("Create a retention policy")
retention_policy = 'server_data'
client.create_retention_policy(retention_policy, '3d', 3, default=True)
print("Write points #: {0}".format(total_records))
client.write_points(series, retention_policy=retention_policy)
time.sleep(2)
query = "SELECT MEAN(value) FROM {} WHERE \
time > now() - 10d GROUP BY time(500m)".format(metric)
result = client.query(query, database=DBNAME)
print(result)
print("Result: {0}".format(result))
print("Drop database: {}".format(DBNAME))
client.drop_database(DBNAME) |
def add_private_note(self, private_notes, source=None):
"""Add private notes.
:param private_notes: hidden notes for the current document
:type private_notes: string
:param source: source for the given private notes
:type source: string
"""
self._append_to('_private_notes', self._sourced_dict(
source,
value=private_notes,
)) | Add private notes.
:param private_notes: hidden notes for the current document
:type private_notes: string
:param source: source for the given private notes
:type source: string | Below is the the instruction that describes the task:
### Input:
Add private notes.
:param private_notes: hidden notes for the current document
:type private_notes: string
:param source: source for the given private notes
:type source: string
### Response:
def add_private_note(self, private_notes, source=None):
"""Add private notes.
:param private_notes: hidden notes for the current document
:type private_notes: string
:param source: source for the given private notes
:type source: string
"""
self._append_to('_private_notes', self._sourced_dict(
source,
value=private_notes,
)) |
def _set_lsp_secpath_autobw_template(self, v, load=False):
"""
Setter method for lsp_secpath_autobw_template, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_secpath_auto_bandwidth/lsp_secpath_autobw_template (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_secpath_autobw_template is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_secpath_autobw_template() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ReferenceType(referenced_path='../../../../autobw-template/autobw-template-name', caller=self._path() + ['lsp-secpath-autobw-template'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="lsp-secpath-autobw-template", rest_name="template", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Inherit Auto-bandwidth parameters from a template', u'alt-name': u'template'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='leafref', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_secpath_autobw_template must be of a type compatible with leafref""",
'defined-type': "leafref",
'generated-type': """YANGDynClass(base=ReferenceType(referenced_path='../../../../autobw-template/autobw-template-name', caller=self._path() + ['lsp-secpath-autobw-template'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="lsp-secpath-autobw-template", rest_name="template", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Inherit Auto-bandwidth parameters from a template', u'alt-name': u'template'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='leafref', is_config=True)""",
})
self.__lsp_secpath_autobw_template = t
if hasattr(self, '_set'):
self._set() | Setter method for lsp_secpath_autobw_template, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_secpath_auto_bandwidth/lsp_secpath_autobw_template (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_secpath_autobw_template is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_secpath_autobw_template() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for lsp_secpath_autobw_template, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_secpath_auto_bandwidth/lsp_secpath_autobw_template (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_secpath_autobw_template is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_secpath_autobw_template() directly.
### Response:
def _set_lsp_secpath_autobw_template(self, v, load=False):
"""
Setter method for lsp_secpath_autobw_template, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_secpath_auto_bandwidth/lsp_secpath_autobw_template (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_secpath_autobw_template is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_secpath_autobw_template() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ReferenceType(referenced_path='../../../../autobw-template/autobw-template-name', caller=self._path() + ['lsp-secpath-autobw-template'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="lsp-secpath-autobw-template", rest_name="template", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Inherit Auto-bandwidth parameters from a template', u'alt-name': u'template'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='leafref', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_secpath_autobw_template must be of a type compatible with leafref""",
'defined-type': "leafref",
'generated-type': """YANGDynClass(base=ReferenceType(referenced_path='../../../../autobw-template/autobw-template-name', caller=self._path() + ['lsp-secpath-autobw-template'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="lsp-secpath-autobw-template", rest_name="template", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Inherit Auto-bandwidth parameters from a template', u'alt-name': u'template'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='leafref', is_config=True)""",
})
self.__lsp_secpath_autobw_template = t
if hasattr(self, '_set'):
self._set() |
def import_smesh(file):
""" Generates NURBS surface(s) from surface mesh (smesh) file(s).
*smesh* files are some text files which contain a set of NURBS surfaces. Each file in the set corresponds to one
NURBS surface. Most of the time, you receive multiple *smesh* files corresponding to a complete object composed of
several NURBS surfaces. The files have the extensions of ``txt`` or ``dat`` and they are named as
* ``smesh.X.Y.txt``
* ``smesh.X.dat``
where *X* and *Y* correspond to some integer value which defines the set the surface belongs to and part number of
the surface inside the complete object.
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS surfaces
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
imported_elements = []
if os.path.isfile(file):
imported_elements.append(exch.import_surf_mesh(file))
elif os.path.isdir(file):
files = sorted([os.path.join(file, f) for f in os.listdir(file)])
for f in files:
imported_elements.append(exch.import_surf_mesh(f))
else:
raise exch.GeomdlException("Input is not a file or a directory")
return imported_elements | Generates NURBS surface(s) from surface mesh (smesh) file(s).
*smesh* files are some text files which contain a set of NURBS surfaces. Each file in the set corresponds to one
NURBS surface. Most of the time, you receive multiple *smesh* files corresponding to a complete object composed of
several NURBS surfaces. The files have the extensions of ``txt`` or ``dat`` and they are named as
* ``smesh.X.Y.txt``
* ``smesh.X.dat``
where *X* and *Y* correspond to some integer value which defines the set the surface belongs to and part number of
the surface inside the complete object.
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS surfaces
:rtype: list
:raises GeomdlException: an error occurred reading the file | Below is the the instruction that describes the task:
### Input:
Generates NURBS surface(s) from surface mesh (smesh) file(s).
*smesh* files are some text files which contain a set of NURBS surfaces. Each file in the set corresponds to one
NURBS surface. Most of the time, you receive multiple *smesh* files corresponding to a complete object composed of
several NURBS surfaces. The files have the extensions of ``txt`` or ``dat`` and they are named as
* ``smesh.X.Y.txt``
* ``smesh.X.dat``
where *X* and *Y* correspond to some integer value which defines the set the surface belongs to and part number of
the surface inside the complete object.
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS surfaces
:rtype: list
:raises GeomdlException: an error occurred reading the file
### Response:
def import_smesh(file):
""" Generates NURBS surface(s) from surface mesh (smesh) file(s).
*smesh* files are some text files which contain a set of NURBS surfaces. Each file in the set corresponds to one
NURBS surface. Most of the time, you receive multiple *smesh* files corresponding to a complete object composed of
several NURBS surfaces. The files have the extensions of ``txt`` or ``dat`` and they are named as
* ``smesh.X.Y.txt``
* ``smesh.X.dat``
where *X* and *Y* correspond to some integer value which defines the set the surface belongs to and part number of
the surface inside the complete object.
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS surfaces
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
imported_elements = []
if os.path.isfile(file):
imported_elements.append(exch.import_surf_mesh(file))
elif os.path.isdir(file):
files = sorted([os.path.join(file, f) for f in os.listdir(file)])
for f in files:
imported_elements.append(exch.import_surf_mesh(f))
else:
raise exch.GeomdlException("Input is not a file or a directory")
return imported_elements |
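A hedged usage sketch for the row above; the paths are placeholders and assume that import_smesh is importable and that smesh files following the naming scheme described in the docstring exist on disk.
# Hypothetical paths; import_smesh always returns a list of surfaces.
surfaces = import_smesh('models/smesh.1.dat')   # single file -> one-element list
surfaces += import_smesh('models')              # directory -> one surface per file
print('%d surface(s) imported' % len(surfaces))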
def translate(self, desired_locale=None):
"""Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
"""
translated_message = Message._translate_msgid(self.msgid,
self.domain,
desired_locale)
if self.params is None:
# No need for more translation
return translated_message
# This Message object may have been formatted with one or more
# Message objects as substitution arguments, given either as a single
# argument, part of a tuple, or as one or more values in a dictionary.
# When translating this Message we need to translate those Messages too
translated_params = _translate_args(self.params, desired_locale)
translated_message = translated_message % translated_params
return translated_message | Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode | Below is the the instruction that describes the task:
### Input:
Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
### Response:
def translate(self, desired_locale=None):
"""Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
"""
translated_message = Message._translate_msgid(self.msgid,
self.domain,
desired_locale)
if self.params is None:
# No need for more translation
return translated_message
# This Message object may have been formatted with one or more
# Message objects as substitution arguments, given either as a single
# argument, part of a tuple, or as one or more values in a dictionary.
# When translating this Message we need to translate those Messages too
translated_params = _translate_args(self.params, desired_locale)
translated_message = translated_message % translated_params
return translated_message |
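A standalone sketch, not the library code above, of why interpolation is deferred until translation time: the msgid template is looked up first and only then combined with its parameters, which may themselves have been translated for the same locale.
import gettext

catalog = gettext.NullTranslations()        # stand-in for a real per-locale catalog
msgid = 'Volume %(name)s not found'         # hypothetical msgid
params = {'name': 'backup-01'}

translated_template = catalog.gettext(msgid)
print(translated_template % params)         # Volume backup-01 not found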
def _get_qsize(tuning, width):
"""Return a reasonable quarter note size for 'tuning' and 'width'."""
names = [x.to_shorthand() for x in tuning.tuning]
basesize = len(max(names)) + 3
barsize = ((width - basesize) - 2) - 1
# x * 4 + 0.5x - barsize = 0  =>  4.5x = barsize  =>  x = barsize / 4.5
return max(0, int(barsize / 4.5)) | Return a reasonable quarter note size for 'tuning' and 'width'. | Below is the the instruction that describes the task:
### Input:
Return a reasonable quarter note size for 'tuning' and 'width'.
### Response:
def _get_qsize(tuning, width):
"""Return a reasonable quarter note size for 'tuning' and 'width'."""
names = [x.to_shorthand() for x in tuning.tuning]
basesize = len(max(names)) + 3
barsize = ((width - basesize) - 2) - 1
# x * 4 + 0.5x - barsize = 0  =>  4.5x = barsize  =>  x = barsize / 4.5
return max(0, int(barsize / 4.5)) |
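A worked example of the sizing arithmetic above, assuming a width of 80 columns and a longest shorthand tuning name of 2 characters (both values are made up for illustration).
basesize = 2 + 3                      # len(longest name) + 3
barsize = ((80 - basesize) - 2) - 1   # 72 columns left for the bar itself
qsize = max(0, int(barsize / 4.5))    # 16, since a bar is sized for about 4.5 quarter-note widths
print(basesize, barsize, qsize)       # 5 72 16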
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
canvas = self.canvas
dc =wx.ClientDC(canvas)
# Set logical function to XOR for rubberbanding
dc.SetLogicalFunction(wx.XOR)
# Set dc brush and pen
# Here I set brush and pen to white and grey respectively
# You can set it to your own choices
# The brush setting is not really needed since we
# dont do any filling of the dc. It is set just for
# the sake of completion.
wbrush =wx.Brush(wx.Colour(255,255,255), wx.TRANSPARENT)
wpen =wx.Pen(wx.Colour(200, 200, 200), 1, wx.SOLID)
dc.SetBrush(wbrush)
dc.SetPen(wpen)
dc.ResetBoundingBox()
dc.BeginDrawing()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
if y1<y0: y0, y1 = y1, y0
if x1<x0: x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = int(x0), int(y0), int(w), int(h)
try: lastrect = self.lastrect
except AttributeError: pass
else: dc.DrawRectangle(*lastrect) #erase last
self.lastrect = rect
dc.DrawRectangle(*rect)
dc.EndDrawing() | adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744 | Below is the the instruction that describes the task:
### Input:
adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744
### Response:
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
canvas = self.canvas
dc =wx.ClientDC(canvas)
# Set logical function to XOR for rubberbanding
dc.SetLogicalFunction(wx.XOR)
# Set dc brush and pen
# Here I set brush and pen to white and grey respectively
# You can set it to your own choices
# The brush setting is not really needed since we
# dont do any filling of the dc. It is set just for
# the sake of completion.
wbrush =wx.Brush(wx.Colour(255,255,255), wx.TRANSPARENT)
wpen =wx.Pen(wx.Colour(200, 200, 200), 1, wx.SOLID)
dc.SetBrush(wbrush)
dc.SetPen(wpen)
dc.ResetBoundingBox()
dc.BeginDrawing()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
if y1<y0: y0, y1 = y1, y0
if x1<x0: x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = int(x0), int(y0), int(w), int(h)
try: lastrect = self.lastrect
except AttributeError: pass
else: dc.DrawRectangle(*lastrect) #erase last
self.lastrect = rect
dc.DrawRectangle(*rect)
dc.EndDrawing() |
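A minimal sketch of the XOR-erase idea the code above relies on: drawing the same rectangle twice under wx.XOR restores the original pixels, which is why the previous rectangle is redrawn to "erase" it before the new one is drawn.
# Bitwise illustration only; no wx objects are created here.
pixel = 0b10101010
pen = 0b11110000
pixel ^= pen     # first draw: rubberband visible
pixel ^= pen     # second draw with the same pen: original pixel restored
assert pixel == 0b10101010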
def get_family_lookup_session(self, proxy):
"""Gets the ``OsidSession`` associated with the family lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.relationship.FamilyLookupSession) - a
``FamilyLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_family_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_family_lookup()`` is ``true``.*
"""
if not self.supports_family_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.FamilyLookupSession(proxy=proxy, runtime=self._runtime) | Gets the ``OsidSession`` associated with the family lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.relationship.FamilyLookupSession) - a
``FamilyLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_family_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_family_lookup()`` is ``true``.* | Below is the the instruction that describes the task:
### Input:
Gets the ``OsidSession`` associated with the family lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.relationship.FamilyLookupSession) - a
``FamilyLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_family_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_family_lookup()`` is ``true``.*
### Response:
def get_family_lookup_session(self, proxy):
"""Gets the ``OsidSession`` associated with the family lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.relationship.FamilyLookupSession) - a
``FamilyLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_family_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_family_lookup()`` is ``true``.*
"""
if not self.supports_family_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.FamilyLookupSession(proxy=proxy, runtime=self._runtime) |
def _send(self, message, fail_silently=False):
"""Save message to a file for debugging
"""
seeds = '1234567890qwertyuiopasdfghjklzxcvbnm'
file_part1 = datetime.now().strftime('%Y%m%d%H%M%S')
file_part2 = ''.join(sample(seeds, 4))
filename = join(self.tld, '%s_%s.msg' % (file_part1, file_part2))
with open(filename, 'w') as fd:
fd.write(str(message.to_message())) | Save message to a file for debugging | Below is the the instruction that describes the task:
### Input:
Save message to a file for debugging
### Response:
def _send(self, message, fail_silently=False):
"""Save message to a file for debugging
"""
seeds = '1234567890qwertyuiopasdfghjklzxcvbnm'
file_part1 = datetime.now().strftime('%Y%m%d%H%M%S')
file_part2 = ''.join(sample(seeds, 4))
filename = join(self.tld, '%s_%s.msg' % (file_part1, file_part2))
with open(filename, 'w') as fd:
fd.write(str(message.to_message())) |
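A standalone sketch of the file-naming scheme used above: a second-resolution timestamp plus four random characters, so messages saved close together rarely collide (the target directory is a placeholder).
from datetime import datetime
from os.path import join
from random import sample

seeds = '1234567890qwertyuiopasdfghjklzxcvbnm'
filename = '%s_%s.msg' % (datetime.now().strftime('%Y%m%d%H%M%S'), ''.join(sample(seeds, 4)))
print(join('/tmp/mail-debug', filename))  # e.g. /tmp/mail-debug/20240101120000_q1w2.msg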
def evaluate(self, brain_info):
"""
Evaluates policy for the agent experiences provided.
:param brain_info: BrainInfo object containing inputs.
:return: Outputs from network as defined by self.inference_dict.
"""
feed_dict = {self.model.batch_size: len(brain_info.vector_observations),
self.model.sequence_length: 1}
epsilon = None
if self.use_recurrent:
if not self.use_continuous_act:
feed_dict[self.model.prev_action] = brain_info.previous_vector_actions.reshape(
[-1, len(self.model.act_size)])
if brain_info.memories.shape[1] == 0:
brain_info.memories = self.make_empty_memory(len(brain_info.agents))
feed_dict[self.model.memory_in] = brain_info.memories
if self.use_continuous_act:
epsilon = np.random.normal(
size=(len(brain_info.vector_observations), self.model.act_size[0]))
feed_dict[self.model.epsilon] = epsilon
feed_dict = self._fill_eval_dict(feed_dict, brain_info)
run_out = self._execute_model(feed_dict, self.inference_dict)
if self.use_continuous_act:
run_out['random_normal_epsilon'] = epsilon
return run_out | Evaluates policy for the agent experiences provided.
:param brain_info: BrainInfo object containing inputs.
:return: Outputs from network as defined by self.inference_dict. | Below is the the instruction that describes the task:
### Input:
Evaluates policy for the agent experiences provided.
:param brain_info: BrainInfo object containing inputs.
:return: Outputs from network as defined by self.inference_dict.
### Response:
def evaluate(self, brain_info):
"""
Evaluates policy for the agent experiences provided.
:param brain_info: BrainInfo object containing inputs.
:return: Outputs from network as defined by self.inference_dict.
"""
feed_dict = {self.model.batch_size: len(brain_info.vector_observations),
self.model.sequence_length: 1}
epsilon = None
if self.use_recurrent:
if not self.use_continuous_act:
feed_dict[self.model.prev_action] = brain_info.previous_vector_actions.reshape(
[-1, len(self.model.act_size)])
if brain_info.memories.shape[1] == 0:
brain_info.memories = self.make_empty_memory(len(brain_info.agents))
feed_dict[self.model.memory_in] = brain_info.memories
if self.use_continuous_act:
epsilon = np.random.normal(
size=(len(brain_info.vector_observations), self.model.act_size[0]))
feed_dict[self.model.epsilon] = epsilon
feed_dict = self._fill_eval_dict(feed_dict, brain_info)
run_out = self._execute_model(feed_dict, self.inference_dict)
if self.use_continuous_act:
run_out['random_normal_epsilon'] = epsilon
return run_out |
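A standalone sketch of the exploration-noise shape used for continuous actions above: one Gaussian epsilon vector per agent, matching the action size (the sizes below are made up).
import numpy as np

n_agents, act_size = 3, 2                              # hypothetical sizes
epsilon = np.random.normal(size=(n_agents, act_size))  # one noise row per agent
print(epsilon.shape)                                   # (3, 2)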