code | docstring | text
---|---|---|
def jaccardIndex(s1, s2, stranded=False):
"""
Compute the Jaccard index for two collections of genomic intervals
:param s1: the first set of genomic intervals
:param s2: the second set of genomic intervals
:param stranded: if True, treat regions on different strands as not
intersecting each other, even if they occupy the same
genomic region.
:return: Jaccard index
"""
def count(s):
""" sum the size of regions in s. """
tot = 0
for r in s:
tot += len(r)
return tot
if stranded:
raise GenomicIntervalError("Sorry, stranded mode for computing Jaccard " +
"index hasn't been implemented yet.")
s1 = collapseRegions(s1)
s2 = collapseRegions(s2)
intersection = regionsIntersection(s1, s2)
c_i = count(intersection)
return c_i / float(count(s1) + count(s2) - c_i) | Compute the Jaccard index for two collections of genomic intervals
:param s1: the first set of genomic intervals
:param s2: the second set of genomic intervals
:param stranded: if True, treat regions on different strands as not
intersecting each other, even if they occupy the same
genomic region.
:return: Jaccard index | Below is the instruction that describes the task:
### Input:
Compute the Jaccard index for two collections of genomic intervals
:param s1: the first set of genomic intervals
:param s2: the second set of genomic intervals
:param stranded: if True, treat regions on different strands as not
intersecting each other, even if they occupy the same
genomic region.
:return: Jaccard index
### Response:
def jaccardIndex(s1, s2, stranded=False):
"""
Compute the Jaccard index for two collections of genomic intervals
:param s1: the first set of genomic intervals
:param s2: the second set of genomic intervals
:param stranded: if True, treat regions on different strands as not
intersecting each other, even if they occupy the same
genomic region.
:return: Jaccard index
"""
def count(s):
""" sum the size of regions in s. """
tot = 0
for r in s:
tot += len(r)
return tot
if stranded:
raise GenomicIntervalError("Sorry, stranded mode for computing Jaccard " +
"index hasn't been implemented yet.")
s1 = collapseRegions(s1)
s2 = collapseRegions(s2)
intersection = regionsIntersection(s1, s2)
c_i = count(intersection)
return c_i / float(count(s1) + count(s2) - c_i) |
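
For illustration, the value returned above is the standard Jaccard index |A ∩ B| / |A ∪ B| over collapsed interval lengths; the sketch below uses hypothetical counts rather than real GenomicInterval objects.

# Hypothetical sizes: s1 collapses to 100 bases, s2 to 100 bases, overlapping on 50.
c1, c2 = 100, 100                       # count(s1), count(s2)
c_i = 50                                # count(regionsIntersection(s1, s2))
jaccard = c_i / float(c1 + c2 - c_i)    # |A ∩ B| / |A ∪ B|
print(round(jaccard, 3))                # 0.333
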
def transform_to_mods_multimono(marc_xml, uuid, url):
"""
Convert `marc_xml` to multimonograph MODS data format.
Args:
marc_xml (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
uuid (str): UUID string giving the package ID.
url (str): URL of the publication (public or not).
Returns:
list: Collection of transformed xml strings.
"""
marc_xml = _read_content_or_path(marc_xml)
transformed = xslt_transformation(
marc_xml,
_absolute_template_path("MARC21toMultiMonographTitle.xsl")
)
return _apply_postprocessing(
marc_xml=marc_xml,
xml=transformed,
func=mods_postprocessor.postprocess_multi_mono,
uuid=uuid,
url=url,
) | Convert `marc_xml` to multimonograph MODS data format.
Args:
marc_xml (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
uuid (str): UUID string giving the package ID.
url (str): URL of the publication (public or not).
Returns:
list: Collection of transformed xml strings. | Below is the instruction that describes the task:
### Input:
Convert `marc_xml` to multimonograph MODS data format.
Args:
marc_xml (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
uuid (str): UUID string giving the package ID.
url (str): URL of the publication (public or not).
Returns:
list: Collection of transformed xml strings.
### Response:
def transform_to_mods_multimono(marc_xml, uuid, url):
"""
Convert `marc_xml` to multimonograph MODS data format.
Args:
marc_xml (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
uuid (str): UUID string giving the package ID.
url (str): URL of the publication (public or not).
Returns:
list: Collection of transformed xml strings.
"""
marc_xml = _read_content_or_path(marc_xml)
transformed = xslt_transformation(
marc_xml,
_absolute_template_path("MARC21toMultiMonographTitle.xsl")
)
return _apply_postprocessing(
marc_xml=marc_xml,
xml=transformed,
func=mods_postprocessor.postprocess_multi_mono,
uuid=uuid,
url=url,
) |
def load_calibration(self, calibration_file=None):
"""Load calibration data for IMU(s) connected to this SK8.
This method attempts to load a set of calibration data from a .ini file
produced by the sk8_calibration_gui application (TODO link!).
By default, it will look for a file name "sk8calib.ini" in the current working
directory. This can be overridden using the `calibration_file` parameter.
Args:
calibration_file (str): Path to a user-specified calibration file (ini format).
Returns:
True if any calibration data was loaded, False if none was. Note that True will be
returned even if for example only 1 IMU had any calibration data available.
"""
logger.debug('Loading calibration for {}'.format(self.addr))
calibration_data = ConfigParser()
path = calibration_file or os.path.join(os.getcwd(), 'sk8calib.ini')
logger.debug('Attempting to load calibration from {}'.format(path))
calibration_data.read(path)
success = False
for i in range(MAX_IMUS):
s = '{}_IMU{}'.format(self.name, i)
if s in calibration_data.sections():
logger.debug('Calibration data for device {} was detected, extracting...'.format(s))
success = success or self.imus[i]._load_calibration(calibration_data[s])
return success | Load calibration data for IMU(s) connected to this SK8.
This method attempts to load a set of calibration data from a .ini file
produced by the sk8_calibration_gui application (TODO link!).
By default, it will look for a file name "sk8calib.ini" in the current working
directory. This can be overridden using the `calibration_file` parameter.
Args:
calibration_file (str): Path to a user-specified calibration file (ini format).
Returns:
True if any calibration data was loaded, False if none was. Note that True will be
returned even if for example only 1 IMU had any calibration data available. | Below is the instruction that describes the task:
### Input:
Load calibration data for IMU(s) connected to this SK8.
This method attempts to load a set of calibration data from a .ini file
produced by the sk8_calibration_gui application (TODO link!).
By default, it will look for a file name "sk8calib.ini" in the current working
directory. This can be overridden using the `calibration_file` parameter.
Args:
calibration_file (str): Path to a user-specified calibration file (ini format).
Returns:
True if any calibration data was loaded, False if none was. Note that True will be
returned even if for example only 1 IMU had any calibration data available.
### Response:
def load_calibration(self, calibration_file=None):
"""Load calibration data for IMU(s) connected to this SK8.
This method attempts to load a set of calibration data from a .ini file
produced by the sk8_calibration_gui application (TODO link!).
By default, it will look for a file name "sk8calib.ini" in the current working
directory. This can be overridden using the `calibration_file` parameter.
Args:
calibration_file (str): Path to a user-specified calibration file (ini format).
Returns:
True if any calibration data was loaded, False if none was. Note that True will be
returned even if for example only 1 IMU had any calibration data available.
"""
logger.debug('Loading calibration for {}'.format(self.addr))
calibration_data = ConfigParser()
path = calibration_file or os.path.join(os.getcwd(), 'sk8calib.ini')
logger.debug('Attempting to load calibration from {}'.format(path))
calibration_data.read(path)
success = False
for i in range(MAX_IMUS):
s = '{}_IMU{}'.format(self.name, i)
if s in calibration_data.sections():
logger.debug('Calibration data for device {} was detected, extracting...'.format(s))
success = success or self.imus[i]._load_calibration(calibration_data[s])
return success |
def add(self, rule):
"""Add a new classifier rule to the classifier set. Return a list
containing zero or more rules that were deleted from the classifier
by the algorithm in order to make room for the new rule. The rule
argument should be a ClassifierRule instance. The behavior of this
method depends on whether the rule already exists in the
classifier set. When a rule is already present, the rule's
numerosity is added to that of the version of the rule already
present in the population. Otherwise, the new rule is captured.
Note that this means that for rules already present in the
classifier set, the metadata of the existing rule is not
overwritten by that of the one passed in as an argument.
Usage:
displaced_rules = model.add(rule)
Arguments:
rule: A ClassifierRule instance which is to be added to this
classifier set.
Return:
A possibly empty list of ClassifierRule instances which were
removed altogether from the classifier set (as opposed to
simply having their numerosities decremented) in order to make
room for the newly added rule.
"""
assert isinstance(rule, ClassifierRule)
condition = rule.condition
action = rule.action
# If the rule already exists in the population, then we virtually
# add the rule by incrementing the existing rule's numerosity. This
# prevents redundancy in the rule set. Otherwise we capture the
# new rule.
if condition not in self._population:
self._population[condition] = {}
if action in self._population[condition]:
existing_rule = self._population[condition][action]
existing_rule.numerosity += rule.numerosity
else:
self._population[condition][action] = rule
# Any time we add a rule, we need to call this to keep the
# population size under control.
return self._algorithm.prune(self) | Add a new classifier rule to the classifier set. Return a list
containing zero or more rules that were deleted from the classifier
by the algorithm in order to make room for the new rule. The rule
argument should be a ClassifierRule instance. The behavior of this
method depends on whether the rule already exists in the
classifier set. When a rule is already present, the rule's
numerosity is added to that of the version of the rule already
present in the population. Otherwise, the new rule is captured.
Note that this means that for rules already present in the
classifier set, the metadata of the existing rule is not
overwritten by that of the one passed in as an argument.
Usage:
displaced_rules = model.add(rule)
Arguments:
rule: A ClassifierRule instance which is to be added to this
classifier set.
Return:
A possibly empty list of ClassifierRule instances which were
removed altogether from the classifier set (as opposed to
simply having their numerosities decremented) in order to make
room for the newly added rule. | Below is the instruction that describes the task:
### Input:
Add a new classifier rule to the classifier set. Return a list
containing zero or more rules that were deleted from the classifier
by the algorithm in order to make room for the new rule. The rule
argument should be a ClassifierRule instance. The behavior of this
method depends on whether the rule already exists in the
classifier set. When a rule is already present, the rule's
numerosity is added to that of the version of the rule already
present in the population. Otherwise, the new rule is captured.
Note that this means that for rules already present in the
classifier set, the metadata of the existing rule is not
overwritten by that of the one passed in as an argument.
Usage:
displaced_rules = model.add(rule)
Arguments:
rule: A ClassifierRule instance which is to be added to this
classifier set.
Return:
A possibly empty list of ClassifierRule instances which were
removed altogether from the classifier set (as opposed to
simply having their numerosities decremented) in order to make
room for the newly added rule.
### Response:
def add(self, rule):
"""Add a new classifier rule to the classifier set. Return a list
containing zero or more rules that were deleted from the classifier
by the algorithm in order to make room for the new rule. The rule
argument should be a ClassifierRule instance. The behavior of this
method depends on whether the rule already exists in the
classifier set. When a rule is already present, the rule's
numerosity is added to that of the version of the rule already
present in the population. Otherwise, the new rule is captured.
Note that this means that for rules already present in the
classifier set, the metadata of the existing rule is not
overwritten by that of the one passed in as an argument.
Usage:
displaced_rules = model.add(rule)
Arguments:
rule: A ClassifierRule instance which is to be added to this
classifier set.
Return:
A possibly empty list of ClassifierRule instances which were
removed altogether from the classifier set (as opposed to
simply having their numerosities decremented) in order to make
room for the newly added rule.
"""
assert isinstance(rule, ClassifierRule)
condition = rule.condition
action = rule.action
# If the rule already exists in the population, then we virtually
# add the rule by incrementing the existing rule's numerosity. This
# prevents redundancy in the rule set. Otherwise we capture the
# new rule.
if condition not in self._population:
self._population[condition] = {}
if action in self._population[condition]:
existing_rule = self._population[condition][action]
existing_rule.numerosity += rule.numerosity
else:
self._population[condition][action] = rule
# Any time we add a rule, we need to call this to keep the
# population size under control.
return self._algorithm.prune(self) |
def reset_server_env(self, server_name, configure):
"""
reset server env to server-name
:param server_name:
:param configure:
:return:
"""
env.host_string = configure[server_name]['host']
env.user = configure[server_name]['user']
env.password = configure[server_name]['passwd'] | reset server env to server-name
:param server_name:
:param configure:
:return: | Below is the instruction that describes the task:
### Input:
reset server env to server-name
:param server_name:
:param configure:
:return:
### Response:
def reset_server_env(self, server_name, configure):
"""
reset server env to server-name
:param server_name:
:param configure:
:return:
"""
env.host_string = configure[server_name]['host']
env.user = configure[server_name]['user']
env.password = configure[server_name]['passwd'] |
async def stop(self):
"""Stop the rpc queue from inside the event loop."""
if self._rpc_task is not None:
self._rpc_task.cancel()
try:
await self._rpc_task
except asyncio.CancelledError:
pass
self._rpc_task = None | Stop the rpc queue from inside the event loop. | Below is the instruction that describes the task:
### Input:
Stop the rpc queue from inside the event loop.
### Response:
async def stop(self):
"""Stop the rpc queue from inside the event loop."""
if self._rpc_task is not None:
self._rpc_task.cancel()
try:
await self._rpc_task
except asyncio.CancelledError:
pass
self._rpc_task = None |
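
For illustration, the cancel-then-await-then-swallow-CancelledError pattern above is the standard way to stop an asyncio task; the worker coroutine below is a hypothetical stand-in for the rpc queue loop.

import asyncio

async def worker():
    await asyncio.sleep(3600)      # stands in for the rpc queue loop

async def main():
    task = asyncio.create_task(worker())
    task.cancel()                  # request cancellation
    try:
        await task                 # wait until the task has actually unwound
    except asyncio.CancelledError:
        pass                       # swallowed, just as stop() does
    print('stopped')

asyncio.run(main())
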
def parse_xml_node(self, node):
'''Parse an xml.dom Node object representing a condition into this
object.
'''
self.sequence = int(node.getAttributeNS(RTS_NS, 'sequence'))
c = node.getElementsByTagNameNS(RTS_NS, 'TargetComponent')
if c.length != 1:
raise InvalidParticipantNodeError
self.target_component = TargetExecutionContext().parse_xml_node(c[0])
for c in get_direct_child_elements_xml(node, prefix=RTS_EXT_NS,
local_name='Properties'):
name, value = parse_properties_xml(c)
self._properties[name] = value
return self | Parse an xml.dom Node object representing a condition into this
object. | Below is the instruction that describes the task:
### Input:
Parse an xml.dom Node object representing a condition into this
object.
### Response:
def parse_xml_node(self, node):
'''Parse an xml.dom Node object representing a condition into this
object.
'''
self.sequence = int(node.getAttributeNS(RTS_NS, 'sequence'))
c = node.getElementsByTagNameNS(RTS_NS, 'TargetComponent')
if c.length != 1:
raise InvalidParticipantNodeError
self.target_component = TargetExecutionContext().parse_xml_node(c[0])
for c in get_direct_child_elements_xml(node, prefix=RTS_EXT_NS,
local_name='Properties'):
name, value = parse_properties_xml(c)
self._properties[name] = value
return self |
def Power(base: vertex_constructor_param_types, exponent: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Raises a vertex to the power of another
:param base: the base vertex
:param exponent: the exponent vertex
"""
return Double(context.jvm_view().PowerVertex, label, cast_to_double_vertex(base), cast_to_double_vertex(exponent)) | Raises a vertex to the power of another
:param base: the base vertex
:param exponent: the exponent vertex | Below is the instruction that describes the task:
### Input:
Raises a vertex to the power of another
:param base: the base vertex
:param exponent: the exponent vertex
### Response:
def Power(base: vertex_constructor_param_types, exponent: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Raises a vertex to the power of another
:param base: the base vertex
:param exponent: the exponent vertex
"""
return Double(context.jvm_view().PowerVertex, label, cast_to_double_vertex(base), cast_to_double_vertex(exponent)) |
def versionok_for_gui():
''' Return True if running Python is suitable for GUI Event Integration and deeper IPython integration '''
# We require Python 2.6+ ...
if sys.hexversion < 0x02060000:
return False
# Or Python 3.2+
if sys.hexversion >= 0x03000000 and sys.hexversion < 0x03020000:
return False
# Not supported under Jython nor IronPython
if sys.platform.startswith("java") or sys.platform.startswith('cli'):
return False
return True | Return True if running Python is suitable for GUI Event Integration and deeper IPython integration | Below is the instruction that describes the task:
### Input:
Return True if running Python is suitable for GUI Event Integration and deeper IPython integration
### Response:
def versionok_for_gui():
''' Return True if running Python is suitable for GUI Event Integration and deeper IPython integration '''
# We require Python 2.6+ ...
if sys.hexversion < 0x02060000:
return False
# Or Python 3.2+
if sys.hexversion >= 0x03000000 and sys.hexversion < 0x03020000:
return False
# Not supported under Jython nor IronPython
if sys.platform.startswith("java") or sys.platform.startswith('cli'):
return False
return True |
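
For illustration, sys.hexversion packs the interpreter version as 0xMMmmppXX (major, minor, micro, then release level/serial), so the two thresholds above decode as shown below.

import sys

for threshold in (0x02060000, 0x03020000):
    major = (threshold >> 24) & 0xFF
    minor = (threshold >> 16) & 0xFF
    micro = (threshold >> 8) & 0xFF
    print(hex(threshold), '->', '%d.%d.%d' % (major, minor, micro))  # 2.6.0 and 3.2.0
print(sys.hexversion >= 0x03020000)      # True on any currently supported CPython
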
def FortranScan(path_variable="FORTRANPATH"):
"""Return a prototype Scanner instance for scanning source files
for Fortran USE & INCLUDE statements"""
# The USE statement regex matches the following:
#
# USE module_name
# USE :: module_name
# USE, INTRINSIC :: module_name
# USE, NON_INTRINSIC :: module_name
#
# Limitations
#
# -- While the regex can handle multiple USE statements on one line,
# it cannot properly handle them if they are commented out.
# In either of the following cases:
#
# ! USE mod_a ; USE mod_b [entire line is commented out]
# USE mod_a ! ; USE mod_b [in-line comment of second USE statement]
#
# the second module name (mod_b) will be picked up as a dependency
# even though it should be ignored. The only way I can see
# to rectify this would be to modify the scanner to eliminate
# the call to re.findall, read in the contents of the file,
# treating the comment character as an end-of-line character
# in addition to the normal linefeed, loop over each line,
# weeding out the comments, and looking for the USE statements.
# One advantage to this is that the regex passed to the scanner
# would no longer need to match a semicolon.
#
# -- I question whether or not we need to detect dependencies to
# INTRINSIC modules because these are built-in to the compiler.
# If we consider them a dependency, will SCons look for them, not
# find them, and kill the build? Or will we there be standard
# compiler-specific directories we will need to point to so the
# compiler and SCons can locate the proper object and mod files?
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^ : start of line
# (?: : group a collection of regex symbols without saving the match as a "group"
# ^|; : matches either the start of the line or a semicolon - semicolon
# ) : end the unsaved grouping
# \s* : any amount of white space
# USE : match the string USE, case insensitive
# (?: : group a collection of regex symbols without saving the match as a "group"
# \s+| : match one or more whitespace OR .... (the next entire grouped set of regex symbols)
# (?: : group a collection of regex symbols without saving the match as a "group"
# (?: : establish another unsaved grouping of regex symbols
# \s* : any amount of white space
# , : match a comma
# \s* : any amount of white space
# (?:NON_)? : optionally match the prefix NON_, case insensitive
# INTRINSIC : match the string INTRINSIC, case insensitive
# )? : optionally match the ", INTRINSIC/NON_INTRINSIC" grouped expression
# \s* : any amount of white space
# :: : match a double colon that must appear after the INTRINSIC/NON_INTRINSIC attribute
# ) : end the unsaved grouping
# ) : end the unsaved grouping
# \s* : match any amount of white space
# (\w+) : match the module name that is being USE'd
#
#
use_regex = "(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"
# The INCLUDE statement regex matches the following:
#
# INCLUDE 'some_Text'
# INCLUDE "some_Text"
# INCLUDE "some_Text" ; INCLUDE "some_Text"
# INCLUDE kind_"some_Text"
# INCLUDE kind_'some_Text"
#
# where some_Text can include any alphanumeric and/or special character
# as defined by the Fortran 2003 standard.
#
# Limitations:
#
# -- The Fortran standard dictates that a " or ' in the INCLUDE'd
# string must be represented as a "" or '', if the quotes that wrap
# the entire string are either a ' or ", respectively. While the
# regular expression below can detect the ' or " characters just fine,
# the scanning logic, presently is unable to detect them and reduce
# them to a single instance. This probably isn't an issue since,
# in practice, ' or " are not generally used in filenames.
#
# -- This regex will not properly deal with multiple INCLUDE statements
# when the entire line has been commented out, ala
#
# ! INCLUDE 'some_file' ; INCLUDE 'some_file'
#
# In such cases, it will properly ignore the first INCLUDE file,
# but will actually still pick up the second. Interestingly enough,
# the regex will properly deal with these cases:
#
# INCLUDE 'some_file'
# INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# To get around the above limitation, the FORTRAN programmer could
# simply comment each INCLUDE statement separately, like this
#
# ! INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# The way I see it, the only way to get around this limitation would
# be to modify the scanning logic to replace the calls to re.findall
# with a custom loop that processes each line separately, throwing
# away fully commented out lines before attempting to match against
# the INCLUDE syntax.
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# (?: : begin a non-saving group that matches the following:
# ^ : either the start of the line
# | : or
# ['">]\s*; : a semicolon that follows a single quote,
# double quote or greater than symbol (with any
# amount of whitespace in between). This will
# allow the regex to match multiple INCLUDE
# statements per line (although it also requires
# the positive lookahead assertion that is
# used below). It will even properly deal with
# (i.e. ignore) cases in which the additional
# INCLUDES are part of an in-line comment, ala
# " INCLUDE 'someFile' ! ; INCLUDE 'someFile2' "
# ) : end of non-saving group
# \s* : any amount of white space
# INCLUDE : match the string INCLUDE, case insensitive
# \s+ : match one or more white space characters
# (?\w+_)? : match the optional "kind-param _" prefix allowed by the standard
# [<"'] : match the include delimiter - an apostrophe, double quote, or less than symbol
# (.+?) : match one or more characters that make up
# the included path and file name and save it
# in a group. The Fortran standard allows for
# any non-control character to be used. The dot
# operator will pick up any character, including
# control codes, but I can't conceive of anyone
# putting control codes in their file names.
# The question mark indicates it is non-greedy so
# that regex will match only up to the next quote,
# double quote, or greater than symbol
# (?=["'>]) : positive lookahead assertion to match the include
# delimiter - an apostrophe, double quote, or
# greater than symbol. This level of complexity
# is required so that the include delimiter is
# not consumed by the match, thus allowing the
# sub-regex discussed above to uniquely match a
# set of semicolon-separated INCLUDE statements
# (as allowed by the F2003 standard)
include_regex = """(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
# The MODULE statement regex finds module definitions by matching
# the following:
#
# MODULE module_name
#
# but *not* the following:
#
# MODULE PROCEDURE procedure_name
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^\s* : any amount of white space
# MODULE : match the string MODULE, case insensitive
# \s+ : match one or more white space characters
# (?!PROCEDURE) : but *don't* match if the next word matches
# PROCEDURE (negative lookahead assertion),
# case insensitive
# (\w+) : match one or more alphanumeric characters
# that make up the defined module name and
# save it in a group
def_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""
scanner = F90Scanner("FortranScan",
"$FORTRANSUFFIXES",
path_variable,
use_regex,
include_regex,
def_regex)
return scanner | Return a prototype Scanner instance for scanning source files
for Fortran USE & INCLUDE statements | Below is the instruction that describes the task:
### Input:
Return a prototype Scanner instance for scanning source files
for Fortran USE & INCLUDE statements
### Response:
def FortranScan(path_variable="FORTRANPATH"):
"""Return a prototype Scanner instance for scanning source files
for Fortran USE & INCLUDE statements"""
# The USE statement regex matches the following:
#
# USE module_name
# USE :: module_name
# USE, INTRINSIC :: module_name
# USE, NON_INTRINSIC :: module_name
#
# Limitations
#
# -- While the regex can handle multiple USE statements on one line,
# it cannot properly handle them if they are commented out.
# In either of the following cases:
#
# ! USE mod_a ; USE mod_b [entire line is commented out]
# USE mod_a ! ; USE mod_b [in-line comment of second USE statement]
#
# the second module name (mod_b) will be picked up as a dependency
# even though it should be ignored. The only way I can see
# to rectify this would be to modify the scanner to eliminate
# the call to re.findall, read in the contents of the file,
# treating the comment character as an end-of-line character
# in addition to the normal linefeed, loop over each line,
# weeding out the comments, and looking for the USE statements.
# One advantage to this is that the regex passed to the scanner
# would no longer need to match a semicolon.
#
# -- I question whether or not we need to detect dependencies to
# INTRINSIC modules because these are built-in to the compiler.
# If we consider them a dependency, will SCons look for them, not
# find them, and kill the build? Or will we there be standard
# compiler-specific directories we will need to point to so the
# compiler and SCons can locate the proper object and mod files?
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^ : start of line
# (?: : group a collection of regex symbols without saving the match as a "group"
# ^|; : matches either the start of the line or a semicolon - semicolon
# ) : end the unsaved grouping
# \s* : any amount of white space
# USE : match the string USE, case insensitive
# (?: : group a collection of regex symbols without saving the match as a "group"
# \s+| : match one or more whitespace OR .... (the next entire grouped set of regex symbols)
# (?: : group a collection of regex symbols without saving the match as a "group"
# (?: : establish another unsaved grouping of regex symbols
# \s* : any amount of white space
# , : match a comma
# \s* : any amount of white space
# (?:NON_)? : optionally match the prefix NON_, case insensitive
# INTRINSIC : match the string INTRINSIC, case insensitive
# )? : optionally match the ", INTRINSIC/NON_INTRINSIC" grouped expression
# \s* : any amount of white space
# :: : match a double colon that must appear after the INTRINSIC/NON_INTRINSIC attribute
# ) : end the unsaved grouping
# ) : end the unsaved grouping
# \s* : match any amount of white space
# (\w+) : match the module name that is being USE'd
#
#
use_regex = "(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"
# The INCLUDE statement regex matches the following:
#
# INCLUDE 'some_Text'
# INCLUDE "some_Text"
# INCLUDE "some_Text" ; INCLUDE "some_Text"
# INCLUDE kind_"some_Text"
# INCLUDE kind_'some_Text"
#
# where some_Text can include any alphanumeric and/or special character
# as defined by the Fortran 2003 standard.
#
# Limitations:
#
# -- The Fortran standard dictates that a " or ' in the INCLUDE'd
# string must be represented as a "" or '', if the quotes that wrap
# the entire string are either a ' or ", respectively. While the
# regular expression below can detect the ' or " characters just fine,
# the scanning logic, presently is unable to detect them and reduce
# them to a single instance. This probably isn't an issue since,
# in practice, ' or " are not generally used in filenames.
#
# -- This regex will not properly deal with multiple INCLUDE statements
# when the entire line has been commented out, ala
#
# ! INCLUDE 'some_file' ; INCLUDE 'some_file'
#
# In such cases, it will properly ignore the first INCLUDE file,
# but will actually still pick up the second. Interestingly enough,
# the regex will properly deal with these cases:
#
# INCLUDE 'some_file'
# INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# To get around the above limitation, the FORTRAN programmer could
# simply comment each INCLUDE statement separately, like this
#
# ! INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# The way I see it, the only way to get around this limitation would
# be to modify the scanning logic to replace the calls to re.findall
# with a custom loop that processes each line separately, throwing
# away fully commented out lines before attempting to match against
# the INCLUDE syntax.
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# (?: : begin a non-saving group that matches the following:
# ^ : either the start of the line
# | : or
# ['">]\s*; : a semicolon that follows a single quote,
# double quote or greater than symbol (with any
# amount of whitespace in between). This will
# allow the regex to match multiple INCLUDE
# statements per line (although it also requires
# the positive lookahead assertion that is
# used below). It will even properly deal with
# (i.e. ignore) cases in which the additional
# INCLUDES are part of an in-line comment, ala
# " INCLUDE 'someFile' ! ; INCLUDE 'someFile2' "
# ) : end of non-saving group
# \s* : any amount of white space
# INCLUDE : match the string INCLUDE, case insensitive
# \s+ : match one or more white space characters
# (?\w+_)? : match the optional "kind-param _" prefix allowed by the standard
# [<"'] : match the include delimiter - an apostrophe, double quote, or less than symbol
# (.+?) : match one or more characters that make up
# the included path and file name and save it
# in a group. The Fortran standard allows for
# any non-control character to be used. The dot
# operator will pick up any character, including
# control codes, but I can't conceive of anyone
# putting control codes in their file names.
# The question mark indicates it is non-greedy so
# that regex will match only up to the next quote,
# double quote, or greater than symbol
# (?=["'>]) : positive lookahead assertion to match the include
# delimiter - an apostrophe, double quote, or
# greater than symbol. This level of complexity
# is required so that the include delimiter is
# not consumed by the match, thus allowing the
# sub-regex discussed above to uniquely match a
# set of semicolon-separated INCLUDE statements
# (as allowed by the F2003 standard)
include_regex = """(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
# The MODULE statement regex finds module definitions by matching
# the following:
#
# MODULE module_name
#
# but *not* the following:
#
# MODULE PROCEDURE procedure_name
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^\s* : any amount of white space
# MODULE : match the string MODULE, case insensitive
# \s+ : match one or more white space characters
# (?!PROCEDURE) : but *don't* match if the next word matches
# PROCEDURE (negative lookahead assertion),
# case insensitive
# (\w+) : match one or more alphanumeric characters
# that make up the defined module name and
# save it in a group
def_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""
scanner = F90Scanner("FortranScan",
"$FORTRANSUFFIXES",
path_variable,
use_regex,
include_regex,
def_regex)
return scanner |
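
For illustration, the USE-statement regex assembled above can be exercised directly with re.findall; the second call below reproduces the commented-out-line limitation that the comments describe (mod_b is still reported).

import re

use_regex = r"(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"

print(re.findall(use_regex, "USE mod_a ; USE, INTRINSIC :: iso_c_binding"))
# ['mod_a', 'iso_c_binding']
print(re.findall(use_regex, "! USE mod_a ; USE mod_b"))
# ['mod_b']  -- the fully commented line still yields the module after the semicolon
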
def _parse_settings_bond_1(opts, iface, bond_def):
'''
Filters given options and outputs valid settings for bond1.
If an option has a value that is not expected, this
function will log what the Interface, Setting and what it was
expecting.
'''
bond = {'mode': '1'}
for binding in ['miimon', 'downdelay', 'updelay']:
if binding in opts:
try:
int(opts[binding])
bond.update({binding: opts[binding]})
except Exception:
_raise_error_iface(iface, binding, ['integer'])
else:
_log_default_iface(iface, binding, bond_def[binding])
bond.update({binding: bond_def[binding]})
if 'use_carrier' in opts:
if opts['use_carrier'] in _CONFIG_TRUE:
bond.update({'use_carrier': '1'})
elif opts['use_carrier'] in _CONFIG_FALSE:
bond.update({'use_carrier': '0'})
else:
valid = _CONFIG_TRUE + _CONFIG_FALSE
_raise_error_iface(iface, 'use_carrier', valid)
else:
_log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
bond.update({'use_carrier': bond_def['use_carrier']})
if 'primary' in opts:
bond.update({'primary': opts['primary']})
return bond | Filters given options and outputs valid settings for bond1.
If an option has a value that is not expected, this
function will log what the Interface, Setting and what it was
expecting. | Below is the instruction that describes the task:
### Input:
Filters given options and outputs valid settings for bond1.
If an option has a value that is not expected, this
function will log what the Interface, Setting and what it was
expecting.
### Response:
def _parse_settings_bond_1(opts, iface, bond_def):
'''
Filters given options and outputs valid settings for bond1.
If an option has a value that is not expected, this
function will log what the Interface, Setting and what it was
expecting.
'''
bond = {'mode': '1'}
for binding in ['miimon', 'downdelay', 'updelay']:
if binding in opts:
try:
int(opts[binding])
bond.update({binding: opts[binding]})
except Exception:
_raise_error_iface(iface, binding, ['integer'])
else:
_log_default_iface(iface, binding, bond_def[binding])
bond.update({binding: bond_def[binding]})
if 'use_carrier' in opts:
if opts['use_carrier'] in _CONFIG_TRUE:
bond.update({'use_carrier': '1'})
elif opts['use_carrier'] in _CONFIG_FALSE:
bond.update({'use_carrier': '0'})
else:
valid = _CONFIG_TRUE + _CONFIG_FALSE
_raise_error_iface(iface, 'use_carrier', valid)
else:
_log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
bond.update({'use_carrier': bond_def['use_carrier']})
if 'primary' in opts:
bond.update({'primary': opts['primary']})
return bond |
def get_applicable_values(self):
"""Return selected values that will affect the search result"""
return [v for v in self._values if v.is_active and not v.is_all_results] | Return selected values that will affect the search result | Below is the instruction that describes the task:
### Input:
Return selected values that will affect the search result
### Response:
def get_applicable_values(self):
"""Return selected values that will affect the search result"""
return [v for v in self._values if v.is_active and not v.is_all_results] |
def transform_header(mtype_name):
'''Add header to json output to wrap around distribution data.
'''
head_dict = OrderedDict()
head_dict["m-type"] = mtype_name
head_dict["components"] = defaultdict(OrderedDict)
return head_dict | Add header to json output to wrap around distribution data. | Below is the the instruction that describes the task:
### Input:
Add header to json output to wrap around distribution data.
### Response:
def transform_header(mtype_name):
'''Add header to json output to wrap around distribution data.
'''
head_dict = OrderedDict()
head_dict["m-type"] = mtype_name
head_dict["components"] = defaultdict(OrderedDict)
return head_dict |
def delete_specific(self, id, **kwargs):
"""
Removes a specific Build Configuration Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_specific(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_specific_with_http_info(id, **kwargs)
else:
(data) = self.delete_specific_with_http_info(id, **kwargs)
return data | Removes a specific Build Configuration Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_specific(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:return: None
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Removes a specific Build Configuration Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_specific(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_specific(self, id, **kwargs):
"""
Removes a specific Build Configuration Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_specific(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_specific_with_http_info(id, **kwargs)
else:
(data) = self.delete_specific_with_http_info(id, **kwargs)
return data |
def decodeMotorInput(self, motorInputPattern):
"""
Decode motor command from bit vector.
@param motorInputPattern (1D numpy.array)
Encoded motor command.
@return (1D numpy.array)
Decoded motor command.
"""
key = self.motorEncoder.decode(motorInputPattern)[0].keys()[0]
motorCommand = self.motorEncoder.decode(motorInputPattern)[0][key][1][0]
return motorCommand | Decode motor command from bit vector.
@param motorInputPattern (1D numpy.array)
Encoded motor command.
@return (1D numpy.array)
Decoded motor command. | Below is the instruction that describes the task:
### Input:
Decode motor command from bit vector.
@param motorInputPattern (1D numpy.array)
Encoded motor command.
@return (1D numpy.array)
Decoded motor command.
### Response:
def decodeMotorInput(self, motorInputPattern):
"""
Decode motor command from bit vector.
@param motorInputPattern (1D numpy.array)
Encoded motor command.
@return (1D numpy.array)
Decoded motor command.
"""
key = self.motorEncoder.decode(motorInputPattern)[0].keys()[0]
motorCommand = self.motorEncoder.decode(motorInputPattern)[0][key][1][0]
return motorCommand |
def _setFlag(self, name, val, defVal):
"""set the objects property propName if the dictKey key exists in dict and it is not the same as default value defVal"""
if not hasattr(self, "flags"):
self.flags = {}
if val != defVal:
self.flags[name] = val | set the objects property propName if the dictKey key exists in dict and it is not the same as default value defVal | Below is the instruction that describes the task:
### Input:
set the objects property propName if the dictKey key exists in dict and it is not the same as default value defVal
### Response:
def _setFlag(self, name, val, defVal):
"""set the objects property propName if the dictKey key exists in dict and it is not the same as default value defVal"""
if not hasattr(self, "flags"):
self.flags = {}
if val != defVal:
self.flags[name] = val |
def clear(self):
"""
Clear the content of `self.xmlnode` removing all <item/>, <status/>, etc.
"""
if not self.xmlnode.children:
return
n=self.xmlnode.children
while n:
ns=n.ns()
if ns and ns.getContent()!=MUC_USER_NS:
pass
else:
n.unlinkNode()
n.freeNode()
n=n.next | Clear the content of `self.xmlnode` removing all <item/>, <status/>, etc. | Below is the instruction that describes the task:
### Input:
Clear the content of `self.xmlnode` removing all <item/>, <status/>, etc.
### Response:
def clear(self):
"""
Clear the content of `self.xmlnode` removing all <item/>, <status/>, etc.
"""
if not self.xmlnode.children:
return
n=self.xmlnode.children
while n:
ns=n.ns()
if ns and ns.getContent()!=MUC_USER_NS:
pass
else:
n.unlinkNode()
n.freeNode()
n=n.next |
def infer_call_result(self, caller, context):
"""
The boundnode of the regular context with a function called
on ``object.__new__`` will be of type ``object``,
which is incorrect for the argument in general.
If no context is given the ``object.__new__`` call argument will
correctly inferred except when inside a call that requires
the additional context (such as a classmethod) of the boundnode
to determine which class the method was called from
"""
# If we're unbound method __new__ of builtin object, the result is an
# instance of the class given as first argument.
if (
self._proxied.name == "__new__"
and self._proxied.parent.frame().qname() == "%s.object" % BUILTINS
):
if caller.args:
node_context = context.extra_context.get(caller.args[0])
infer = caller.args[0].infer(context=node_context)
else:
infer = []
return (Instance(x) if x is not util.Uninferable else x for x in infer)
return self._proxied.infer_call_result(caller, context) | The boundnode of the regular context with a function called
on ``object.__new__`` will be of type ``object``,
which is incorrect for the argument in general.
If no context is given the ``object.__new__`` call argument will
correctly inferred except when inside a call that requires
the additional context (such as a classmethod) of the boundnode
to determine which class the method was called from | Below is the instruction that describes the task:
### Input:
The boundnode of the regular context with a function called
on ``object.__new__`` will be of type ``object``,
which is incorrect for the argument in general.
If no context is given the ``object.__new__`` call argument will
correctly inferred except when inside a call that requires
the additional context (such as a classmethod) of the boundnode
to determine which class the method was called from
### Response:
def infer_call_result(self, caller, context):
"""
The boundnode of the regular context with a function called
on ``object.__new__`` will be of type ``object``,
which is incorrect for the argument in general.
If no context is given the ``object.__new__`` call argument will
correctly inferred except when inside a call that requires
the additional context (such as a classmethod) of the boundnode
to determine which class the method was called from
"""
# If we're unbound method __new__ of builtin object, the result is an
# instance of the class given as first argument.
if (
self._proxied.name == "__new__"
and self._proxied.parent.frame().qname() == "%s.object" % BUILTINS
):
if caller.args:
node_context = context.extra_context.get(caller.args[0])
infer = caller.args[0].infer(context=node_context)
else:
infer = []
return (Instance(x) if x is not util.Uninferable else x for x in infer)
return self._proxied.infer_call_result(caller, context) |
def compounding(start, stop, compound, t=0.0):
"""Yield an infinite series of compounding values. Each time the
generator is called, a value is produced by multiplying the previous
value by the compound rate.
EXAMPLE:
>>> sizes = compounding(1., 10., 1.5)
>>> assert next(sizes) == 1.
>>> assert next(sizes) == 1 * 1.5
>>> assert next(sizes) == 1.5 * 1.5
"""
curr = float(start)
while True:
yield _clip(curr, start, stop)
curr *= compound | Yield an infinite series of compounding values. Each time the
generator is called, a value is produced by multiplying the previous
value by the compound rate.
EXAMPLE:
>>> sizes = compounding(1., 10., 1.5)
>>> assert next(sizes) == 1.
>>> assert next(sizes) == 1 * 1.5
>>> assert next(sizes) == 1.5 * 1.5 | Below is the instruction that describes the task:
### Input:
Yield an infinite series of compounding values. Each time the
generator is called, a value is produced by multiplying the previous
value by the compound rate.
EXAMPLE:
>>> sizes = compounding(1., 10., 1.5)
>>> assert next(sizes) == 1.
>>> assert next(sizes) == 1 * 1.5
>>> assert next(sizes) == 1.5 * 1.5
### Response:
def compounding(start, stop, compound, t=0.0):
"""Yield an infinite series of compounding values. Each time the
generator is called, a value is produced by multiplying the previous
value by the compound rate.
EXAMPLE:
>>> sizes = compounding(1., 10., 1.5)
>>> assert next(sizes) == 1.
>>> assert next(sizes) == 1 * 1.5
>>> assert next(sizes) == 1.5 * 1.5
"""
curr = float(start)
while True:
yield _clip(curr, start, stop)
curr *= compound |
def show_print_dialog(self):
"""Open the print dialog"""
if not self.impact_function:
# Now try to read the keywords and show them in the dock
try:
active_layer = self.iface.activeLayer()
keywords = self.keyword_io.read_keywords(active_layer)
provenances = keywords.get('provenance_data', {})
extra_keywords = keywords.get('extra_keywords', {})
is_multi_exposure = (
extra_keywords.get(extra_keyword_analysis_type['key']) == (
MULTI_EXPOSURE_ANALYSIS_FLAG))
if provenances and is_multi_exposure:
self.impact_function = (
MultiExposureImpactFunction.load_from_output_metadata(
keywords))
else:
self.impact_function = (
ImpactFunction.load_from_output_metadata(keywords))
except (KeywordNotFoundError,
HashNotFoundError,
InvalidParameterError,
NoKeywordsFoundError,
MetadataReadError,
# AttributeError This is hiding some real error. ET
) as e:
# Added this check in 3.2 for #1861
active_layer = self.iface.activeLayer()
LOGGER.debug(e)
if active_layer is None:
if self.conflicting_plugin_detected:
send_static_message(self, conflicting_plugin_message())
else:
send_static_message(self, getting_started_message())
else:
show_no_keywords_message(self)
except Exception as e: # pylint: disable=broad-except
error_message = get_error_message(e)
send_error_message(self, error_message)
if self.impact_function:
dialog = PrintReportDialog(
self.impact_function, self.iface, dock=self, parent=self)
dialog.show()
else:
display_critical_message_bar(
"InaSAFE",
self.tr('Please select a valid layer before printing. '
'No Impact Function found.'),
iface_object=self
) | Open the print dialog | Below is the instruction that describes the task:
### Input:
Open the print dialog
### Response:
def show_print_dialog(self):
"""Open the print dialog"""
if not self.impact_function:
# Now try to read the keywords and show them in the dock
try:
active_layer = self.iface.activeLayer()
keywords = self.keyword_io.read_keywords(active_layer)
provenances = keywords.get('provenance_data', {})
extra_keywords = keywords.get('extra_keywords', {})
is_multi_exposure = (
extra_keywords.get(extra_keyword_analysis_type['key']) == (
MULTI_EXPOSURE_ANALYSIS_FLAG))
if provenances and is_multi_exposure:
self.impact_function = (
MultiExposureImpactFunction.load_from_output_metadata(
keywords))
else:
self.impact_function = (
ImpactFunction.load_from_output_metadata(keywords))
except (KeywordNotFoundError,
HashNotFoundError,
InvalidParameterError,
NoKeywordsFoundError,
MetadataReadError,
# AttributeError This is hiding some real error. ET
) as e:
# Added this check in 3.2 for #1861
active_layer = self.iface.activeLayer()
LOGGER.debug(e)
if active_layer is None:
if self.conflicting_plugin_detected:
send_static_message(self, conflicting_plugin_message())
else:
send_static_message(self, getting_started_message())
else:
show_no_keywords_message(self)
except Exception as e: # pylint: disable=broad-except
error_message = get_error_message(e)
send_error_message(self, error_message)
if self.impact_function:
dialog = PrintReportDialog(
self.impact_function, self.iface, dock=self, parent=self)
dialog.show()
else:
display_critical_message_bar(
"InaSAFE",
self.tr('Please select a valid layer before printing. '
'No Impact Function found.'),
iface_object=self
) |
def lookup_zone(conn, zone):
"""Look up a zone ID for a zone string.
Args: conn: boto.route53.Route53Connection
zone: string eg. foursquare.com
Returns: zone ID eg. ZE2DYFZDWGSL4.
Raises: ZoneNotFoundError if zone not found."""
all_zones = conn.get_all_hosted_zones()
for resp in all_zones['ListHostedZonesResponse']['HostedZones']:
if resp['Name'].rstrip('.') == zone.rstrip('.'):
return resp['Id'].replace('/hostedzone/', '')
raise ZoneNotFoundError('zone %s not found in response' % zone) | Look up a zone ID for a zone string.
Args: conn: boto.route53.Route53Connection
zone: string eg. foursquare.com
Returns: zone ID eg. ZE2DYFZDWGSL4.
Raises: ZoneNotFoundError if zone not found. | Below is the instruction that describes the task:
### Input:
Look up a zone ID for a zone string.
Args: conn: boto.route53.Route53Connection
zone: string eg. foursquare.com
Returns: zone ID eg. ZE2DYFZDWGSL4.
Raises: ZoneNotFoundError if zone not found.
### Response:
def lookup_zone(conn, zone):
"""Look up a zone ID for a zone string.
Args: conn: boto.route53.Route53Connection
zone: string eg. foursquare.com
Returns: zone ID eg. ZE2DYFZDWGSL4.
Raises: ZoneNotFoundError if zone not found."""
all_zones = conn.get_all_hosted_zones()
for resp in all_zones['ListHostedZonesResponse']['HostedZones']:
if resp['Name'].rstrip('.') == zone.rstrip('.'):
return resp['Id'].replace('/hostedzone/', '')
raise ZoneNotFoundError('zone %s not found in response' % zone) |
def p2sh_input_and_witness(outpoint, stack_script,
redeem_script, sequence=None):
'''
OutPoint, str, str, int -> (TxIn, InputWitness)
Create a signed legacy TxIn from a p2pkh prevout
Create an empty InputWitness for it
Useful for transactions spending some witness and some legacy prevouts
'''
if sequence is None:
sequence = guess_sequence(redeem_script)
stack_script = script_ser.serialize(stack_script)
redeem_script = script_ser.hex_serialize(redeem_script)
redeem_script = script_ser.serialize(redeem_script)
return tb.make_legacy_input_and_empty_witness(
outpoint=outpoint,
stack_script=stack_script,
redeem_script=redeem_script,
sequence=sequence) | OutPoint, str, str, int -> (TxIn, InputWitness)
Create a signed legacy TxIn from a p2pkh prevout
Create an empty InputWitness for it
Useful for transactions spending some witness and some legacy prevouts | Below is the instruction that describes the task:
### Input:
OutPoint, str, str, int -> (TxIn, InputWitness)
Create a signed legacy TxIn from a p2pkh prevout
Create an empty InputWitness for it
Useful for transactions spending some witness and some legacy prevouts
### Response:
def p2sh_input_and_witness(outpoint, stack_script,
redeem_script, sequence=None):
'''
OutPoint, str, str, int -> (TxIn, InputWitness)
Create a signed legacy TxIn from a p2pkh prevout
Create an empty InputWitness for it
Useful for transactions spending some witness and some legacy prevouts
'''
if sequence is None:
sequence = guess_sequence(redeem_script)
stack_script = script_ser.serialize(stack_script)
redeem_script = script_ser.hex_serialize(redeem_script)
redeem_script = script_ser.serialize(redeem_script)
return tb.make_legacy_input_and_empty_witness(
outpoint=outpoint,
stack_script=stack_script,
redeem_script=redeem_script,
sequence=sequence) |
def _send_packet_safe(self, cr, packet):
"""
Adds 1bit counter to CRTP header to guarantee that no ack (downlink)
payload are lost and no uplink packet are duplicated.
The caller should resend packet if not acked (ie. same as with a
direct call to crazyradio.send_packet)
"""
# packet = bytearray(packet)
packet[0] &= 0xF3
packet[0] |= self._curr_up << 3 | self._curr_down << 2
resp = cr.send_packet(packet)
if resp and resp.ack and len(resp.data) and \
(resp.data[0] & 0x04) == (self._curr_down << 2):
self._curr_down = 1 - self._curr_down
if resp and resp.ack:
self._curr_up = 1 - self._curr_up
return resp | Adds 1bit counter to CRTP header to guarantee that no ack (downlink)
payload are lost and no uplink packet are duplicated.
The caller should resend packet if not acked (ie. same as with a
direct call to crazyradio.send_packet) | Below is the instruction that describes the task:
### Input:
Adds 1bit counter to CRTP header to guarantee that no ack (downlink)
payload are lost and no uplink packet are duplicated.
The caller should resend packet if not acked (ie. same as with a
direct call to crazyradio.send_packet)
### Response:
def _send_packet_safe(self, cr, packet):
"""
Adds 1bit counter to CRTP header to guarantee that no ack (downlink)
payload are lost and no uplink packet are duplicated.
The caller should resend packet if not acked (ie. same as with a
direct call to crazyradio.send_packet)
"""
# packet = bytearray(packet)
packet[0] &= 0xF3
packet[0] |= self._curr_up << 3 | self._curr_down << 2
resp = cr.send_packet(packet)
if resp and resp.ack and len(resp.data) and \
(resp.data[0] & 0x04) == (self._curr_down << 2):
self._curr_down = 1 - self._curr_down
if resp and resp.ack:
self._curr_up = 1 - self._curr_up
return resp |
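
For illustration, the two header bits toggled above can be traced with a hypothetical header byte: ANDing with 0xF3 clears bits 3 and 2, and the OR writes the current up/down counters into them while leaving the rest of the header untouched.

header = 0b10101111                      # hypothetical CRTP header byte
curr_up, curr_down = 1, 0
header &= 0xF3                           # clear bits 3..2
header |= curr_up << 3 | curr_down << 2  # write the 1-bit counters
print(bin(header))                       # 0b10101011
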
def get_stages(self):
'''
:return: dictionary of information regarding the stages in the fuzzing session
.. note::
structure: { current: ['stage1', 'stage2', 'stage3'], 'stages': {'source1': ['dest1', 'dest2'], 'source2': ['dest1', 'dest3']}}
'''
sequence = self.get_sequence()
return {
'current': [e.dst.get_name() for e in sequence],
'stages': {e.src.get_name(): [e.dst.get_name()] for e in sequence}
} | :return: dictionary of information regarding the stages in the fuzzing session
.. note::
structure: { current: ['stage1', 'stage2', 'stage3'], 'stages': {'source1': ['dest1', 'dest2'], 'source2': ['dest1', 'dest3']}} | Below is the instruction that describes the task:
### Input:
:return: dictionary of information regarding the stages in the fuzzing session
.. note::
structure: { current: ['stage1', 'stage2', 'stage3'], 'stages': {'source1': ['dest1', 'dest2'], 'source2': ['dest1', 'dest3']}}
### Response:
def get_stages(self):
'''
:return: dictionary of information regarding the stages in the fuzzing session
.. note::
structure: { current: ['stage1', 'stage2', 'stage3'], 'stages': {'source1': ['dest1', 'dest2'], 'source2': ['dest1', 'dest3']}}
'''
sequence = self.get_sequence()
return {
'current': [e.dst.get_name() for e in sequence],
'stages': {e.src.get_name(): [e.dst.get_name()] for e in sequence}
} |
def print_single(line, rev):
"""
print single reads to stderr
"""
if rev is True:
seq = rc(['', line[9]])[1]
qual = line[10][::-1]
else:
seq = line[9]
qual = line[10]
fq = ['@%s' % line[0], seq, '+%s' % line[0], qual]
print('\n'.join(fq), file = sys.stderr) | print single reads to stderr | Below is the instruction that describes the task:
### Input:
print single reads to stderr
### Response:
def print_single(line, rev):
"""
print single reads to stderr
"""
if rev is True:
seq = rc(['', line[9]])[1]
qual = line[10][::-1]
else:
seq = line[9]
qual = line[10]
fq = ['@%s' % line[0], seq, '+%s' % line[0], qual]
print('\n'.join(fq), file = sys.stderr) |
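A self-contained approximation of the same FASTQ emission; the local _revcomp helper is an assumption about what the external rc() function returns, and the SAM fields below are made up:
import sys

def _revcomp(seq):
    # assumed behaviour of rc(): reverse complement of the read sequence
    return seq.translate(str.maketrans('ACGTacgt', 'TGCAtgca'))[::-1]

line = ['read1', '16', 'chr1', '100', '60', '5M', '*', '0', '0', 'ACGTT', 'IIIII']
rev = True
seq = _revcomp(line[9]) if rev else line[9]
qual = line[10][::-1] if rev else line[10]
print('\n'.join(['@%s' % line[0], seq, '+%s' % line[0], qual]), file=sys.stderr)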
def gain_to_loss_ratio(self):
"""Gain-to-loss ratio, ratio of positive to negative returns.
Formula:
(n pos. / n neg.) * (avg. up-month return / avg. down-month return)
[Source: CFA Institute]
Returns
-------
float
"""
gt = self > 0
lt = self < 0
return (nansum(gt) / nansum(lt)) * (self[gt].mean() / self[lt].mean()) | Gain-to-loss ratio, ratio of positive to negative returns.
Formula:
(n pos. / n neg.) * (avg. up-month return / avg. down-month return)
[Source: CFA Institute]
Returns
-------
float | Below is the instruction that describes the task:
### Input:
Gain-to-loss ratio, ratio of positive to negative returns.
Formula:
(n pos. / n neg.) * (avg. up-month return / avg. down-month return)
[Source: CFA Institute]
Returns
-------
float
### Response:
def gain_to_loss_ratio(self):
"""Gain-to-loss ratio, ratio of positive to negative returns.
Formula:
(n pos. / n neg.) * (avg. up-month return / avg. down-month return)
[Source: CFA Institute]
Returns
-------
float
"""
gt = self > 0
lt = self < 0
return (nansum(gt) / nansum(lt)) * (self[gt].mean() / self[lt].mean()) |
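The same statistic can be checked on a plain NumPy array; the result is negative because the average down-period return in the denominator is negative, exactly as in the method above (sample returns are invented):
import numpy as np

returns = np.array([0.02, -0.01, 0.03, -0.02, 0.01])
gains, losses = returns[returns > 0], returns[returns < 0]
ratio = (gains.size / losses.size) * (gains.mean() / losses.mean())
print(round(ratio, 4))  # -2.0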
def amp_pick_event(event, st, respdir, chans=['Z'], var_wintype=True,
winlen=0.9, pre_pick=0.2, pre_filt=True, lowcut=1.0,
highcut=20.0, corners=4, min_snr=1.0, plot=False,
remove_old=False, ps_multiplier=0.34, velocity=False):
"""
Pick amplitudes for local magnitude for a single event.
Looks for maximum peak-to-trough amplitude for a channel in a stream, and
picks this amplitude and period. There are a few things it does
internally to stabilise the result:
1. Applies a given filter to the data - very necessary for small
magnitude earthquakes;
2. Keeps track of the poles and zeros of this filter and removes them
from the picked amplitude;
3. Picks the peak-to-trough amplitude, but records half of this: the
specification for the local magnitude is to use a peak amplitude on
a horizontal, however, with modern digital seismometers, the peak
amplitude often has an additional, DC-shift applied to it, to
stabilise this, and to remove possible issues with de-meaning data
recorded during the wave-train of an event (e.g. the mean may not be
the same as it would be for longer durations), we use half the
peak-to-trough amplitude;
4. Despite the original definition of local magnitude requiring the
use of a horizontal channel, more recent work has shown that the
vertical channels give more consistent magnitude estimations between
stations, due to a reduction in site-amplification effects, we
therefore use the vertical channels by default, but allow the user
to choose which channels they deem appropriate;
5. We do not specify that the maximum amplitude should be the
S-phase: The original definition holds that the maximum body-wave
amplitude should be used - while this is often the S-phase, we do not
discriminate against the P-phase. We do note that, unless the user
takes care when assigning winlen and filters, they may end up with
amplitude picks for surface waves;
6. We use a variable window-length by default that takes into account
P-S times if available, this is in an effort to include only the
body waves. When P-S times are not available we use the ps_multiplier
variable, which defaults to 0.34 x hypocentral distance.
:type event: obspy.core.event.event.Event
:param event: Event to pick
:type st: obspy.core.stream.Stream
:param st: Stream associated with event
:type respdir: str
:param respdir: Path to the response information directory
:type chans: list
:param chans:
List of the channels to pick on, defaults to ['Z'] - should just be
the orientations, e.g. Z, 1, 2, N, E
:type var_wintype: bool
:param var_wintype:
If True, the winlen will be multiplied by the P-S time if both P and
S picks are available, otherwise it will be multiplied by the
hypocentral distance*ps_multiplier, defaults to True
:type winlen: float
:param winlen:
Length of window, see above parameter, if var_wintype is False then
this will be in seconds, otherwise it is the multiplier to the
p-s time, defaults to 0.9.
:type pre_pick: float
:param pre_pick:
Time before the s-pick to start the cut window, defaults to 0.2.
:type pre_filt: bool
:param pre_filt: To apply a pre-filter or not, defaults to True
:type lowcut: float
:param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0
:type highcut: float
:param highcut: Highcut in Hz for the pre-filter, defaults to 20.0
:type corners: int
:param corners: Number of corners to use in the pre-filter
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio to allow a pick - see note below on
signal-to-noise ratio calculation.
:type plot: bool
:param plot: Turn plotting on or off.
:type remove_old: bool
:param remove_old:
If True, will remove old amplitude picks from event and overwrite
with new picks. Defaults to False.
:type ps_multiplier: float
:param ps_multiplier:
A p-s time multiplier of hypocentral distance - defaults to 0.34,
based on p-s ratio of 1.68 and an S-velocity of 1.5km/s, deliberately
chosen to be quite slow.
:type velocity: bool
:param velocity:
Whether to make the pick in velocity space or not. Original definition
of local magnitude used displacement of Wood-Anderson, MLv in seiscomp
and Antelope uses a velocity measurement.
:returns: Picked event
:rtype: :class:`obspy.core.event.Event`
.. Note::
Signal-to-noise ratio is calculated using the filtered data by
dividing the maximum amplitude in the signal window (pick window)
by the normalized noise amplitude (taken from the whole window
supplied).
.. Warning::
Works in place on data - will filter and remove response from data,
you are recommended to give this function a copy of the data if you
are using it in a loop.
"""
# Convert these picks into a lists
stations = [] # List of stations
channels = [] # List of channels
picktimes = [] # List of pick times
picktypes = [] # List of pick types
picks_out = []
try:
depth = _get_origin(event).depth
except MatchFilterError:
depth = 0
if remove_old and event.amplitudes:
for amp in event.amplitudes:
# Find the pick and remove it too
pick = [p for p in event.picks if p.resource_id == amp.pick_id][0]
event.picks.remove(pick)
event.amplitudes = []
for pick in event.picks:
if pick.phase_hint in ['P', 'S']:
picks_out.append(pick) # Need to be able to remove this if there
# isn't data for a station!
stations.append(pick.waveform_id.station_code)
channels.append(pick.waveform_id.channel_code)
picktimes.append(pick.time)
picktypes.append(pick.phase_hint)
if len(picktypes) == 0:
warnings.warn('No P or S picks found')
st.merge() # merge the data, just in case!
# For each station cut the window
uniq_stas = list(set(stations))
for sta in uniq_stas:
for chan in chans:
print('Working on ' + sta + ' ' + chan)
tr = st.select(station=sta, channel='*' + chan)
if not tr:
warnings.warn(
'There is no station and channel match in the wavefile!')
continue
else:
tr = tr[0]
# Apply the pre-filter
if pre_filt:
try:
tr.split().detrend('simple').merge(fill_value=0)
except:
print('Some issue splitting this one')
dummy = tr.split()
dummy.detrend('simple')
tr = dummy.merge(fill_value=0)
try:
tr.filter('bandpass', freqmin=lowcut, freqmax=highcut,
corners=corners)
except NotImplementedError:
print('For some reason trace is not continuous:')
print(tr)
continue
# Find the response information
resp_info = _find_resp(
tr.stats.station, tr.stats.channel, tr.stats.network,
tr.stats.starttime, tr.stats.delta, respdir)
PAZ = []
seedresp = []
if resp_info and 'gain' in resp_info:
PAZ = resp_info
elif resp_info:
seedresp = resp_info
# Simulate a Wood Anderson Seismograph
if PAZ and len(tr.data) > 10:
# Set ten data points to be the minimum to pass
tr = _sim_WA(tr, PAZ, None, 10, velocity=velocity)
elif seedresp and len(tr.data) > 10:
tr = _sim_WA(tr, None, seedresp, 10, velocity=velocity)
elif len(tr.data) > 10:
warnings.warn('No PAZ for ' + tr.stats.station + ' ' +
tr.stats.channel + ' at time: ' +
str(tr.stats.starttime))
continue
sta_picks = [i for i in range(len(stations))
if stations[i] == sta]
pick_id = event.picks[sta_picks[0]].resource_id
arrival = [arrival for arrival in event.origins[0].arrivals
if arrival.pick_id == pick_id][0]
hypo_dist = np.sqrt(
np.square(degrees2kilometers(arrival.distance)) +
np.square(depth / 1000))
if var_wintype and hypo_dist:
if 'S' in [picktypes[i] for i in sta_picks] and\
'P' in [picktypes[i] for i in sta_picks]:
# If there is an S-pick we can use this :D
s_pick = [picktimes[i] for i in sta_picks
if picktypes[i] == 'S']
s_pick = min(s_pick)
p_pick = [picktimes[i] for i in sta_picks
if picktypes[i] == 'P']
p_pick = min(p_pick)
try:
tr.trim(starttime=s_pick - pre_pick,
endtime=s_pick + (s_pick - p_pick) * winlen)
except ValueError:
continue
elif 'S' in [picktypes[i] for i in sta_picks]:
s_pick = [picktimes[i] for i in sta_picks
if picktypes[i] == 'S']
s_pick = min(s_pick)
p_modelled = s_pick - (hypo_dist * ps_multiplier)
try:
tr.trim(starttime=s_pick - pre_pick,
endtime=s_pick + (s_pick - p_modelled) *
winlen)
except ValueError:
continue
else:
# In this case we only have a P pick
p_pick = [picktimes[i] for i in sta_picks
if picktypes[i] == 'P']
p_pick = min(p_pick)
s_modelled = p_pick + (hypo_dist * ps_multiplier)
print('P_pick=%s' % str(p_pick))
print('hypo_dist: %s' % str(hypo_dist))
print('S modelled=%s' % str(s_modelled))
try:
tr.trim(starttime=s_modelled - pre_pick,
endtime=s_modelled + (s_modelled - p_pick) *
winlen)
print(tr)
except ValueError:
continue
# Work out the window length based on p-s time or distance
elif 'S' in [picktypes[i] for i in sta_picks]:
# If the window is fixed we still need to find the start time,
# which can be based either on the S-pick (this elif), or
# on the hypocentral distance and the P-pick
# Take the minimum S-pick time if more than one S-pick is
# available
s_pick = [picktimes[i] for i in sta_picks
if picktypes[i] == 'S']
s_pick = min(s_pick)
try:
tr.trim(starttime=s_pick - pre_pick,
endtime=s_pick + winlen)
except ValueError:
continue
else:
# In this case, there is no S-pick and the window length is
# fixed we need to calculate an expected S_pick based on the
# hypocentral distance, this will be quite hand-wavey as we
# are not using any kind of velocity model.
p_pick = [picktimes[i] for i in sta_picks
if picktypes[i] == 'P']
print(picktimes)
p_pick = min(p_pick)
s_modelled = p_pick + hypo_dist * ps_multiplier
try:
tr.trim(starttime=s_modelled - pre_pick,
endtime=s_modelled + winlen)
except ValueError:
continue
if len(tr.data) <= 10:
warnings.warn('No data found for: ' + tr.stats.station)
continue
# Get the amplitude
try:
amplitude, period, delay = _max_p2t(tr.data, tr.stats.delta)
except ValueError:
print('No amplitude picked for tr %s' % str(tr))
continue
# Calculate the normalized noise amplitude
noise_amplitude = np.sqrt(np.mean(np.square(tr.data)))
if amplitude == 0.0:
continue
if amplitude / noise_amplitude < min_snr:
print('Signal to noise ratio of %s is below threshold.' %
(amplitude / noise_amplitude))
continue
if plot:
plt.plot(np.arange(len(tr.data)), tr.data, 'k')
plt.scatter(tr.stats.sampling_rate * delay, amplitude / 2)
plt.scatter(tr.stats.sampling_rate * (delay + period),
-amplitude / 2)
plt.show()
print('Amplitude picked: ' + str(amplitude))
print('Signal-to-noise ratio is: %s' %
(amplitude / noise_amplitude))
# Note, amplitude should be in meters at the moment!
# Remove the pre-filter response
if pre_filt:
# Generate poles and zeros for the filter we used earlier: this
# is how the filter is designed in the convenience methods of
# filtering in obspy.
z, p, k = iirfilter(
corners, [lowcut / (0.5 * tr.stats.sampling_rate),
highcut / (0.5 * tr.stats.sampling_rate)],
btype='band', ftype='butter', output='zpk')
filt_paz = {'poles': list(p), 'zeros': list(z), 'gain': k,
'sensitivity': 1.0}
amplitude /= (paz_2_amplitude_value_of_freq_resp(
filt_paz, 1 / period) * filt_paz['sensitivity'])
if PAZ:
amplitude /= 1000
if seedresp: # Seedresp method returns mm
amplitude *= 1000000
# Write out the half amplitude, approximately the peak amplitude as
# used directly in magnitude calculations
amplitude *= 0.5
# Append an amplitude reading to the event
_waveform_id = WaveformStreamID(
station_code=tr.stats.station, channel_code=tr.stats.channel,
network_code=tr.stats.network)
pick_ind = len(event.picks)
event.picks.append(Pick(
waveform_id=_waveform_id, phase_hint='IAML',
polarity='undecidable', time=tr.stats.starttime + delay,
evaluation_mode='automatic'))
if not velocity:
event.amplitudes.append(Amplitude(
generic_amplitude=amplitude / 1e9, period=period,
pick_id=event.picks[pick_ind].resource_id,
waveform_id=event.picks[pick_ind].waveform_id, unit='m',
magnitude_hint='ML', type='AML', category='point'))
else:
event.amplitudes.append(Amplitude(
generic_amplitude=amplitude / 1e9, period=period,
pick_id=event.picks[pick_ind].resource_id,
waveform_id=event.picks[pick_ind].waveform_id, unit='m/s',
magnitude_hint='ML', type='AML', category='point'))
return event | Pick amplitudes for local magnitude for a single event.
Looks for maximum peak-to-trough amplitude for a channel in a stream, and
picks this amplitude and period. There are a few things it does
internally to stabilise the result:
1. Applies a given filter to the data - very necessary for small
magnitude earthquakes;
2. Keeps track of the poles and zeros of this filter and removes them
from the picked amplitude;
3. Picks the peak-to-trough amplitude, but records half of this: the
specification for the local magnitude is to use a peak amplitude on
a horizontal, however, with modern digital seismometers, the peak
amplitude often has an additional, DC-shift applied to it, to
stabilise this, and to remove possible issues with de-meaning data
recorded during the wave-train of an event (e.g. the mean may not be
the same as it would be for longer durations), we use half the
peak-to-trough amplitude;
4. Despite the original definition of local magnitude requiring the
use of a horizontal channel, more recent work has shown that the
vertical channels give more consistent magnitude estimations between
stations, due to a reduction in site-amplification effects, we
therefore use the vertical channels by default, but allow the user
to choose which channels they deem appropriate;
5. We do not specify that the maximum amplitude should be the
S-phase: The original definition holds that the maximum body-wave
amplitude should be used - while this is often the S-phase, we do not
discriminate against the P-phase. We do note that, unless the user
takes care when assigning winlen and filters, they may end up with
amplitude picks for surface waves;
6. We use a variable window-length by default that takes into account
P-S times if available, this is in an effort to include only the
body waves. When P-S times are not available we use the ps_multiplier
variable, which defaults to 0.34 x hypocentral distance.
:type event: obspy.core.event.event.Event
:param event: Event to pick
:type st: obspy.core.stream.Stream
:param st: Stream associated with event
:type respdir: str
:param respdir: Path to the response information directory
:type chans: list
:param chans:
List of the channels to pick on, defaults to ['Z'] - should just be
the orientations, e.g. Z, 1, 2, N, E
:type var_wintype: bool
:param var_wintype:
If True, the winlen will be multiplied by the P-S time if both P and
S picks are available, otherwise it will be multiplied by the
hypocentral distance*ps_multiplier, defaults to True
:type winlen: float
:param winlen:
Length of window, see above parameter, if var_wintype is False then
this will be in seconds, otherwise it is the multiplier to the
p-s time, defaults to 0.9.
:type pre_pick: float
:param pre_pick:
Time before the s-pick to start the cut window, defaults to 0.2.
:type pre_filt: bool
:param pre_filt: To apply a pre-filter or not, defaults to True
:type lowcut: float
:param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0
:type highcut: float
:param highcut: Highcut in Hz for the pre-filter, defaults to 20.0
:type corners: int
:param corners: Number of corners to use in the pre-filter
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio to allow a pick - see note below on
signal-to-noise ratio calculation.
:type plot: bool
:param plot: Turn plotting on or off.
:type remove_old: bool
:param remove_old:
If True, will remove old amplitude picks from event and overwrite
with new picks. Defaults to False.
:type ps_multiplier: float
:param ps_multiplier:
A p-s time multiplier of hypocentral distance - defaults to 0.34,
based on p-s ratio of 1.68 and an S-velocity of 1.5km/s, deliberately
chosen to be quite slow.
:type velocity: bool
:param velocity:
Whether to make the pick in velocity space or not. Original definition
of local magnitude used displacement of Wood-Anderson, MLv in seiscomp
and Antelope uses a velocity measurement.
:returns: Picked event
:rtype: :class:`obspy.core.event.Event`
.. Note::
Signal-to-noise ratio is calculated using the filtered data by
dividing the maximum amplitude in the signal window (pick window)
by the normalized noise amplitude (taken from the whole window
supplied).
.. Warning::
Works in place on data - will filter and remove response from data,
you are recommended to give this function a copy of the data if you
are using it in a loop. | Below is the instruction that describes the task:
### Input:
Pick amplitudes for local magnitude for a single event.
Looks for maximum peak-to-trough amplitude for a channel in a stream, and
picks this amplitude and period. There are a few things it does
internally to stabilise the result:
1. Applies a given filter to the data - very necessary for small
magnitude earthquakes;
2. Keeps track of the poles and zeros of this filter and removes them
from the picked amplitude;
3. Picks the peak-to-trough amplitude, but records half of this: the
specification for the local magnitude is to use a peak amplitude on
a horizontal, however, with modern digital seismometers, the peak
amplitude often has an additional, DC-shift applied to it, to
stabilise this, and to remove possible issues with de-meaning data
recorded during the wave-train of an event (e.g. the mean may not be
the same as it would be for longer durations), we use half the
peak-to-trough amplitude;
4. Despite the original definition of local magnitude requiring the
use of a horizontal channel, more recent work has shown that the
vertical channels give more consistent magnitude estimations between
stations, due to a reduction in site-amplification effects, we
therefore use the vertical channels by default, but allow the user
to choose which channels they deem appropriate;
5. We do not specify that the maximum amplitude should be the
S-phase: The original definition holds that the maximum body-wave
amplitude should be used - while this is often the S-phase, we do not
discriminate against the P-phase. We do note that, unless the user
takes care when assigning winlen and filters, they may end up with
amplitude picks for surface waves;
6. We use a variable window-length by default that takes into account
P-S times if available, this is in an effort to include only the
body waves. When P-S times are not available we use the ps_multiplier
variable, which defaults to 0.34 x hypocentral distance.
:type event: obspy.core.event.event.Event
:param event: Event to pick
:type st: obspy.core.stream.Stream
:param st: Stream associated with event
:type respdir: str
:param respdir: Path to the response information directory
:type chans: list
:param chans:
List of the channels to pick on, defaults to ['Z'] - should just be
the orientations, e.g. Z, 1, 2, N, E
:type var_wintype: bool
:param var_wintype:
If True, the winlen will be multiplied by the P-S time if both P and
S picks are available, otherwise it will be multiplied by the
hypocentral distance*ps_multiplier, defaults to True
:type winlen: float
:param winlen:
Length of window, see above parameter, if var_wintype is False then
this will be in seconds, otherwise it is the multiplier to the
p-s time, defaults to 0.9.
:type pre_pick: float
:param pre_pick:
Time before the s-pick to start the cut window, defaults to 0.2.
:type pre_filt: bool
:param pre_filt: To apply a pre-filter or not, defaults to True
:type lowcut: float
:param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0
:type highcut: float
:param highcut: Highcut in Hz for the pre-filter, defaults to 20.0
:type corners: int
:param corners: Number of corners to use in the pre-filter
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio to allow a pick - see note below on
signal-to-noise ratio calculation.
:type plot: bool
:param plot: Turn plotting on or off.
:type remove_old: bool
:param remove_old:
If True, will remove old amplitude picks from event and overwrite
with new picks. Defaults to False.
:type ps_multiplier: float
:param ps_multiplier:
A p-s time multiplier of hypocentral distance - defaults to 0.34,
based on p-s ratio of 1.68 and an S-velocity of 1.5km/s, deliberately
chosen to be quite slow.
:type velocity: bool
:param velocity:
Whether to make the pick in velocity space or not. Original definition
of local magnitude used displacement of Wood-Anderson, MLv in seiscomp
and Antelope uses a velocity measurement.
:returns: Picked event
:rtype: :class:`obspy.core.event.Event`
.. Note::
Signal-to-noise ratio is calculated using the filtered data by
dividing the maximum amplitude in the signal window (pick window)
by the normalized noise amplitude (taken from the whole window
supplied).
.. Warning::
Works in place on data - will filter and remove response from data,
you are recommended to give this function a copy of the data if you
are using it in a loop.
### Response:
def amp_pick_event(event, st, respdir, chans=['Z'], var_wintype=True,
winlen=0.9, pre_pick=0.2, pre_filt=True, lowcut=1.0,
highcut=20.0, corners=4, min_snr=1.0, plot=False,
remove_old=False, ps_multiplier=0.34, velocity=False):
"""
Pick amplitudes for local magnitude for a single event.
Looks for maximum peak-to-trough amplitude for a channel in a stream, and
picks this amplitude and period. There are a few things it does
internally to stabilise the result:
1. Applies a given filter to the data - very necessary for small
magnitude earthquakes;
2. Keeps track of the poles and zeros of this filter and removes them
from the picked amplitude;
3. Picks the peak-to-trough amplitude, but records half of this: the
specification for the local magnitude is to use a peak amplitude on
a horizontal, however, with modern digital seismometers, the peak
amplitude often has an additional, DC-shift applied to it, to
stabilise this, and to remove possible issues with de-meaning data
recorded during the wave-train of an event (e.g. the mean may not be
the same as it would be for longer durations), we use half the
peak-to-trough amplitude;
4. Despite the original definition of local magnitude requiring the
use of a horizontal channel, more recent work has shown that the
vertical channels give more consistent magnitude estimations between
stations, due to a reduction in site-amplification effects, we
therefore use the vertical channels by default, but allow the user
to choose which channels they deem appropriate;
5. We do not specify that the maximum amplitude should be the
S-phase: The original definition holds that the maximum body-wave
amplitude should be used - while this is often the S-phase, we do not
discriminate against the P-phase. We do note that, unless the user
takes care when assigning winlen and filters, they may end up with
amplitude picks for surface waves;
6. We use a variable window-length by default that takes into account
P-S times if available, this is in an effort to include only the
body waves. When P-S times are not available we use the ps_multiplier
variable, which defaults to 0.34 x hypocentral distance.
:type event: obspy.core.event.event.Event
:param event: Event to pick
:type st: obspy.core.stream.Stream
:param st: Stream associated with event
:type respdir: str
:param respdir: Path to the response information directory
:type chans: list
:param chans:
List of the channels to pick on, defaults to ['Z'] - should just be
the orientations, e.g. Z, 1, 2, N, E
:type var_wintype: bool
:param var_wintype:
If True, the winlen will be multiplied by the P-S time if both P and
S picks are available, otherwise it will be multiplied by the
hypocentral distance*ps_multiplier, defaults to True
:type winlen: float
:param winlen:
Length of window, see above parameter, if var_wintype is False then
this will be in seconds, otherwise it is the multiplier to the
p-s time, defaults to 0.9.
:type pre_pick: float
:param pre_pick:
Time before the s-pick to start the cut window, defaults to 0.2.
:type pre_filt: bool
:param pre_filt: To apply a pre-filter or not, defaults to True
:type lowcut: float
:param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0
:type highcut: float
:param highcut: Highcut in Hz for the pre-filter, defaults to 20.0
:type corners: int
:param corners: Number of corners to use in the pre-filter
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio to allow a pick - see note below on
signal-to-noise ratio calculation.
:type plot: bool
:param plot: Turn plotting on or off.
:type remove_old: bool
:param remove_old:
If True, will remove old amplitude picks from event and overwrite
with new picks. Defaults to False.
:type ps_multiplier: float
:param ps_multiplier:
A p-s time multiplier of hypocentral distance - defaults to 0.34,
based on p-s ratio of 1.68 and an S-velocity of 1.5km/s, deliberately
chosen to be quite slow.
:type velocity: bool
:param velocity:
Whether to make the pick in velocity space or not. Original definition
of local magnitude used displacement of Wood-Anderson, MLv in seiscomp
and Antelope uses a velocity measurement.
:returns: Picked event
:rtype: :class:`obspy.core.event.Event`
.. Note::
Signal-to-noise ratio is calculated using the filtered data by
dividing the maximum amplitude in the signal window (pick window)
by the normalized noise amplitude (taken from the whole window
supplied).
.. Warning::
Works in place on data - will filter and remove response from data,
you are recommended to give this function a copy of the data if you
are using it in a loop.
"""
# Convert these picks into a lists
stations = [] # List of stations
channels = [] # List of channels
picktimes = [] # List of pick times
picktypes = [] # List of pick types
picks_out = []
try:
depth = _get_origin(event).depth
except MatchFilterError:
depth = 0
if remove_old and event.amplitudes:
for amp in event.amplitudes:
# Find the pick and remove it too
pick = [p for p in event.picks if p.resource_id == amp.pick_id][0]
event.picks.remove(pick)
event.amplitudes = []
for pick in event.picks:
if pick.phase_hint in ['P', 'S']:
picks_out.append(pick) # Need to be able to remove this if there
# isn't data for a station!
stations.append(pick.waveform_id.station_code)
channels.append(pick.waveform_id.channel_code)
picktimes.append(pick.time)
picktypes.append(pick.phase_hint)
if len(picktypes) == 0:
warnings.warn('No P or S picks found')
st.merge() # merge the data, just in case!
# For each station cut the window
uniq_stas = list(set(stations))
for sta in uniq_stas:
for chan in chans:
print('Working on ' + sta + ' ' + chan)
tr = st.select(station=sta, channel='*' + chan)
if not tr:
warnings.warn(
'There is no station and channel match in the wavefile!')
continue
else:
tr = tr[0]
# Apply the pre-filter
if pre_filt:
try:
tr.split().detrend('simple').merge(fill_value=0)
except:
print('Some issue splitting this one')
dummy = tr.split()
dummy.detrend('simple')
tr = dummy.merge(fill_value=0)
try:
tr.filter('bandpass', freqmin=lowcut, freqmax=highcut,
corners=corners)
except NotImplementedError:
print('For some reason trace is not continuous:')
print(tr)
continue
# Find the response information
resp_info = _find_resp(
tr.stats.station, tr.stats.channel, tr.stats.network,
tr.stats.starttime, tr.stats.delta, respdir)
PAZ = []
seedresp = []
if resp_info and 'gain' in resp_info:
PAZ = resp_info
elif resp_info:
seedresp = resp_info
# Simulate a Wood Anderson Seismograph
if PAZ and len(tr.data) > 10:
# Set ten data points to be the minimum to pass
tr = _sim_WA(tr, PAZ, None, 10, velocity=velocity)
elif seedresp and len(tr.data) > 10:
tr = _sim_WA(tr, None, seedresp, 10, velocity=velocity)
elif len(tr.data) > 10:
warnings.warn('No PAZ for ' + tr.stats.station + ' ' +
tr.stats.channel + ' at time: ' +
str(tr.stats.starttime))
continue
sta_picks = [i for i in range(len(stations))
if stations[i] == sta]
pick_id = event.picks[sta_picks[0]].resource_id
arrival = [arrival for arrival in event.origins[0].arrivals
if arrival.pick_id == pick_id][0]
hypo_dist = np.sqrt(
np.square(degrees2kilometers(arrival.distance)) +
np.square(depth / 1000))
if var_wintype and hypo_dist:
if 'S' in [picktypes[i] for i in sta_picks] and\
'P' in [picktypes[i] for i in sta_picks]:
# If there is an S-pick we can use this :D
s_pick = [picktimes[i] for i in sta_picks
if picktypes[i] == 'S']
s_pick = min(s_pick)
p_pick = [picktimes[i] for i in sta_picks
if picktypes[i] == 'P']
p_pick = min(p_pick)
try:
tr.trim(starttime=s_pick - pre_pick,
endtime=s_pick + (s_pick - p_pick) * winlen)
except ValueError:
continue
elif 'S' in [picktypes[i] for i in sta_picks]:
s_pick = [picktimes[i] for i in sta_picks
if picktypes[i] == 'S']
s_pick = min(s_pick)
p_modelled = s_pick - (hypo_dist * ps_multiplier)
try:
tr.trim(starttime=s_pick - pre_pick,
endtime=s_pick + (s_pick - p_modelled) *
winlen)
except ValueError:
continue
else:
# In this case we only have a P pick
p_pick = [picktimes[i] for i in sta_picks
if picktypes[i] == 'P']
p_pick = min(p_pick)
s_modelled = p_pick + (hypo_dist * ps_multiplier)
print('P_pick=%s' % str(p_pick))
print('hypo_dist: %s' % str(hypo_dist))
print('S modelled=%s' % str(s_modelled))
try:
tr.trim(starttime=s_modelled - pre_pick,
endtime=s_modelled + (s_modelled - p_pick) *
winlen)
print(tr)
except ValueError:
continue
# Work out the window length based on p-s time or distance
elif 'S' in [picktypes[i] for i in sta_picks]:
# If the window is fixed we still need to find the start time,
# which can be based either on the S-pick (this elif), or
# on the hypocentral distance and the P-pick
# Take the minimum S-pick time if more than one S-pick is
# available
s_pick = [picktimes[i] for i in sta_picks
if picktypes[i] == 'S']
s_pick = min(s_pick)
try:
tr.trim(starttime=s_pick - pre_pick,
endtime=s_pick + winlen)
except ValueError:
continue
else:
# In this case, there is no S-pick and the window length is
# fixed we need to calculate an expected S_pick based on the
# hypocentral distance, this will be quite hand-wavey as we
# are not using any kind of velocity model.
p_pick = [picktimes[i] for i in sta_picks
if picktypes[i] == 'P']
print(picktimes)
p_pick = min(p_pick)
s_modelled = p_pick + hypo_dist * ps_multiplier
try:
tr.trim(starttime=s_modelled - pre_pick,
endtime=s_modelled + winlen)
except ValueError:
continue
if len(tr.data) <= 10:
warnings.warn('No data found for: ' + tr.stats.station)
continue
# Get the amplitude
try:
amplitude, period, delay = _max_p2t(tr.data, tr.stats.delta)
except ValueError:
print('No amplitude picked for tr %s' % str(tr))
continue
# Calculate the normalized noise amplitude
noise_amplitude = np.sqrt(np.mean(np.square(tr.data)))
if amplitude == 0.0:
continue
if amplitude / noise_amplitude < min_snr:
print('Signal to noise ratio of %s is below threshold.' %
(amplitude / noise_amplitude))
continue
if plot:
plt.plot(np.arange(len(tr.data)), tr.data, 'k')
plt.scatter(tr.stats.sampling_rate * delay, amplitude / 2)
plt.scatter(tr.stats.sampling_rate * (delay + period),
-amplitude / 2)
plt.show()
print('Amplitude picked: ' + str(amplitude))
print('Signal-to-noise ratio is: %s' %
(amplitude / noise_amplitude))
# Note, amplitude should be in meters at the moment!
# Remove the pre-filter response
if pre_filt:
# Generate poles and zeros for the filter we used earlier: this
# is how the filter is designed in the convenience methods of
# filtering in obspy.
z, p, k = iirfilter(
corners, [lowcut / (0.5 * tr.stats.sampling_rate),
highcut / (0.5 * tr.stats.sampling_rate)],
btype='band', ftype='butter', output='zpk')
filt_paz = {'poles': list(p), 'zeros': list(z), 'gain': k,
'sensitivity': 1.0}
amplitude /= (paz_2_amplitude_value_of_freq_resp(
filt_paz, 1 / period) * filt_paz['sensitivity'])
if PAZ:
amplitude /= 1000
if seedresp: # Seedresp method returns mm
amplitude *= 1000000
# Write out the half amplitude, approximately the peak amplitude as
# used directly in magnitude calculations
amplitude *= 0.5
# Append an amplitude reading to the event
_waveform_id = WaveformStreamID(
station_code=tr.stats.station, channel_code=tr.stats.channel,
network_code=tr.stats.network)
pick_ind = len(event.picks)
event.picks.append(Pick(
waveform_id=_waveform_id, phase_hint='IAML',
polarity='undecidable', time=tr.stats.starttime + delay,
evaluation_mode='automatic'))
if not velocity:
event.amplitudes.append(Amplitude(
generic_amplitude=amplitude / 1e9, period=period,
pick_id=event.picks[pick_ind].resource_id,
waveform_id=event.picks[pick_ind].waveform_id, unit='m',
magnitude_hint='ML', type='AML', category='point'))
else:
event.amplitudes.append(Amplitude(
generic_amplitude=amplitude / 1e9, period=period,
pick_id=event.picks[pick_ind].resource_id,
waveform_id=event.picks[pick_ind].waveform_id, unit='m/s',
magnitude_hint='ML', type='AML', category='point'))
return event |
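A hedged usage sketch: it assumes an ObsPy event file, a matching waveform file and a response directory laid out the way _find_resp expects; all file names below are placeholders.
from obspy import read, read_events

event = read_events("event.xml")[0]          # placeholder QuakeML file
st = read("waveforms.ms")                    # placeholder miniSEED file
picked = amp_pick_event(
    event=event,
    st=st.copy(),                            # copy: the function filters/trims in place
    respdir="responses/",                    # placeholder response directory
    chans=['Z'], winlen=0.9, pre_pick=0.2,
    lowcut=1.0, highcut=20.0, min_snr=1.5)
print(len(picked.amplitudes), "amplitude readings added")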
def pdf_to_img(pdf_file, page_num, page_width, page_height):
"""
Converts pdf file into image
:param pdf_file: path to the pdf file
:param page_num: page number to convert (index starting at 1)
:return: wand image object
"""
img = Image(filename="{}[{}]".format(pdf_file, page_num - 1))
img.resize(page_width, page_height)
return img | Converts pdf file into image
:param pdf_file: path to the pdf file
:param page_num: page number to convert (index starting at 1)
:return: wand image object | Below is the instruction that describes the task:
### Input:
Converts pdf file into image
:param pdf_file: path to the pdf file
:param page_num: page number to convert (index starting at 1)
:return: wand image object
### Response:
def pdf_to_img(pdf_file, page_num, page_width, page_height):
"""
Converts pdf file into image
:param pdf_file: path to the pdf file
:param page_num: page number to convert (index starting at 1)
:return: wand image object
"""
img = Image(filename="{}[{}]".format(pdf_file, page_num - 1))
img.resize(page_width, page_height)
return img |
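A short usage sketch, assuming Wand (with an ImageMagick/Ghostscript backend) is installed; the file name and the letter-size page dimensions are placeholders:
from wand.image import Image  # dependency used by pdf_to_img above

page = pdf_to_img("report.pdf", page_num=1, page_width=612, page_height=792)
page.format = 'png'
page.save(filename="report_page1.png")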
def find(cls, text):
"""This method should return an iterable containing matches of this element."""
if isinstance(cls.pattern, string_types):
cls.pattern = re.compile(cls.pattern)
return cls.pattern.finditer(text) | This method should return an iterable containing matches of this element. | Below is the instruction that describes the task:
### Input:
This method should return an iterable containing matches of this element.
### Response:
def find(cls, text):
"""This method should return an iterable containing matches of this element."""
if isinstance(cls.pattern, string_types):
cls.pattern = re.compile(cls.pattern)
return cls.pattern.finditer(text) |
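A minimal element class following the same contract, with plain str standing in for the string_types check used above; the class name and pattern are invented for the example:
import re

class Strong:
    pattern = r'\*\*(.+?)\*\*'   # compiled lazily on first call, as in find() above

    @classmethod
    def find(cls, text):
        if isinstance(cls.pattern, str):
            cls.pattern = re.compile(cls.pattern)
        return cls.pattern.finditer(text)

print([m.group(1) for m in Strong.find("a **b** and **c**")])  # ['b', 'c']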
def is_valid_github_uri(uri: URI, expected_path_terms: Tuple[str, ...]) -> bool:
"""
Return a bool indicating whether or not the URI fulfills the following specs
Valid Github URIs *must*:
- Have 'https' scheme
- Have 'api.github.com' authority
- Have a path that contains all "expected_path_terms"
"""
if not is_text(uri):
return False
parsed = parse.urlparse(uri)
path, scheme, authority = parsed.path, parsed.scheme, parsed.netloc
if not all((path, scheme, authority)):
return False
if any(term for term in expected_path_terms if term not in path):
return False
if scheme != "https":
return False
if authority != GITHUB_API_AUTHORITY:
return False
return True | Return a bool indicating whether or not the URI fulfills the following specs
Valid Github URIs *must*:
- Have 'https' scheme
- Have 'api.github.com' authority
- Have a path that contains all "expected_path_terms" | Below is the instruction that describes the task:
### Input:
Return a bool indicating whether or not the URI fulfills the following specs
Valid Github URIs *must*:
- Have 'https' scheme
- Have 'api.github.com' authority
- Have a path that contains all "expected_path_terms"
### Response:
def is_valid_github_uri(uri: URI, expected_path_terms: Tuple[str, ...]) -> bool:
"""
Return a bool indicating whether or not the URI fulfills the following specs
Valid Github URIs *must*:
- Have 'https' scheme
- Have 'api.github.com' authority
- Have a path that contains all "expected_path_terms"
"""
if not is_text(uri):
return False
parsed = parse.urlparse(uri)
path, scheme, authority = parsed.path, parsed.scheme, parsed.netloc
if not all((path, scheme, authority)):
return False
if any(term for term in expected_path_terms if term not in path):
return False
if scheme != "https":
return False
if authority != GITHUB_API_AUTHORITY:
return False
return True |
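A quick check of the rules, assuming URI is a plain str alias and GITHUB_API_AUTHORITY is the api.github.com host; the repository path is made up:
uri = "https://api.github.com/repos/example/repo/git/blobs/a1b2c3"
print(is_valid_github_uri(uri, ("repos", "blobs")))   # True
print(is_valid_github_uri(uri, ("releases",)))        # False: missing path term
print(is_valid_github_uri(uri.replace("https", "http", 1), ("blobs",)))  # False: wrong scheme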
def generate_account_shared_access_signature(self, resource_types, permission,
expiry, start=None, ip=None, protocol=None):
'''
Generates a shared access signature for the table service.
Use the returned signature with the sas_token parameter of TableService.
:param ResourceTypes resource_types:
Specifies the resource types that are accessible with the account SAS.
:param AccountPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
:return: A Shared Access Signature (sas) token.
:rtype: str
'''
_validate_not_none('self.account_name', self.account_name)
_validate_not_none('self.account_key', self.account_key)
sas = TableSharedAccessSignature(self.account_name, self.account_key)
return sas.generate_account(TableServices(), resource_types, permission,
expiry, start=start, ip=ip, protocol=protocol) | Generates a shared access signature for the table service.
Use the returned signature with the sas_token parameter of TableService.
:param ResourceTypes resource_types:
Specifies the resource types that are accessible with the account SAS.
:param AccountPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
:return: A Shared Access Signature (sas) token.
:rtype: str | Below is the instruction that describes the task:
### Input:
Generates a shared access signature for the table service.
Use the returned signature with the sas_token parameter of TableService.
:param ResourceTypes resource_types:
Specifies the resource types that are accessible with the account SAS.
:param AccountPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
:return: A Shared Access Signature (sas) token.
:rtype: str
### Response:
def generate_account_shared_access_signature(self, resource_types, permission,
expiry, start=None, ip=None, protocol=None):
'''
Generates a shared access signature for the table service.
Use the returned signature with the sas_token parameter of TableService.
:param ResourceTypes resource_types:
Specifies the resource types that are accessible with the account SAS.
:param AccountPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
:return: A Shared Access Signature (sas) token.
:rtype: str
'''
_validate_not_none('self.account_name', self.account_name)
_validate_not_none('self.account_key', self.account_key)
sas = TableSharedAccessSignature(self.account_name, self.account_key)
return sas.generate_account(TableServices(), resource_types, permission,
expiry, start=start, ip=ip, protocol=protocol) |
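A usage sketch against the legacy table SDK this code belongs to; the account credentials are placeholders and the import locations of ResourceTypes/AccountPermissions are an assumption about that SDK's layout, so adjust them to your installed package.
from datetime import datetime, timedelta
from azure.cosmosdb.table import TableService                               # assumed package
from azure.storage.common.models import ResourceTypes, AccountPermissions   # assumed location

ts = TableService(account_name="myaccount", account_key="<base64-key>")
sas_token = ts.generate_account_shared_access_signature(
    resource_types=ResourceTypes(object=True),
    permission=AccountPermissions(read=True),
    expiry=datetime.utcnow() + timedelta(hours=1),
    protocol='https')

# A client holding only the SAS token can now read the account's tables.
readonly = TableService(account_name="myaccount", sas_token=sas_token)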
def _backup(self):
"""
Backup the mined information.
"""
if PyFunceble.CONFIGURATION["mining"]:
# The mining is activated.
# We backup our mined information.
Dict(PyFunceble.INTERN["mined"]).to_json(self.file) | Backup the mined information. | Below is the instruction that describes the task:
### Input:
Backup the mined information.
### Response:
def _backup(self):
"""
Backup the mined information.
"""
if PyFunceble.CONFIGURATION["mining"]:
# The mining is activated.
# We backup our mined information.
Dict(PyFunceble.INTERN["mined"]).to_json(self.file) |
def commit_format(self):
"""
Formats the analysis into a simpler dictionary with the line, file and message values to
be commented on a commit.
Returns a list of dictionaries
"""
formatted_analyses = []
for analyze in self.analysis['messages']:
formatted_analyses.append({
'message': f"{analyze['source']}: {analyze['message']}. Code: {analyze['code']}",
'file': analyze['location']['path'],
'line': analyze['location']['line'],
})
return formatted_analyses | Formats the analysis into a simpler dictionary with the line, file and message values to
be commented on a commit.
Returns a list of dictionaries | Below is the instruction that describes the task:
### Input:
Formats the analysis into a simpler dictionary with the line, file and message values to
be commented on a commit.
Returns a list of dictionaries
### Response:
def commit_format(self):
"""
Formats the analysis into a simpler dictionary with the line, file and message values to
be commented on a commit.
Returns a list of dictionaries
"""
formatted_analyses = []
for analyze in self.analysis['messages']:
formatted_analyses.append({
'message': f"{analyze['source']}: {analyze['message']}. Code: {analyze['code']}",
'file': analyze['location']['path'],
'line': analyze['location']['line'],
})
return formatted_analyses |
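A toy reshaping of a prospector-style payload to show the output shape; the message content is invented:
analysis = {'messages': [{
    'source': 'pylint', 'code': 'W0611',
    'message': 'Unused import os',
    'location': {'path': 'app/views.py', 'line': 3},
}]}
formatted = [{
    'message': f"{m['source']}: {m['message']}. Code: {m['code']}",
    'file': m['location']['path'],
    'line': m['location']['line'],
} for m in analysis['messages']]
print(formatted)
# [{'message': 'pylint: Unused import os. Code: W0611', 'file': 'app/views.py', 'line': 3}]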
def permute(self, ordering: np.ndarray, *, axis: int) -> None:
"""
Permute the view, by permuting its layers, attributes and graphs
Args:
ordering (np.ndarray): The desired ordering along the axis
axis (int): 0, permute rows; 1, permute columns
"""
if axis not in (0, 1):
raise ValueError("Axis must be 0 (rows) or 1 (columns)")
for layer in self.layers.values():
layer._permute(ordering, axis=axis)
if axis == 0:
if self.row_graphs is not None:
for g in self.row_graphs.values():
g._permute(ordering)
for a in self.row_attrs.values():
a._permute(ordering)
elif axis == 1:
if self.col_graphs is not None:
for g in self.col_graphs.values():
g._permute(ordering)
for a in self.col_attrs.values():
a._permute(ordering) | Permute the view, by permuting its layers, attributes and graphs
Args:
ordering (np.ndarray): The desired ordering along the axis
axis (int): 0, permute rows; 1, permute columns | Below is the instruction that describes the task:
### Input:
Permute the view, by permuting its layers, attributes and graphs
Args:
ordering (np.ndarray): The desired ordering along the axis
axis (int): 0, permute rows; 1, permute columns
### Response:
def permute(self, ordering: np.ndarray, *, axis: int) -> None:
"""
Permute the view, by permuting its layers, attributes and graphs
Args:
ordering (np.ndarray): The desired ordering along the axis
axis (int): 0, permute rows; 1, permute columns
"""
if axis not in (0, 1):
raise ValueError("Axis must be 0 (rows) or 1 (columns)")
for layer in self.layers.values():
layer._permute(ordering, axis=axis)
if axis == 0:
if self.row_graphs is not None:
for g in self.row_graphs.values():
g._permute(ordering)
for a in self.row_attrs.values():
a._permute(ordering)
elif axis == 1:
if self.col_graphs is not None:
for g in self.col_graphs.values():
g._permute(ordering)
for a in self.col_attrs.values():
a._permute(ordering) |
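The permutation itself is plain fancy indexing along the chosen axis; a NumPy-only sketch of what every layer and column attribute sees for axis=1 (shapes and the ordering are invented):
import numpy as np

layer = np.arange(12).reshape(3, 4)       # e.g. genes x cells
ordering = np.array([2, 0, 1, 3])         # desired column (cell) order
col_attr = np.array(['c0', 'c1', 'c2', 'c3'])

print(layer[:, ordering][0])              # [2 0 1 3]
print(col_attr[ordering])                 # ['c2' 'c0' 'c1' 'c3']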
def level(self):
"""Extract level number from compliance profile URI.
Returns integer level number or raises IIIFInfoError
"""
m = re.match(
self.compliance_prefix +
r'(\d)' +
self.compliance_suffix +
r'$',
self.compliance)
if (m):
return int(m.group(1))
raise IIIFInfoError(
"Bad compliance profile URI, failed to extract level number") | Extract level number from compliance profile URI.
Returns integer level number or raises IIIFInfoError | Below is the instruction that describes the task:
### Input:
Extract level number from compliance profile URI.
Returns integer level number or raises IIIFInfoError
### Response:
def level(self):
"""Extract level number from compliance profile URI.
Returns integer level number or raises IIIFInfoError
"""
m = re.match(
self.compliance_prefix +
r'(\d)' +
self.compliance_suffix +
r'$',
self.compliance)
if (m):
return int(m.group(1))
raise IIIFInfoError(
"Bad compliance profile URI, failed to extract level number") |
def keepLastValue(requestContext, seriesList, limit=INF):
"""
Takes one metric or a wildcard seriesList, and optionally a limit to the
number of 'None' values to skip over. Continues the line with the last
received value when gaps ('None' values) appear in your data, rather than
breaking your line.
Example::
&target=keepLastValue(Server01.connections.handled)
&target=keepLastValue(Server01.connections.handled, 10)
"""
for series in seriesList:
series.name = "keepLastValue(%s)" % (series.name)
series.pathExpression = series.name
consecutiveNones = 0
for i, value in enumerate(series):
series[i] = value
# No 'keeping' can be done on the first value because we have no
# idea what came before it.
if i == 0:
continue
if value is None:
consecutiveNones += 1
else:
if 0 < consecutiveNones <= limit:
# If a non-None value is seen before the limit of Nones is
# hit, backfill all the missing datapoints with the last
# known value.
for index in range(i - consecutiveNones, i):
series[index] = series[i - consecutiveNones - 1]
consecutiveNones = 0
# If the series ends with some None values, try to backfill a bit to
# cover it.
if 0 < consecutiveNones <= limit:
for index in range(len(series) - consecutiveNones, len(series)):
series[index] = series[len(series) - consecutiveNones - 1]
return seriesList | Takes one metric or a wildcard seriesList, and optionally a limit to the
number of 'None' values to skip over. Continues the line with the last
received value when gaps ('None' values) appear in your data, rather than
breaking your line.
Example::
&target=keepLastValue(Server01.connections.handled)
&target=keepLastValue(Server01.connections.handled, 10) | Below is the instruction that describes the task:
### Input:
Takes one metric or a wildcard seriesList, and optionally a limit to the
number of 'None' values to skip over. Continues the line with the last
received value when gaps ('None' values) appear in your data, rather than
breaking your line.
Example::
&target=keepLastValue(Server01.connections.handled)
&target=keepLastValue(Server01.connections.handled, 10)
### Response:
def keepLastValue(requestContext, seriesList, limit=INF):
"""
Takes one metric or a wildcard seriesList, and optionally a limit to the
number of 'None' values to skip over. Continues the line with the last
received value when gaps ('None' values) appear in your data, rather than
breaking your line.
Example::
&target=keepLastValue(Server01.connections.handled)
&target=keepLastValue(Server01.connections.handled, 10)
"""
for series in seriesList:
series.name = "keepLastValue(%s)" % (series.name)
series.pathExpression = series.name
consecutiveNones = 0
for i, value in enumerate(series):
series[i] = value
# No 'keeping' can be done on the first value because we have no
# idea what came before it.
if i == 0:
continue
if value is None:
consecutiveNones += 1
else:
if 0 < consecutiveNones <= limit:
# If a non-None value is seen before the limit of Nones is
# hit, backfill all the missing datapoints with the last
# known value.
for index in range(i - consecutiveNones, i):
series[index] = series[i - consecutiveNones - 1]
consecutiveNones = 0
# If the series ends with some None values, try to backfill a bit to
# cover it.
if 0 < consecutiveNones <= limit:
for index in range(len(series) - consecutiveNones, len(series)):
series[index] = series[len(series) - consecutiveNones - 1]
return seriesList |
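The backfill rule is easiest to see on a bare list, without the Graphite series wrapper; this mirror of the loop above is convenient for unit-testing the behaviour:
def keep_last(values, limit=float('inf')):
    out = list(values)
    consecutive_nones = 0
    for i, value in enumerate(out):
        if i == 0:
            continue
        if value is None:
            consecutive_nones += 1
        else:
            if 0 < consecutive_nones <= limit:
                for idx in range(i - consecutive_nones, i):
                    out[idx] = out[i - consecutive_nones - 1]
            consecutive_nones = 0
    if 0 < consecutive_nones <= limit:
        for idx in range(len(out) - consecutive_nones, len(out)):
            out[idx] = out[len(out) - consecutive_nones - 1]
    return out

print(keep_last([1, None, None, 4, None], limit=2))  # [1, 1, 1, 4, 4]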
def _consume_scope(self):
"""Returns a pair (scope, list of flags encountered in that scope).
Note that the flag may be explicitly scoped, and therefore not actually belong to this scope.
For example, in:
./pants --compile-java-partition-size-hint=100 compile <target>
--compile-java-partition-size-hint should be treated as if it were --partition-size-hint=100
in the compile.java scope.
"""
if not self._at_scope():
return None, []
scope = self._unconsumed_args.pop()
flags = self._consume_flags()
return scope, flags | Returns a pair (scope, list of flags encountered in that scope).
Note that the flag may be explicitly scoped, and therefore not actually belong to this scope.
For example, in:
./pants --compile-java-partition-size-hint=100 compile <target>
--compile-java-partition-size-hint should be treated as if it were --partition-size-hint=100
in the compile.java scope. | Below is the instruction that describes the task:
### Input:
Returns a pair (scope, list of flags encountered in that scope).
Note that the flag may be explicitly scoped, and therefore not actually belong to this scope.
For example, in:
./pants --compile-java-partition-size-hint=100 compile <target>
--compile-java-partition-size-hint should be treated as if it were --partition-size-hint=100
in the compile.java scope.
### Response:
def _consume_scope(self):
"""Returns a pair (scope, list of flags encountered in that scope).
Note that the flag may be explicitly scoped, and therefore not actually belong to this scope.
For example, in:
./pants --compile-java-partition-size-hint=100 compile <target>
--compile-java-partition-size-hint should be treated as if it were --partition-size-hint=100
in the compile.java scope.
"""
if not self._at_scope():
return None, []
scope = self._unconsumed_args.pop()
flags = self._consume_flags()
return scope, flags |
def import_submodules(package, parent_package=None, exclude_submodules=None):
"""
Generator which imports all submodules of a module, recursively, including subpackages
:param package: package name (e.g 'chisel.util'); may be relative if parent_package is provided
:type package: str
:param parent_package: parent package name (e.g 'chisel')
:type parent_package: str
:rtype: iterator of modules
"""
exclude_submodules_dot = [x + '.' for x in exclude_submodules] if exclude_submodules else exclude_submodules
package = importlib.import_module(package, parent_package)
for _, name, _ in pkgutil.walk_packages(package.__path__, package.__name__ + '.'):
if exclude_submodules and (name in exclude_submodules or any(name.startswith(x) for x in exclude_submodules_dot)):
continue
yield importlib.import_module(name) | Generator which imports all submodules of a module, recursively, including subpackages
:param package: package name (e.g 'chisel.util'); may be relative if parent_package is provided
:type package: str
:param parent_package: parent package name (e.g 'chisel')
:type package: str
:rtype: iterator of modules | Below is the the instruction that describes the task:
### Input:
Generator which imports all submodules of a module, recursively, including subpackages
:param package: package name (e.g 'chisel.util'); may be relative if parent_package is provided
:type package: str
:param parent_package: parent package name (e.g 'chisel')
:type package: str
:rtype: iterator of modules
### Response:
def import_submodules(package, parent_package=None, exclude_submodules=None):
"""
Generator which imports all submodules of a module, recursively, including subpackages
:param package: package name (e.g 'chisel.util'); may be relative if parent_package is provided
:type package: str
:param parent_package: parent package name (e.g 'chisel')
:type package: str
:rtype: iterator of modules
"""
exclude_submodules_dot = [x + '.' for x in exclude_submodules] if exclude_submodules else exclude_submodules
package = importlib.import_module(package, parent_package)
for _, name, _ in pkgutil.walk_packages(package.__path__, package.__name__ + '.'):
if exclude_submodules and (name in exclude_submodules or any(name.startswith(x) for x in exclude_submodules_dot)):
continue
yield importlib.import_module(name) |
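A quick usage sketch for the helper above; 'mypackage' and the excluded subpackage name are placeholders, not names from the source.
for module in import_submodules('mypackage', exclude_submodules=['mypackage.tests']):
    print(module.__name__)   # each surviving submodule is imported and yielded lazily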
def _create_datadict(cls, internal_name):
"""Creates an object depending on `internal_name`
Args:
internal_name (str): IDD name
Raises:
ValueError: if `internal_name` cannot be matched to a data dictionary object
"""
if internal_name == "LOCATION":
return Location()
if internal_name == "DESIGN CONDITIONS":
return DesignConditions()
if internal_name == "TYPICAL/EXTREME PERIODS":
return TypicalOrExtremePeriods()
if internal_name == "GROUND TEMPERATURES":
return GroundTemperatures()
if internal_name == "HOLIDAYS/DAYLIGHT SAVINGS":
return HolidaysOrDaylightSavings()
if internal_name == "COMMENTS 1":
return Comments1()
if internal_name == "COMMENTS 2":
return Comments2()
if internal_name == "DATA PERIODS":
return DataPeriods()
raise ValueError(
"No DataDictionary known for {}".format(internal_name)) | Creates an object depending on `internal_name`
Args:
internal_name (str): IDD name
Raises:
ValueError: if `internal_name` cannot be matched to a data dictionary object | Below is the the instruction that describes the task:
### Input:
Creates an object depending on `internal_name`
Args:
internal_name (str): IDD name
Raises:
ValueError: if `internal_name` cannot be matched to a data dictionary object
### Response:
def _create_datadict(cls, internal_name):
"""Creates an object depending on `internal_name`
Args:
internal_name (str): IDD name
Raises:
ValueError: if `internal_name` cannot be matched to a data dictionary object
"""
if internal_name == "LOCATION":
return Location()
if internal_name == "DESIGN CONDITIONS":
return DesignConditions()
if internal_name == "TYPICAL/EXTREME PERIODS":
return TypicalOrExtremePeriods()
if internal_name == "GROUND TEMPERATURES":
return GroundTemperatures()
if internal_name == "HOLIDAYS/DAYLIGHT SAVINGS":
return HolidaysOrDaylightSavings()
if internal_name == "COMMENTS 1":
return Comments1()
if internal_name == "COMMENTS 2":
return Comments2()
if internal_name == "DATA PERIODS":
return DataPeriods()
raise ValueError(
"No DataDictionary known for {}".format(internal_name)) |
def fixed_padding(inputs, kernel_size, data_format):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
[pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs | Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1). | Below is the the instruction that describes the task:
### Input:
Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
### Response:
def fixed_padding(inputs, kernel_size, data_format):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
[pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs |
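A minimal sketch of just the padding arithmetic used above, independent of TensorFlow; it only restates the kernel_size - 1 split between the two sides.
def pad_amounts(kernel_size):
    # total padding is kernel_size - 1, with the extra cell (if any) on the end
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    return pad_beg, pad_total - pad_beg

assert pad_amounts(3) == (1, 1)
assert pad_amounts(4) == (1, 2)   # even kernels pad one cell more at the end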
def get_term_freq(self, go_id):
'''
Returns the frequency at which a particular GO term has
been observed in the annotations.
'''
num_ns = float(self.get_total_count(self.go2obj[go_id].namespace))
return float(self.get_count(go_id))/num_ns if num_ns != 0 else 0 | Returns the frequency at which a particular GO term has
been observed in the annotations. | Below is the the instruction that describes the task:
### Input:
Returns the frequency at which a particular GO term has
been observed in the annotations.
### Response:
def get_term_freq(self, go_id):
'''
Returns the frequency at which a particular GO term has
been observed in the annotations.
'''
num_ns = float(self.get_total_count(self.go2obj[go_id].namespace))
return float(self.get_count(go_id))/num_ns if num_ns != 0 else 0 |
def _dict_to_bson(doc, check_keys, opts, top_level=True):
"""Encode a document to BSON."""
if _raw_document_class(doc):
return doc.raw
try:
elements = []
if top_level and "_id" in doc:
elements.append(_name_value_to_bson(b"_id\x00", doc["_id"],
check_keys, opts))
for (key, value) in iteritems(doc):
if not top_level or key != "_id":
elements.append(_element_to_bson(key, value,
check_keys, opts))
except AttributeError:
raise TypeError("encoder expected a mapping type but got: %r" % (doc,))
encoded = b"".join(elements)
return _PACK_INT(len(encoded) + 5) + encoded + b"\x00" | Encode a document to BSON. | Below is the the instruction that describes the task:
### Input:
Encode a document to BSON.
### Response:
def _dict_to_bson(doc, check_keys, opts, top_level=True):
"""Encode a document to BSON."""
if _raw_document_class(doc):
return doc.raw
try:
elements = []
if top_level and "_id" in doc:
elements.append(_name_value_to_bson(b"_id\x00", doc["_id"],
check_keys, opts))
for (key, value) in iteritems(doc):
if not top_level or key != "_id":
elements.append(_element_to_bson(key, value,
check_keys, opts))
except AttributeError:
raise TypeError("encoder expected a mapping type but got: %r" % (doc,))
encoded = b"".join(elements)
return _PACK_INT(len(encoded) + 5) + encoded + b"\x00" |
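A minimal sketch of only the outer framing done on the last line above, assuming _PACK_INT is a little-endian 4-byte int pack as in pymongo; element encoding itself is omitted.
import struct

def frame_bson(element_bytes):
    # 4-byte length prefix (which counts itself and the trailing NUL) + elements + NUL
    return struct.pack('<i', len(element_bytes) + 5) + element_bytes + b'\x00'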
def from_file(cls, f):
"""Load the history of a ``NeuralNet`` from a json file.
Parameters
----------
f : file-like object or str
"""
with open_file_like(f, 'r') as fp:
return cls(json.load(fp)) | Load the history of a ``NeuralNet`` from a json file.
Parameters
----------
f : file-like object or str | Below is the the instruction that describes the task:
### Input:
Load the history of a ``NeuralNet`` from a json file.
Parameters
----------
f : file-like object or str
### Response:
def from_file(cls, f):
"""Load the history of a ``NeuralNet`` from a json file.
Parameters
----------
f : file-like object or str
"""
with open_file_like(f, 'r') as fp:
return cls(json.load(fp)) |
def connect_generators(self, debug=False):
""" Connects generators (graph nodes) to grid (graph) for every MV and LV Grid District
Args
----
debug: bool, defaults to False
If True, information is printed during process.
"""
for mv_grid_district in self.mv_grid_districts():
mv_grid_district.mv_grid.connect_generators(debug=debug)
# get predefined random seed and initialize random generator
seed = int(cfg_ding0.get('random', 'seed'))
random.seed(a=seed)
for load_area in mv_grid_district.lv_load_areas():
if not load_area.is_aggregated:
for lv_grid_district in load_area.lv_grid_districts():
lv_grid_district.lv_grid.connect_generators(debug=debug)
if debug:
lv_grid_district.lv_grid.graph_draw(mode='LV')
else:
logger.info(
'{} is of type aggregated. LV generators are not connected to LV grids.'.format(repr(load_area)))
logger.info('=====> Generators connected') | Connects generators (graph nodes) to grid (graph) for every MV and LV Grid District
Args
----
debug: bool, defaults to False
If True, information is printed during process. | Below is the the instruction that describes the task:
### Input:
Connects generators (graph nodes) to grid (graph) for every MV and LV Grid District
Args
----
debug: bool, defaults to False
If True, information is printed during process.
### Response:
def connect_generators(self, debug=False):
""" Connects generators (graph nodes) to grid (graph) for every MV and LV Grid District
Args
----
debug: bool, defaults to False
If True, information is printed during process.
"""
for mv_grid_district in self.mv_grid_districts():
mv_grid_district.mv_grid.connect_generators(debug=debug)
# get predefined random seed and initialize random generator
seed = int(cfg_ding0.get('random', 'seed'))
random.seed(a=seed)
for load_area in mv_grid_district.lv_load_areas():
if not load_area.is_aggregated:
for lv_grid_district in load_area.lv_grid_districts():
lv_grid_district.lv_grid.connect_generators(debug=debug)
if debug:
lv_grid_district.lv_grid.graph_draw(mode='LV')
else:
logger.info(
'{} is of type aggregated. LV generators are not connected to LV grids.'.format(repr(load_area)))
logger.info('=====> Generators connected') |
def get_parts(self, parts=None, reference_level=0):
"""Recursively returns a depth-first list of all known magic parts"""
if parts is None:
parts = list()
new_reference_level = reference_level
else:
self._level_in_section = self._level + reference_level
new_reference_level = self._level_in_section
parts.append(self.my_osid_object)
if self._child_parts is None:
if self.has_magic_children():
self.generate_children()
else:
return parts
for part in self._child_parts:
part.get_parts(parts, new_reference_level)
# Don't need to append here, because parts is passed by reference
# so appending is redundant
# child_parts = part.get_parts(parts, new_reference_level)
# known_part_ids = [str(part.ident) for part in parts]
#
# for child_part in child_parts:
# if str(child_part.ident) not in known_part_ids:
# parts.append(child_part)
# known_part_ids.append(str(child_part.ident))
return parts | Recursively returns a depth-first list of all known magic parts | Below is the the instruction that describes the task:
### Input:
Recursively returns a depth-first list of all known magic parts
### Response:
def get_parts(self, parts=None, reference_level=0):
"""Recursively returns a depth-first list of all known magic parts"""
if parts is None:
parts = list()
new_reference_level = reference_level
else:
self._level_in_section = self._level + reference_level
new_reference_level = self._level_in_section
parts.append(self.my_osid_object)
if self._child_parts is None:
if self.has_magic_children():
self.generate_children()
else:
return parts
for part in self._child_parts:
part.get_parts(parts, new_reference_level)
# Don't need to append here, because parts is passed by reference
# so appending is redundant
# child_parts = part.get_parts(parts, new_reference_level)
# known_part_ids = [str(part.ident) for part in parts]
#
# for child_part in child_parts:
# if str(child_part.ident) not in known_part_ids:
# parts.append(child_part)
# known_part_ids.append(str(child_part.ident))
return parts |
def add_component_info(self, compinfo):
"""Add sub-component specific information to a particular data selection
Parameters
----------
compinfo : `ModelComponentInfo` object
Sub-component being added
"""
if self.components is None:
self.components = {}
self.components[compinfo.comp_key] = compinfo | Add sub-component specific information to a particular data selection
Parameters
----------
compinfo : `ModelComponentInfo` object
Sub-component being added | Below is the the instruction that describes the task:
### Input:
Add sub-component specific information to a particular data selection
Parameters
----------
compinfo : `ModelComponentInfo` object
Sub-component being added
### Response:
def add_component_info(self, compinfo):
"""Add sub-component specific information to a particular data selection
Parameters
----------
compinfo : `ModelComponentInfo` object
Sub-component being added
"""
if self.components is None:
self.components = {}
self.components[compinfo.comp_key] = compinfo |
def get_el(el):
"""
Get value of given `el` tag element.
Automatically choose proper method to set the `value` based on the type
of the `el`.
Args:
el (obj): Element reference to the input you want to convert to
typeahead.
Returns:
str: Value of the object.
"""
tag_name = el.elt.tagName.lower()
if tag_name in {"input", "textarea", "select"}:
return el.value
else:
raise ValueError(
"Getter for %s (%s) not implemented!" % (tag_name, el.id)
) | Get value of given `el` tag element.
Automatically choose proper method to set the `value` based on the type
of the `el`.
Args:
el (obj): Element reference to the input you want to convert to
typeahead.
Returns:
str: Value of the object. | Below is the the instruction that describes the task:
### Input:
Get value of given `el` tag element.
Automatically choose proper method to set the `value` based on the type
of the `el`.
Args:
el (obj): Element reference to the input you want to convert to
typeahead.
Returns:
str: Value of the object.
### Response:
def get_el(el):
"""
Get value of given `el` tag element.
Automatically choose proper method to set the `value` based on the type
of the `el`.
Args:
el (obj): Element reference to the input you want to convert to
typeahead.
Returns:
str: Value of the object.
"""
tag_name = el.elt.tagName.lower()
if tag_name in {"input", "textarea", "select"}:
return el.value
else:
raise ValueError(
"Getter for %s (%s) not implemented!" % (tag_name, el.id)
) |
def search_url(self, searchterm):
"""Search for URLs
:type searchterm: str
:rtype: list
"""
return self.__search(type_attribute=self.__mispurltypes(), value=searchterm) | Search for URLs
:type searchterm: str
:rtype: list | Below is the the instruction that describes the task:
### Input:
Search for URLs
:type searchterm: str
:rtype: list
### Response:
def search_url(self, searchterm):
"""Search for URLs
:type searchterm: str
:rtype: list
"""
return self.__search(type_attribute=self.__mispurltypes(), value=searchterm) |
def _renew_by(name, window=None):
'''
Date before a certificate should be renewed
:param name: Common Name of the certificate (DNS name of certificate)
:param window: days before expiry date to renew
:return datetime object of first renewal date
'''
expiry = _expires(name)
if window is not None:
expiry = expiry - datetime.timedelta(days=window)
return expiry | Date before a certificate should be renewed
:param name: Common Name of the certificate (DNS name of certificate)
:param window: days before expiry date to renew
:return datetime object of first renewal date | Below is the the instruction that describes the task:
### Input:
Date before a certificate should be renewed
:param name: Common Name of the certificate (DNS name of certificate)
:param window: days before expiry date to renew
:return datetime object of first renewal date
### Response:
def _renew_by(name, window=None):
'''
Date before a certificate should be renewed
:param name: Common Name of the certificate (DNS name of certificate)
:param window: days before expiry date to renew
:return datetime object of first renewal date
'''
expiry = _expires(name)
if window is not None:
expiry = expiry - datetime.timedelta(days=window)
return expiry |
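For illustration only, with made-up dates: the window simply shifts the expiry date back by that many days.
import datetime

expiry = datetime.datetime(2024, 6, 30)
renew_by = expiry - datetime.timedelta(days=14)   # 2024-06-16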
def enabled(name):
'''
Enable the RDP service and make sure access to the RDP
port is allowed in the firewall configuration
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
stat = __salt__['rdp.status']()
if not stat:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'RDP will be enabled'
return ret
ret['result'] = __salt__['rdp.enable']()
ret['changes'] = {'RDP was enabled': True}
return ret
ret['comment'] = 'RDP is enabled'
return ret | Enable the RDP service and make sure access to the RDP
port is allowed in the firewall configuration | Below is the the instruction that describes the task:
### Input:
Enable the RDP service and make sure access to the RDP
port is allowed in the firewall configuration
### Response:
def enabled(name):
'''
Enable the RDP service and make sure access to the RDP
port is allowed in the firewall configuration
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
stat = __salt__['rdp.status']()
if not stat:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'RDP will be enabled'
return ret
ret['result'] = __salt__['rdp.enable']()
ret['changes'] = {'RDP was enabled': True}
return ret
ret['comment'] = 'RDP is enabled'
return ret |
def nz(value, none_value, strict=True):
''' This function is named after an old VBA function. It returns a default
value if the passed in value is None. If strict is False it will
treat an empty string as None as well.
example:
x = None
nz(x,"hello")
--> "hello"
nz(x,"")
--> ""
y = ""
nz(y,"hello")
--> ""
nz(y,"hello", False)
--> "hello" '''
if not DEBUG:
debug = False
else:
debug = False
if debug: print("START nz frameworkutilities.py ----------------------\n")
if value is None and strict:
return_val = none_value
elif strict and value is not None:
return_val = value
elif not strict and not is_not_null(value):
return_val = none_value
else:
return_val = value
if debug: print("value: %s | none_value: %s | return_val: %s" %
(value, none_value, return_val))
if debug: print("END nz frameworkutilities.py ----------------------\n")
return return_val | This function is named after an old VBA function. It returns a default
value if the passed in value is None. If strict is False it will
treat an empty string as None as well.
example:
x = None
nz(x,"hello")
--> "hello"
nz(x,"")
--> ""
y = ""
nz(y,"hello")
--> ""
nz(y,"hello", False)
--> "hello" | Below is the the instruction that describes the task:
### Input:
This function is named after an old VBA function. It returns a default
value if the passed in value is None. If strict is False it will
treat an empty string as None as well.
example:
x = None
nz(x,"hello")
--> "hello"
nz(x,"")
--> ""
y = ""
nz(y,"hello")
--> ""
nz(y,"hello", False)
--> "hello"
### Response:
def nz(value, none_value, strict=True):
''' This function is named after an old VBA function. It returns a default
value if the passed in value is None. If strict is False it will
treat an empty string as None as well.
example:
x = None
nz(x,"hello")
--> "hello"
nz(x,"")
--> ""
y = ""
nz(y,"hello")
--> ""
nz(y,"hello", False)
--> "hello" '''
if not DEBUG:
debug = False
else:
debug = False
if debug: print("START nz frameworkutilities.py ----------------------\n")
if value is None and strict:
return_val = none_value
elif strict and value is not None:
return_val = value
elif not strict and not is_not_null(value):
return_val = none_value
else:
return_val = value
if debug: print("value: %s | none_value: %s | return_val: %s" %
(value, none_value, return_val))
if debug: print("END nz frameworkutilities.py ----------------------\n")
return return_val |
async def login(self, email: str, password: str) -> bool:
"""Login to the profile."""
login_resp = await self._request(
'post',
API_URL_USER,
json={
'version': '1.0',
'method': 'Signin',
'param': {
'Email': email,
'Password': password,
'CaptchaCode': ''
},
'sourcetype': 0
})
_LOGGER.debug('Login response: %s', login_resp)
if login_resp.get('Code') != 0:
return False
self.account_id = login_resp['Json']['gid']
return True | Login to the profile. | Below is the the instruction that describes the task:
### Input:
Login to the profile.
### Response:
async def login(self, email: str, password: str) -> bool:
"""Login to the profile."""
login_resp = await self._request(
'post',
API_URL_USER,
json={
'version': '1.0',
'method': 'Signin',
'param': {
'Email': email,
'Password': password,
'CaptchaCode': ''
},
'sourcetype': 0
})
_LOGGER.debug('Login response: %s', login_resp)
if login_resp.get('Code') != 0:
return False
self.account_id = login_resp['Json']['gid']
return True |
def _apply_function_in_context(cls, f, args, context):
""" Apply an MPFR function 'f' to the given arguments 'args', rounding to
the given context. Returns a new Mpfr object with precision taken from
the current context.
"""
rounding = context.rounding
bf = mpfr.Mpfr_t.__new__(cls)
mpfr.mpfr_init2(bf, context.precision)
args = (bf,) + args + (rounding,)
ternary = f(*args)
with _temporary_exponent_bounds(context.emin, context.emax):
ternary = mpfr.mpfr_check_range(bf, ternary, rounding)
if context.subnormalize:
# mpfr_subnormalize doesn't set underflow and
# subnormal flags, so we do that ourselves. We choose
# to set the underflow flag for *all* cases where the
# 'after rounding' result is smaller than the smallest
# normal number, even if that result is exact.
# if bf is zero but ternary is nonzero, the underflow
# flag will already have been set by mpfr_check_range;
underflow = (
mpfr.mpfr_number_p(bf) and
not mpfr.mpfr_zero_p(bf) and
mpfr.mpfr_get_exp(bf) < context.precision - 1 + context.emin)
if underflow:
mpfr.mpfr_set_underflow()
ternary = mpfr.mpfr_subnormalize(bf, ternary, rounding)
if ternary:
mpfr.mpfr_set_inexflag()
return bf | Apply an MPFR function 'f' to the given arguments 'args', rounding to
the given context. Returns a new Mpfr object with precision taken from
the current context. | Below is the the instruction that describes the task:
### Input:
Apply an MPFR function 'f' to the given arguments 'args', rounding to
the given context. Returns a new Mpfr object with precision taken from
the current context.
### Response:
def _apply_function_in_context(cls, f, args, context):
""" Apply an MPFR function 'f' to the given arguments 'args', rounding to
the given context. Returns a new Mpfr object with precision taken from
the current context.
"""
rounding = context.rounding
bf = mpfr.Mpfr_t.__new__(cls)
mpfr.mpfr_init2(bf, context.precision)
args = (bf,) + args + (rounding,)
ternary = f(*args)
with _temporary_exponent_bounds(context.emin, context.emax):
ternary = mpfr.mpfr_check_range(bf, ternary, rounding)
if context.subnormalize:
# mpfr_subnormalize doesn't set underflow and
# subnormal flags, so we do that ourselves. We choose
# to set the underflow flag for *all* cases where the
# 'after rounding' result is smaller than the smallest
# normal number, even if that result is exact.
# if bf is zero but ternary is nonzero, the underflow
# flag will already have been set by mpfr_check_range;
underflow = (
mpfr.mpfr_number_p(bf) and
not mpfr.mpfr_zero_p(bf) and
mpfr.mpfr_get_exp(bf) < context.precision - 1 + context.emin)
if underflow:
mpfr.mpfr_set_underflow()
ternary = mpfr.mpfr_subnormalize(bf, ternary, rounding)
if ternary:
mpfr.mpfr_set_inexflag()
return bf |
def create_convert_sbml_id_function(
compartment_prefix='C_', reaction_prefix='R_',
compound_prefix='M_', decode_id=entry_id_from_cobra_encoding):
"""Create function for converting SBML IDs.
The returned function will strip prefixes, decode the ID using the provided
function. These prefixes are common on IDs in SBML models because the IDs
live in a global namespace.
"""
def convert_sbml_id(entry):
if isinstance(entry, BaseCompartmentEntry):
prefix = compartment_prefix
elif isinstance(entry, BaseReactionEntry):
prefix = reaction_prefix
elif isinstance(entry, BaseCompoundEntry):
prefix = compound_prefix
new_id = entry.id
if decode_id is not None:
new_id = decode_id(new_id)
if prefix is not None and new_id.startswith(prefix):
new_id = new_id[len(prefix):]
return new_id
return convert_sbml_id | Create function for converting SBML IDs.
The returned function will strip prefixes, decode the ID using the provided
function. These prefixes are common on IDs in SBML models because the IDs
live in a global namespace. | Below is the the instruction that describes the task:
### Input:
Create function for converting SBML IDs.
The returned function will strip prefixes, decode the ID using the provided
function. These prefixes are common on IDs in SBML models because the IDs
live in a global namespace.
### Response:
def create_convert_sbml_id_function(
compartment_prefix='C_', reaction_prefix='R_',
compound_prefix='M_', decode_id=entry_id_from_cobra_encoding):
"""Create function for converting SBML IDs.
The returned function will strip prefixes, decode the ID using the provided
function. These prefixes are common on IDs in SBML models because the IDs
live in a global namespace.
"""
def convert_sbml_id(entry):
if isinstance(entry, BaseCompartmentEntry):
prefix = compartment_prefix
elif isinstance(entry, BaseReactionEntry):
prefix = reaction_prefix
elif isinstance(entry, BaseCompoundEntry):
prefix = compound_prefix
new_id = entry.id
if decode_id is not None:
new_id = decode_id(new_id)
if prefix is not None and new_id.startswith(prefix):
new_id = new_id[len(prefix):]
return new_id
return convert_sbml_id |
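A standalone sketch of the prefix-stripping step only; the 'M_' prefix and example id are illustrative, and COBRA escape decoding is left to the real entry_id_from_cobra_encoding.
def strip_sbml_prefix(raw_id, prefix='M_'):
    # 'M_h2o_c' -> 'h2o_c'; ids without the prefix pass through unchanged
    return raw_id[len(prefix):] if raw_id.startswith(prefix) else raw_id

strip_sbml_prefix('M_h2o_c')   # 'h2o_c'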
def thr_img(img, thr=2., mode='+'):
""" Use the given magic function name `func` to threshold with value `thr`
the data of `img` and return a new nibabel.Nifti1Image.
Parameters
----------
img: img-like
thr: float or int
The threshold value.
mode: str
Choices: '+' for positive threshold,
'+-' for positive and negative threshold and
'-' for negative threshold.
Returns
-------
thr_img: nibabel.Nifti1Image
Thresholded image
"""
vol = read_img(img).get_data()
if mode == '+':
mask = vol > thr
elif mode == '+-' or mode == '-+':
mask = np.abs(vol) > thr
elif mode == '-':
mask = vol < -thr
else:
raise ValueError("Expected `mode` to be one of ('+', '+-', '-+', '-'), "
"got {}.".format(mode))
return vol * mask | Use the given magic function name `func` to threshold with value `thr`
the data of `img` and return a new nibabel.Nifti1Image.
Parameters
----------
img: img-like
thr: float or int
The threshold value.
mode: str
Choices: '+' for positive threshold,
'+-' for positive and negative threshold and
'-' for negative threshold.
Returns
-------
thr_img: nibabel.Nifti1Image
Thresholded image | Below is the the instruction that describes the task:
### Input:
Use the given magic function name `func` to threshold with value `thr`
the data of `img` and return a new nibabel.Nifti1Image.
Parameters
----------
img: img-like
thr: float or int
The threshold value.
mode: str
Choices: '+' for positive threshold,
'+-' for positive and negative threshold and
'-' for negative threshold.
Returns
-------
thr_img: nibabel.Nifti1Image
Thresholded image
### Response:
def thr_img(img, thr=2., mode='+'):
""" Use the given magic function name `func` to threshold with value `thr`
the data of `img` and return a new nibabel.Nifti1Image.
Parameters
----------
img: img-like
thr: float or int
The threshold value.
mode: str
Choices: '+' for positive threshold,
'+-' for positive and negative threshold and
'-' for negative threshold.
Returns
-------
thr_img: nibabel.Nifti1Image
Thresholded image
"""
vol = read_img(img).get_data()
if mode == '+':
mask = vol > thr
elif mode == '+-' or mode == '-+':
mask = np.abs(vol) > thr
elif mode == '-':
mask = vol < -thr
else:
raise ValueError("Expected `mode` to be one of ('+', '+-', '-+', '-'), "
"got {}.".format(mode))
return vol * mask |
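A small NumPy sketch of the '+-' branch above on a toy array; values whose magnitude is at or below the threshold are zeroed out.
import numpy as np

vol = np.array([-3.1, -0.5, 0.0, 1.9, 2.4])
mask = np.abs(vol) > 2.0
masked = vol * mask   # keeps -3.1 and 2.4, zeroes the rest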
def Alt_Fn(self, n, dl = 0):
"""Alt + Fn1~12 组合键
"""
self.Delay(dl)
self.keyboard.press_key(self.keyboard.alt_key)
self.keyboard.tap_key(self.keyboard.function_keys[n])
self.keyboard.release_key(self.keyboard.alt_key) | Alt + Fn1~12 key combination
### Input:
Alt + Fn1~12 key combination
### Response:
def Alt_Fn(self, n, dl = 0):
"""Alt + Fn1~12 组合键
"""
self.Delay(dl)
self.keyboard.press_key(self.keyboard.alt_key)
self.keyboard.tap_key(self.keyboard.function_keys[n])
self.keyboard.release_key(self.keyboard.alt_key) |
def missingkeys_nonstandard(block, commdct, dtls, objectlist, afield='afiled %s'):
"""This is an object list where thre is no first field name
to give a hint of what the first field name should be"""
afield = 'afield %s'
for key_txt in objectlist:
key_i = dtls.index(key_txt.upper())
comm = commdct[key_i]
if block:
blk = block[key_i]
for i, cmt in enumerate(comm):
if cmt == {}:
first_i = i
break
for i, cmt in enumerate(comm):
if i >= first_i:
if block:
comm[i]['field'] = ['%s' % (blk[i])]
else:
comm[i]['field'] = [afield % (i - first_i + 1,),] | This is an object list where there is no first field name
to give a hint of what the first field name should be | Below is the the instruction that describes the task:
### Input:
This is an object list where there is no first field name
to give a hint of what the first field name should be
### Response:
def missingkeys_nonstandard(block, commdct, dtls, objectlist, afield='afiled %s'):
"""This is an object list where thre is no first field name
to give a hint of what the first field name should be"""
afield = 'afield %s'
for key_txt in objectlist:
key_i = dtls.index(key_txt.upper())
comm = commdct[key_i]
if block:
blk = block[key_i]
for i, cmt in enumerate(comm):
if cmt == {}:
first_i = i
break
for i, cmt in enumerate(comm):
if i >= first_i:
if block:
comm[i]['field'] = ['%s' % (blk[i])]
else:
comm[i]['field'] = [afield % (i - first_i + 1,),] |
def unread_count(current):
"""
Number of unread messages for current user
.. code-block:: python
# request:
{
'view':'_zops_unread_count',
}
# response:
{
'status': 'OK',
'code': 200,
'notifications': int,
'messages': int,
}
"""
unread_ntf = 0
unread_msg = 0
for sbs in current.user.subscriptions.objects.filter(is_visible=True):
try:
if sbs.channel.key == current.user.prv_exchange:
unread_ntf += sbs.unread_count()
else:
unread_msg += sbs.unread_count()
except ObjectDoesNotExist:
# FIXME: This should not happen,
log.exception("MULTIPLE PRV EXCHANGES!!!!")
sbs.delete()
current.output = {
'status': 'OK',
'code': 200,
'notifications': unread_ntf,
'messages': unread_msg
} | Number of unread messages for current user
.. code-block:: python
# request:
{
'view':'_zops_unread_count',
}
# response:
{
'status': 'OK',
'code': 200,
'notifications': int,
'messages': int,
} | Below is the the instruction that describes the task:
### Input:
Number of unread messages for current user
.. code-block:: python
# request:
{
'view':'_zops_unread_count',
}
# response:
{
'status': 'OK',
'code': 200,
'notifications': int,
'messages': int,
}
### Response:
def unread_count(current):
"""
Number of unread messages for current user
.. code-block:: python
# request:
{
'view':'_zops_unread_count',
}
# response:
{
'status': 'OK',
'code': 200,
'notifications': int,
'messages': int,
}
"""
unread_ntf = 0
unread_msg = 0
for sbs in current.user.subscriptions.objects.filter(is_visible=True):
try:
if sbs.channel.key == current.user.prv_exchange:
unread_ntf += sbs.unread_count()
else:
unread_msg += sbs.unread_count()
except ObjectDoesNotExist:
# FIXME: This should not happen,
log.exception("MULTIPLE PRV EXCHANGES!!!!")
sbs.delete()
current.output = {
'status': 'OK',
'code': 200,
'notifications': unread_ntf,
'messages': unread_msg
} |
def urlretrieve(self, url, filename=None, method='GET', body=None, dir=None, **kwargs):
"""
Save result of a request to a file, similarly to
:func:`urllib.urlretrieve`.
If an error is encountered may raise any of the scrapelib
`exceptions`_.
A filename may be provided or :meth:`urlretrieve` will safely create a
temporary file. If a directory is provided, a file will be given a random
name within the specified directory. Either way, it is the responsibility
of the caller to ensure that the temporary file is deleted when it is no
longer needed.
:param url: URL for request
:param filename: optional name for file
:param method: any valid HTTP method, but generally GET or POST
:param body: optional body for request, to turn parameters into
an appropriate string use :func:`urllib.urlencode()`
:param dir: optional directory to place file in
:returns filename, response: tuple with filename for saved
response (will be same as given filename if one was given,
otherwise will be a temp file in the OS temp directory) and
a :class:`Response` object that can be used to inspect the
response headers.
"""
result = self.request(method, url, data=body, **kwargs)
result.code = result.status_code # backwards compat
if not filename:
fd, filename = tempfile.mkstemp(dir=dir)
f = os.fdopen(fd, 'wb')
else:
f = open(filename, 'wb')
f.write(result.content)
f.close()
return filename, result | Save result of a request to a file, similarly to
:func:`urllib.urlretrieve`.
If an error is encountered may raise any of the scrapelib
`exceptions`_.
A filename may be provided or :meth:`urlretrieve` will safely create a
temporary file. If a directory is provided, a file will be given a random
name within the specified directory. Either way, it is the responsibility
of the caller to ensure that the temporary file is deleted when it is no
longer needed.
:param url: URL for request
:param filename: optional name for file
:param method: any valid HTTP method, but generally GET or POST
:param body: optional body for request, to turn parameters into
an appropriate string use :func:`urllib.urlencode()`
:param dir: optional directory to place file in
:returns filename, response: tuple with filename for saved
response (will be same as given filename if one was given,
otherwise will be a temp file in the OS temp directory) and
a :class:`Response` object that can be used to inspect the
response headers. | Below is the the instruction that describes the task:
### Input:
Save result of a request to a file, similarly to
:func:`urllib.urlretrieve`.
If an error is encountered may raise any of the scrapelib
`exceptions`_.
A filename may be provided or :meth:`urlretrieve` will safely create a
temporary file. If a directory is provided, a file will be given a random
name within the specified directory. Either way, it is the responsibility
of the caller to ensure that the temporary file is deleted when it is no
longer needed.
:param url: URL for request
:param filename: optional name for file
:param method: any valid HTTP method, but generally GET or POST
:param body: optional body for request, to turn parameters into
an appropriate string use :func:`urllib.urlencode()`
:param dir: optional directory to place file in
:returns filename, response: tuple with filename for saved
response (will be same as given filename if one was given,
otherwise will be a temp file in the OS temp directory) and
a :class:`Response` object that can be used to inspect the
response headers.
### Response:
def urlretrieve(self, url, filename=None, method='GET', body=None, dir=None, **kwargs):
"""
Save result of a request to a file, similarly to
:func:`urllib.urlretrieve`.
If an error is encountered may raise any of the scrapelib
`exceptions`_.
A filename may be provided or :meth:`urlretrieve` will safely create a
temporary file. If a directory is provided, a file will be given a random
name within the specified directory. Either way, it is the responsibility
of the caller to ensure that the temporary file is deleted when it is no
longer needed.
:param url: URL for request
:param filename: optional name for file
:param method: any valid HTTP method, but generally GET or POST
:param body: optional body for request, to turn parameters into
an appropriate string use :func:`urllib.urlencode()`
:param dir: optional directory to place file in
:returns filename, response: tuple with filename for saved
response (will be same as given filename if one was given,
otherwise will be a temp file in the OS temp directory) and
a :class:`Response` object that can be used to inspect the
response headers.
"""
result = self.request(method, url, data=body, **kwargs)
result.code = result.status_code # backwards compat
if not filename:
fd, filename = tempfile.mkstemp(dir=dir)
f = os.fdopen(fd, 'wb')
else:
f = open(filename, 'wb')
f.write(result.content)
f.close()
return filename, result |
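A hypothetical call, not taken from the scrapelib docs; the URL and directory are placeholders.
s = Scraper()
filename, response = s.urlretrieve('https://example.com/report.csv', dir='/tmp')
print(filename, response.code)   # temp file path and the backwards-compat status code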
def generalize_sql(sql):
"""
Removes most variables from an SQL query and replaces them with X or N for numbers.
Based on Mediawiki's DatabaseBase::generalizeSQL
:type sql str|None
:rtype: str
"""
if sql is None:
return None
# multiple spaces
sql = re.sub(r'\s{2,}', ' ', sql)
# MW comments
# e.g. /* CategoryDataService::getMostVisited N.N.N.N */
sql = remove_comments_from_sql(sql)
# handle LIKE statements
sql = normalize_likes(sql)
sql = re.sub(r"\\\\", '', sql)
sql = re.sub(r"\\'", '', sql)
sql = re.sub(r'\\"', '', sql)
sql = re.sub(r"'[^\']*'", 'X', sql)
sql = re.sub(r'"[^\"]*"', 'X', sql)
# All newlines, tabs, etc replaced by single space
sql = re.sub(r'\s+', ' ', sql)
# All numbers => N
sql = re.sub(r'-?[0-9]+', 'N', sql)
# WHERE foo IN ('880987','882618','708228','522330')
sql = re.sub(r' (IN|VALUES)\s*\([^,]+,[^)]+\)', ' \\1 (XYZ)', sql, flags=re.IGNORECASE)
return sql.strip() | Removes most variables from an SQL query and replaces them with X or N for numbers.
Based on Mediawiki's DatabaseBase::generalizeSQL
:type sql str|None
:rtype: str | Below is the the instruction that describes the task:
### Input:
Removes most variables from an SQL query and replaces them with X or N for numbers.
Based on Mediawiki's DatabaseBase::generalizeSQL
:type sql str|None
:rtype: str
### Response:
def generalize_sql(sql):
"""
Removes most variables from an SQL query and replaces them with X or N for numbers.
Based on Mediawiki's DatabaseBase::generalizeSQL
:type sql str|None
:rtype: str
"""
if sql is None:
return None
# multiple spaces
sql = re.sub(r'\s{2,}', ' ', sql)
# MW comments
# e.g. /* CategoryDataService::getMostVisited N.N.N.N */
sql = remove_comments_from_sql(sql)
# handle LIKE statements
sql = normalize_likes(sql)
sql = re.sub(r"\\\\", '', sql)
sql = re.sub(r"\\'", '', sql)
sql = re.sub(r'\\"', '', sql)
sql = re.sub(r"'[^\']*'", 'X', sql)
sql = re.sub(r'"[^\"]*"', 'X', sql)
# All newlines, tabs, etc replaced by single space
sql = re.sub(r'\s+', ' ', sql)
# All numbers => N
sql = re.sub(r'-?[0-9]+', 'N', sql)
# WHERE foo IN ('880987','882618','708228','522330')
sql = re.sub(r' (IN|VALUES)\s*\([^,]+,[^)]+\)', ' \\1 (XYZ)', sql, flags=re.IGNORECASE)
return sql.strip() |
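A rough before/after, assuming the module's helpers (remove_comments_from_sql, normalize_likes) are importable alongside the function and are no-ops on these inputs.
generalize_sql("SELECT * FROM page WHERE page_id = 123 AND title = 'Foo'")
# -> "SELECT * FROM page WHERE page_id = N AND title = X"
generalize_sql("SELECT * FROM page WHERE page_id IN (1,2,3)")
# -> "SELECT * FROM page WHERE page_id IN (XYZ)"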
def listify(*args):
"""
Convert args to a list, unless there's one arg and it's a
function, then acts a decorator.
"""
if (len(args) == 1) and callable(args[0]):
func = args[0]
@wraps(func)
def _inner(*args, **kwargs):
return list(func(*args, **kwargs))
return _inner
else:
return list(args) | Convert args to a list, unless there's one arg and it's a
function, then acts a decorator. | Below is the the instruction that describes the task:
### Input:
Convert args to a list, unless there's one arg and it's a
function, then acts a decorator.
### Response:
def listify(*args):
"""
Convert args to a list, unless there's one arg and it's a
function, then acts a decorator.
"""
if (len(args) == 1) and callable(args[0]):
func = args[0]
@wraps(func)
def _inner(*args, **kwargs):
return list(func(*args, **kwargs))
return _inner
else:
return list(args) |
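Both calling modes of the helper above, as a short sketch:
@listify
def squares(n):
    for i in range(n):
        yield i * i

squares(4)        # [0, 1, 4, 9]
listify(1, 2, 3)  # [1, 2, 3]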
def write_contents(self, table, reader):
"""Write the contents of `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
- `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source.
Returns None
"""
f = self.FileObjFaker(table, reader.read(table), self.process_row, self.verbose)
self.copy_from(f, '"%s"' % table.name, ['"%s"' % c['name'] for c in table.columns]) | Write the contents of `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
- `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source.
Returns None | Below is the the instruction that describes the task:
### Input:
Write the contents of `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
- `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source.
Returns None
### Response:
def write_contents(self, table, reader):
"""Write the contents of `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
- `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source.
Returns None
"""
f = self.FileObjFaker(table, reader.read(table), self.process_row, self.verbose)
self.copy_from(f, '"%s"' % table.name, ['"%s"' % c['name'] for c in table.columns]) |
def from_string(input_str) -> 'MissionTime':
# noinspection SpellCheckingInspection
"""
Creates a MissionTime instance from a string
Format: YYYYMMDDHHMMSS
Args:
input_str: string to parse
Returns: MissionTime instance
"""
match = RE_INPUT_STRING.match(input_str)
if not match:
raise ValueError(f'badly formatted date/time: {input_str}')
return MissionTime(
datetime.datetime(
int(match.group('year')),
int(match.group('month')),
int(match.group('day')),
int(match.group('hour')),
int(match.group('minute')),
int(match.group('second')),
)
) | Creates a MissionTime instance from a string
Format: YYYYMMDDHHMMSS
Args:
input_str: string to parse
Returns: MissionTime instance | Below is the the instruction that describes the task:
### Input:
Creates a MissionTime instance from a string
Format: YYYYMMDDHHMMSS
Args:
input_str: string to parse
Returns: MissionTime instance
### Response:
def from_string(input_str) -> 'MissionTime':
# noinspection SpellCheckingInspection
"""
Creates a MissionTime instance from a string
Format: YYYYMMDDHHMMSS
Args:
input_str: string to parse
Returns: MissionTime instance
"""
match = RE_INPUT_STRING.match(input_str)
if not match:
raise ValueError(f'badly formatted date/time: {input_str}')
return MissionTime(
datetime.datetime(
int(match.group('year')),
int(match.group('month')),
int(match.group('day')),
int(match.group('hour')),
int(match.group('minute')),
int(match.group('second')),
)
) |
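The module's RE_INPUT_STRING is not shown above; the pattern below is a plausible reconstruction of a YYYYMMDDHHMMSS matcher with the named groups the code expects.
import re

RE_INPUT_STRING = re.compile(
    r'^(?P<year>\d{4})(?P<month>\d{2})(?P<day>\d{2})'
    r'(?P<hour>\d{2})(?P<minute>\d{2})(?P<second>\d{2})$'
)
m = RE_INPUT_STRING.match('20240131083000')
m.group('year'), m.group('hour')   # ('2024', '08')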
def load_name(*names, load_order=DEFAULT_LOAD_ORDER, extension='yaml', missing=Missing.silent):
"""
Read a `.Configuration` instance by name, trying to read from files in
increasing significance. The default load order is `.system`, `.user`,
`.application`, `.environment`.
Multiple names are combined with multiple loaders using names as the 'inner
loop / selector', loading ``/etc/name1.yaml`` and ``/etc/name2.yaml``
before ``./name1.yaml`` and ``./name2.yaml``.
:param names: application or configuration set names, in increasing
significance
:param load_order: ordered list of name templates or `callable`s, in
increasing order of significance
:param extension: file extension to be used
:param missing: policy to be used when a configured key is missing, either
as a `.Missing` instance or a default value
:return: a `.Configuration` instance providing values loaded from *names*
in *load_order* ordering
"""
def generate_sources():
# argument order for product matters, for names "foo" and "bar":
# /etc/foo.yaml before /etc/bar.yaml, but both of them before ~/.foo.yaml and ~/.bar.yaml
for source, name in product(load_order, names):
if callable(source):
yield source(name, extension)
else:
# expand user to turn ~/.name.yaml into /home/user/.name.yaml
candidate = path.expanduser(source.format(name=name, extension=extension))
yield loadf(candidate, default=NotConfigured)
return Configuration(*generate_sources(), missing=missing) | Read a `.Configuration` instance by name, trying to read from files in
increasing significance. The default load order is `.system`, `.user`,
`.application`, `.environment`.
Multiple names are combined with multiple loaders using names as the 'inner
loop / selector', loading ``/etc/name1.yaml`` and ``/etc/name2.yaml``
before ``./name1.yaml`` and ``./name2.yaml``.
:param names: application or configuration set names, in increasing
significance
:param load_order: ordered list of name templates or `callable`s, in
increasing order of significance
:param extension: file extension to be used
:param missing: policy to be used when a configured key is missing, either
as a `.Missing` instance or a default value
:return: a `.Configuration` instance providing values loaded from *names*
in *load_order* ordering | Below is the the instruction that describes the task:
### Input:
Read a `.Configuration` instance by name, trying to read from files in
increasing significance. The default load order is `.system`, `.user`,
`.application`, `.environment`.
Multiple names are combined with multiple loaders using names as the 'inner
loop / selector', loading ``/etc/name1.yaml`` and ``/etc/name2.yaml``
before ``./name1.yaml`` and ``./name2.yaml``.
:param names: application or configuration set names, in increasing
significance
:param load_order: ordered list of name templates or `callable`s, in
increasing order of significance
:param extension: file extension to be used
:param missing: policy to be used when a configured key is missing, either
as a `.Missing` instance or a default value
:return: a `.Configuration` instance providing values loaded from *names*
in *load_order* ordering
### Response:
def load_name(*names, load_order=DEFAULT_LOAD_ORDER, extension='yaml', missing=Missing.silent):
"""
Read a `.Configuration` instance by name, trying to read from files in
increasing significance. The default load order is `.system`, `.user`,
`.application`, `.environment`.
Multiple names are combined with multiple loaders using names as the 'inner
loop / selector', loading ``/etc/name1.yaml`` and ``/etc/name2.yaml``
before ``./name1.yaml`` and ``./name2.yaml``.
:param names: application or configuration set names, in increasing
significance
:param load_order: ordered list of name templates or `callable`s, in
increasing order of significance
:param extension: file extension to be used
:param missing: policy to be used when a configured key is missing, either
as a `.Missing` instance or a default value
:return: a `.Configuration` instance providing values loaded from *names*
in *load_order* ordering
"""
def generate_sources():
# argument order for product matters, for names "foo" and "bar":
# /etc/foo.yaml before /etc/bar.yaml, but both of them before ~/.foo.yaml and ~/.bar.yaml
for source, name in product(load_order, names):
if callable(source):
yield source(name, extension)
else:
# expand user to turn ~/.name.yaml into /home/user/.name.yaml
candidate = path.expanduser(source.format(name=name, extension=extension))
yield loadf(candidate, default=NotConfigured)
return Configuration(*generate_sources(), missing=missing) |
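The product ordering described in the docstring, sketched with placeholder templates (the real DEFAULT_LOAD_ORDER entries are not shown above):
from itertools import product

load_order = ('/etc/{name}.{extension}', '~/.{name}.{extension}', './{name}.{extension}')
names = ('foo', 'bar')
[t.format(name=n, extension='yaml') for t, n in product(load_order, names)]
# ['/etc/foo.yaml', '/etc/bar.yaml', '~/.foo.yaml', '~/.bar.yaml', './foo.yaml', './bar.yaml']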
def status_log(func, message, *args, **kwargs):
"""Emits header message, executes a callable, and echoes the return strings."""
click.echo(message)
log = func(*args, **kwargs)
if log:
out = []
for line in log.split('\n'):
if not line.startswith('#'):
out.append(line)
click.echo(black('\n'.join(out))) | Emits header message, executes a callable, and echoes the return strings. | Below is the the instruction that describes the task:
### Input:
Emits header message, executes a callable, and echoes the return strings.
### Response:
def status_log(func, message, *args, **kwargs):
"""Emits header message, executes a callable, and echoes the return strings."""
click.echo(message)
log = func(*args, **kwargs)
if log:
out = []
for line in log.split('\n'):
if not line.startswith('#'):
out.append(line)
click.echo(black('\n'.join(out))) |
def __get_real_pid(self):
"""
Attempts to determine the true process ID by querying the
/proc/<pid>/sched file. This works on systems with a proc filesystem.
Otherwise default to os default.
"""
pid = None
if os.path.exists("/proc/"):
sched_file = "/proc/%d/sched" % os.getpid()
if os.path.isfile(sched_file):
try:
file = open(sched_file)
line = file.readline()
g = re.search(r'\((\d+),', line)
if len(g.groups()) == 1:
pid = int(g.groups()[0])
except Exception:
logger.debug("parsing sched file failed", exc_info=True)
pass
if pid is None:
pid = os.getpid()
return pid | Attempts to determine the true process ID by querying the
/proc/<pid>/sched file. This works on systems with a proc filesystem.
Otherwise default to os default. | Below is the the instruction that describes the task:
### Input:
Attempts to determine the true process ID by querying the
/proc/<pid>/sched file. This works on systems with a proc filesystem.
Otherwise default to os default.
### Response:
def __get_real_pid(self):
"""
Attempts to determine the true process ID by querying the
/proc/<pid>/sched file. This works on systems with a proc filesystem.
Otherwise default to os default.
"""
pid = None
if os.path.exists("/proc/"):
sched_file = "/proc/%d/sched" % os.getpid()
if os.path.isfile(sched_file):
try:
file = open(sched_file)
line = file.readline()
g = re.search(r'\((\d+),', line)
if len(g.groups()) == 1:
pid = int(g.groups()[0])
except Exception:
logger.debug("parsing sched file failed", exc_info=True)
pass
if pid is None:
pid = os.getpid()
return pid |
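What the regex above extracts, on a typical first line of /proc/<pid>/sched (the exact line format is an assumption here):
import re

line = 'python (4172, #threads: 1)'
match = re.search(r'\((\d+),', line)
int(match.groups()[0])   # 4172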
def req(self, meth, url, http_data=''):
"""
sugar that wraps the 'requests' module with basic auth and some headers.
"""
self.logger.debug("Making request: %s %s\nBody:%s" % (meth, url, http_data))
req_method = getattr(requests, meth)
return (req_method(url,
auth=(self.__username, self.__password),
data=http_data,
headers=({'user-agent': self.user_agent(), 'Accept': 'application/json'}))) | sugar that wraps the 'requests' module with basic auth and some headers. | Below is the the instruction that describes the task:
### Input:
sugar that wraps the 'requests' module with basic auth and some headers.
### Response:
def req(self, meth, url, http_data=''):
"""
sugar that wraps the 'requests' module with basic auth and some headers.
"""
self.logger.debug("Making request: %s %s\nBody:%s" % (meth, url, http_data))
req_method = getattr(requests, meth)
return (req_method(url,
auth=(self.__username, self.__password),
data=http_data,
headers=({'user-agent': self.user_agent(), 'Accept': 'application/json'}))) |
def main():
"""
NAME
orientation_magic.py
DESCRIPTION
takes tab delimited field notebook information and converts to MagIC formatted tables
SYNTAX
orientation_magic.py [command line options]
OPTIONS
-f FILE: specify input file, default is: orient.txt
-Fsa FILE: specify output file, default is: er_samples.txt
-Fsi FILE: specify output site location file, default is: er_sites.txt
-app append/update these data in existing er_samples.txt, er_sites.txt files
-ocn OCON: specify orientation convention, default is #1 below
-dcn DCON [DEC]: specify declination convention, default is #1 below
if DCON = 2, you must supply the declination correction
-BCN don't correct bedding_dip_dir for magnetic declination -already corrected
-ncn NCON: specify naming convention: default is #1 below
-a: averages all bedding poles and uses average for all samples: default is NO
-gmt HRS: specify hours to subtract from local time to get GMT: default is 0
-mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM]
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
-DM: specify data model (2 or 3). Default: 3. Will output to the appropriate format.
Orientation convention:
Samples are oriented in the field with a "field arrow" and measured in the laboratory with a "lab arrow". The lab arrow is the positive X direction of the right handed coordinate system of the specimen measurements. The lab and field arrows may not be the same. In the MagIC database, we require the orientation (azimuth and plunge) of the X direction of the measurements (lab arrow). Here are some popular conventions that convert the field arrow azimuth (mag_azimuth in the orient.txt file) and dip (field_dip in orient.txt) to the azimuth and plunge of the laboratory arrow (sample_azimuth and sample_dip in er_samples.txt). The two angles, mag_azimuth and field_dip are explained below.
[1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade.
[2] Field arrow is the strike of the plane orthogonal to the drill direction,
Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90
Lab arrow dip = -field_dip
[3] Lab arrow is the same as the drill direction;
hade was measured in the field.
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
[4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
[5] Same as AZDIP convention explained below -
azimuth and inclination of the drill direction are mag_azimuth and field_dip;
lab arrow is as in [1] above.
lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
[6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
[7] all others you will have to either customize your
self or e-mail [email protected] for help.
Magnetic declination convention:
[1] Use the IGRF value at the lat/long and date supplied [default]
[2] Will supply declination correction
[3] mag_az is already corrected in file
[4] Correct mag_az but not bedding_dip_dir
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
OUTPUT
output saved in er_samples.txt and er_sites.txt (or samples.txt and sites.txt if using data model 3.0)
- this will overwrite any existing files
"""
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
else:
info = [['WD', False, '.'], ['ID', False, ''], ['f', False, 'orient.txt'],
['app', False, False], ['ocn', False, 1], ['dcn', False, 1],
['BCN', False, True], ['ncn', False, '1'], ['gmt', False, 0],
['mcd', False, ''], ['a', False, False], ['DM', False, 3]]
#output_dir_path, input_dir_path, orient_file, append, or_con, dec_correction_con, samp_con, hours_from_gmt, method_codes, average_bedding
# leave off -Fsa, -Fsi b/c defaults in command_line_extractor
dataframe = extractor.command_line_dataframe(info)
checked_args = extractor.extract_and_check_args(args, dataframe)
output_dir_path, input_dir_path, orient_file, append, or_con, dec_correction_con, bed_correction, samp_con, hours_from_gmt, method_codes, average_bedding, samp_file, site_file, data_model = extractor.get_vars(['WD', 'ID', 'f', 'app', 'ocn', 'dcn', 'BCN', 'ncn', 'gmt', 'mcd', 'a', 'Fsa', 'Fsi', 'DM'], checked_args)
if input_dir_path == '.':
input_dir_path = output_dir_path
if not isinstance(dec_correction_con, int):
if len(dec_correction_con) > 1:
dec_correction = int(dec_correction_con.split()[1])
dec_correction_con = int(dec_correction_con.split()[0])
else:
dec_correction = 0
else:
dec_correction = 0
ipmag.orientation_magic(or_con, dec_correction_con, dec_correction, bed_correction, samp_con, hours_from_gmt, method_codes, average_bedding, orient_file, samp_file, site_file, output_dir_path, input_dir_path, append, data_model) | NAME
orientation_magic.py
DESCRIPTION
takes tab delimited field notebook information and converts to MagIC formatted tables
SYNTAX
orientation_magic.py [command line options]
OPTIONS
-f FILE: specify input file, default is: orient.txt
-Fsa FILE: specify output file, default is: er_samples.txt
-Fsi FILE: specify output site location file, default is: er_sites.txt
-app append/update these data in existing er_samples.txt, er_sites.txt files
-ocn OCON: specify orientation convention, default is #1 below
-dcn DCON [DEC]: specify declination convention, default is #1 below
if DCON = 2, you must supply the declination correction
-BCN don't correct bedding_dip_dir for magnetic declination -already corrected
-ncn NCON: specify naming convention: default is #1 below
-a: averages all bedding poles and uses average for all samples: default is NO
-gmt HRS: specify hours to subtract from local time to get GMT: default is 0
-mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM]
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
-DM: specify data model (2 or 3). Default: 3. Will output to the appropriate format.
Orientation convention:
Samples are oriented in the field with a "field arrow" and measured in the laboratory with a "lab arrow". The lab arrow is the positive X direction of the right handed coordinate system of the specimen measurements. The lab and field arrows may not be the same. In the MagIC database, we require the orientation (azimuth and plunge) of the X direction of the measurements (lab arrow). Here are some popular conventions that convert the field arrow azimuth (mag_azimuth in the orient.txt file) and dip (field_dip in orient.txt) to the azimuth and plunge of the laboratory arrow (sample_azimuth and sample_dip in er_samples.txt). The two angles, mag_azimuth and field_dip are explained below.
[1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade.
[2] Field arrow is the strike of the plane orthogonal to the drill direction,
Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90
Lab arrow dip = -field_dip
[3] Lab arrow is the same as the drill direction;
hade was measured in the field.
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
[4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
[5] Same as AZDIP convention explained below -
azimuth and inclination of the drill direction are mag_azimuth and field_dip;
lab arrow is as in [1] above.
lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
[6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
[7] all others you will have to either customize your
self or e-mail [email protected] for help.
Magnetic declination convention:
[1] Use the IGRF value at the lat/long and date supplied [default]
[2] Will supply declination correction
[3] mag_az is already corrected in file
[4] Correct mag_az but not bedding_dip_dir
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
OUTPUT
output saved in er_samples.txt and er_sites.txt (or samples.txt and sites.txt if using data model 3.0)
        - this will overwrite any existing files | Below is the instruction that describes the task:
### Input:
NAME
orientation_magic.py
DESCRIPTION
takes tab delimited field notebook information and converts to MagIC formatted tables
SYNTAX
orientation_magic.py [command line options]
OPTIONS
-f FILE: specify input file, default is: orient.txt
-Fsa FILE: specify output file, default is: er_samples.txt
-Fsi FILE: specify output site location file, default is: er_sites.txt
-app append/update these data in existing er_samples.txt, er_sites.txt files
-ocn OCON: specify orientation convention, default is #1 below
-dcn DCON [DEC]: specify declination convention, default is #1 below
if DCON = 2, you must supply the declination correction
-BCN don't correct bedding_dip_dir for magnetic declination -already corrected
-ncn NCON: specify naming convention: default is #1 below
-a: averages all bedding poles and uses average for all samples: default is NO
-gmt HRS: specify hours to subtract from local time to get GMT: default is 0
-mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM]
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
-DM: specify data model (2 or 3). Default: 3. Will output to the appropriate format.
Orientation convention:
Samples are oriented in the field with a "field arrow" and measured in the laboratory with a "lab arrow". The lab arrow is the positive X direction of the right handed coordinate system of the specimen measurements. The lab and field arrows may not be the same. In the MagIC database, we require the orientation (azimuth and plunge) of the X direction of the measurements (lab arrow). Here are some popular conventions that convert the field arrow azimuth (mag_azimuth in the orient.txt file) and dip (field_dip in orient.txt) to the azimuth and plunge of the laboratory arrow (sample_azimuth and sample_dip in er_samples.txt). The two angles, mag_azimuth and field_dip are explained below.
[1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade.
[2] Field arrow is the strike of the plane orthogonal to the drill direction,
Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90
Lab arrow dip = -field_dip
[3] Lab arrow is the same as the drill direction;
hade was measured in the field.
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
[4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
[5] Same as AZDIP convention explained below -
azimuth and inclination of the drill direction are mag_azimuth and field_dip;
lab arrow is as in [1] above.
lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
[6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
[7] all others you will have to either customize your
self or e-mail [email protected] for help.
Magnetic declination convention:
[1] Use the IGRF value at the lat/long and date supplied [default]
[2] Will supply declination correction
[3] mag_az is already corrected in file
[4] Correct mag_az but not bedding_dip_dir
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
OUTPUT
output saved in er_samples.txt and er_sites.txt (or samples.txt and sites.txt if using data model 3.0)
- this will overwrite any existing files
### Response:
def main():
"""
NAME
orientation_magic.py
DESCRIPTION
takes tab delimited field notebook information and converts to MagIC formatted tables
SYNTAX
orientation_magic.py [command line options]
OPTIONS
-f FILE: specify input file, default is: orient.txt
-Fsa FILE: specify output file, default is: er_samples.txt
-Fsi FILE: specify output site location file, default is: er_sites.txt
-app append/update these data in existing er_samples.txt, er_sites.txt files
-ocn OCON: specify orientation convention, default is #1 below
-dcn DCON [DEC]: specify declination convention, default is #1 below
if DCON = 2, you must supply the declination correction
-BCN don't correct bedding_dip_dir for magnetic declination -already corrected
-ncn NCON: specify naming convention: default is #1 below
-a: averages all bedding poles and uses average for all samples: default is NO
-gmt HRS: specify hours to subtract from local time to get GMT: default is 0
-mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM]
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
-DM: specify data model (2 or 3). Default: 3. Will output to the appropriate format.
Orientation convention:
Samples are oriented in the field with a "field arrow" and measured in the laboratory with a "lab arrow". The lab arrow is the positive X direction of the right handed coordinate system of the specimen measurements. The lab and field arrows may not be the same. In the MagIC database, we require the orientation (azimuth and plunge) of the X direction of the measurements (lab arrow). Here are some popular conventions that convert the field arrow azimuth (mag_azimuth in the orient.txt file) and dip (field_dip in orient.txt) to the azimuth and plunge of the laboratory arrow (sample_azimuth and sample_dip in er_samples.txt). The two angles, mag_azimuth and field_dip are explained below.
[1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade.
[2] Field arrow is the strike of the plane orthogonal to the drill direction,
Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90
Lab arrow dip = -field_dip
[3] Lab arrow is the same as the drill direction;
hade was measured in the field.
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
[4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
[5] Same as AZDIP convention explained below -
azimuth and inclination of the drill direction are mag_azimuth and field_dip;
lab arrow is as in [1] above.
lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
[6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
[7] all others you will have to either customize your
self or e-mail [email protected] for help.
Magnetic declination convention:
[1] Use the IGRF value at the lat/long and date supplied [default]
[2] Will supply declination correction
[3] mag_az is already corrected in file
[4] Correct mag_az but not bedding_dip_dir
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
OUTPUT
output saved in er_samples.txt and er_sites.txt (or samples.txt and sites.txt if using data model 3.0)
- this will overwrite any existing files
"""
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
else:
info = [['WD', False, '.'], ['ID', False, ''], ['f', False, 'orient.txt'],
['app', False, False], ['ocn', False, 1], ['dcn', False, 1],
['BCN', False, True], ['ncn', False, '1'], ['gmt', False, 0],
['mcd', False, ''], ['a', False, False], ['DM', False, 3]]
#output_dir_path, input_dir_path, orient_file, append, or_con, dec_correction_con, samp_con, hours_from_gmt, method_codes, average_bedding
# leave off -Fsa, -Fsi b/c defaults in command_line_extractor
dataframe = extractor.command_line_dataframe(info)
checked_args = extractor.extract_and_check_args(args, dataframe)
output_dir_path, input_dir_path, orient_file, append, or_con, dec_correction_con, bed_correction, samp_con, hours_from_gmt, method_codes, average_bedding, samp_file, site_file, data_model = extractor.get_vars(['WD', 'ID', 'f', 'app', 'ocn', 'dcn', 'BCN', 'ncn', 'gmt', 'mcd', 'a', 'Fsa', 'Fsi', 'DM'], checked_args)
if input_dir_path == '.':
input_dir_path = output_dir_path
if not isinstance(dec_correction_con, int):
if len(dec_correction_con) > 1:
dec_correction = int(dec_correction_con.split()[1])
dec_correction_con = int(dec_correction_con.split()[0])
else:
dec_correction = 0
else:
dec_correction = 0
ipmag.orientation_magic(or_con, dec_correction_con, dec_correction, bed_correction, samp_con, hours_from_gmt, method_codes, average_bedding, orient_file, samp_file, site_file, output_dir_path, input_dir_path, append, data_model) |
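The convention tables above are easier to sanity-check with a small worked example. The sketch below is not part of orientation_magic.py: the measurement values and the helper name lab_arrow are invented, and only conventions [1]-[3] are covered.

# Illustrative sketch of orientation conventions [1]-[3] above; values are made up.
def lab_arrow(mag_azimuth, field_dip, convention=1):
    if convention == 1:    # Pomeroy: azimuth and hade of the drill direction
        return mag_azimuth % 360., -field_dip
    elif convention == 2:  # field arrow is the strike of the plane orthogonal to the drill direction
        return (mag_azimuth - 90.) % 360., -field_dip
    elif convention == 3:  # lab arrow is the drill direction; hade measured in the field
        return mag_azimuth % 360., 90. - field_dip
    raise ValueError("only conventions 1-3 are sketched here")

print(lab_arrow(175., 20., convention=1))   # (175.0, -20.0)
print(lab_arrow(175., 20., convention=2))   # (85.0, -20.0)
print(lab_arrow(175., 20., convention=3))   # (175.0, 70.0)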
def main():
'''Entry point'''
if len(sys.argv) == 1:
print("Usage: tyler [filename]")
sys.exit(0)
filename = sys.argv[1]
if not os.path.isfile(filename):
print("Specified file does not exists")
sys.exit(8)
my_tyler = Tyler(filename=filename)
while True:
try:
for line in my_tyler:
print(line)
time.sleep(1)
except KeyboardInterrupt:
print("Quit signal received")
            sys.exit(0) | Entry point | Below is the instruction that describes the task:
### Input:
Entry point
### Response:
def main():
'''Entry point'''
if len(sys.argv) == 1:
print("Usage: tyler [filename]")
sys.exit(0)
filename = sys.argv[1]
if not os.path.isfile(filename):
print("Specified file does not exists")
sys.exit(8)
my_tyler = Tyler(filename=filename)
while True:
try:
for line in my_tyler:
print(line)
time.sleep(1)
except KeyboardInterrupt:
print("Quit signal received")
sys.exit(0) |
def save_instance(self, instance, using_transactions=True, dry_run=False):
"""
Takes care of saving the object to the database.
Keep in mind that this is done by calling ``instance.save()``, so
objects are not created in bulk!
"""
self.before_save_instance(instance, using_transactions, dry_run)
if not using_transactions and dry_run:
# we don't have transactions and we want to do a dry_run
pass
else:
instance.save()
self.after_save_instance(instance, using_transactions, dry_run) | Takes care of saving the object to the database.
Keep in mind that this is done by calling ``instance.save()``, so
        objects are not created in bulk! | Below is the instruction that describes the task:
### Input:
Takes care of saving the object to the database.
Keep in mind that this is done by calling ``instance.save()``, so
objects are not created in bulk!
### Response:
def save_instance(self, instance, using_transactions=True, dry_run=False):
"""
Takes care of saving the object to the database.
Keep in mind that this is done by calling ``instance.save()``, so
objects are not created in bulk!
"""
self.before_save_instance(instance, using_transactions, dry_run)
if not using_transactions and dry_run:
# we don't have transactions and we want to do a dry_run
pass
else:
instance.save()
self.after_save_instance(instance, using_transactions, dry_run) |
def _final_frame_length(header, final_frame_bytes):
"""Calculates the length of a final ciphertext frame, given a complete header
and the number of bytes of ciphertext in the final frame.
:param header: Complete message header object
:type header: aws_encryption_sdk.structures.MessageHeader
:param int final_frame_bytes: Bytes of ciphertext in the final frame
:rtype: int
"""
final_frame_length = 4 # Sequence Number End
final_frame_length += 4 # Sequence Number
final_frame_length += header.algorithm.iv_len # IV
final_frame_length += 4 # Encrypted Content Length
final_frame_length += final_frame_bytes # Encrypted Content
final_frame_length += header.algorithm.auth_len # Authentication Tag
return final_frame_length | Calculates the length of a final ciphertext frame, given a complete header
and the number of bytes of ciphertext in the final frame.
:param header: Complete message header object
:type header: aws_encryption_sdk.structures.MessageHeader
:param int final_frame_bytes: Bytes of ciphertext in the final frame
    :rtype: int | Below is the instruction that describes the task:
### Input:
Calculates the length of a final ciphertext frame, given a complete header
and the number of bytes of ciphertext in the final frame.
:param header: Complete message header object
:type header: aws_encryption_sdk.structures.MessageHeader
:param int final_frame_bytes: Bytes of ciphertext in the final frame
:rtype: int
### Response:
def _final_frame_length(header, final_frame_bytes):
"""Calculates the length of a final ciphertext frame, given a complete header
and the number of bytes of ciphertext in the final frame.
:param header: Complete message header object
:type header: aws_encryption_sdk.structures.MessageHeader
:param int final_frame_bytes: Bytes of ciphertext in the final frame
:rtype: int
"""
final_frame_length = 4 # Sequence Number End
final_frame_length += 4 # Sequence Number
final_frame_length += header.algorithm.iv_len # IV
final_frame_length += 4 # Encrypted Content Length
final_frame_length += final_frame_bytes # Encrypted Content
final_frame_length += header.algorithm.auth_len # Authentication Tag
return final_frame_length |
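The sum above is a fixed per-frame overhead plus the ciphertext bytes; a quick arithmetic sketch with assumed algorithm parameters (12-byte IV, 16-byte auth tag, neither taken from a real MessageHeader) makes it concrete.

# Worked example of the final-frame length sum; iv_len/auth_len are assumed values.
iv_len, auth_len, final_frame_bytes = 12, 16, 100
length = 4                     # Sequence Number End
length += 4                    # Sequence Number
length += iv_len               # IV
length += 4                    # Encrypted Content Length
length += final_frame_bytes    # Encrypted Content
length += auth_len             # Authentication Tag
print(length)                  # 140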
def adjust_datetime_to_timezone(value, from_tz, to_tz=None):
"""
Given a ``datetime`` object adjust it according to the from_tz timezone
string into the to_tz timezone string.
"""
if to_tz is None:
to_tz = settings.TIME_ZONE
if value.tzinfo is None:
if not hasattr(from_tz, "localize"):
from_tz = pytz.timezone(smart_str(from_tz))
value = from_tz.localize(value)
return value.astimezone(pytz.timezone(smart_str(to_tz))) | Given a ``datetime`` object adjust it according to the from_tz timezone
    string into the to_tz timezone string. | Below is the instruction that describes the task:
### Input:
Given a ``datetime`` object adjust it according to the from_tz timezone
string into the to_tz timezone string.
### Response:
def adjust_datetime_to_timezone(value, from_tz, to_tz=None):
"""
Given a ``datetime`` object adjust it according to the from_tz timezone
string into the to_tz timezone string.
"""
if to_tz is None:
to_tz = settings.TIME_ZONE
if value.tzinfo is None:
if not hasattr(from_tz, "localize"):
from_tz = pytz.timezone(smart_str(from_tz))
value = from_tz.localize(value)
return value.astimezone(pytz.timezone(smart_str(to_tz))) |
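A minimal usage sketch of the same conversion with plain pytz; the timestamp and zone names are invented, and the target zone is given explicitly so Django's settings.TIME_ZONE default is not needed.

# Sketch of the conversion adjust_datetime_to_timezone performs (assumes pytz is installed).
from datetime import datetime
import pytz

naive = datetime(2020, 1, 1, 12, 0)                      # no tzinfo, as the from_tz input
localized = pytz.timezone("America/New_York").localize(naive)
print(localized.astimezone(pytz.timezone("UTC")))        # 2020-01-01 17:00:00+00:00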
def get_all_keys(self, headers=None, callback=None, **params):
"""
A lower-level method for listing contents of a bucket.
This closely models the actual S3 API and requires you to manually
handle the paging of results. For a higher-level method
that handles the details of paging for you, you can use the list method.
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
:type marker: string
:param marker: The "marker" of where you are in the result set
:type delimiter: string
:param delimiter: If this optional, Unicode string parameter
is included with your request, then keys that
contain the same string between the prefix and
the first occurrence of the delimiter will be
rolled up into a single result element in the
CommonPrefixes collection. These rolled-up keys
are not returned elsewhere in the response.
:rtype: ResultSet
:return: The result from S3 listing the keys requested
"""
return self._get_all([('Contents', self.key_class),
('CommonPrefixes', boto.s3.prefix.Prefix)],
'', headers, callback=callback, **params) | A lower-level method for listing contents of a bucket.
This closely models the actual S3 API and requires you to manually
handle the paging of results. For a higher-level method
that handles the details of paging for you, you can use the list method.
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
:type marker: string
:param marker: The "marker" of where you are in the result set
:type delimiter: string
:param delimiter: If this optional, Unicode string parameter
is included with your request, then keys that
contain the same string between the prefix and
the first occurrence of the delimiter will be
rolled up into a single result element in the
CommonPrefixes collection. These rolled-up keys
are not returned elsewhere in the response.
:rtype: ResultSet
        :return: The result from S3 listing the keys requested | Below is the instruction that describes the task:
### Input:
A lower-level method for listing contents of a bucket.
This closely models the actual S3 API and requires you to manually
handle the paging of results. For a higher-level method
that handles the details of paging for you, you can use the list method.
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
:type marker: string
:param marker: The "marker" of where you are in the result set
:type delimiter: string
:param delimiter: If this optional, Unicode string parameter
is included with your request, then keys that
contain the same string between the prefix and
the first occurrence of the delimiter will be
rolled up into a single result element in the
CommonPrefixes collection. These rolled-up keys
are not returned elsewhere in the response.
:rtype: ResultSet
:return: The result from S3 listing the keys requested
### Response:
def get_all_keys(self, headers=None, callback=None, **params):
"""
A lower-level method for listing contents of a bucket.
This closely models the actual S3 API and requires you to manually
handle the paging of results. For a higher-level method
that handles the details of paging for you, you can use the list method.
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
:type marker: string
:param marker: The "marker" of where you are in the result set
:type delimiter: string
:param delimiter: If this optional, Unicode string parameter
is included with your request, then keys that
contain the same string between the prefix and
the first occurrence of the delimiter will be
rolled up into a single result element in the
CommonPrefixes collection. These rolled-up keys
are not returned elsewhere in the response.
:rtype: ResultSet
:return: The result from S3 listing the keys requested
"""
return self._get_all([('Contents', self.key_class),
('CommonPrefixes', boto.s3.prefix.Prefix)],
'', headers, callback=callback, **params) |
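Because get_all_keys does not page for you, callers typically loop on the marker parameter. The sketch below only illustrates that loop: it assumes a synchronous boto-style bucket object is already available, and the prefix and page size are placeholders.

# Manual paging sketch for get_all_keys; assumes `bucket` is a connected
# boto-style bucket object (names and sizes here are placeholders).
def iter_keys(bucket, prefix="", page_size=1000):
    marker = ""
    while True:
        page = bucket.get_all_keys(prefix=prefix, marker=marker, max_keys=page_size)
        for key in page:
            yield key
        if len(page) < page_size:   # short page: nothing left to fetch
            return
        marker = page[-1].name      # resume after the last key returned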
def register():
"""
Plugin registration
"""
try:
signals.article_generator_init.connect(feed_parser_initialization)
signals.article_generator_context.connect(fetch_github_activity)
except ImportError:
logger.warning('`github_activity` failed to load dependency `feedparser`.'
                       '`github_activity` plugin not loaded.') | Plugin registration | Below is the instruction that describes the task:
### Input:
Plugin registration
### Response:
def register():
"""
Plugin registration
"""
try:
signals.article_generator_init.connect(feed_parser_initialization)
signals.article_generator_context.connect(fetch_github_activity)
except ImportError:
logger.warning('`github_activity` failed to load dependency `feedparser`.'
'`github_activity` plugin not loaded.') |
def register_model_once(cls, ModelClass, **kwargs):
"""
Tweaked version of `AnyUrlField.register_model` that only registers the
given model after checking that it is not already registered.
"""
    if cls._static_registry.get_for_model(ModelClass) is not None:
logger.warn("Model is already registered with {0}: '{1}'"
.format(cls, ModelClass))
else:
cls.register_model.register(ModelClass, **kwargs) | Tweaked version of `AnyUrlField.register_model` that only registers the
    given model after checking that it is not already registered. | Below is the instruction that describes the task:
### Input:
Tweaked version of `AnyUrlField.register_model` that only registers the
given model after checking that it is not already registered.
### Response:
def register_model_once(cls, ModelClass, **kwargs):
"""
Tweaked version of `AnyUrlField.register_model` that only registers the
given model after checking that it is not already registered.
"""
    if cls._static_registry.get_for_model(ModelClass) is not None:
logger.warn("Model is already registered with {0}: '{1}'"
.format(cls, ModelClass))
else:
cls.register_model.register(ModelClass, **kwargs) |
def _normalize(self, word):
'''Normalization used in pre-processing.
- All words are lower cased
- Digits in the range 1800-2100 are represented as !YEAR;
- Other digits are represented as !DIGITS
:rtype: str
'''
if '-' in word and word[0] != '-':
return '!HYPHEN'
elif word.isdigit() and len(word) == 4:
return '!YEAR'
elif word[0].isdigit():
return '!DIGITS'
else:
return word.lower() | Normalization used in pre-processing.
- All words are lower cased
- Digits in the range 1800-2100 are represented as !YEAR;
- Other digits are represented as !DIGITS
    :rtype: str | Below is the instruction that describes the task:
### Input:
Normalization used in pre-processing.
- All words are lower cased
- Digits in the range 1800-2100 are represented as !YEAR;
- Other digits are represented as !DIGITS
:rtype: str
### Response:
def _normalize(self, word):
'''Normalization used in pre-processing.
- All words are lower cased
- Digits in the range 1800-2100 are represented as !YEAR;
- Other digits are represented as !DIGITS
:rtype: str
'''
if '-' in word and word[0] != '-':
return '!HYPHEN'
elif word.isdigit() and len(word) == 4:
return '!YEAR'
elif word[0].isdigit():
return '!DIGITS'
else:
return word.lower() |
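The normalization rules are easiest to see on concrete tokens; note that, as written, the code maps any four-digit number to !YEAR, not only 1800-2100. Below is a standalone copy of the rules (outside the tagger class), purely for illustration.

# Standalone copy of the normalization rules above, for illustration only.
def normalize(word):
    if '-' in word and word[0] != '-':
        return '!HYPHEN'
    elif word.isdigit() and len(word) == 4:
        return '!YEAR'      # any four-digit number, per the code above
    elif word[0].isdigit():
        return '!DIGITS'
    return word.lower()

print([normalize(w) for w in ["Rock-n-roll", "1984", "3rd", "Paris"]])
# ['!HYPHEN', '!YEAR', '!DIGITS', 'paris']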
def indent(buffer, from_row, to_row, count=1):
"""
Indent text of a :class:`.Buffer` object.
"""
current_row = buffer.document.cursor_position_row
line_range = range(from_row, to_row)
# Apply transformation.
new_text = buffer.transform_lines(line_range, lambda l: ' ' * count + l)
buffer.document = Document(
new_text,
Document(new_text).translate_row_col_to_index(current_row, 0))
# Go to the start of the line.
    buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=True) | Indent text of a :class:`.Buffer` object. | Below is the instruction that describes the task:
### Input:
Indent text of a :class:`.Buffer` object.
### Response:
def indent(buffer, from_row, to_row, count=1):
"""
Indent text of a :class:`.Buffer` object.
"""
current_row = buffer.document.cursor_position_row
line_range = range(from_row, to_row)
# Apply transformation.
new_text = buffer.transform_lines(line_range, lambda l: ' ' * count + l)
buffer.document = Document(
new_text,
Document(new_text).translate_row_col_to_index(current_row, 0))
# Go to the start of the line.
buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=True) |
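The heart of indent() is the per-line transformation applied through buffer.transform_lines; the plain-string sketch below mirrors that step without any prompt_toolkit objects (the function name and inputs are invented).

# Plain-string sketch of what indent() does via buffer.transform_lines:
# prepend `count` spaces to every row in [from_row, to_row).
def indent_lines(text, from_row, to_row, count=1):
    lines = text.split("\n")
    for i in range(from_row, to_row):
        lines[i] = " " * count + lines[i]
    return "\n".join(lines)

print(indent_lines("a\nb\nc", 0, 2, count=4))
#     a
#     b
# c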
def update(self, instance, oldValue, newValue):
"""Updates the aggregate based on a change in the child value."""
self.__set__(instance,
                 self.__get__(instance, None) + newValue - (oldValue or 0)) | Updates the aggregate based on a change in the child value. | Below is the instruction that describes the task:
### Input:
Updates the aggregate based on a change in the child value.
### Response:
def update(self, instance, oldValue, newValue):
"""Updates the aggregate based on a change in the child value."""
self.__set__(instance,
self.__get__(instance, None) + newValue - (oldValue or 0)) |
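The expression is the usual incremental-aggregate identity, new_total = old_total + new_child - old_child, with a missing old value treated as zero; a tiny numeric sketch with invented values:

# Incremental aggregate update as in update() above; numbers are made up.
old_total, old_value, new_value = 10, 3, 7
print(old_total + new_value - (old_value or 0))   # 14
old_value = None
print(old_total + new_value - (old_value or 0))   # 17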
def _integrateOrbit(vxvv,pot,t,method,dt):
"""
NAME:
_integrateOrbit
PURPOSE:
integrate an orbit in a Phi(R) potential in the (R,phi)-plane
INPUT:
vxvv - array with the initial conditions stacked like
[R,vR,vT,phi]; vR outward!
pot - Potential instance
t - list of times at which to output (0 has to be in this!)
method - 'odeint' or 'leapfrog'
dt- if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
[:,4] array of [R,vR,vT,phi] at each t
HISTORY:
2010-07-20 - Written - Bovy (NYU)
"""
#First check that the potential has C
if '_c' in method:
if not ext_loaded or not _check_c(pot):
if ('leapfrog' in method or 'symplec' in method):
method= 'leapfrog'
else:
method= 'odeint'
if not ext_loaded: # pragma: no cover
warnings.warn("Cannot use C integration because C extension not loaded (using %s instead)" % (method), galpyWarning)
else:
warnings.warn("Cannot use C integration because some of the potentials are not implemented in C (using %s instead)" % (method), galpyWarning)
if method.lower() == 'leapfrog':
#go to the rectangular frame
this_vxvv= nu.array([vxvv[0]*nu.cos(vxvv[3]),
vxvv[0]*nu.sin(vxvv[3]),
vxvv[1]*nu.cos(vxvv[3])-vxvv[2]*nu.sin(vxvv[3]),
vxvv[2]*nu.cos(vxvv[3])+vxvv[1]*nu.sin(vxvv[3])])
#integrate
tmp_out= symplecticode.leapfrog(_rectForce,this_vxvv,
t,args=(pot,),rtol=10.**-8)
#go back to the cylindrical frame
R= nu.sqrt(tmp_out[:,0]**2.+tmp_out[:,1]**2.)
phi= nu.arccos(tmp_out[:,0]/R)
phi[(tmp_out[:,1] < 0.)]= 2.*nu.pi-phi[(tmp_out[:,1] < 0.)]
vR= tmp_out[:,2]*nu.cos(phi)+tmp_out[:,3]*nu.sin(phi)
vT= tmp_out[:,3]*nu.cos(phi)-tmp_out[:,2]*nu.sin(phi)
out= nu.zeros((len(t),4))
out[:,0]= R
out[:,1]= vR
out[:,2]= vT
out[:,3]= phi
msg= 0
elif ext_loaded and \
(method.lower() == 'leapfrog_c' or method.lower() == 'rk4_c' \
or method.lower() == 'rk6_c' or method.lower() == 'symplec4_c' \
or method.lower() == 'symplec6_c' or method.lower() == 'dopr54_c' \
or method.lower() == 'dop853_c'):
warnings.warn("Using C implementation to integrate orbits",galpyWarningVerbose)
#go to the rectangular frame
this_vxvv= nu.array([vxvv[0]*nu.cos(vxvv[3]),
vxvv[0]*nu.sin(vxvv[3]),
vxvv[1]*nu.cos(vxvv[3])-vxvv[2]*nu.sin(vxvv[3]),
vxvv[2]*nu.cos(vxvv[3])+vxvv[1]*nu.sin(vxvv[3])])
#integrate
tmp_out, msg= integratePlanarOrbit_c(pot,this_vxvv,
t,method,dt=dt)
#go back to the cylindrical frame
R= nu.sqrt(tmp_out[:,0]**2.+tmp_out[:,1]**2.)
phi= nu.arccos(tmp_out[:,0]/R)
phi[(tmp_out[:,1] < 0.)]= 2.*nu.pi-phi[(tmp_out[:,1] < 0.)]
vR= tmp_out[:,2]*nu.cos(phi)+tmp_out[:,3]*nu.sin(phi)
vT= tmp_out[:,3]*nu.cos(phi)-tmp_out[:,2]*nu.sin(phi)
out= nu.zeros((len(t),4))
out[:,0]= R
out[:,1]= vR
out[:,2]= vT
out[:,3]= phi
elif method.lower() == 'odeint' or method.lower() == 'dop853' or not ext_loaded:
vphi= vxvv[2]/vxvv[0]
init= [vxvv[0],vxvv[1],vxvv[3],vphi]
if method == 'dop853':
intOut = dop853(_EOM, init, t, args=(pot,))
else:
intOut= integrate.odeint(_EOM,init,t,args=(pot,),
rtol=10.**-8.)#,mxstep=100000000)
out= nu.zeros((len(t),4))
out[:,0]= intOut[:,0]
out[:,1]= intOut[:,1]
out[:,3]= intOut[:,2]
out[:,2]= out[:,0]*intOut[:,3]
msg= 0
else:
raise NotImplementedError("requested integration method does not exist")
#post-process to remove negative radii
neg_radii= (out[:,0] < 0.)
out[neg_radii,0]= -out[neg_radii,0]
out[neg_radii,3]+= m.pi
_parse_warnmessage(msg)
return (out,msg) | NAME:
_integrateOrbit
PURPOSE:
integrate an orbit in a Phi(R) potential in the (R,phi)-plane
INPUT:
vxvv - array with the initial conditions stacked like
[R,vR,vT,phi]; vR outward!
pot - Potential instance
t - list of times at which to output (0 has to be in this!)
method - 'odeint' or 'leapfrog'
dt- if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
[:,4] array of [R,vR,vT,phi] at each t
HISTORY:
       2010-07-20 - Written - Bovy (NYU) | Below is the instruction that describes the task:
### Input:
NAME:
_integrateOrbit
PURPOSE:
integrate an orbit in a Phi(R) potential in the (R,phi)-plane
INPUT:
vxvv - array with the initial conditions stacked like
[R,vR,vT,phi]; vR outward!
pot - Potential instance
t - list of times at which to output (0 has to be in this!)
method - 'odeint' or 'leapfrog'
dt- if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
[:,4] array of [R,vR,vT,phi] at each t
HISTORY:
2010-07-20 - Written - Bovy (NYU)
### Response:
def _integrateOrbit(vxvv,pot,t,method,dt):
"""
NAME:
_integrateOrbit
PURPOSE:
integrate an orbit in a Phi(R) potential in the (R,phi)-plane
INPUT:
vxvv - array with the initial conditions stacked like
[R,vR,vT,phi]; vR outward!
pot - Potential instance
t - list of times at which to output (0 has to be in this!)
method - 'odeint' or 'leapfrog'
dt- if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
[:,4] array of [R,vR,vT,phi] at each t
HISTORY:
2010-07-20 - Written - Bovy (NYU)
"""
#First check that the potential has C
if '_c' in method:
if not ext_loaded or not _check_c(pot):
if ('leapfrog' in method or 'symplec' in method):
method= 'leapfrog'
else:
method= 'odeint'
if not ext_loaded: # pragma: no cover
warnings.warn("Cannot use C integration because C extension not loaded (using %s instead)" % (method), galpyWarning)
else:
warnings.warn("Cannot use C integration because some of the potentials are not implemented in C (using %s instead)" % (method), galpyWarning)
if method.lower() == 'leapfrog':
#go to the rectangular frame
this_vxvv= nu.array([vxvv[0]*nu.cos(vxvv[3]),
vxvv[0]*nu.sin(vxvv[3]),
vxvv[1]*nu.cos(vxvv[3])-vxvv[2]*nu.sin(vxvv[3]),
vxvv[2]*nu.cos(vxvv[3])+vxvv[1]*nu.sin(vxvv[3])])
#integrate
tmp_out= symplecticode.leapfrog(_rectForce,this_vxvv,
t,args=(pot,),rtol=10.**-8)
#go back to the cylindrical frame
R= nu.sqrt(tmp_out[:,0]**2.+tmp_out[:,1]**2.)
phi= nu.arccos(tmp_out[:,0]/R)
phi[(tmp_out[:,1] < 0.)]= 2.*nu.pi-phi[(tmp_out[:,1] < 0.)]
vR= tmp_out[:,2]*nu.cos(phi)+tmp_out[:,3]*nu.sin(phi)
vT= tmp_out[:,3]*nu.cos(phi)-tmp_out[:,2]*nu.sin(phi)
out= nu.zeros((len(t),4))
out[:,0]= R
out[:,1]= vR
out[:,2]= vT
out[:,3]= phi
msg= 0
elif ext_loaded and \
(method.lower() == 'leapfrog_c' or method.lower() == 'rk4_c' \
or method.lower() == 'rk6_c' or method.lower() == 'symplec4_c' \
or method.lower() == 'symplec6_c' or method.lower() == 'dopr54_c' \
or method.lower() == 'dop853_c'):
warnings.warn("Using C implementation to integrate orbits",galpyWarningVerbose)
#go to the rectangular frame
this_vxvv= nu.array([vxvv[0]*nu.cos(vxvv[3]),
vxvv[0]*nu.sin(vxvv[3]),
vxvv[1]*nu.cos(vxvv[3])-vxvv[2]*nu.sin(vxvv[3]),
vxvv[2]*nu.cos(vxvv[3])+vxvv[1]*nu.sin(vxvv[3])])
#integrate
tmp_out, msg= integratePlanarOrbit_c(pot,this_vxvv,
t,method,dt=dt)
#go back to the cylindrical frame
R= nu.sqrt(tmp_out[:,0]**2.+tmp_out[:,1]**2.)
phi= nu.arccos(tmp_out[:,0]/R)
phi[(tmp_out[:,1] < 0.)]= 2.*nu.pi-phi[(tmp_out[:,1] < 0.)]
vR= tmp_out[:,2]*nu.cos(phi)+tmp_out[:,3]*nu.sin(phi)
vT= tmp_out[:,3]*nu.cos(phi)-tmp_out[:,2]*nu.sin(phi)
out= nu.zeros((len(t),4))
out[:,0]= R
out[:,1]= vR
out[:,2]= vT
out[:,3]= phi
elif method.lower() == 'odeint' or method.lower() == 'dop853' or not ext_loaded:
vphi= vxvv[2]/vxvv[0]
init= [vxvv[0],vxvv[1],vxvv[3],vphi]
if method == 'dop853':
intOut = dop853(_EOM, init, t, args=(pot,))
else:
intOut= integrate.odeint(_EOM,init,t,args=(pot,),
rtol=10.**-8.)#,mxstep=100000000)
out= nu.zeros((len(t),4))
out[:,0]= intOut[:,0]
out[:,1]= intOut[:,1]
out[:,3]= intOut[:,2]
out[:,2]= out[:,0]*intOut[:,3]
msg= 0
else:
raise NotImplementedError("requested integration method does not exist")
#post-process to remove negative radii
neg_radii= (out[:,0] < 0.)
out[neg_radii,0]= -out[neg_radii,0]
out[neg_radii,3]+= m.pi
_parse_warnmessage(msg)
return (out,msg) |
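The cylindrical-to-rectangular bookkeeping wrapped around the integrators is worth checking in isolation. The numpy sketch below round-trips one invented phase-space point; it uses arctan2 for the inverse angle instead of the arccos-plus-sign-flip in the code, which gives the same result.

# Round trip of the (R, vR, vT, phi) <-> rectangular mapping used above;
# the phase-space point is invented for illustration.
import numpy as np

R, vR, vT, phi = 1.2, 0.1, 1.0, 0.7
x, y = R * np.cos(phi), R * np.sin(phi)
vx = vR * np.cos(phi) - vT * np.sin(phi)
vy = vT * np.cos(phi) + vR * np.sin(phi)

R_back = np.sqrt(x ** 2 + y ** 2)
phi_back = np.arctan2(y, x) % (2. * np.pi)
vR_back = vx * np.cos(phi_back) + vy * np.sin(phi_back)
vT_back = vy * np.cos(phi_back) - vx * np.sin(phi_back)
print(np.allclose([R, vR, vT, phi], [R_back, vR_back, vT_back, phi_back]))   # True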
def ae_latent_softmax(latents_pred, latents_discrete_hot, vocab_size, hparams):
"""Latent prediction and loss.
Args:
latents_pred: Tensor of shape [..., depth].
latents_discrete_hot: Tensor of shape [..., vocab_size].
vocab_size: an int representing the vocab size.
hparams: HParams.
Returns:
sample: Tensor of shape [...], a sample from a multinomial distribution.
loss: Tensor of shape [...], the softmax cross-entropy.
"""
with tf.variable_scope("latent_logits"):
latents_logits = tf.layers.dense(latents_pred, vocab_size,
name="logits_dense")
if hparams.logit_normalization:
latents_logits *= tf.rsqrt(1e-8 +
tf.reduce_mean(tf.square(latents_logits)))
loss = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=latents_discrete_hot, logits=latents_logits)
# TODO(trandustin): tease this out from ae_latent_softmax.
# we use just the loss portion to anchor prior / encoder on text.
sample = multinomial_sample(latents_logits,
vocab_size,
hparams.sampling_method,
hparams.sampling_temp)
return sample, loss | Latent prediction and loss.
Args:
latents_pred: Tensor of shape [..., depth].
latents_discrete_hot: Tensor of shape [..., vocab_size].
vocab_size: an int representing the vocab size.
hparams: HParams.
Returns:
sample: Tensor of shape [...], a sample from a multinomial distribution.
    loss: Tensor of shape [...], the softmax cross-entropy. | Below is the instruction that describes the task:
### Input:
Latent prediction and loss.
Args:
latents_pred: Tensor of shape [..., depth].
latents_discrete_hot: Tensor of shape [..., vocab_size].
vocab_size: an int representing the vocab size.
hparams: HParams.
Returns:
sample: Tensor of shape [...], a sample from a multinomial distribution.
loss: Tensor of shape [...], the softmax cross-entropy.
### Response:
def ae_latent_softmax(latents_pred, latents_discrete_hot, vocab_size, hparams):
"""Latent prediction and loss.
Args:
latents_pred: Tensor of shape [..., depth].
latents_discrete_hot: Tensor of shape [..., vocab_size].
vocab_size: an int representing the vocab size.
hparams: HParams.
Returns:
sample: Tensor of shape [...], a sample from a multinomial distribution.
loss: Tensor of shape [...], the softmax cross-entropy.
"""
with tf.variable_scope("latent_logits"):
latents_logits = tf.layers.dense(latents_pred, vocab_size,
name="logits_dense")
if hparams.logit_normalization:
latents_logits *= tf.rsqrt(1e-8 +
tf.reduce_mean(tf.square(latents_logits)))
loss = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=latents_discrete_hot, logits=latents_logits)
# TODO(trandustin): tease this out from ae_latent_softmax.
# we use just the loss portion to anchor prior / encoder on text.
sample = multinomial_sample(latents_logits,
vocab_size,
hparams.sampling_method,
hparams.sampling_temp)
return sample, loss |
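Stripped of the TensorFlow plumbing, the two pieces are a softmax cross-entropy against a one-hot latent and a multinomial draw from the same logits; a framework-free numpy sketch with invented shapes and values:

# Numpy sketch of the loss/sample pair computed by ae_latent_softmax;
# the logits and one-hot latents below are invented.
import numpy as np

rng = np.random.default_rng(0)
vocab_size = 8
logits = rng.normal(size=(4, vocab_size))              # [..., vocab_size]
labels = np.eye(vocab_size)[[1, 5, 2, 7]]              # one-hot latents

log_probs = logits - np.log(np.exp(logits).sum(-1, keepdims=True))
loss = -(labels * log_probs).sum(-1)                   # per-position cross-entropy
probs = np.exp(log_probs)
probs /= probs.sum(-1, keepdims=True)
sample = np.array([rng.choice(vocab_size, p=p) for p in probs])
print(loss.shape, sample.shape)                        # (4,) (4,)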
def generate_file_shared_access_signature(self, share_name,
directory_name=None,
file_name=None,
permission=None,
expiry=None,
start=None,
id=None,
ip=None,
protocol=None,
cache_control=None,
content_disposition=None,
content_encoding=None,
content_language=None,
content_type=None):
'''
Generates a shared access signature for the file.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param str directory_name:
Name of directory. SAS tokens cannot be created for directories, so
this parameter should only be present if file_name is provided.
:param str file_name:
Name of file.
:param FilePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, create, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_file_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. Possible values are
both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
is https,http. Note that HTTP only is not a permitted value.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
:return: A Shared Access Signature (sas) token.
:rtype: str
'''
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('self.account_name', self.account_name)
_validate_not_none('self.account_key', self.account_key)
sas = SharedAccessSignature(self.account_name, self.account_key)
return sas.generate_file(
share_name,
directory_name,
file_name,
permission,
expiry,
start=start,
id=id,
ip=ip,
protocol=protocol,
cache_control=cache_control,
content_disposition=content_disposition,
content_encoding=content_encoding,
content_language=content_language,
content_type=content_type,
) | Generates a shared access signature for the file.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param str directory_name:
Name of directory. SAS tokens cannot be created for directories, so
this parameter should only be present if file_name is provided.
:param str file_name:
Name of file.
:param FilePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, create, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_file_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. Possible values are
both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
is https,http. Note that HTTP only is not a permitted value.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
:return: A Shared Access Signature (sas) token.
        :rtype: str | Below is the instruction that describes the task:
### Input:
Generates a shared access signature for the file.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param str directory_name:
Name of directory. SAS tokens cannot be created for directories, so
this parameter should only be present if file_name is provided.
:param str file_name:
Name of file.
:param FilePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, create, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_file_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. Possible values are
both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
is https,http. Note that HTTP only is not a permitted value.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
:return: A Shared Access Signature (sas) token.
:rtype: str
### Response:
def generate_file_shared_access_signature(self, share_name,
directory_name=None,
file_name=None,
permission=None,
expiry=None,
start=None,
id=None,
ip=None,
protocol=None,
cache_control=None,
content_disposition=None,
content_encoding=None,
content_language=None,
content_type=None):
'''
Generates a shared access signature for the file.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param str directory_name:
Name of directory. SAS tokens cannot be created for directories, so
this parameter should only be present if file_name is provided.
:param str file_name:
Name of file.
:param FilePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, create, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_file_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. Possible values are
both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
is https,http. Note that HTTP only is not a permitted value.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
:return: A Shared Access Signature (sas) token.
:rtype: str
'''
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('self.account_name', self.account_name)
_validate_not_none('self.account_key', self.account_key)
sas = SharedAccessSignature(self.account_name, self.account_key)
return sas.generate_file(
share_name,
directory_name,
file_name,
permission,
expiry,
start=start,
id=id,
ip=ip,
protocol=protocol,
cache_control=cache_control,
content_disposition=content_disposition,
content_encoding=content_encoding,
content_language=content_language,
content_type=content_type,
) |
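A hedged usage sketch of the method above; the account credentials, share, directory and file names are placeholders, and it assumes the legacy azure-storage file SDK that provides FileService and FilePermissions is installed.

# Usage sketch with placeholder credentials/names; assumes the legacy
# azure-storage file SDK (FileService, FilePermissions) is available.
from datetime import datetime, timedelta
from azure.storage.file import FileService, FilePermissions

service = FileService(account_name="myaccount", account_key="<account-key>")
token = service.generate_file_shared_access_signature(
    share_name="myshare",
    directory_name="docs",
    file_name="report.pdf",
    permission=FilePermissions.READ,
    expiry=datetime.utcnow() + timedelta(hours=1),
    protocol="https",
)
# Append the token to the file URL as its query string to grant read access.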
def f_expand(self, build_dict, fail_safe=True):
"""Similar to :func:`~pypet.trajectory.Trajectory.f_explore`, but can be used to enlarge
already completed trajectories.
Please ensure before usage, that all explored parameters are loaded!
:param build_dict:
Dictionary containing the expansion
:param fail_safe:
If old ranges should be **deep-copied** in order to allow to restore
the original exploration if something fails during expansion.
Set to `False` if deep-copying your parameter ranges causes errors.
:raises:
TypeError: If not all explored parameters are enlarged
AttributeError: If keys of dictionary cannot be found in the trajectory
NotUniqueNodeError:
If dictionary keys do not unambiguously map to single parameters
ValueError: If not all explored parameter ranges are of the same length
"""
if len(self._explored_parameters) == 0:
self._logger.info('Your trajectory has not been explored, yet. '
'I will call `f_explore` instead.')
return self.f_explore(build_dict)
enlarge_set = set([self.f_get(key).v_full_name
for key in build_dict.keys()])
# Check if all explored parameters will be enlarged, otherwise
# We cannot enlarge the trajectory
if not set(self._explored_parameters.keys()) == enlarge_set:
raise TypeError('You have to enlarge dimensions you have explored before! Currently'
' explored parameters are not the ones you specified in your building'
' dictionary, i.e. %s != %s' %
(str(set(self._explored_parameters.keys())),
str(set(build_dict.keys()))))
if any(x is None for x in self._explored_parameters.values()):
raise TypeError('At least one of your explored parameters is not fully loaded, '
'please load it.')
old_ranges = None
if fail_safe:
old_ranges = {}
for param_name in self._explored_parameters:
old_ranges[param_name] = self._explored_parameters[param_name].f_get_range()
try:
old_ranges = cp.deepcopy(old_ranges)
except Exception:
self._logger.error('Cannot deepcopy old parameter ranges, if '
'something fails during `f_expand` I cannot revert the '
'trajectory to old settings.')
old_ranges = None
try:
count = 0
length = None
for key, builditerable in build_dict.items():
act_param = self.f_get(key)
act_param.f_unlock()
act_param._expand(builditerable)
name = act_param.v_full_name
self._explored_parameters[name] = act_param
# Compare the length of two consecutive parameters in the `build_dict`
if count == 0:
length = act_param.f_get_range_length()
elif not length == act_param.f_get_range_length():
raise ValueError('The parameters to explore have not the same size!')
count += 1
original_length = len(self)
for irun in range(original_length, length):
self._add_run_info(irun)
self._test_run_addition(length)
# We need to update the explored parameters in case they were stored:
self._remove_exploration()
except Exception:
if old_ranges is not None:
# Try to restore the original parameter exploration
for param_name in old_ranges:
param_range = old_ranges[param_name]
param = self._explored_parameters[param_name]
param.f_unlock()
try:
param._shrink()
except Exception as exc:
self._logger.error('Could not shrink parameter `%s` '
'because of:`%s`' % (param_name, repr(exc)))
param._explore(param_range)
param._explored = True
raise | Similar to :func:`~pypet.trajectory.Trajectory.f_explore`, but can be used to enlarge
already completed trajectories.
Please ensure before usage, that all explored parameters are loaded!
:param build_dict:
Dictionary containing the expansion
:param fail_safe:
If old ranges should be **deep-copied** in order to allow to restore
the original exploration if something fails during expansion.
Set to `False` if deep-copying your parameter ranges causes errors.
:raises:
TypeError: If not all explored parameters are enlarged
AttributeError: If keys of dictionary cannot be found in the trajectory
NotUniqueNodeError:
If dictionary keys do not unambiguously map to single parameters
ValueError: If not all explored parameter ranges are of the same length | Below is the the instruction that describes the task:
### Input:
Similar to :func:`~pypet.trajectory.Trajectory.f_explore`, but can be used to enlarge
already completed trajectories.
Please ensure before usage, that all explored parameters are loaded!
:param build_dict:
Dictionary containing the expansion
:param fail_safe:
If old ranges should be **deep-copied** in order to allow to restore
the original exploration if something fails during expansion.
Set to `False` if deep-copying your parameter ranges causes errors.
:raises:
TypeError: If not all explored parameters are enlarged
AttributeError: If keys of dictionary cannot be found in the trajectory
NotUniqueNodeError:
If dictionary keys do not unambiguously map to single parameters
ValueError: If not all explored parameter ranges are of the same length
### Response:
def f_expand(self, build_dict, fail_safe=True):
"""Similar to :func:`~pypet.trajectory.Trajectory.f_explore`, but can be used to enlarge
already completed trajectories.
Please ensure before usage, that all explored parameters are loaded!
:param build_dict:
Dictionary containing the expansion
:param fail_safe:
If old ranges should be **deep-copied** in order to allow to restore
the original exploration if something fails during expansion.
Set to `False` if deep-copying your parameter ranges causes errors.
:raises:
TypeError: If not all explored parameters are enlarged
AttributeError: If keys of dictionary cannot be found in the trajectory
NotUniqueNodeError:
If dictionary keys do not unambiguously map to single parameters
ValueError: If not all explored parameter ranges are of the same length
"""
if len(self._explored_parameters) == 0:
self._logger.info('Your trajectory has not been explored, yet. '
'I will call `f_explore` instead.')
return self.f_explore(build_dict)
enlarge_set = set([self.f_get(key).v_full_name
for key in build_dict.keys()])
# Check if all explored parameters will be enlarged, otherwise
# We cannot enlarge the trajectory
if not set(self._explored_parameters.keys()) == enlarge_set:
raise TypeError('You have to enlarge dimensions you have explored before! Currently'
' explored parameters are not the ones you specified in your building'
' dictionary, i.e. %s != %s' %
(str(set(self._explored_parameters.keys())),
str(set(build_dict.keys()))))
if any(x is None for x in self._explored_parameters.values()):
raise TypeError('At least one of your explored parameters is not fully loaded, '
'please load it.')
old_ranges = None
if fail_safe:
old_ranges = {}
for param_name in self._explored_parameters:
old_ranges[param_name] = self._explored_parameters[param_name].f_get_range()
try:
old_ranges = cp.deepcopy(old_ranges)
except Exception:
self._logger.error('Cannot deepcopy old parameter ranges, if '
'something fails during `f_expand` I cannot revert the '
'trajectory to old settings.')
old_ranges = None
try:
count = 0
length = None
for key, builditerable in build_dict.items():
act_param = self.f_get(key)
act_param.f_unlock()
act_param._expand(builditerable)
name = act_param.v_full_name
self._explored_parameters[name] = act_param
# Compare the length of two consecutive parameters in the `build_dict`
if count == 0:
length = act_param.f_get_range_length()
elif not length == act_param.f_get_range_length():
raise ValueError('The parameters to explore have not the same size!')
count += 1
original_length = len(self)
for irun in range(original_length, length):
self._add_run_info(irun)
self._test_run_addition(length)
# We need to update the explored parameters in case they were stored:
self._remove_exploration()
except Exception:
if old_ranges is not None:
# Try to restore the original parameter exploration
for param_name in old_ranges:
param_range = old_ranges[param_name]
param = self._explored_parameters[param_name]
param.f_unlock()
try:
param._shrink()
except Exception as exc:
self._logger.error('Could not shrink parameter `%s` '
'because of:`%s`' % (param_name, repr(exc)))
param._explore(param_range)
param._explored = True
raise |
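A minimal usage sketch of the expansion workflow described above, assuming pypet is installed; the trajectory and parameter names are illustrative and the storage/run steps are omitted:
# Hedged sketch: names below are placeholders, not taken from the row above.
from pypet import Trajectory

traj = Trajectory('example_expand')
traj.f_add_parameter('x', 0.0)            # default value defines the parameter
traj.f_explore({'x': [1.0, 2.0, 3.0]})    # initial exploration: three runs
traj.f_expand({'x': [4.0, 5.0]})          # enlarge the finished exploration by two runs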
def loglike(self, endog, mu, freq_weights=1, scale=1.):
r"""
The log-likelihood function in terms of the fitted mean response.
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
freq_weights : array-like
1d array of frequency weights. The default is 1.
scale : float, optional
Not used for the Binomial GLM.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at
(endog,mu,freq_weights,scale) as defined below.
"""
if np.shape(self.n) == () and self.n == 1:
return scale * np.sum((endog * np.log(mu/(1 - mu) + 1e-200) +
np.log(1 - mu)) * freq_weights)
else:
y = endog * self.n # convert back to successes
return scale * np.sum((special.gammaln(self.n + 1) -
special.gammaln(y + 1) -
special.gammaln(self.n - y + 1) + y *
np.log(mu/(1 - mu)) + self.n *
np.log(1 - mu)) * freq_weights) | r"""
The log-likelihood function in terms of the fitted mean response.
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
freq_weights : array-like
1d array of frequency weights. The default is 1.
scale : float, optional
Not used for the Binomial GLM.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at
(endog,mu,freq_weights,scale) as defined below. | Below is the the instruction that describes the task:
### Input:
r"""
The log-likelihood function in terms of the fitted mean response.
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
freq_weights : array-like
1d array of frequency weights. The default is 1.
scale : float, optional
Not used for the Binomial GLM.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at
(endog,mu,freq_weights,scale) as defined below.
### Response:
def loglike(self, endog, mu, freq_weights=1, scale=1.):
r"""
The log-likelihood function in terms of the fitted mean response.
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
freq_weights : array-like
1d array of frequency weights. The default is 1.
scale : float, optional
Not used for the Binomial GLM.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at
(endog,mu,freq_weights,scale) as defined below.
"""
if np.shape(self.n) == () and self.n == 1:
return scale * np.sum((endog * np.log(mu/(1 - mu) + 1e-200) +
np.log(1 - mu)) * freq_weights)
else:
y = endog * self.n # convert back to successes
return scale * np.sum((special.gammaln(self.n + 1) -
special.gammaln(y + 1) -
special.gammaln(self.n - y + 1) + y *
np.log(mu/(1 - mu)) + self.n *
np.log(1 - mu)) * freq_weights) |
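For the n == 1 (Bernoulli) branch above, the expression reduces to sum(y*log(mu/(1-mu)) + log(1-mu)); a self-contained NumPy check of that formula with toy numbers, not tied to any fitted model:
import numpy as np

y = np.array([1, 0, 1, 1])               # observed 0/1 outcomes
mu = np.array([0.8, 0.3, 0.6, 0.9])      # fitted probabilities
llf = np.sum(y * np.log(mu / (1 - mu)) + np.log(1 - mu))
print(llf)  # identical to np.sum(y*np.log(mu) + (1 - y)*np.log(1 - mu))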
def _build_epsf_step(self, stars, epsf=None):
"""
A single iteration of improving an ePSF.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The updated ePSF.
"""
if len(stars) < 1:
raise ValueError('stars must contain at least one EPSFStar or '
'LinkedEPSFStar object.')
if epsf is None:
# create an initial ePSF (array of zeros)
epsf = self._create_initial_epsf(stars)
else:
# improve the input ePSF
epsf = copy.deepcopy(epsf)
# compute a 3D stack of 2D residual images
residuals = self._resample_residuals(stars, epsf)
self._residuals.append(residuals)
# compute the sigma-clipped median along the 3D stack
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RuntimeWarning)
warnings.simplefilter('ignore', category=AstropyUserWarning)
residuals = self.sigclip(residuals, axis=0, masked=False,
return_bounds=False)
if HAS_BOTTLENECK:
residuals = bottleneck.nanmedian(residuals, axis=0)
else:
residuals = np.nanmedian(residuals, axis=0)
self._residuals_sigclip.append(residuals)
# interpolate any missing data (np.nan)
mask = ~np.isfinite(residuals)
if np.any(mask):
residuals = _interpolate_missing_data(residuals, mask,
method='cubic')
# fill any remaining nans (outer points) with zeros
residuals[~np.isfinite(residuals)] = 0.
self._residuals_interp.append(residuals)
# add the residuals to the previous ePSF image
new_epsf = epsf.normalized_data + residuals
# smooth the ePSF
new_epsf = self._smooth_epsf(new_epsf)
# recenter the ePSF
new_epsf = self._recenter_epsf(new_epsf, epsf,
centroid_func=self.recentering_func,
box_size=self.recentering_boxsize,
maxiters=self.recentering_maxiters,
center_accuracy=1.0e-4)
# normalize the ePSF data
new_epsf /= np.sum(new_epsf, dtype=np.float64)
# return the new ePSF object
xcenter = (new_epsf.shape[1] - 1) / 2.
ycenter = (new_epsf.shape[0] - 1) / 2.
epsf_new = EPSFModel(data=new_epsf, origin=(xcenter, ycenter),
normalize=False, oversampling=epsf.oversampling)
return epsf_new | A single iteration of improving an ePSF.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The updated ePSF. | Below is the the instruction that describes the task:
### Input:
A single iteration of improving an ePSF.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The updated ePSF.
### Response:
def _build_epsf_step(self, stars, epsf=None):
"""
A single iteration of improving an ePSF.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The updated ePSF.
"""
if len(stars) < 1:
raise ValueError('stars must contain at least one EPSFStar or '
'LinkedEPSFStar object.')
if epsf is None:
# create an initial ePSF (array of zeros)
epsf = self._create_initial_epsf(stars)
else:
# improve the input ePSF
epsf = copy.deepcopy(epsf)
# compute a 3D stack of 2D residual images
residuals = self._resample_residuals(stars, epsf)
self._residuals.append(residuals)
# compute the sigma-clipped median along the 3D stack
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RuntimeWarning)
warnings.simplefilter('ignore', category=AstropyUserWarning)
residuals = self.sigclip(residuals, axis=0, masked=False,
return_bounds=False)
if HAS_BOTTLENECK:
residuals = bottleneck.nanmedian(residuals, axis=0)
else:
residuals = np.nanmedian(residuals, axis=0)
self._residuals_sigclip.append(residuals)
# interpolate any missing data (np.nan)
mask = ~np.isfinite(residuals)
if np.any(mask):
residuals = _interpolate_missing_data(residuals, mask,
method='cubic')
# fill any remaining nans (outer points) with zeros
residuals[~np.isfinite(residuals)] = 0.
self._residuals_interp.append(residuals)
# add the residuals to the previous ePSF image
new_epsf = epsf.normalized_data + residuals
# smooth the ePSF
new_epsf = self._smooth_epsf(new_epsf)
# recenter the ePSF
new_epsf = self._recenter_epsf(new_epsf, epsf,
centroid_func=self.recentering_func,
box_size=self.recentering_boxsize,
maxiters=self.recentering_maxiters,
center_accuracy=1.0e-4)
# normalize the ePSF data
new_epsf /= np.sum(new_epsf, dtype=np.float64)
# return the new ePSF object
xcenter = (new_epsf.shape[1] - 1) / 2.
ycenter = (new_epsf.shape[0] - 1) / 2.
epsf_new = EPSFModel(data=new_epsf, origin=(xcenter, ycenter),
normalize=False, oversampling=epsf.oversampling)
return epsf_new |
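This method is an internal iteration step; a hedged sketch of how the surrounding builder is typically driven, assuming the photutils-style EPSFBuilder/extract_stars API and a small synthetic image (all positions and sizes are illustrative):
import numpy as np
from astropy.table import Table
from astropy.nddata import NDData
from photutils.psf import extract_stars, EPSFBuilder

# Tiny synthetic frame with Gaussian "stars" at known positions.
yy, xx = np.mgrid[0:101, 0:101]
image = np.zeros((101, 101))
positions = [(25.0, 25.0), (75.0, 30.0), (50.0, 75.0)]
for x0, y0 in positions:
    image += np.exp(-((xx - x0) ** 2 + (yy - y0) ** 2) / (2 * 2.0 ** 2))

stars_tbl = Table(rows=positions, names=('x', 'y'))
stars = extract_stars(NDData(data=image), stars_tbl, size=15)
builder = EPSFBuilder(oversampling=2, maxiters=3, progress_bar=False)
epsf, fitted_stars = builder(stars)   # each iteration performs a step like _build_epsf_step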
def repair_mongo(name, dbpath):
"""repair mongodb after usafe shutdown"""
log_file = os.path.join(dbpath, 'mongod.log')
cmd = [name, "--dbpath", dbpath, "--logpath", log_file, "--logappend",
"--repair"]
proc = subprocess.Popen(
cmd, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
timeout = 45
t_start = time.time()
while time.time() - t_start < timeout:
line = str(proc.stdout.readline())
logger.info("repair output: %s" % (line,))
return_code = proc.poll()
if return_code is not None:
if return_code:
raise Exception("mongod --repair failed with exit code %s, "
"check log file: %s" % (return_code, log_file))
# Success when poll() returns 0
return
time.sleep(1)
proc.terminate()
raise Exception("mongod --repair failed to exit after %s seconds, "
"check log file: %s" % (timeout, log_file)) | repair mongodb after usafe shutdown | Below is the the instruction that describes the task:
### Input:
repair mongodb after unsafe shutdown
### Response:
def repair_mongo(name, dbpath):
"""repair mongodb after usafe shutdown"""
log_file = os.path.join(dbpath, 'mongod.log')
cmd = [name, "--dbpath", dbpath, "--logpath", log_file, "--logappend",
"--repair"]
proc = subprocess.Popen(
cmd, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
timeout = 45
t_start = time.time()
while time.time() - t_start < timeout:
line = str(proc.stdout.readline())
logger.info("repair output: %s" % (line,))
return_code = proc.poll()
if return_code is not None:
if return_code:
raise Exception("mongod --repair failed with exit code %s, "
"check log file: %s" % (return_code, log_file))
# Success when poll() returns 0
return
time.sleep(1)
proc.terminate()
raise Exception("mongod --repair failed to exit after %s seconds, "
"check log file: %s" % (timeout, log_file)) |
def kata2hira(text, ignore=''):
"""Convert Full-width Katakana to Hiragana
Parameters
----------
text : str
Full-width Katakana string.
ignore : str
Characters to be ignored in converting.
Return
------
str
Hiragana string.
Examples
--------
>>> print(jaconv.kata2hira('巴マミ'))
巴まみ
>>> print(jaconv.kata2hira('マミサン', ignore='ン'))
まみさン
"""
if ignore:
k2h_map = _exclude_ignorechar(ignore, K2H_TABLE.copy())
return _convert(text, k2h_map)
return _convert(text, K2H_TABLE) | Convert Full-width Katakana to Hiragana
Parameters
----------
text : str
Full-width Katakana string.
ignore : str
Characters to be ignored in converting.
Return
------
str
Hiragana string.
Examples
--------
>>> print(jaconv.kata2hira('巴マミ'))
巴まみ
>>> print(jaconv.kata2hira('マミサン', ignore='ン'))
まみさン | Below is the the instruction that describes the task:
### Input:
Convert Full-width Katakana to Hiragana
Parameters
----------
text : str
Full-width Katakana string.
ignore : str
Characters to be ignored in converting.
Return
------
str
Hiragana string.
Examples
--------
>>> print(jaconv.kata2hira('巴マミ'))
巴まみ
>>> print(jaconv.kata2hira('マミサン', ignore='ン'))
まみさン
### Response:
def kata2hira(text, ignore=''):
"""Convert Full-width Katakana to Hiragana
Parameters
----------
text : str
Full-width Katakana string.
ignore : str
Characters to be ignored in converting.
Return
------
str
Hiragana string.
Examples
--------
>>> print(jaconv.kata2hira('巴マミ'))
巴まみ
>>> print(jaconv.kata2hira('マミサン', ignore='ン'))
まみさン
"""
if ignore:
k2h_map = _exclude_ignorechar(ignore, K2H_TABLE.copy())
return _convert(text, k2h_map)
return _convert(text, K2H_TABLE) |
def get(self, sid):
"""
Constructs a TriggerContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
"""
return TriggerContext(self._version, account_sid=self._solution['account_sid'], sid=sid, ) | Constructs a TriggerContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerContext | Below is the the instruction that describes the task:
### Input:
Constructs a TriggerContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
### Response:
def get(self, sid):
"""
Constructs a TriggerContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
"""
return TriggerContext(self._version, account_sid=self._solution['account_sid'], sid=sid, ) |
def fetch(self):
"""
Fetch a SyncListPermissionInstance
:returns: Fetched SyncListPermissionInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return SyncListPermissionInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
identity=self._solution['identity'],
) | Fetch a SyncListPermissionInstance
:returns: Fetched SyncListPermissionInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance | Below is the the instruction that describes the task:
### Input:
Fetch a SyncListPermissionInstance
:returns: Fetched SyncListPermissionInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance
### Response:
def fetch(self):
"""
Fetch a SyncListPermissionInstance
:returns: Fetched SyncListPermissionInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return SyncListPermissionInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
identity=self._solution['identity'],
) |
def parse_args():
"""Parse arguments from the command line"""
parser = argparse.ArgumentParser(description=TO_KIBANA5_DESC_MSG)
parser.add_argument('-s', '--source', dest='src_path', \
required=True, help='source directory')
parser.add_argument('-d', '--dest', dest='dest_path', \
required=True, help='destination directory')
parser.add_argument('-o', '--old-size', dest='old_size', \
default='0', help='aggregation old size')
parser.add_argument('-n', '--new-size', dest='new_size', \
default='1000', help='aggregation new size')
parser.add_argument('-g', '--debug', dest='debug',
action='store_true')
return parser.parse_args() | Parse arguments from the command line | Below is the the instruction that describes the task:
### Input:
Parse arguments from the command line
### Response:
def parse_args():
"""Parse arguments from the command line"""
parser = argparse.ArgumentParser(description=TO_KIBANA5_DESC_MSG)
parser.add_argument('-s', '--source', dest='src_path', \
required=True, help='source directory')
parser.add_argument('-d', '--dest', dest='dest_path', \
required=True, help='destination directory')
parser.add_argument('-o', '--old-size', dest='old_size', \
default='0', help='aggregation old size')
parser.add_argument('-n', '--new-size', dest='new_size', \
default='1000', help='aggregation new size')
parser.add_argument('-g', '--debug', dest='debug',
action='store_true')
return parser.parse_args() |
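Given the flags defined above, a hedged invocation sketch; the script name and paths are placeholders:
import sys

sys.argv = ['to_kibana5.py', '-s', './kibana4_objects', '-d', './kibana5_objects', '-n', '1000']
args = parse_args()
print(args.src_path, args.dest_path, args.old_size, args.new_size, args.debug)
# -> ./kibana4_objects ./kibana5_objects 0 1000 False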
def perform_create(self, serializer):
"""Create a resource."""
process = serializer.validated_data.get('process')
if not process.is_active:
raise exceptions.ParseError(
'Process retired (id: {}, slug: {}/{}).'.format(process.id, process.slug, process.version)
)
with transaction.atomic():
instance = serializer.save()
assign_contributor_permissions(instance)
# Entity is added to the collection only when it is
# created - when it only contains 1 Data object.
entities = Entity.objects.annotate(num_data=Count('data')).filter(data=instance, num_data=1)
# Assign data object to all specified collections.
collection_pks = self.request.data.get('collections', [])
for collection in Collection.objects.filter(pk__in=collection_pks):
collection.data.add(instance)
copy_permissions(collection, instance)
# Add entities to which data belongs to the collection.
for entity in entities:
entity.collections.add(collection)
copy_permissions(collection, entity) | Create a resource. | Below is the the instruction that describes the task:
### Input:
Create a resource.
### Response:
def perform_create(self, serializer):
"""Create a resource."""
process = serializer.validated_data.get('process')
if not process.is_active:
raise exceptions.ParseError(
'Process retired (id: {}, slug: {}/{}).'.format(process.id, process.slug, process.version)
)
with transaction.atomic():
instance = serializer.save()
assign_contributor_permissions(instance)
# Entity is added to the collection only when it is
# created - when it only contains 1 Data object.
entities = Entity.objects.annotate(num_data=Count('data')).filter(data=instance, num_data=1)
# Assign data object to all specified collections.
collection_pks = self.request.data.get('collections', [])
for collection in Collection.objects.filter(pk__in=collection_pks):
collection.data.add(instance)
copy_permissions(collection, instance)
# Add entities to which data belongs to the collection.
for entity in entities:
entity.collections.add(collection)
copy_permissions(collection, entity) |
def up_by_time(*filters, local_dir=".", remote_dir=DEFAULT_REMOTE_DIR, count=1):
"""Sync most recent file by date, time attribues"""
remote_files = command.map_files_raw(remote_dir=remote_dir)
local_files = list_local_files(*filters, local_dir=local_dir)
most_recent = sorted(local_files, key=lambda f: f.datetime)
to_sync = most_recent[-count:]
_notify_sync(Direction.up, to_sync)
up_by_files(to_sync[::-1], remote_dir, remote_files) | Sync most recent file by date, time attributes | Below is the the instruction that describes the task:
### Input:
Sync most recent file by date, time attributes
### Response:
def up_by_time(*filters, local_dir=".", remote_dir=DEFAULT_REMOTE_DIR, count=1):
"""Sync most recent file by date, time attribues"""
remote_files = command.map_files_raw(remote_dir=remote_dir)
local_files = list_local_files(*filters, local_dir=local_dir)
most_recent = sorted(local_files, key=lambda f: f.datetime)
to_sync = most_recent[-count:]
_notify_sync(Direction.up, to_sync)
up_by_files(to_sync[::-1], remote_dir, remote_files) |
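A hedged call sketch based only on the signature above; the directory name, filter, and count are placeholders, and the exact filter form depends on list_local_files:
# Upload the three most recently modified matching local files to the card's default directory.
up_by_time('*.jpg', local_dir='./photos', count=3)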
def _call(self, cmd, get_output):
"""Calls a command through the SSH connection.
Remote stderr gets printed to this program's stderr. Output is captured
and may be returned.
"""
server_err = self.server_logger()
chan = self.get_client().get_transport().open_session()
try:
logger.debug("Invoking %r%s",
cmd, " (stdout)" if get_output else "")
chan.exec_command('/bin/sh -c %s' % shell_escape(cmd))
output = b''
while True:
r, w, e = select.select([chan], [], [])
if chan not in r:
continue # pragma: no cover
recvd = False
while chan.recv_stderr_ready():
data = chan.recv_stderr(1024)
server_err.append(data)
recvd = True
while chan.recv_ready():
data = chan.recv(1024)
if get_output:
output += data
recvd = True
if not recvd and chan.exit_status_ready():
break
output = output.rstrip(b'\r\n')
return chan.recv_exit_status(), output
finally:
server_err.done()
chan.close() | Calls a command through the SSH connection.
Remote stderr gets printed to this program's stderr. Output is captured
and may be returned. | Below is the the instruction that describes the task:
### Input:
Calls a command through the SSH connection.
Remote stderr gets printed to this program's stderr. Output is captured
and may be returned.
### Response:
def _call(self, cmd, get_output):
"""Calls a command through the SSH connection.
Remote stderr gets printed to this program's stderr. Output is captured
and may be returned.
"""
server_err = self.server_logger()
chan = self.get_client().get_transport().open_session()
try:
logger.debug("Invoking %r%s",
cmd, " (stdout)" if get_output else "")
chan.exec_command('/bin/sh -c %s' % shell_escape(cmd))
output = b''
while True:
r, w, e = select.select([chan], [], [])
if chan not in r:
continue # pragma: no cover
recvd = False
while chan.recv_stderr_ready():
data = chan.recv_stderr(1024)
server_err.append(data)
recvd = True
while chan.recv_ready():
data = chan.recv(1024)
if get_output:
output += data
recvd = True
if not recvd and chan.exit_status_ready():
break
output = output.rstrip(b'\r\n')
return chan.recv_exit_status(), output
finally:
server_err.done()
chan.close() |
def _from_jd_equinox(jd):
'''Calculate the FR day using the equinox as day 1'''
jd = trunc(jd) + 0.5
equinoxe = premier_da_la_annee(jd)
an = gregorian.from_jd(equinoxe)[0] - YEAR_EPOCH
mois = trunc((jd - equinoxe) / 30.) + 1
jour = int((jd - equinoxe) % 30) + 1
return (an, mois, jour) | Calculate the FR day using the equinox as day 1 | Below is the the instruction that describes the task:
### Input:
Calculate the FR day using the equinox as day 1
### Response:
def _from_jd_equinox(jd):
'''Calculate the FR day using the equinox as day 1'''
jd = trunc(jd) + 0.5
equinoxe = premier_da_la_annee(jd)
an = gregorian.from_jd(equinoxe)[0] - YEAR_EPOCH
mois = trunc((jd - equinoxe) / 30.) + 1
jour = int((jd - equinoxe) % 30) + 1
return (an, mois, jour) |
def BFS_Tree(G, start):
"""
Return an oriented tree constructed from bfs starting at 'start'.
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
pred = BFS(G, start)
T = digraph.DiGraph()
queue = Queue()
queue.put(start)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T | Return an oriented tree constructed from bfs starting at 'start'. | Below is the the instruction that describes the task:
### Input:
Return an oriented tree constructed from bfs starting at 'start'.
### Response:
def BFS_Tree(G, start):
"""
Return an oriented tree constructed from bfs starting at 'start'.
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
pred = BFS(G, start)
T = digraph.DiGraph()
queue = Queue()
queue.put(start)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T |
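A small sketch using the same digraph module the function relies on; the edges are arbitrary:
# Hedged sketch: assumes BFS accepts the same graph type used here.
G = digraph.DiGraph()
for u, v in [('a', 'b'), ('a', 'c'), ('b', 'd'), ('c', 'd')]:
    G.add_edge(u, v)
T = BFS_Tree(G, 'a')   # oriented BFS tree rooted at 'a'; 'd' keeps a single parent edge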
def iteritems(self):
""" Sort and then iterate the dictionary """
sorted_data = sorted(self.data.iteritems(), self.cmp, self.key,
self.reverse)
for k,v in sorted_data:
yield k,v | Sort and then iterate the dictionary | Below is the the instruction that describes the task:
### Input:
Sort and then iterate the dictionary
### Response:
def iteritems(self):
""" Sort and then iterate the dictionary """
sorted_data = sorted(self.data.iteritems(), self.cmp, self.key,
self.reverse)
for k,v in sorted_data:
yield k,v |
def Struct(fields): # pylint: disable=invalid-name
"""Construct a struct parameter type description protobuf.
:type fields: list of :class:`type_pb2.StructType.Field`
:param fields: the fields of the struct
:rtype: :class:`type_pb2.Type`
:returns: the appropriate struct-type protobuf
"""
return type_pb2.Type(
code=type_pb2.STRUCT, struct_type=type_pb2.StructType(fields=fields)
) | Construct a struct parameter type description protobuf.
:type fields: list of :class:`type_pb2.StructType.Field`
:param fields: the fields of the struct
:rtype: :class:`type_pb2.Type`
:returns: the appropriate struct-type protobuf | Below is the the instruction that describes the task:
### Input:
Construct a struct parameter type description protobuf.
:type fields: list of :class:`type_pb2.StructType.Field`
:param fields: the fields of the struct
:rtype: :class:`type_pb2.Type`
:returns: the appropriate struct-type protobuf
### Response:
def Struct(fields): # pylint: disable=invalid-name
"""Construct a struct parameter type description protobuf.
:type fields: list of :class:`type_pb2.StructType.Field`
:param fields: the fields of the struct
:rtype: :class:`type_pb2.Type`
:returns: the appropriate struct-type protobuf
"""
return type_pb2.Type(
code=type_pb2.STRUCT, struct_type=type_pb2.StructType(fields=fields)
) |
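A hedged sketch of building such a struct type from protobuf field descriptors, assuming the same type_pb2 module the function imports; the field names are illustrative:
fields = [
    type_pb2.StructType.Field(name='name', type=type_pb2.Type(code=type_pb2.STRING)),
    type_pb2.StructType.Field(name='age', type=type_pb2.Type(code=type_pb2.INT64)),
]
struct_type = Struct(fields)   # Type(code=STRUCT, struct_type=StructType(fields=[...]))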
def get_data(self) -> bytes:
"""Return a compressed datablob representing this object.
To restore the object, use :func:`fints.client.NeedRetryResponse.from_data`.
"""
data = {
"_class_name": self.__class__.__name__,
"version": 1,
"segments_bin": SegmentSequence([self.command_seg, self.tan_request]).render_bytes(),
"resume_method": self.resume_method,
"tan_request_structured": self.tan_request_structured,
}
return compress_datablob(DATA_BLOB_MAGIC_RETRY, 1, data) | Return a compressed datablob representing this object.
To restore the object, use :func:`fints.client.NeedRetryResponse.from_data`. | Below is the the instruction that describes the task:
### Input:
Return a compressed datablob representing this object.
To restore the object, use :func:`fints.client.NeedRetryResponse.from_data`.
### Response:
def get_data(self) -> bytes:
"""Return a compressed datablob representing this object.
To restore the object, use :func:`fints.client.NeedRetryResponse.from_data`.
"""
data = {
"_class_name": self.__class__.__name__,
"version": 1,
"segments_bin": SegmentSequence([self.command_seg, self.tan_request]).render_bytes(),
"resume_method": self.resume_method,
"tan_request_structured": self.tan_request_structured,
}
return compress_datablob(DATA_BLOB_MAGIC_RETRY, 1, data) |
def get_submission(self, url=None, submission_id=None, comment_limit=0,
comment_sort=None, params=None):
"""Return a Submission object for the given url or submission_id.
:param comment_limit: The desired number of comments to fetch. If <= 0
fetch the default number for the session's user. If None, fetch the
maximum possible.
:param comment_sort: The sort order for retrieved comments. When None
use the default for the session's user.
:param params: Dictionary containing extra GET data to put in the url.
"""
if bool(url) == bool(submission_id):
raise TypeError('One (and only one) of id or url is required!')
if submission_id:
url = urljoin(self.config['comments'], submission_id)
return objects.Submission.from_url(self, url,
comment_limit=comment_limit,
comment_sort=comment_sort,
params=params) | Return a Submission object for the given url or submission_id.
:param comment_limit: The desired number of comments to fetch. If <= 0
fetch the default number for the session's user. If None, fetch the
maximum possible.
:param comment_sort: The sort order for retrieved comments. When None
use the default for the session's user.
:param params: Dictionary containing extra GET data to put in the url. | Below is the the instruction that describes the task:
### Input:
Return a Submission object for the given url or submission_id.
:param comment_limit: The desired number of comments to fetch. If <= 0
fetch the default number for the session's user. If None, fetch the
maximum possible.
:param comment_sort: The sort order for retrieved comments. When None
use the default for the session's user.
:param params: Dictionary containing extra GET data to put in the url.
### Response:
def get_submission(self, url=None, submission_id=None, comment_limit=0,
comment_sort=None, params=None):
"""Return a Submission object for the given url or submission_id.
:param comment_limit: The desired number of comments to fetch. If <= 0
fetch the default number for the session's user. If None, fetch the
maximum possible.
:param comment_sort: The sort order for retrieved comments. When None
use the default for the session's user.
:param params: Dictionary containing extra GET data to put in the url.
"""
if bool(url) == bool(submission_id):
raise TypeError('One (and only one) of id or url is required!')
if submission_id:
url = urljoin(self.config['comments'], submission_id)
return objects.Submission.from_url(self, url,
comment_limit=comment_limit,
comment_sort=comment_sort,
params=params) |
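A hedged usage sketch against the legacy PRAW 3-style session this method belongs to; the user agent and submission id are placeholders:
# Hedged sketch: assumes a PRAW 3 Reddit session exposing get_submission.
r = praw.Reddit(user_agent='docstring-example/0.1')
post = r.get_submission(submission_id='2gmzqe', comment_limit=50, comment_sort='top')
# or by URL: r.get_submission(url='https://www.reddit.com/comments/2gmzqe/')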
def register(self, new_outputs, *args, **kwargs):
"""
Register outputs and metadata.
* ``initial_value`` - used in dynamic calculations
* ``size`` - number of elements per timestep
* ``uncertainty`` - in percent of nominal value
* ``variance`` - dictionary of covariances, diagonal is square of
uncertainties, no units
* ``jacobian`` - dictionary of sensitivities dxi/dfj
* ``isconstant`` - ``True`` if constant, ``False`` if periodic
* ``isproperty`` - ``True`` if output stays at last value during
thresholds, ``False`` if reverts to initial value
* ``timeseries`` - name of corresponding time series output, ``None`` if
no time series
* ``output_source`` - name
:param new_outputs: new outputs to register.
"""
kwargs.update(zip(self.meta_names, args))
# call super method
super(OutputRegistry, self).register(new_outputs, **kwargs) | Register outputs and metadata.
* ``initial_value`` - used in dynamic calculations
* ``size`` - number of elements per timestep
* ``uncertainty`` - in percent of nominal value
* ``variance`` - dictionary of covariances, diagonal is square of
uncertainties, no units
* ``jacobian`` - dictionary of sensitivities dxi/dfj
* ``isconstant`` - ``True`` if constant, ``False`` if periodic
* ``isproperty`` - ``True`` if output stays at last value during
thresholds, ``False`` if reverts to initial value
* ``timeseries`` - name of corresponding time series output, ``None`` if
no time series
* ``output_source`` - name
:param new_outputs: new outputs to register. | Below is the the instruction that describes the task:
### Input:
Register outputs and metadata.
* ``initial_value`` - used in dynamic calculations
* ``size`` - number of elements per timestep
* ``uncertainty`` - in percent of nominal value
* ``variance`` - dictionary of covariances, diagonal is square of
uncertainties, no units
* ``jacobian`` - dictionary of sensitivities dxi/dfj
* ``isconstant`` - ``True`` if constant, ``False`` if periodic
* ``isproperty`` - ``True`` if output stays at last value during
thresholds, ``False`` if reverts to initial value
* ``timeseries`` - name of corresponding time series output, ``None`` if
no time series
* ``output_source`` - name
:param new_outputs: new outputs to register.
### Response:
def register(self, new_outputs, *args, **kwargs):
"""
Register outputs and metadata.
* ``initial_value`` - used in dynamic calculations
* ``size`` - number of elements per timestep
* ``uncertainty`` - in percent of nominal value
* ``variance`` - dictionary of covariances, diagonal is square of
uncertainties, no units
* ``jacobian`` - dictionary of sensitivities dxi/dfj
* ``isconstant`` - ``True`` if constant, ``False`` if periodic
* ``isproperty`` - ``True`` if output stays at last value during
thresholds, ``False`` if reverts to initial value
* ``timeseries`` - name of corresponding time series output, ``None`` if
no time series
* ``output_source`` - name
:param new_outputs: new outputs to register.
"""
kwargs.update(zip(self.meta_names, args))
# call super method
super(OutputRegistry, self).register(new_outputs, **kwargs) |
def handle_symbol_search(self, call_id, payload):
"""Handler for symbol search results"""
self.log.debug('handle_symbol_search: in %s', Pretty(payload))
syms = payload["syms"]
qfList = []
for sym in syms:
p = sym.get("pos")
if p:
item = self.editor.to_quickfix_item(str(p["file"]),
p["line"],
str(sym["name"]),
"info")
qfList.append(item)
self.editor.write_quickfix_list(qfList, "Symbol Search") | Handler for symbol search results | Below is the the instruction that describes the task:
### Input:
Handler for symbol search results
### Response:
def handle_symbol_search(self, call_id, payload):
"""Handler for symbol search results"""
self.log.debug('handle_symbol_search: in %s', Pretty(payload))
syms = payload["syms"]
qfList = []
for sym in syms:
p = sym.get("pos")
if p:
item = self.editor.to_quickfix_item(str(p["file"]),
p["line"],
str(sym["name"]),
"info")
qfList.append(item)
self.editor.write_quickfix_list(qfList, "Symbol Search") |