code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def addDataset(self, dataset):
"""
Adds the given data set to this chart widget.
:param dataSet | <XChartDataset>
"""
self._datasets.append(dataset)
self._dataChanged = True
self._addDatasetAction(dataset) | Adds the given data set to this chart widget.
:param dataSet | <XChartDataset> | Below is the instruction that describes the task:
### Input:
Adds the given data set to this chart widget.
:param dataSet | <XChartDataset>
### Response:
def addDataset(self, dataset):
"""
Adds the given data set to this chart widget.
:param dataSet | <XChartDataset>
"""
self._datasets.append(dataset)
self._dataChanged = True
self._addDatasetAction(dataset) |
def _GetCachedEntryDataTypeMap(
self, format_type, value_data, cached_entry_offset):
"""Determines the cached entry data type map.
Args:
format_type (int): format type.
value_data (bytes): value data.
cached_entry_offset (int): offset of the first cached entry data
relative to the start of the value data.
Returns:
dtfabric.DataTypeMap: data type map which contains a data type definition,
such as a structure, that can be mapped onto binary data or None
if the data type map is not defined.
Raises:
ParseError: if the cached entry data type map cannot be determined.
"""
if format_type not in self._SUPPORTED_FORMAT_TYPES:
raise errors.ParseError('Unsupported format type: {0:d}'.format(
format_type))
data_type_map_name = ''
if format_type == self._FORMAT_TYPE_XP:
data_type_map_name = 'appcompatcache_cached_entry_xp_32bit'
elif format_type in (self._FORMAT_TYPE_8, self._FORMAT_TYPE_10):
data_type_map_name = 'appcompatcache_cached_entry_header_8'
else:
cached_entry = self._ParseCommon2003CachedEntry(
value_data, cached_entry_offset)
# Assume the entry is 64-bit if the 32-bit path offset is 0 and
# the 64-bit path offset is set.
if (cached_entry.path_offset_32bit == 0 and
cached_entry.path_offset_64bit != 0):
number_of_bits = '64'
else:
number_of_bits = '32'
if format_type == self._FORMAT_TYPE_2003:
data_type_map_name = (
'appcompatcache_cached_entry_2003_{0:s}bit'.format(number_of_bits))
elif format_type == self._FORMAT_TYPE_VISTA:
data_type_map_name = (
'appcompatcache_cached_entry_vista_{0:s}bit'.format(number_of_bits))
elif format_type == self._FORMAT_TYPE_7:
data_type_map_name = (
'appcompatcache_cached_entry_7_{0:s}bit'.format(number_of_bits))
return self._GetDataTypeMap(data_type_map_name) | Determines the cached entry data type map.
Args:
format_type (int): format type.
value_data (bytes): value data.
cached_entry_offset (int): offset of the first cached entry data
relative to the start of the value data.
Returns:
dtfabric.DataTypeMap: data type map which contains a data type definition,
such as a structure, that can be mapped onto binary data or None
if the data type map is not defined.
Raises:
ParseError: if the cached entry data type map cannot be determined. | Below is the instruction that describes the task:
### Input:
Determines the cached entry data type map.
Args:
format_type (int): format type.
value_data (bytes): value data.
cached_entry_offset (int): offset of the first cached entry data
relative to the start of the value data.
Returns:
dtfabric.DataTypeMap: data type map which contains a data type definition,
such as a structure, that can be mapped onto binary data or None
if the data type map is not defined.
Raises:
ParseError: if the cached entry data type map cannot be determined.
### Response:
def _GetCachedEntryDataTypeMap(
self, format_type, value_data, cached_entry_offset):
"""Determines the cached entry data type map.
Args:
format_type (int): format type.
value_data (bytes): value data.
cached_entry_offset (int): offset of the first cached entry data
relative to the start of the value data.
Returns:
dtfabric.DataTypeMap: data type map which contains a data type definition,
such as a structure, that can be mapped onto binary data or None
if the data type map is not defined.
Raises:
ParseError: if the cached entry data type map cannot be determined.
"""
if format_type not in self._SUPPORTED_FORMAT_TYPES:
raise errors.ParseError('Unsupported format type: {0:d}'.format(
format_type))
data_type_map_name = ''
if format_type == self._FORMAT_TYPE_XP:
data_type_map_name = 'appcompatcache_cached_entry_xp_32bit'
elif format_type in (self._FORMAT_TYPE_8, self._FORMAT_TYPE_10):
data_type_map_name = 'appcompatcache_cached_entry_header_8'
else:
cached_entry = self._ParseCommon2003CachedEntry(
value_data, cached_entry_offset)
# Assume the entry is 64-bit if the 32-bit path offset is 0 and
# the 64-bit path offset is set.
if (cached_entry.path_offset_32bit == 0 and
cached_entry.path_offset_64bit != 0):
number_of_bits = '64'
else:
number_of_bits = '32'
if format_type == self._FORMAT_TYPE_2003:
data_type_map_name = (
'appcompatcache_cached_entry_2003_{0:s}bit'.format(number_of_bits))
elif format_type == self._FORMAT_TYPE_VISTA:
data_type_map_name = (
'appcompatcache_cached_entry_vista_{0:s}bit'.format(number_of_bits))
elif format_type == self._FORMAT_TYPE_7:
data_type_map_name = (
'appcompatcache_cached_entry_7_{0:s}bit'.format(number_of_bits))
return self._GetDataTypeMap(data_type_map_name) |
def check_gsims(self, gsims):
"""
:param gsims: a sequence of GSIM instances
"""
imts = set(from_string(imt).name for imt in self.imtls)
for gsim in gsims:
restrict_imts = gsim.DEFINED_FOR_INTENSITY_MEASURE_TYPES
if restrict_imts:
names = set(cls.__name__ for cls in restrict_imts)
invalid_imts = ', '.join(imts - names)
if invalid_imts:
raise ValueError(
'The IMT %s is not accepted by the GSIM %s' %
(invalid_imts, gsim))
if 'site_model' not in self.inputs:
# look at the required sites parameters: they must have
# a valid value; the other parameters can keep a NaN
# value since they are not used by the calculator
for param in gsim.REQUIRES_SITES_PARAMETERS:
if param in ('lon', 'lat'): # no check
continue
param_name = self.siteparam[param]
param_value = getattr(self, param_name)
if (isinstance(param_value, float) and
numpy.isnan(param_value)):
raise ValueError(
'Please set a value for %r, this is required by '
'the GSIM %s' % (param_name, gsim)) | :param gsims: a sequence of GSIM instances | Below is the instruction that describes the task:
### Input:
:param gsims: a sequence of GSIM instances
### Response:
def check_gsims(self, gsims):
"""
:param gsims: a sequence of GSIM instances
"""
imts = set(from_string(imt).name for imt in self.imtls)
for gsim in gsims:
restrict_imts = gsim.DEFINED_FOR_INTENSITY_MEASURE_TYPES
if restrict_imts:
names = set(cls.__name__ for cls in restrict_imts)
invalid_imts = ', '.join(imts - names)
if invalid_imts:
raise ValueError(
'The IMT %s is not accepted by the GSIM %s' %
(invalid_imts, gsim))
if 'site_model' not in self.inputs:
# look at the required sites parameters: they must have
# a valid value; the other parameters can keep a NaN
# value since they are not used by the calculator
for param in gsim.REQUIRES_SITES_PARAMETERS:
if param in ('lon', 'lat'): # no check
continue
param_name = self.siteparam[param]
param_value = getattr(self, param_name)
if (isinstance(param_value, float) and
numpy.isnan(param_value)):
raise ValueError(
'Please set a value for %r, this is required by '
'the GSIM %s' % (param_name, gsim)) |
def _find_conflict(
context, # type: ValidationContext
cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
compared_fragments, # type: PairSet
parent_fields_are_mutually_exclusive, # type: bool
response_name, # type: str
field1, # type: Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]
field2, # type: Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]
):
# type: (...) -> Optional[Tuple[Tuple[str, str], List[Node], List[Node]]]
"""Determines if there is a conflict between two particular fields."""
parent_type1, ast1, def1 = field1
parent_type2, ast2, def2 = field2
# If it is known that two fields could not possibly apply at the same
# time, due to the parent types, then it is safe to permit them to diverge
# in aliased field or arguments used as they will not present any ambiguity
# by differing.
# It is known that two parent types could never overlap if they are
# different Object types. Interface or Union types might overlap - if not
# in the current state of the schema, then perhaps in some future version,
# thus may not safely diverge.
are_mutually_exclusive = parent_fields_are_mutually_exclusive or (
parent_type1 != parent_type2
and isinstance(parent_type1, GraphQLObjectType)
and isinstance(parent_type2, GraphQLObjectType)
)
# The return type for each field.
type1 = def1 and def1.type
type2 = def2 and def2.type
if not are_mutually_exclusive:
# Two aliases must refer to the same field.
name1 = ast1.name.value
name2 = ast2.name.value
if name1 != name2:
return (
(response_name, "{} and {} are different fields".format(name1, name2)),
[ast1],
[ast2],
)
# Two field calls must have the same arguments.
if not _same_arguments(ast1.arguments, ast2.arguments):
return ((response_name, "they have differing arguments"), [ast1], [ast2])
if type1 and type2 and do_types_conflict(type1, type2):
return (
(
response_name,
"they return conflicting types {} and {}".format(type1, type2),
),
[ast1],
[ast2],
)
# Collect and compare sub-fields. Use the same "visited fragment names" list
# for both collections so fields in a fragment reference are never
# compared to themselves.
selection_set1 = ast1.selection_set
selection_set2 = ast2.selection_set
if selection_set1 and selection_set2:
conflicts = _find_conflicts_between_sub_selection_sets( # type: ignore
context,
cached_fields_and_fragment_names,
compared_fragments,
are_mutually_exclusive,
get_named_type(type1), # type: ignore
selection_set1,
get_named_type(type2), # type: ignore
selection_set2,
)
return _subfield_conflicts(conflicts, response_name, ast1, ast2)
return None | Determines if there is a conflict between two particular fields. | Below is the instruction that describes the task:
### Input:
Determines if there is a conflict between two particular fields.
### Response:
def _find_conflict(
context, # type: ValidationContext
cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
compared_fragments, # type: PairSet
parent_fields_are_mutually_exclusive, # type: bool
response_name, # type: str
field1, # type: Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]
field2, # type: Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]
):
# type: (...) -> Optional[Tuple[Tuple[str, str], List[Node], List[Node]]]
"""Determines if there is a conflict between two particular fields."""
parent_type1, ast1, def1 = field1
parent_type2, ast2, def2 = field2
# If it is known that two fields could not possibly apply at the same
# time, due to the parent types, then it is safe to permit them to diverge
# in aliased field or arguments used as they will not present any ambiguity
# by differing.
# It is known that two parent types could never overlap if they are
# different Object types. Interface or Union types might overlap - if not
# in the current state of the schema, then perhaps in some future version,
# thus may not safely diverge.
are_mutually_exclusive = parent_fields_are_mutually_exclusive or (
parent_type1 != parent_type2
and isinstance(parent_type1, GraphQLObjectType)
and isinstance(parent_type2, GraphQLObjectType)
)
# The return type for each field.
type1 = def1 and def1.type
type2 = def2 and def2.type
if not are_mutually_exclusive:
# Two aliases must refer to the same field.
name1 = ast1.name.value
name2 = ast2.name.value
if name1 != name2:
return (
(response_name, "{} and {} are different fields".format(name1, name2)),
[ast1],
[ast2],
)
# Two field calls must have the same arguments.
if not _same_arguments(ast1.arguments, ast2.arguments):
return ((response_name, "they have differing arguments"), [ast1], [ast2])
if type1 and type2 and do_types_conflict(type1, type2):
return (
(
response_name,
"they return conflicting types {} and {}".format(type1, type2),
),
[ast1],
[ast2],
)
# Collect and compare sub-fields. Use the same "visited fragment names" list
# for both collections so fields in a fragment reference are never
# compared to themselves.
selection_set1 = ast1.selection_set
selection_set2 = ast2.selection_set
if selection_set1 and selection_set2:
conflicts = _find_conflicts_between_sub_selection_sets( # type: ignore
context,
cached_fields_and_fragment_names,
compared_fragments,
are_mutually_exclusive,
get_named_type(type1), # type: ignore
selection_set1,
get_named_type(type2), # type: ignore
selection_set2,
)
return _subfield_conflicts(conflicts, response_name, ast1, ast2)
return None |
def length_longest_path(input):
"""
:type input: str
:rtype: int
"""
curr_len, max_len = 0, 0 # running length and max length
stack = [] # keep track of the name length
for s in input.split('\n'):
print("---------")
print("<path>:", s)
depth = s.count('\t') # the depth of current dir or file
print("depth: ", depth)
print("stack: ", stack)
print("curlen: ", curr_len)
while len(stack) > depth: # go back to the correct depth
curr_len -= stack.pop()
stack.append(len(s.strip('\t'))+1) # 1 is the length of '/'
curr_len += stack[-1] # increase current length
print("stack: ", stack)
print("curlen: ", curr_len)
if '.' in s: # update maxlen only when it is a file
max_len = max(max_len, curr_len-1) # -1 is to minus one '/'
return max_len | :type input: str
:rtype: int | Below is the instruction that describes the task:
### Input:
:type input: str
:rtype: int
### Response:
def length_longest_path(input):
"""
:type input: str
:rtype: int
"""
curr_len, max_len = 0, 0 # running length and max length
stack = [] # keep track of the name length
for s in input.split('\n'):
print("---------")
print("<path>:", s)
depth = s.count('\t') # the depth of current dir or file
print("depth: ", depth)
print("stack: ", stack)
print("curlen: ", curr_len)
while len(stack) > depth: # go back to the correct depth
curr_len -= stack.pop()
stack.append(len(s.strip('\t'))+1) # 1 is the length of '/'
curr_len += stack[-1] # increase current length
print("stack: ", stack)
print("curlen: ", curr_len)
if '.' in s: # update maxlen only when it is a file
max_len = max(max_len, curr_len-1) # -1 is to minus one '/'
return max_len |
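A quick sanity check for the length_longest_path sample above; the input string and expected value are illustrative, not part of the dataset:

# Illustrative input: the longest path to a file is dir/subdir2/file.ext (20 characters).
example = "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"
assert length_longest_path(example) == len("dir/subdir2/file.ext")  # 20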
def load(self):
"""Load proxy list from configured proxy source"""
self._list = self._source.load()
self._list_iter = itertools.cycle(self._list) | Load proxy list from configured proxy source | Below is the instruction that describes the task:
### Input:
Load proxy list from configured proxy source
### Response:
def load(self):
"""Load proxy list from configured proxy source"""
self._list = self._source.load()
self._list_iter = itertools.cycle(self._list) |
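The itertools.cycle call above is what lets callers round-robin through the loaded proxies; a minimal sketch, assuming a hypothetical DummySource standing in for the configured proxy source:

import itertools

class DummySource:
    """Hypothetical proxy source returning a fixed list."""
    def load(self):
        return ["10.0.0.1:8080", "10.0.0.2:8080"]

proxies = itertools.cycle(DummySource().load())
print(next(proxies), next(proxies), next(proxies))  # third call wraps back to the first entry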
def trigger_replication_schedule(self, schedule_id, dry_run=False):
"""
Trigger replication immediately. Start and end dates on the schedule will be
ignored.
@param schedule_id: The id of the schedule to trigger.
@param dry_run: Whether to execute a dry run.
@return: The command corresponding to the replication job.
@since: API v3
"""
return self._post("replications/%s/run" % schedule_id, ApiCommand,
params=dict(dryRun=dry_run),
api_version=3) | Trigger replication immediately. Start and end dates on the schedule will be
ignored.
@param schedule_id: The id of the schedule to trigger.
@param dry_run: Whether to execute a dry run.
@return: The command corresponding to the replication job.
@since: API v3 | Below is the instruction that describes the task:
### Input:
Trigger replication immediately. Start and end dates on the schedule will be
ignored.
@param schedule_id: The id of the schedule to trigger.
@param dry_run: Whether to execute a dry run.
@return: The command corresponding to the replication job.
@since: API v3
### Response:
def trigger_replication_schedule(self, schedule_id, dry_run=False):
"""
Trigger replication immediately. Start and end dates on the schedule will be
ignored.
@param schedule_id: The id of the schedule to trigger.
@param dry_run: Whether to execute a dry run.
@return: The command corresponding to the replication job.
@since: API v3
"""
return self._post("replications/%s/run" % schedule_id, ApiCommand,
params=dict(dryRun=dry_run),
api_version=3) |
def attachmethod(target):
'''
Reference: https://blog.tonyseek.com/post/open-class-in-python/
class Spam(object):
pass
@attach_method(Spam)
def egg1(self, name):
print((self, name))
spam1 = Spam()
# The method egg1 added via OpenClass is available
spam1.egg1("Test1")
# Outputs Test1
'''
if isinstance(target, type):
def decorator(func):
setattr(target, func.__name__, func)
else:
def decorator(func):
setattr(target, func.__name__, partial(func, target))
return decorator | Reference: https://blog.tonyseek.com/post/open-class-in-python/
class Spam(object):
pass
@attach_method(Spam)
def egg1(self, name):
print((self, name))
spam1 = Spam()
# The method egg1 added via OpenClass is available
spam1.egg1("Test1")
# Outputs Test1 | Below is the instruction that describes the task:
### Input:
Reference: https://blog.tonyseek.com/post/open-class-in-python/
class Spam(object):
pass
@attach_method(Spam)
def egg1(self, name):
print((self, name))
spam1 = Spam()
# The method egg1 added via OpenClass is available
spam1.egg1("Test1")
# Outputs Test1
### Response:
def attachmethod(target):
'''
Reference: https://blog.tonyseek.com/post/open-class-in-python/
class Spam(object):
pass
@attach_method(Spam)
def egg1(self, name):
print((self, name))
spam1 = Spam()
# The method egg1 added via OpenClass is available
spam1.egg1("Test1")
# Outputs Test1
'''
if isinstance(target, type):
def decorator(func):
setattr(target, func.__name__, func)
else:
def decorator(func):
setattr(target, func.__name__, partial(func, target))
return decorator |
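The isinstance(target, type) branch above handles classes, while the functools.partial branch binds the function to a single instance; a short sketch of the instance case, assuming attachmethod is importable (egg2 and "Test2" are illustrative names):

class Spam(object):
    pass

spam = Spam()

@attachmethod(spam)          # attach to an instance rather than the class
def egg2(self, name):
    print(self, name)

spam.egg2("Test2")           # bound via functools.partial, so self is spam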
def execute_rolling_restart(
brokers,
jolokia_port,
jolokia_prefix,
check_interval,
check_count,
unhealthy_time_limit,
skip,
verbose,
pre_stop_task,
post_stop_task,
start_command,
stop_command,
ssh_password=None
):
"""Execute the rolling restart on the specified brokers. It checks the
number of under replicated partitions on each broker, using Jolokia.
The check is performed at constant intervals, and a broker will be restarted
when all the brokers are answering and are reporting zero under replicated
partitions.
:param brokers: the brokers that will be restarted
:type brokers: map of broker ids and host names
:param jolokia_port: HTTP port for Jolokia
:type jolokia_port: integer
:param jolokia_prefix: HTTP prefix on the server for the Jolokia queries
:type jolokia_prefix: string
:param check_interval: the number of seconds it will wait between each check
:type check_interval: integer
:param check_count: the number of times the check should be positive before
restarting the next broker
:type check_count: integer
:param unhealthy_time_limit: the maximum number of seconds it will wait for
the cluster to become stable before exiting with error
:type unhealthy_time_limit: integer
:param skip: the number of brokers to skip
:type skip: integer
:param verbose: print command execution information
:type verbose: bool
:param pre_stop_task: a list of tasks to execute before running stop
:type pre_stop_task: list
:param post_stop_task: a list of task to execute after running stop
:type post_stop_task: list
:param start_command: the start command for kafka
:type start_command: string
:param stop_command: the stop command for kafka
:type stop_command: string
:param ssh_password: The ssh password to use if needed
:type ssh_password: string
"""
all_hosts = [b[1] for b in brokers]
for n, host in enumerate(all_hosts[skip:]):
with ssh(host=host, forward_agent=True, sudoable=True, max_attempts=3, max_timeout=2,
ssh_password=ssh_password) as connection:
execute_task(pre_stop_task, host)
wait_for_stable_cluster(
all_hosts,
jolokia_port,
jolokia_prefix,
check_interval,
1 if n == 0 else check_count,
unhealthy_time_limit,
)
print("Stopping {0} ({1}/{2})".format(host, n + 1, len(all_hosts) - skip))
stop_broker(host, connection, stop_command, verbose)
execute_task(post_stop_task, host)
# we open a new SSH connection in case the hostname has a new IP
with ssh(host=host, forward_agent=True, sudoable=True, max_attempts=3, max_timeout=2,
ssh_password=ssh_password) as connection:
print("Starting {0} ({1}/{2})".format(host, n + 1, len(all_hosts) - skip))
start_broker(host, connection, start_command, verbose)
# Wait before terminating the script
wait_for_stable_cluster(
all_hosts,
jolokia_port,
jolokia_prefix,
check_interval,
check_count,
unhealthy_time_limit,
) | Execute the rolling restart on the specified brokers. It checks the
number of under replicated partitions on each broker, using Jolokia.
The check is performed at constant intervals, and a broker will be restarted
when all the brokers are answering and are reporting zero under replicated
partitions.
:param brokers: the brokers that will be restarted
:type brokers: map of broker ids and host names
:param jolokia_port: HTTP port for Jolokia
:type jolokia_port: integer
:param jolokia_prefix: HTTP prefix on the server for the Jolokia queries
:type jolokia_prefix: string
:param check_interval: the number of seconds it will wait between each check
:type check_interval: integer
:param check_count: the number of times the check should be positive before
restarting the next broker
:type check_count: integer
:param unhealthy_time_limit: the maximum number of seconds it will wait for
the cluster to become stable before exiting with error
:type unhealthy_time_limit: integer
:param skip: the number of brokers to skip
:type skip: integer
:param verbose: print command execution information
:type verbose: bool
:param pre_stop_task: a list of tasks to execute before running stop
:type pre_stop_task: list
:param post_stop_task: a list of task to execute after running stop
:type post_stop_task: list
:param start_command: the start command for kafka
:type start_command: string
:param stop_command: the stop command for kafka
:type stop_command: string
:param ssh_password: The ssh password to use if needed
:type ssh_password: string | Below is the instruction that describes the task:
### Input:
Execute the rolling restart on the specified brokers. It checks the
number of under replicated partitions on each broker, using Jolokia.
The check is performed at constant intervals, and a broker will be restarted
when all the brokers are answering and are reporting zero under replicated
partitions.
:param brokers: the brokers that will be restarted
:type brokers: map of broker ids and host names
:param jolokia_port: HTTP port for Jolokia
:type jolokia_port: integer
:param jolokia_prefix: HTTP prefix on the server for the Jolokia queries
:type jolokia_prefix: string
:param check_interval: the number of seconds it will wait between each check
:type check_interval: integer
:param check_count: the number of times the check should be positive before
restarting the next broker
:type check_count: integer
:param unhealthy_time_limit: the maximum number of seconds it will wait for
the cluster to become stable before exiting with error
:type unhealthy_time_limit: integer
:param skip: the number of brokers to skip
:type skip: integer
:param verbose: print command execution information
:type verbose: bool
:param pre_stop_task: a list of tasks to execute before running stop
:type pre_stop_task: list
:param post_stop_task: a list of task to execute after running stop
:type post_stop_task: list
:param start_command: the start command for kafka
:type start_command: string
:param stop_command: the stop command for kafka
:type stop_command: string
:param ssh_password: The ssh password to use if needed
:type ssh_password: string
### Response:
def execute_rolling_restart(
brokers,
jolokia_port,
jolokia_prefix,
check_interval,
check_count,
unhealthy_time_limit,
skip,
verbose,
pre_stop_task,
post_stop_task,
start_command,
stop_command,
ssh_password=None
):
"""Execute the rolling restart on the specified brokers. It checks the
number of under replicated partitions on each broker, using Jolokia.
The check is performed at constant intervals, and a broker will be restarted
when all the brokers are answering and are reporting zero under replicated
partitions.
:param brokers: the brokers that will be restarted
:type brokers: map of broker ids and host names
:param jolokia_port: HTTP port for Jolokia
:type jolokia_port: integer
:param jolokia_prefix: HTTP prefix on the server for the Jolokia queries
:type jolokia_prefix: string
:param check_interval: the number of seconds it will wait between each check
:type check_interval: integer
:param check_count: the number of times the check should be positive before
restarting the next broker
:type check_count: integer
:param unhealthy_time_limit: the maximum number of seconds it will wait for
the cluster to become stable before exiting with error
:type unhealthy_time_limit: integer
:param skip: the number of brokers to skip
:type skip: integer
:param verbose: print command execution information
:type verbose: bool
:param pre_stop_task: a list of tasks to execute before running stop
:type pre_stop_task: list
:param post_stop_task: a list of task to execute after running stop
:type post_stop_task: list
:param start_command: the start command for kafka
:type start_command: string
:param stop_command: the stop command for kafka
:type stop_command: string
:param ssh_password: The ssh password to use if needed
:type ssh_password: string
"""
all_hosts = [b[1] for b in brokers]
for n, host in enumerate(all_hosts[skip:]):
with ssh(host=host, forward_agent=True, sudoable=True, max_attempts=3, max_timeout=2,
ssh_password=ssh_password) as connection:
execute_task(pre_stop_task, host)
wait_for_stable_cluster(
all_hosts,
jolokia_port,
jolokia_prefix,
check_interval,
1 if n == 0 else check_count,
unhealthy_time_limit,
)
print("Stopping {0} ({1}/{2})".format(host, n + 1, len(all_hosts) - skip))
stop_broker(host, connection, stop_command, verbose)
execute_task(post_stop_task, host)
# we open a new SSH connection in case the hostname has a new IP
with ssh(host=host, forward_agent=True, sudoable=True, max_attempts=3, max_timeout=2,
ssh_password=ssh_password) as connection:
print("Starting {0} ({1}/{2})".format(host, n + 1, len(all_hosts) - skip))
start_broker(host, connection, start_command, verbose)
# Wait before terminating the script
wait_for_stable_cluster(
all_hosts,
jolokia_port,
jolokia_prefix,
check_interval,
check_count,
unhealthy_time_limit,
) |
def _set_filter_change_update_delay(self, v, load=False):
"""
Setter method for filter_change_update_delay, mapped from YANG variable /rbridge_id/filter_change_update_delay (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_filter_change_update_delay is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_filter_change_update_delay() directly.
YANG Description: Change filter change update delay timer
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("filter_delay_value",filter_change_update_delay.filter_change_update_delay, yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='filter-delay-value', extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}), is_container='list', yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """filter_change_update_delay must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("filter_delay_value",filter_change_update_delay.filter_change_update_delay, yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='filter-delay-value', extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}), is_container='list', yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)""",
})
self.__filter_change_update_delay = t
if hasattr(self, '_set'):
self._set() | Setter method for filter_change_update_delay, mapped from YANG variable /rbridge_id/filter_change_update_delay (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_filter_change_update_delay is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_filter_change_update_delay() directly.
YANG Description: Change filter change update delay timer | Below is the instruction that describes the task:
### Input:
Setter method for filter_change_update_delay, mapped from YANG variable /rbridge_id/filter_change_update_delay (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_filter_change_update_delay is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_filter_change_update_delay() directly.
YANG Description: Change filter change update delay timer
### Response:
def _set_filter_change_update_delay(self, v, load=False):
"""
Setter method for filter_change_update_delay, mapped from YANG variable /rbridge_id/filter_change_update_delay (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_filter_change_update_delay is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_filter_change_update_delay() directly.
YANG Description: Change filter change update delay timer
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("filter_delay_value",filter_change_update_delay.filter_change_update_delay, yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='filter-delay-value', extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}), is_container='list', yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """filter_change_update_delay must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("filter_delay_value",filter_change_update_delay.filter_change_update_delay, yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='filter-delay-value', extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}), is_container='list', yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)""",
})
self.__filter_change_update_delay = t
if hasattr(self, '_set'):
self._set() |
def get_params(self, url):
"""
Extract the named parameters from a url regex. If the url regex does not contain
named parameters, they will be keyed _0, _1, ...
* Named parameters
Regex:
/photos/^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<object_id>\d+)/
URL:
http://www2.ljworld.com/photos/2009/oct/11/12345/
Return Value:
{u'day': '11', u'month': 'oct', u'object_id': '12345', u'year': '2009'}
* Unnamed parameters
Regex:
/blah/([\w-]+)/(\d+)/
URL:
http://www.example.com/blah/hello/123/
Return Value:
{u'_0': 'hello', u'_1': '123'}
"""
match = re.match(self.regex, url)
if match is not None:
params = match.groupdict()
if not params:
params = {}
for i, group in enumerate(match.groups()[1:]):
params['_%s' % i] = group
return params
raise OEmbedException('No regex matched the url %s' % (url)) | Extract the named parameters from a url regex. If the url regex does not contain
named parameters, they will be keyed _0, _1, ...
* Named parameters
Regex:
/photos/^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<object_id>\d+)/
URL:
http://www2.ljworld.com/photos/2009/oct/11/12345/
Return Value:
{u'day': '11', u'month': 'oct', u'object_id': '12345', u'year': '2009'}
* Unnamed parameters
Regex:
/blah/([\w-]+)/(\d+)/
URL:
http://www.example.com/blah/hello/123/
Return Value:
{u'_0': 'hello', u'_1': '123'} | Below is the instruction that describes the task:
### Input:
Extract the named parameters from a url regex. If the url regex does not contain
named parameters, they will be keyed _0, _1, ...
* Named parameters
Regex:
/photos/^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<object_id>\d+)/
URL:
http://www2.ljworld.com/photos/2009/oct/11/12345/
Return Value:
{u'day': '11', u'month': 'oct', u'object_id': '12345', u'year': '2009'}
* Unnamed parameters
Regex:
/blah/([\w-]+)/(\d+)/
URL:
http://www.example.com/blah/hello/123/
Return Value:
{u'_0': 'hello', u'_1': '123'}
### Response:
def get_params(self, url):
"""
Extract the named parameters from a url regex. If the url regex does not contain
named parameters, they will be keyed _0, _1, ...
* Named parameters
Regex:
/photos/^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<object_id>\d+)/
URL:
http://www2.ljworld.com/photos/2009/oct/11/12345/
Return Value:
{u'day': '11', u'month': 'oct', u'object_id': '12345', u'year': '2009'}
* Unnamed parameters
Regex:
/blah/([\w-]+)/(\d+)/
URL:
http://www.example.com/blah/hello/123/
Return Value:
{u'_0': 'hello', u'_1': '123'}
"""
match = re.match(self.regex, url)
if match is not None:
params = match.groupdict()
if not params:
params = {}
for i, group in enumerate(match.groups()[1:]):
params['_%s' % i] = group
return params
raise OEmbedException('No regex matched the url %s' % (url)) |
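The two branches above map onto plain re.match behaviour; a standalone sketch with illustrative patterns and URLs (note the method itself additionally skips the first unnamed group via groups()[1:]):

import re

# Named groups come back keyed by their names.
named = re.match(r'/photos/(?P<year>\d{4})/(?P<month>\w{3})/', '/photos/2009/oct/')
print(named.groupdict())   # {'year': '2009', 'month': 'oct'}

# Unnamed groups are keyed _0, _1, ... by the method above.
unnamed = re.match(r'/blah/([\w-]+)/(\d+)/', '/blah/hello/123/')
print({'_%s' % i: g for i, g in enumerate(unnamed.groups())})   # {'_0': 'hello', '_1': '123'}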
def sample(problem, N, num_levels=4, optimal_trajectories=None,
local_optimization=True):
"""Generate model inputs using the Method of Morris
Returns a NumPy matrix containing the model inputs required for Method of
Morris. The resulting matrix has :math:`(G+1)*T` rows and :math:`D`
columns, where :math:`D` is the number of parameters, :math:`G` is the
number of groups (if no groups are selected, the number of parameters).
:math:`T` is the number of trajectories :math:`N`,
or `optimal_trajectories` if selected.
These model inputs are intended to be used with
:func:`SALib.analyze.morris.analyze`.
Parameters
----------
problem : dict
The problem definition
N : int
The number of trajectories to generate
num_levels : int, default=4
The number of grid levels
optimal_trajectories : int
The number of optimal trajectories to sample (between 2 and N)
local_optimization : bool, default=True
Flag whether to use local optimization according to Ruano et al. (2012)
Speeds up the process tremendously for bigger N and num_levels.
If set to ``False`` brute force method is used, unless ``gurobipy`` is
available
Returns
-------
sample : numpy.ndarray
Returns a numpy.ndarray containing the model inputs required for Method
of Morris. The resulting matrix has :math:`(G/D+1)*N/T` rows and
:math:`D` columns, where :math:`D` is the number of parameters.
"""
if problem.get('groups'):
sample = _sample_groups(problem, N, num_levels)
else:
sample = _sample_oat(problem, N, num_levels)
if optimal_trajectories:
sample = _compute_optimised_trajectories(problem,
sample,
N,
optimal_trajectories,
local_optimization)
scale_samples(sample, problem['bounds'])
return sample | Generate model inputs using the Method of Morris
Returns a NumPy matrix containing the model inputs required for Method of
Morris. The resulting matrix has :math:`(G+1)*T` rows and :math:`D`
columns, where :math:`D` is the number of parameters, :math:`G` is the
number of groups (if no groups are selected, the number of parameters).
:math:`T` is the number of trajectories :math:`N`,
or `optimal_trajectories` if selected.
These model inputs are intended to be used with
:func:`SALib.analyze.morris.analyze`.
Parameters
----------
problem : dict
The problem definition
N : int
The number of trajectories to generate
num_levels : int, default=4
The number of grid levels
optimal_trajectories : int
The number of optimal trajectories to sample (between 2 and N)
local_optimization : bool, default=True
Flag whether to use local optimization according to Ruano et al. (2012)
Speeds up the process tremendously for bigger N and num_levels.
If set to ``False`` brute force method is used, unless ``gurobipy`` is
available
Returns
-------
sample : numpy.ndarray
Returns a numpy.ndarray containing the model inputs required for Method
of Morris. The resulting matrix has :math:`(G/D+1)*N/T` rows and
:math:`D` columns, where :math:`D` is the number of parameters. | Below is the instruction that describes the task:
### Input:
Generate model inputs using the Method of Morris
Returns a NumPy matrix containing the model inputs required for Method of
Morris. The resulting matrix has :math:`(G+1)*T` rows and :math:`D`
columns, where :math:`D` is the number of parameters, :math:`G` is the
number of groups (if no groups are selected, the number of parameters).
:math:`T` is the number of trajectories :math:`N`,
or `optimal_trajectories` if selected.
These model inputs are intended to be used with
:func:`SALib.analyze.morris.analyze`.
Parameters
----------
problem : dict
The problem definition
N : int
The number of trajectories to generate
num_levels : int, default=4
The number of grid levels
optimal_trajectories : int
The number of optimal trajectories to sample (between 2 and N)
local_optimization : bool, default=True
Flag whether to use local optimization according to Ruano et al. (2012)
Speeds up the process tremendously for bigger N and num_levels.
If set to ``False`` brute force method is used, unless ``gurobipy`` is
available
Returns
-------
sample : numpy.ndarray
Returns a numpy.ndarray containing the model inputs required for Method
of Morris. The resulting matrix has :math:`(G/D+1)*N/T` rows and
:math:`D` columns, where :math:`D` is the number of parameters.
### Response:
def sample(problem, N, num_levels=4, optimal_trajectories=None,
local_optimization=True):
"""Generate model inputs using the Method of Morris
Returns a NumPy matrix containing the model inputs required for Method of
Morris. The resulting matrix has :math:`(G+1)*T` rows and :math:`D`
columns, where :math:`D` is the number of parameters, :math:`G` is the
number of groups (if no groups are selected, the number of parameters).
:math:`T` is the number of trajectories :math:`N`,
or `optimal_trajectories` if selected.
These model inputs are intended to be used with
:func:`SALib.analyze.morris.analyze`.
Parameters
----------
problem : dict
The problem definition
N : int
The number of trajectories to generate
num_levels : int, default=4
The number of grid levels
optimal_trajectories : int
The number of optimal trajectories to sample (between 2 and N)
local_optimization : bool, default=True
Flag whether to use local optimization according to Ruano et al. (2012)
Speeds up the process tremendously for bigger N and num_levels.
If set to ``False`` brute force method is used, unless ``gurobipy`` is
available
Returns
-------
sample : numpy.ndarray
Returns a numpy.ndarray containing the model inputs required for Method
of Morris. The resulting matrix has :math:`(G/D+1)*N/T` rows and
:math:`D` columns, where :math:`D` is the number of parameters.
"""
if problem.get('groups'):
sample = _sample_groups(problem, N, num_levels)
else:
sample = _sample_oat(problem, N, num_levels)
if optimal_trajectories:
sample = _compute_optimised_trajectories(problem,
sample,
N,
optimal_trajectories,
local_optimization)
scale_samples(sample, problem['bounds'])
return sample |
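A hedged usage sketch, assuming this is SALib's Method of Morris sampler and that the problem dictionary follows SALib's usual num_vars/names/bounds convention (the parameter names and bounds below are placeholders):

from SALib.sample import morris

problem = {
    'num_vars': 2,
    'names': ['x1', 'x2'],
    'bounds': [[0.0, 1.0], [-1.0, 1.0]],
}

X = morris.sample(problem, N=10, num_levels=4)
print(X.shape)   # (num_vars + 1) * N rows when no groups are defined, i.e. (30, 2)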
def immediateAssignmentExtended(StartingTime_presence=0):
"""IMMEDIATE ASSIGNMENT EXTENDED Section 9.1.19"""
a = L2PseudoLength()
b = TpPd(pd=0x6)
c = MessageType(mesType=0x39) # 00111001
d = PageModeAndSpareHalfOctets()
f = ChannelDescription()
g = RequestReference()
h = TimingAdvance()
i = MobileAllocation()
packet = a / b / c / d / f / g / h / i
if StartingTime_presence is 1:
j = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0)
packet = packet / j
k = IaxRestOctets()
packet = packet / k
return packet | IMMEDIATE ASSIGNMENT EXTENDED Section 9.1.19 | Below is the instruction that describes the task:
### Input:
IMMEDIATE ASSIGNMENT EXTENDED Section 9.1.19
### Response:
def immediateAssignmentExtended(StartingTime_presence=0):
"""IMMEDIATE ASSIGNMENT EXTENDED Section 9.1.19"""
a = L2PseudoLength()
b = TpPd(pd=0x6)
c = MessageType(mesType=0x39) # 00111001
d = PageModeAndSpareHalfOctets()
f = ChannelDescription()
g = RequestReference()
h = TimingAdvance()
i = MobileAllocation()
packet = a / b / c / d / f / g / h / i
if StartingTime_presence is 1:
j = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0)
packet = packet / j
k = IaxRestOctets()
packet = packet / k
return packet |
def _load_github_hooks(github_url='https://api.github.com'):
"""Request GitHub's IP block from their API.
Return the IP network.
If we detect a rate-limit error, raise an error message stating when
the rate limit will reset.
If something else goes wrong, raise a generic 503.
"""
try:
resp = requests.get(github_url + '/meta')
if resp.status_code == 200:
return resp.json()['hooks']
else:
if resp.headers.get('X-RateLimit-Remaining') == '0':
reset_ts = int(resp.headers['X-RateLimit-Reset'])
reset_string = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
time.gmtime(reset_ts))
raise ServiceUnavailable('Rate limited from GitHub until ' +
reset_string)
else:
raise ServiceUnavailable('Error reaching GitHub')
except (KeyError, ValueError, requests.exceptions.ConnectionError):
raise ServiceUnavailable('Error reaching GitHub') | Request GitHub's IP block from their API.
Return the IP network.
If we detect a rate-limit error, raise an error message stating when
the rate limit will reset.
If something else goes wrong, raise a generic 503. | Below is the instruction that describes the task:
### Input:
Request GitHub's IP block from their API.
Return the IP network.
If we detect a rate-limit error, raise an error message stating when
the rate limit will reset.
If something else goes wrong, raise a generic 503.
### Response:
def _load_github_hooks(github_url='https://api.github.com'):
"""Request GitHub's IP block from their API.
Return the IP network.
If we detect a rate-limit error, raise an error message stating when
the rate limit will reset.
If something else goes wrong, raise a generic 503.
"""
try:
resp = requests.get(github_url + '/meta')
if resp.status_code == 200:
return resp.json()['hooks']
else:
if resp.headers.get('X-RateLimit-Remaining') == '0':
reset_ts = int(resp.headers['X-RateLimit-Reset'])
reset_string = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
time.gmtime(reset_ts))
raise ServiceUnavailable('Rate limited from GitHub until ' +
reset_string)
else:
raise ServiceUnavailable('Error reaching GitHub')
except (KeyError, ValueError, requests.exceptions.ConnectionError):
raise ServiceUnavailable('Error reaching GitHub') |
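Callers typically compare a webhook's source address against the returned CIDR blocks; a sketch using the standard-library ipaddress module (the remote address below is illustrative):

import ipaddress

hook_cidrs = _load_github_hooks()                 # e.g. ['192.30.252.0/22', ...]
remote = ipaddress.ip_address('192.30.252.1')
allowed = any(remote in ipaddress.ip_network(cidr) for cidr in hook_cidrs)
print(allowed)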
def calc_digest(origin, algorithm="sha1", block_size=None):
"""Calculate digest of a readable object
Args:
origin -- a readable object for which calculate digest
algorithn -- the algorithm to use. See ``hashlib.algorithms_available`` for supported algorithms.
block_size -- the size of the block to read at each iteration
"""
try:
hashM = hashlib.new(algorithm)
except ValueError:
raise ValueError('hash algorithm not supported by the underlying platform: "{0}"'.format(algorithm))
while True:
chunk = origin.read(block_size) if block_size else origin.read()
if not chunk:
break
hashM.update(chunk)
return hashM.hexdigest() | Calculate digest of a readable object
Args:
origin -- a readable object for which calculate digest
algorithn -- the algorithm to use. See ``hashlib.algorithms_available`` for supported algorithms.
block_size -- the size of the block to read at each iteration | Below is the instruction that describes the task:
### Input:
Calculate digest of a readable object
Args:
origin -- a readable object for which calculate digest
algorithn -- the algorithm to use. See ``hashlib.algorithms_available`` for supported algorithms.
block_size -- the size of the block to read at each iteration
### Response:
def calc_digest(origin, algorithm="sha1", block_size=None):
"""Calculate digest of a readable object
Args:
origin -- a readable object for which calculate digest
algorithn -- the algorithm to use. See ``hashlib.algorithms_available`` for supported algorithms.
block_size -- the size of the block to read at each iteration
"""
try:
hashM = hashlib.new(algorithm)
except ValueError:
raise ValueError('hash algorithm not supported by the underlying platform: "{0}"'.format(algorithm))
while True:
chunk = origin.read(block_size) if block_size else origin.read()
if not chunk:
break
hashM.update(chunk)
return hashM.hexdigest() |
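A minimal usage sketch for calc_digest, reading from an in-memory buffer so nothing touches disk (assuming the function above is importable):

import io

buf = io.BytesIO(b"hello world")
print(calc_digest(buf, algorithm="sha256", block_size=4))   # hex digest of b"hello world"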
def apply(d, leaf_key, func, new_name=None, remove_lkey=True,
list_of_dicts=False, unflatten_level=0, deepcopy=True, **kwargs):
""" apply a function to all values with a certain leaf (terminal) key
Parameters
----------
d : dict
leaf_key : str
name of leaf key
func : callable
function to apply
new_name : str
if not None, rename leaf_key
remove_lkey: bool
whether to remove original leaf_key (if new_name is not None)
list_of_dicts: bool
treat list of dicts as additional branches
unflatten_level : int or None
the number of levels to leave unflattened before combining,
for instance if you need dicts as inputs
deepcopy: bool
deepcopy values
kwargs : dict
additional keywords to parse to function
Examples
--------
>>> from pprint import pprint
>>> d = {'a':1,'b':1}
>>> func = lambda x: x+1
>>> pprint(apply(d,'a',func))
{'a': 2, 'b': 1}
>>> pprint(apply(d,'a',func,new_name='c'))
{'b': 1, 'c': 2}
>>> pprint(apply(d,'a',func,new_name='c', remove_lkey=False))
{'a': 1, 'b': 1, 'c': 2}
>>> test_dict = {"a":[{"b":[{"c":1, "d": 2}, {"e":3, "f": 4}]}, {"b":[{"c":5, "d": 6}, {"e":7, "f": 8}]}]}
>>> pprint(apply(test_dict, "b", lambda x: x[-1], list_of_dicts=True, unflatten_level=2))
{'a': [{'b': {'e': 3, 'f': 4}}, {'b': {'e': 7, 'f': 8}}]}
""" # noqa: E501
list_of_dicts = '__list__' if list_of_dicts else None
if unflatten_level == 0:
flatd = flatten(d, list_of_dicts=list_of_dicts)
else:
flatd = flattennd(d, unflatten_level, list_of_dicts=list_of_dicts)
newd = {k: (func(v, **kwargs) if k[-1] == leaf_key else v)
for k, v in flatd.items()}
if new_name is not None:
newd = {(tuple(list(k[:-1]) + [new_name]) if k[-1]
== leaf_key else k): v for k, v in newd.items()}
if not remove_lkey:
newd.update(flatd)
return unflatten(newd, list_of_dicts=list_of_dicts, deepcopy=deepcopy) | apply a function to all values with a certain leaf (terminal) key
Parameters
----------
d : dict
leaf_key : str
name of leaf key
func : callable
function to apply
new_name : str
if not None, rename leaf_key
remove_lkey: bool
whether to remove original leaf_key (if new_name is not None)
list_of_dicts: bool
treat list of dicts as additional branches
unflatten_level : int or None
the number of levels to leave unflattened before combining,
for instance if you need dicts as inputs
deepcopy: bool
deepcopy values
kwargs : dict
additional keywords to parse to function
Examples
--------
>>> from pprint import pprint
>>> d = {'a':1,'b':1}
>>> func = lambda x: x+1
>>> pprint(apply(d,'a',func))
{'a': 2, 'b': 1}
>>> pprint(apply(d,'a',func,new_name='c'))
{'b': 1, 'c': 2}
>>> pprint(apply(d,'a',func,new_name='c', remove_lkey=False))
{'a': 1, 'b': 1, 'c': 2}
>>> test_dict = {"a":[{"b":[{"c":1, "d": 2}, {"e":3, "f": 4}]}, {"b":[{"c":5, "d": 6}, {"e":7, "f": 8}]}]}
>>> pprint(apply(test_dict, "b", lambda x: x[-1], list_of_dicts=True, unflatten_level=2))
{'a': [{'b': {'e': 3, 'f': 4}}, {'b': {'e': 7, 'f': 8}}]} | Below is the instruction that describes the task:
### Input:
apply a function to all values with a certain leaf (terminal) key
Parameters
----------
d : dict
leaf_key : str
name of leaf key
func : callable
function to apply
new_name : str
if not None, rename leaf_key
remove_lkey: bool
whether to remove original leaf_key (if new_name is not None)
list_of_dicts: bool
treat list of dicts as additional branches
unflatten_level : int or None
the number of levels to leave unflattened before combining,
for instance if you need dicts as inputs
deepcopy: bool
deepcopy values
kwargs : dict
additional keywords to parse to function
Examples
--------
>>> from pprint import pprint
>>> d = {'a':1,'b':1}
>>> func = lambda x: x+1
>>> pprint(apply(d,'a',func))
{'a': 2, 'b': 1}
>>> pprint(apply(d,'a',func,new_name='c'))
{'b': 1, 'c': 2}
>>> pprint(apply(d,'a',func,new_name='c', remove_lkey=False))
{'a': 1, 'b': 1, 'c': 2}
>>> test_dict = {"a":[{"b":[{"c":1, "d": 2}, {"e":3, "f": 4}]}, {"b":[{"c":5, "d": 6}, {"e":7, "f": 8}]}]}
>>> pprint(apply(test_dict, "b", lambda x: x[-1], list_of_dicts=True, unflatten_level=2))
{'a': [{'b': {'e': 3, 'f': 4}}, {'b': {'e': 7, 'f': 8}}]}
### Response:
def apply(d, leaf_key, func, new_name=None, remove_lkey=True,
list_of_dicts=False, unflatten_level=0, deepcopy=True, **kwargs):
""" apply a function to all values with a certain leaf (terminal) key
Parameters
----------
d : dict
leaf_key : str
name of leaf key
func : callable
function to apply
new_name : str
if not None, rename leaf_key
remove_lkey: bool
whether to remove original leaf_key (if new_name is not None)
list_of_dicts: bool
treat list of dicts as additional branches
unflatten_level : int or None
the number of levels to leave unflattened before combining,
for instance if you need dicts as inputs
deepcopy: bool
deepcopy values
kwargs : dict
additional keywords to parse to function
Examples
--------
>>> from pprint import pprint
>>> d = {'a':1,'b':1}
>>> func = lambda x: x+1
>>> pprint(apply(d,'a',func))
{'a': 2, 'b': 1}
>>> pprint(apply(d,'a',func,new_name='c'))
{'b': 1, 'c': 2}
>>> pprint(apply(d,'a',func,new_name='c', remove_lkey=False))
{'a': 1, 'b': 1, 'c': 2}
>>> test_dict = {"a":[{"b":[{"c":1, "d": 2}, {"e":3, "f": 4}]}, {"b":[{"c":5, "d": 6}, {"e":7, "f": 8}]}]}
>>> pprint(apply(test_dict, "b", lambda x: x[-1], list_of_dicts=True, unflatten_level=2))
{'a': [{'b': {'e': 3, 'f': 4}}, {'b': {'e': 7, 'f': 8}}]}
""" # noqa: E501
list_of_dicts = '__list__' if list_of_dicts else None
if unflatten_level == 0:
flatd = flatten(d, list_of_dicts=list_of_dicts)
else:
flatd = flattennd(d, unflatten_level, list_of_dicts=list_of_dicts)
newd = {k: (func(v, **kwargs) if k[-1] == leaf_key else v)
for k, v in flatd.items()}
if new_name is not None:
newd = {(tuple(list(k[:-1]) + [new_name]) if k[-1]
== leaf_key else k): v for k, v in newd.items()}
if not remove_lkey:
newd.update(flatd)
return unflatten(newd, list_of_dicts=list_of_dicts, deepcopy=deepcopy) |
def disallow(
self, foreign, permission="active", account=None, threshold=None, **kwargs
):
""" Remove additional access to an account by some other public
key or account.
:param str foreign: The foreign account that will obtain access
:param str permission: (optional) The actual permission to
modify (defaults to ``active``)
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
:param int threshold: The threshold that needs to be reached
by signatures to be able to interact
"""
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
if permission not in ["owner", "active"]:
raise ValueError("Permission needs to be either 'owner', or 'active")
account = Account(account, blockchain_instance=self)
authority = account[permission]
try:
pubkey = PublicKey(foreign, prefix=self.prefix)
affected_items = list(
filter(lambda x: x[0] == str(pubkey), authority["key_auths"])
)
authority["key_auths"] = list(
filter(lambda x: x[0] != str(pubkey), authority["key_auths"])
)
except:
try:
foreign_account = Account(foreign, blockchain_instance=self)
affected_items = list(
filter(
lambda x: x[0] == foreign_account["id"],
authority["account_auths"],
)
)
authority["account_auths"] = list(
filter(
lambda x: x[0] != foreign_account["id"],
authority["account_auths"],
)
)
except:
raise ValueError("Unknown foreign account or unvalid public key")
if not affected_items:
raise ValueError("Changes nothing!")
removed_weight = affected_items[0][1]
# Define threshold
if threshold:
authority["weight_threshold"] = threshold
# Correct threshold (at most by the amount removed from the
# authority)
try:
self._test_weights_treshold(authority)
except:
log.critical(
"The account's threshold will be reduced by %d" % (removed_weight)
)
authority["weight_threshold"] -= removed_weight
self._test_weights_treshold(authority)
op = operations.Account_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"account": account["id"],
permission: authority,
"extensions": {},
}
)
if permission == "owner":
return self.finalizeOp(op, account["name"], "owner", **kwargs)
else:
return self.finalizeOp(op, account["name"], "active", **kwargs) | Remove additional access to an account by some other public
key or account.
:param str foreign: The foreign account that will obtain access
:param str permission: (optional) The actual permission to
modify (defaults to ``active``)
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
:param int threshold: The threshold that needs to be reached
by signatures to be able to interact | Below is the instruction that describes the task:
### Input:
Remove additional access to an account by some other public
key or account.
:param str foreign: The foreign account that will obtain access
:param str permission: (optional) The actual permission to
modify (defaults to ``active``)
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
:param int threshold: The threshold that needs to be reached
by signatures to be able to interact
### Response:
def disallow(
self, foreign, permission="active", account=None, threshold=None, **kwargs
):
""" Remove additional access to an account by some other public
key or account.
:param str foreign: The foreign account whose access will be removed
:param str permission: (optional) The actual permission to
modify (defaults to ``active``)
:param str account: (optional) the account to remove access
from (defaults to ``default_account``)
:param int threshold: The threshold that needs to be reached
by signatures to be able to interact
"""
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
if permission not in ["owner", "active"]:
raise ValueError("Permission needs to be either 'owner', or 'active")
account = Account(account, blockchain_instance=self)
authority = account[permission]
try:
pubkey = PublicKey(foreign, prefix=self.prefix)
affected_items = list(
filter(lambda x: x[0] == str(pubkey), authority["key_auths"])
)
authority["key_auths"] = list(
filter(lambda x: x[0] != str(pubkey), authority["key_auths"])
)
except:
try:
foreign_account = Account(foreign, blockchain_instance=self)
affected_items = list(
filter(
lambda x: x[0] == foreign_account["id"],
authority["account_auths"],
)
)
authority["account_auths"] = list(
filter(
lambda x: x[0] != foreign_account["id"],
authority["account_auths"],
)
)
except:
raise ValueError("Unknown foreign account or unvalid public key")
if not affected_items:
raise ValueError("Changes nothing!")
removed_weight = affected_items[0][1]
# Define threshold
if threshold:
authority["weight_threshold"] = threshold
# Correct threshold (at most by the amount removed from the
# authority)
try:
self._test_weights_treshold(authority)
except:
log.critical(
"The account's threshold will be reduced by %d" % (removed_weight)
)
authority["weight_threshold"] -= removed_weight
self._test_weights_treshold(authority)
op = operations.Account_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"account": account["id"],
permission: authority,
"extensions": {},
}
)
if permission == "owner":
return self.finalizeOp(op, account["name"], "owner", **kwargs)
else:
return self.finalizeOp(op, account["name"], "active", **kwargs) |
def _perform_action(self, params, return_dict=True):
"""
Perform a droplet action.
Args:
params (dict): parameters of the action
Optional Args:
return_dict (bool): Return a dict when True (default),
otherwise return an Action.
Returns dict or Action
"""
action = self.get_data(
"droplets/%s/actions/" % self.id,
type=POST,
params=params
)
if return_dict:
return action
else:
action = action[u'action']
return_action = Action(token=self.token)
# Loading attributes
for attr in action.keys():
setattr(return_action, attr, action[attr])
return return_action | Perform a droplet action.
Args:
params (dict): parameters of the action
Optional Args:
return_dict (bool): Return a dict when True (default),
otherwise return an Action.
Returns dict or Action | Below is the instruction that describes the task:
### Input:
Perform a droplet action.
Args:
params (dict): parameters of the action
Optional Args:
return_dict (bool): Return a dict when True (default),
otherwise return an Action.
Returns dict or Action
### Response:
def _perform_action(self, params, return_dict=True):
"""
Perform a droplet action.
Args:
params (dict): parameters of the action
Optional Args:
return_dict (bool): Return a dict when True (default),
otherwise return an Action.
Returns dict or Action
"""
action = self.get_data(
"droplets/%s/actions/" % self.id,
type=POST,
params=params
)
if return_dict:
return action
else:
action = action[u'action']
return_action = Action(token=self.token)
# Loading attributes
for attr in action.keys():
setattr(return_action, attr, action[attr])
return return_action |
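A short sketch of how this private helper is normally reached; the droplet construction and the `{'type': 'reboot'}` payload are illustrative assumptions based on the surrounding API, not part of the record.
```python
# Sketch only: public wrappers such as reboot() funnel into _perform_action().
droplet = Droplet(token="do_token_placeholder", id=12345)  # hypothetical values
raw = droplet._perform_action({"type": "reboot"})                        # plain dict
action = droplet._perform_action({"type": "reboot"}, return_dict=False)  # Action object
```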
def color_intervals(colors, levels, clip=None, N=255):
"""
Maps the supplied colors into bins defined by the supplied levels.
If a clip tuple is defined the bins are clipped to the defined
range otherwise the range is computed from the levels and returned.
Arguments
---------
colors: list
List of colors (usually hex string or named colors)
levels: list or array_like
Levels specifying the bins to map the colors to
clip: tuple (optional)
Lower and upper limits of the color range
N: int
Number of discrete colors to map the range onto
Returns
-------
cmap: list
List of colors
clip: tuple
Lower and upper bounds of the color range
"""
if len(colors) != len(levels)-1:
raise ValueError('The number of colors in the colormap '
'must match the intervals defined in the '
'color_levels, expected %d colors found %d.'
% (N, len(colors)))
intervals = np.diff(levels)
cmin, cmax = min(levels), max(levels)
interval = cmax-cmin
cmap = []
for intv, c in zip(intervals, colors):
cmap += [c]*int(round(N*(intv/interval)))
if clip is not None:
clmin, clmax = clip
lidx = int(round(N*((clmin-cmin)/interval)))
uidx = int(round(N*((cmax-clmax)/interval)))
uidx = N-uidx
if lidx == uidx:
uidx = lidx+1
cmap = cmap[lidx:uidx]
if clmin == clmax:
idx = np.argmin(np.abs(np.array(levels)-clmin))
clip = levels[idx: idx+2] if len(levels) > idx+2 else levels[idx-1: idx+1]
return cmap, clip | Maps the supplied colors into bins defined by the supplied levels.
If a clip tuple is defined the bins are clipped to the defined
range otherwise the range is computed from the levels and returned.
Arguments
---------
colors: list
List of colors (usually hex string or named colors)
levels: list or array_like
Levels specifying the bins to map the colors to
clip: tuple (optional)
Lower and upper limits of the color range
N: int
Number of discrete colors to map the range onto
Returns
-------
cmap: list
List of colors
clip: tuple
Lower and upper bounds of the color range | Below is the instruction that describes the task:
### Input:
Maps the supplied colors into bins defined by the supplied levels.
If a clip tuple is defined the bins are clipped to the defined
range otherwise the range is computed from the levels and returned.
Arguments
---------
colors: list
List of colors (usually hex string or named colors)
levels: list or array_like
Levels specifying the bins to map the colors to
clip: tuple (optional)
Lower and upper limits of the color range
N: int
Number of discrete colors to map the range onto
Returns
-------
cmap: list
List of colors
clip: tuple
Lower and upper bounds of the color range
### Response:
def color_intervals(colors, levels, clip=None, N=255):
"""
Maps the supplied colors into bins defined by the supplied levels.
If a clip tuple is defined the bins are clipped to the defined
range otherwise the range is computed from the levels and returned.
Arguments
---------
colors: list
List of colors (usually hex string or named colors)
levels: list or array_like
Levels specifying the bins to map the colors to
clip: tuple (optional)
Lower and upper limits of the color range
N: int
Number of discrete colors to map the range onto
Returns
-------
cmap: list
List of colors
clip: tuple
Lower and upper bounds of the color range
"""
if len(colors) != len(levels)-1:
raise ValueError('The number of colors in the colormap '
'must match the intervals defined in the '
'color_levels, expected %d colors found %d.'
% (N, len(colors)))
intervals = np.diff(levels)
cmin, cmax = min(levels), max(levels)
interval = cmax-cmin
cmap = []
for intv, c in zip(intervals, colors):
cmap += [c]*int(round(N*(intv/interval)))
if clip is not None:
clmin, clmax = clip
lidx = int(round(N*((clmin-cmin)/interval)))
uidx = int(round(N*((cmax-clmax)/interval)))
uidx = N-uidx
if lidx == uidx:
uidx = lidx+1
cmap = cmap[lidx:uidx]
if clmin == clmax:
idx = np.argmin(np.abs(np.array(levels)-clmin))
clip = levels[idx: idx+2] if len(levels) > idx+2 else levels[idx-1: idx+1]
return cmap, clip |
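A quick numeric sketch of the bin expansion and clipping; the colours and levels are arbitrary, and `color_intervals` is assumed to be importable alongside numpy (which the function body requires as `np`).
```python
import numpy as np  # the helper above relies on numpy being available as np

colors = ["#0000ff", "#00ff00", "#ff0000"]   # one colour per interval
levels = [0, 2, 5, 10]                        # interval widths 2, 3 and 5
cmap, clip = color_intervals(colors, levels, clip=(2, 10))
# Each colour is repeated in proportion to its interval width over N=255 samples,
# then the (2, 10) clip drops the samples that fall below level 2.
print(len(cmap), clip)
```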
def middle_begin(self, index):
"""
Set the index where MIDDLE starts.
:param int index: the new index for MIDDLE begin
"""
if (index < 0) or (index > self.all_length):
raise ValueError(u"The given index is not valid")
self.__middle_begin = index | Set the index where MIDDLE starts.
:param int index: the new index for MIDDLE begin | Below is the instruction that describes the task:
### Input:
Set the index where MIDDLE starts.
:param int index: the new index for MIDDLE begin
### Response:
def middle_begin(self, index):
"""
Set the index where MIDDLE starts.
:param int index: the new index for MIDDLE begin
"""
if (index < 0) or (index > self.all_length):
raise ValueError(u"The given index is not valid")
self.__middle_begin = index |
def phone_text_subs():
"""
Gets a dictionary of dictionaries that each contain alphabetic number manifestations mapped to their actual
Number value.
Returns:
dictionary of dictionaries containing Strings mapped to Numbers
"""
Small = {
'zero': 0,
'zer0': 0,
'one': 1,
'two': 2,
'three': 3,
'four': 4,
'fuor': 4,
'five': 5,
'fith': 5,
'six': 6,
'seven': 7,
'sven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'eleven': 11,
'twelve': 12,
'thirteen': 13,
'fourteen': 14,
'fifteen': 15,
'sixteen': 16,
'seventeen': 17,
'eighteen': 18,
'nineteen': 19,
'twenty': 20,
'thirty': 30,
'forty': 40,
'fifty': 50,
'sixty': 60,
'seventy': 70,
'eighty': 80,
'ninety': 90,
'oh': 0
}
Magnitude = {
'thousand': 1000,
'million': 1000000,
}
Others = {
'!': 1,
'o': 0,
'l': 1,
'i': 1
}
output = {}
output['Small'] = Small
output['Magnitude'] = Magnitude
output['Others'] = Others
return output | Gets a dictionary of dictionaries that each contain alphabetic number manifestations mapped to their actual
Number value.
Returns:
dictionary of dictionaries containing Strings mapped to Numbers | Below is the instruction that describes the task:
### Input:
Gets a dictionary of dictionaries that each contain alphabetic number manifestations mapped to their actual
Number value.
Returns:
dictionary of dictionaries containing Strings mapped to Numbers
### Response:
def phone_text_subs():
"""
Gets a dictionary of dictionaries that each contain alphabetic number manifestations mapped to their actual
Number value.
Returns:
dictionary of dictionaries containing Strings mapped to Numbers
"""
Small = {
'zero': 0,
'zer0': 0,
'one': 1,
'two': 2,
'three': 3,
'four': 4,
'fuor': 4,
'five': 5,
'fith': 5,
'six': 6,
'seven': 7,
'sven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'eleven': 11,
'twelve': 12,
'thirteen': 13,
'fourteen': 14,
'fifteen': 15,
'sixteen': 16,
'seventeen': 17,
'eighteen': 18,
'nineteen': 19,
'twenty': 20,
'thirty': 30,
'forty': 40,
'fifty': 50,
'sixty': 60,
'seventy': 70,
'eighty': 80,
'ninety': 90,
'oh': 0
}
Magnitude = {
'thousand': 1000,
'million': 1000000,
}
Others = {
'!': 1,
'o': 0,
'l': 1,
'i': 1
}
output = {}
output['Small'] = Small
output['Magnitude'] = Magnitude
output['Others'] = Others
return output |
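A tiny lookup sketch showing how the returned tables are meant to be combined when normalising spelled-out or obfuscated digits:
```python
# Sketch: resolve a (possibly misspelled) digit word using the returned tables.
subs = phone_text_subs()
small, others = subs["Small"], subs["Others"]
token = "sven"                                  # obfuscated spelling of "seven"
digit = small.get(token, others.get(token))
print(digit)                                    # -> 7
```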
def from_dict(cls, operation, client, **caller_metadata):
"""Factory: construct an instance from a dictionary.
:type operation: dict
:param operation: Operation as a JSON object.
:type client: :class:`~google.cloud.client.Client`
:param client: The client used to poll for the status of the operation.
:type caller_metadata: dict
:param caller_metadata: caller-assigned metadata about the operation
:rtype: :class:`Operation`
:returns: new instance, with attributes based on the protobuf.
"""
operation_pb = json_format.ParseDict(operation, operations_pb2.Operation())
result = cls(operation_pb.name, client, **caller_metadata)
result._update_state(operation_pb)
result._from_grpc = False
return result | Factory: construct an instance from a dictionary.
:type operation: dict
:param operation: Operation as a JSON object.
:type client: :class:`~google.cloud.client.Client`
:param client: The client used to poll for the status of the operation.
:type caller_metadata: dict
:param caller_metadata: caller-assigned metadata about the operation
:rtype: :class:`Operation`
:returns: new instance, with attributes based on the protobuf. | Below is the instruction that describes the task:
### Input:
Factory: construct an instance from a dictionary.
:type operation: dict
:param operation: Operation as a JSON object.
:type client: :class:`~google.cloud.client.Client`
:param client: The client used to poll for the status of the operation.
:type caller_metadata: dict
:param caller_metadata: caller-assigned metadata about the operation
:rtype: :class:`Operation`
:returns: new instance, with attributes based on the protobuf.
### Response:
def from_dict(cls, operation, client, **caller_metadata):
"""Factory: construct an instance from a dictionary.
:type operation: dict
:param operation: Operation as a JSON object.
:type client: :class:`~google.cloud.client.Client`
:param client: The client used to poll for the status of the operation.
:type caller_metadata: dict
:param caller_metadata: caller-assigned metadata about the operation
:rtype: :class:`Operation`
:returns: new instance, with attributes based on the protobuf.
"""
operation_pb = json_format.ParseDict(operation, operations_pb2.Operation())
result = cls(operation_pb.name, client, **caller_metadata)
result._update_state(operation_pb)
result._from_grpc = False
return result |
def update(self):
"""This method should be called to update associated Posts
It will call content-specific methods:
_get_data() to obtain list of entries
_store_post() to store obtained entry object
_get_data_source_url() to get an URL to identify Posts from this Data Source
"""
#get the raw data
# self.posts.all().delete() # TODO: handle in update_posts if source changes without deleting every time
data = self._get_data()
#iterate through them and for each item
msg = []
for entry in data:
link = self._get_entry_link(entry)
stored_entry, is_new = Post.objects.get_or_create(link=link)
self._store_post(stored_entry, entry)
if is_new is True:
#self._set_dates(stored_entry)
# self._store_post(stored_entry, entry)
msg.append('Post "%s" added.' % stored_entry.link)
else:
msg.append('Post "%s" already saved.' % stored_entry.link)
self.updated = utils.get_datetime_now()
self.save(no_signals=True)
return '<br />'.join(msg) | This method should be called to update associated Posts
It will call content-specific methods:
_get_data() to obtain list of entries
_store_post() to store obtained entry object
_get_data_source_url() to get an URL to identify Posts from this Data Source | Below is the instruction that describes the task:
### Input:
This method should be called to update associated Posts
It will call content-specific methods:
_get_data() to obtain list of entries
_store_post() to store obtained entry object
_get_data_source_url() to get an URL to identify Posts from this Data Source
### Response:
def update(self):
"""This method should be called to update associated Posts
It will call content-specific methods:
_get_data() to obtain list of entries
_store_post() to store obtained entry object
_get_data_source_url() to get an URL to identify Posts from this Data Source
"""
#get the raw data
# self.posts.all().delete() # TODO: handle in update_posts if source changes without deleting every time
data = self._get_data()
#iterate through them and for each item
msg = []
for entry in data:
link = self._get_entry_link(entry)
stored_entry, is_new = Post.objects.get_or_create(link=link)
self._store_post(stored_entry, entry)
if is_new is True:
#self._set_dates(stored_entry)
# self._store_post(stored_entry, entry)
msg.append('Post "%s" added.' % stored_entry.link)
else:
msg.append('Post "%s" already saved.' % stored_entry.link)
self.updated = utils.get_datetime_now()
self.save(no_signals=True)
return '<br />'.join(msg) |
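Because `update()` only orchestrates, a subclass sketch makes the hook contract clearer; the base-class name and the entry shape below are assumptions for illustration.
```python
# Hypothetical subclass sketch showing the hooks update() depends on.
class DummyFeedSource(DataSourceBase):          # assumed base class name
    def _get_data(self):
        # return an iterable of raw entries, e.g. parsed feed items
        return [{"url": "http://example.com/post-1", "title": "Post 1"}]

    def _get_entry_link(self, entry):
        return entry["url"]

    def _store_post(self, stored_entry, entry):
        stored_entry.title = entry["title"]
        stored_entry.save()
```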
def query_download_tasks(self, task_ids, operate_type=1,
expires=None, **kwargs):
"""根据任务ID号,查询离线下载任务信息及进度信息。
:param task_ids: 要查询的任务ID列表
:type task_ids: list or tuple
:param operate_type:
* 0:查任务信息
* 1:查进度信息,默认为1
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象
"""
params = {
'task_ids': ','.join(map(str, task_ids)),
'op_type': operate_type,
'expires': expires,
}
return self._request('services/cloud_dl', 'query_task',
extra_params=params, **kwargs) | Query offline download task information and progress by task ID.
:param task_ids: list of task IDs to query
:type task_ids: list or tuple
:param operate_type:
* 0: query task information
* 1: query progress information (default: 1)
:param expires: request expiration time; validated if provided.
:type expires: int
:return: Response object | Below is the instruction that describes the task:
### Input:
Query offline download task information and progress by task ID.
:param task_ids: list of task IDs to query
:type task_ids: list or tuple
:param operate_type:
* 0: query task information
* 1: query progress information (default: 1)
:param expires: request expiration time; validated if provided.
:type expires: int
:return: Response object
### Response:
def query_download_tasks(self, task_ids, operate_type=1,
expires=None, **kwargs):
"""根据任务ID号,查询离线下载任务信息及进度信息。
:param task_ids: 要查询的任务ID列表
:type task_ids: list or tuple
:param operate_type:
* 0:查任务信息
* 1:查进度信息,默认为1
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象
"""
params = {
'task_ids': ','.join(map(str, task_ids)),
'op_type': operate_type,
'expires': expires,
}
return self._request('services/cloud_dl', 'query_task',
extra_params=params, **kwargs) |
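A usage sketch under the assumption that `client` is an authenticated instance of the PCS-style class defining this method; the task IDs are placeholders.
```python
# Sketch: poll progress for two previously created offline-download tasks.
response = client.query_download_tasks([26, 27], operate_type=1)
print(response.json())   # progress payload; exact shape depends on the service
```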
def get_too_few_non_zero_degree_day_warning(
model_type, balance_point, degree_day_type, degree_days, minimum_non_zero
):
""" Return an empty list or a single warning wrapped in a list regarding
non-zero degree days for a set of degree days.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
balance_point : :any:`float`
The balance point in question.
degree_day_type : :any:`str`
The type of degree days (``'cdd'`` or ``'hdd'``).
degree_days : :any:`pandas.Series`
A series of degree day values.
minimum_non_zero : :any:`int`
Minimum allowable number of non-zero degree day values.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
n_non_zero = int((degree_days > 0).sum())
if n_non_zero < minimum_non_zero:
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.too_few_non_zero_{degree_day_type}".format(
model_type=model_type, degree_day_type=degree_day_type
)
),
description=(
"Number of non-zero daily {degree_day_type} values below accepted minimum."
" Candidate fit not attempted.".format(
degree_day_type=degree_day_type.upper()
)
),
data={
"n_non_zero_{degree_day_type}".format(
degree_day_type=degree_day_type
): n_non_zero,
"minimum_non_zero_{degree_day_type}".format(
degree_day_type=degree_day_type
): minimum_non_zero,
"{degree_day_type}_balance_point".format(
degree_day_type=degree_day_type
): balance_point,
},
)
)
return warnings | Return an empty list or a single warning wrapped in a list regarding
non-zero degree days for a set of degree days.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
balance_point : :any:`float`
The balance point in question.
degree_day_type : :any:`str`
The type of degree days (``'cdd'`` or ``'hdd'``).
degree_days : :any:`pandas.Series`
A series of degree day values.
minimum_non_zero : :any:`int`
Minimum allowable number of non-zero degree day values.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning. | Below is the instruction that describes the task:
### Input:
Return an empty list or a single warning wrapped in a list regarding
non-zero degree days for a set of degree days.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
balance_point : :any:`float`
The balance point in question.
degree_day_type : :any:`str`
The type of degree days (``'cdd'`` or ``'hdd'``).
degree_days : :any:`pandas.Series`
A series of degree day values.
minimum_non_zero : :any:`int`
Minimum allowable number of non-zero degree day values.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
### Response:
def get_too_few_non_zero_degree_day_warning(
model_type, balance_point, degree_day_type, degree_days, minimum_non_zero
):
""" Return an empty list or a single warning wrapped in a list regarding
non-zero degree days for a set of degree days.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
balance_point : :any:`float`
The balance point in question.
degree_day_type : :any:`str`
The type of degree days (``'cdd'`` or ``'hdd'``).
degree_days : :any:`pandas.Series`
A series of degree day values.
minimum_non_zero : :any:`int`
Minimum allowable number of non-zero degree day values.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
n_non_zero = int((degree_days > 0).sum())
if n_non_zero < minimum_non_zero:
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.too_few_non_zero_{degree_day_type}".format(
model_type=model_type, degree_day_type=degree_day_type
)
),
description=(
"Number of non-zero daily {degree_day_type} values below accepted minimum."
" Candidate fit not attempted.".format(
degree_day_type=degree_day_type.upper()
)
),
data={
"n_non_zero_{degree_day_type}".format(
degree_day_type=degree_day_type
): n_non_zero,
"minimum_non_zero_{degree_day_type}".format(
degree_day_type=degree_day_type
): minimum_non_zero,
"{degree_day_type}_balance_point".format(
degree_day_type=degree_day_type
): balance_point,
},
)
)
return warnings |
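A self-contained sketch of the trigger condition, assuming this function and `EEMeterWarning` are importable from the package:
```python
import pandas as pd

# Only 3 non-zero CDD values against a minimum of 10 -> one warning is returned.
cdd = pd.Series([0.0] * 27 + [1.2, 3.4, 0.8])
warnings = get_too_few_non_zero_degree_day_warning(
    model_type="cdd_hdd",
    balance_point=65,
    degree_day_type="cdd",
    degree_days=cdd,
    minimum_non_zero=10,
)
print(len(warnings))  # -> 1
```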
def annotate_tree_properties(comments):
"""
iterate through nodes and adds some magic properties to each of them
representing opening list of children and closing it
"""
if not comments:
return
it = iter(comments)
# get the first item, this will fail if no items !
old = next(it)
# first item starts a new thread
old.open = True
last = set()
for c in it:
# if this comment has a parent, store its last child for future reference
if old.last_child_id:
last.add(old.last_child_id)
# this is the last child, mark it
if c.pk in last:
c.last = True
# increase the depth
if c.depth > old.depth:
c.open = True
else: # c.depth <= old.depth
# close some depths
old.close = list(range(old.depth - c.depth))
# new thread
if old.root_id != c.root_id:
# close even the top depth
old.close.append(len(old.close))
# and start a new thread
c.open = True
# empty the last set
last = set()
# iterate
yield old
old = c
old.close = range(old.depth)
yield old | iterate through nodes and adds some magic properties to each of them
representing opening list of children and closing it | Below is the instruction that describes the task:
### Input:
iterate through nodes and adds some magic properties to each of them
representing opening list of children and closing it
### Response:
def annotate_tree_properties(comments):
"""
iterate through nodes and adds some magic properties to each of them
representing opening list of children and closing it
"""
if not comments:
return
it = iter(comments)
# get the first item, this will fail if no items !
old = next(it)
# first item starts a new thread
old.open = True
last = set()
for c in it:
# if this comment has a parent, store its last child for future reference
if old.last_child_id:
last.add(old.last_child_id)
# this is the last child, mark it
if c.pk in last:
c.last = True
# increase the depth
if c.depth > old.depth:
c.open = True
else: # c.depth <= old.depth
# close some depths
old.close = list(range(old.depth - c.depth))
# new thread
if old.root_id != c.root_id:
# close even the top depth
old.close.append(len(old.close))
# and start a new thread
c.open = True
# empty the last set
last = set()
# iterate
yield old
old = c
old.close = range(old.depth)
yield old |
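A minimal sketch with stand-in comment objects (a hypothetical class, since the real ones come from the comment queryset) showing the `open`/`close`/`last` flags the generator attaches:
```python
class FakeComment:
    """Hypothetical stand-in exposing the attributes the generator reads."""
    def __init__(self, pk, depth, root_id, last_child_id=None):
        self.pk, self.depth = pk, depth
        self.root_id, self.last_child_id = root_id, last_child_id

thread = [FakeComment(1, 0, 1, last_child_id=2), FakeComment(2, 1, 1)]
for c in annotate_tree_properties(thread):
    print(c.pk, getattr(c, "open", False), list(getattr(c, "close", [])))
# -> 1 True []
# -> 2 True [0]
```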
def visit_function_inline(self, node):
"""Returns an GeneratedExpr for a function with the given body."""
# First pass collects the names of locals used in this function. Do this in
# a separate pass so that we know whether to resolve a name as a local or a
# global during the second pass.
func_visitor = block.FunctionBlockVisitor(node)
for child in node.body:
func_visitor.visit(child)
func_block = block.FunctionBlock(self.block, node.name, func_visitor.vars,
func_visitor.is_generator)
visitor = StatementVisitor(func_block, self.future_node)
# Indent so that the function body is aligned with the goto labels.
with visitor.writer.indent_block():
visitor._visit_each(node.body) # pylint: disable=protected-access
result = self.block.alloc_temp()
with self.block.alloc_temp('[]πg.Param') as func_args:
args = node.args
argc = len(args.args)
self.writer.write('{} = make([]πg.Param, {})'.format(
func_args.expr, argc))
# The list of defaults only contains args for which a default value is
# specified so pad it with None to make it the same length as args.
defaults = [None] * (argc - len(args.defaults)) + args.defaults
for i, (a, d) in enumerate(zip(args.args, defaults)):
with self.visit_expr(d) if d else expr.nil_expr as default:
tmpl = '$args[$i] = πg.Param{Name: $name, Def: $default}'
self.writer.write_tmpl(tmpl, args=func_args.expr, i=i,
name=util.go_str(a.arg), default=default.expr)
flags = []
if args.vararg:
flags.append('πg.CodeFlagVarArg')
if args.kwarg:
flags.append('πg.CodeFlagKWArg')
# The function object gets written to a temporary writer because we need
# it as an expression that we subsequently bind to some variable.
self.writer.write_tmpl(
'$result = πg.NewFunction(πg.NewCode($name, $filename, $args, '
'$flags, func(πF *πg.Frame, πArgs []*πg.Object) '
'(*πg.Object, *πg.BaseException) {',
result=result.name, name=util.go_str(node.name),
filename=util.go_str(self.block.root.filename), args=func_args.expr,
flags=' | '.join(flags) if flags else 0)
with self.writer.indent_block():
for var in func_block.vars.values():
if var.type != block.Var.TYPE_GLOBAL:
fmt = 'var {0} *πg.Object = {1}; _ = {0}'
self.writer.write(fmt.format(
util.adjust_local_name(var.name), var.init_expr))
self.writer.write_temp_decls(func_block)
self.writer.write('var πR *πg.Object; _ = πR')
self.writer.write('var πE *πg.BaseException; _ = πE')
if func_block.is_generator:
self.writer.write(
'return πg.NewGenerator(πF, func(πSent *πg.Object) '
'(*πg.Object, *πg.BaseException) {')
with self.writer.indent_block():
self.writer.write_block(func_block, visitor.writer.getvalue())
self.writer.write('return nil, πE')
self.writer.write('}).ToObject(), nil')
else:
self.writer.write_block(func_block, visitor.writer.getvalue())
self.writer.write(textwrap.dedent("""\
if πE != nil {
\tπR = nil
} else if πR == nil {
\tπR = πg.None
}
return πR, πE"""))
self.writer.write('}), πF.Globals()).ToObject()')
return result | Returns a GeneratedExpr for a function with the given body. | Below is the instruction that describes the task:
### Input:
Returns a GeneratedExpr for a function with the given body.
### Response:
def visit_function_inline(self, node):
"""Returns an GeneratedExpr for a function with the given body."""
# First pass collects the names of locals used in this function. Do this in
# a separate pass so that we know whether to resolve a name as a local or a
# global during the second pass.
func_visitor = block.FunctionBlockVisitor(node)
for child in node.body:
func_visitor.visit(child)
func_block = block.FunctionBlock(self.block, node.name, func_visitor.vars,
func_visitor.is_generator)
visitor = StatementVisitor(func_block, self.future_node)
# Indent so that the function body is aligned with the goto labels.
with visitor.writer.indent_block():
visitor._visit_each(node.body) # pylint: disable=protected-access
result = self.block.alloc_temp()
with self.block.alloc_temp('[]πg.Param') as func_args:
args = node.args
argc = len(args.args)
self.writer.write('{} = make([]πg.Param, {})'.format(
func_args.expr, argc))
# The list of defaults only contains args for which a default value is
# specified so pad it with None to make it the same length as args.
defaults = [None] * (argc - len(args.defaults)) + args.defaults
for i, (a, d) in enumerate(zip(args.args, defaults)):
with self.visit_expr(d) if d else expr.nil_expr as default:
tmpl = '$args[$i] = πg.Param{Name: $name, Def: $default}'
self.writer.write_tmpl(tmpl, args=func_args.expr, i=i,
name=util.go_str(a.arg), default=default.expr)
flags = []
if args.vararg:
flags.append('πg.CodeFlagVarArg')
if args.kwarg:
flags.append('πg.CodeFlagKWArg')
# The function object gets written to a temporary writer because we need
# it as an expression that we subsequently bind to some variable.
self.writer.write_tmpl(
'$result = πg.NewFunction(πg.NewCode($name, $filename, $args, '
'$flags, func(πF *πg.Frame, πArgs []*πg.Object) '
'(*πg.Object, *πg.BaseException) {',
result=result.name, name=util.go_str(node.name),
filename=util.go_str(self.block.root.filename), args=func_args.expr,
flags=' | '.join(flags) if flags else 0)
with self.writer.indent_block():
for var in func_block.vars.values():
if var.type != block.Var.TYPE_GLOBAL:
fmt = 'var {0} *πg.Object = {1}; _ = {0}'
self.writer.write(fmt.format(
util.adjust_local_name(var.name), var.init_expr))
self.writer.write_temp_decls(func_block)
self.writer.write('var πR *πg.Object; _ = πR')
self.writer.write('var πE *πg.BaseException; _ = πE')
if func_block.is_generator:
self.writer.write(
'return πg.NewGenerator(πF, func(πSent *πg.Object) '
'(*πg.Object, *πg.BaseException) {')
with self.writer.indent_block():
self.writer.write_block(func_block, visitor.writer.getvalue())
self.writer.write('return nil, πE')
self.writer.write('}).ToObject(), nil')
else:
self.writer.write_block(func_block, visitor.writer.getvalue())
self.writer.write(textwrap.dedent("""\
if πE != nil {
\tπR = nil
} else if πR == nil {
\tπR = πg.None
}
return πR, πE"""))
self.writer.write('}), πF.Globals()).ToObject()')
return result |
def is_bool_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a boolean dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a boolean dtype.
Notes
-----
An ExtensionArray is considered boolean when the ``_is_boolean``
attribute is set to True.
Examples
--------
>>> is_bool_dtype(str)
False
>>> is_bool_dtype(int)
False
>>> is_bool_dtype(bool)
True
>>> is_bool_dtype(np.bool)
True
>>> is_bool_dtype(np.array(['a', 'b']))
False
>>> is_bool_dtype(pd.Series([1, 2]))
False
>>> is_bool_dtype(np.array([True, False]))
True
>>> is_bool_dtype(pd.Categorical([True, False]))
True
>>> is_bool_dtype(pd.SparseArray([True, False]))
True
"""
if arr_or_dtype is None:
return False
try:
dtype = _get_dtype(arr_or_dtype)
except TypeError:
return False
if isinstance(arr_or_dtype, CategoricalDtype):
arr_or_dtype = arr_or_dtype.categories
# now we use the special definition for Index
if isinstance(arr_or_dtype, ABCIndexClass):
# TODO(jreback)
# we don't have a boolean Index class
# so its object, we need to infer to
# guess this
return (arr_or_dtype.is_object and
arr_or_dtype.inferred_type == 'boolean')
elif is_extension_array_dtype(arr_or_dtype):
dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype)
return dtype._is_boolean
return issubclass(dtype.type, np.bool_) | Check whether the provided array or dtype is of a boolean dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a boolean dtype.
Notes
-----
An ExtensionArray is considered boolean when the ``_is_boolean``
attribute is set to True.
Examples
--------
>>> is_bool_dtype(str)
False
>>> is_bool_dtype(int)
False
>>> is_bool_dtype(bool)
True
>>> is_bool_dtype(np.bool)
True
>>> is_bool_dtype(np.array(['a', 'b']))
False
>>> is_bool_dtype(pd.Series([1, 2]))
False
>>> is_bool_dtype(np.array([True, False]))
True
>>> is_bool_dtype(pd.Categorical([True, False]))
True
>>> is_bool_dtype(pd.SparseArray([True, False]))
True | Below is the instruction that describes the task:
### Input:
Check whether the provided array or dtype is of a boolean dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a boolean dtype.
Notes
-----
An ExtensionArray is considered boolean when the ``_is_boolean``
attribute is set to True.
Examples
--------
>>> is_bool_dtype(str)
False
>>> is_bool_dtype(int)
False
>>> is_bool_dtype(bool)
True
>>> is_bool_dtype(np.bool)
True
>>> is_bool_dtype(np.array(['a', 'b']))
False
>>> is_bool_dtype(pd.Series([1, 2]))
False
>>> is_bool_dtype(np.array([True, False]))
True
>>> is_bool_dtype(pd.Categorical([True, False]))
True
>>> is_bool_dtype(pd.SparseArray([True, False]))
True
### Response:
def is_bool_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a boolean dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a boolean dtype.
Notes
-----
An ExtensionArray is considered boolean when the ``_is_boolean``
attribute is set to True.
Examples
--------
>>> is_bool_dtype(str)
False
>>> is_bool_dtype(int)
False
>>> is_bool_dtype(bool)
True
>>> is_bool_dtype(np.bool)
True
>>> is_bool_dtype(np.array(['a', 'b']))
False
>>> is_bool_dtype(pd.Series([1, 2]))
False
>>> is_bool_dtype(np.array([True, False]))
True
>>> is_bool_dtype(pd.Categorical([True, False]))
True
>>> is_bool_dtype(pd.SparseArray([True, False]))
True
"""
if arr_or_dtype is None:
return False
try:
dtype = _get_dtype(arr_or_dtype)
except TypeError:
return False
if isinstance(arr_or_dtype, CategoricalDtype):
arr_or_dtype = arr_or_dtype.categories
# now we use the special definition for Index
if isinstance(arr_or_dtype, ABCIndexClass):
# TODO(jreback)
# we don't have a boolean Index class
# so its object, we need to infer to
# guess this
return (arr_or_dtype.is_object and
arr_or_dtype.inferred_type == 'boolean')
elif is_extension_array_dtype(arr_or_dtype):
dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype)
return dtype._is_boolean
return issubclass(dtype.type, np.bool_) |
def hbas(self):
"""
:class:`~zhmcclient.HbaManager`: Access to the :term:`HBAs <HBA>` in
this Partition.
If the "dpm-storage-management" feature is enabled, this property is
`None`.
"""
# We do here some lazy loading.
if not self._hbas:
try:
dpm_sm = self.feature_enabled('dpm-storage-management')
except ValueError:
dpm_sm = False
if not dpm_sm:
self._hbas = HbaManager(self)
return self._hbas | :class:`~zhmcclient.HbaManager`: Access to the :term:`HBAs <HBA>` in
this Partition.
If the "dpm-storage-management" feature is enabled, this property is
`None`. | Below is the instruction that describes the task:
### Input:
:class:`~zhmcclient.HbaManager`: Access to the :term:`HBAs <HBA>` in
this Partition.
If the "dpm-storage-management" feature is enabled, this property is
`None`.
### Response:
def hbas(self):
"""
:class:`~zhmcclient.HbaManager`: Access to the :term:`HBAs <HBA>` in
this Partition.
If the "dpm-storage-management" feature is enabled, this property is
`None`.
"""
# We do here some lazy loading.
if not self._hbas:
try:
dpm_sm = self.feature_enabled('dpm-storage-management')
except ValueError:
dpm_sm = False
if not dpm_sm:
self._hbas = HbaManager(self)
return self._hbas |
def annotation(self, type, set=None):
"""Obtain a single annotation element.
A further restriction can be made based on set.
Arguments:
Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement`
Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned.
Returns:
An element (instance derived from :class:`AbstractElement`)
Example::
sense = word.annotation(folia.Sense, 'http://some/path/cornetto').cls
See also:
:meth:`AllowTokenAnnotation.annotations`
:meth:`AbstractElement.select`
Raises:
:class:`NoSuchAnnotation` if no such annotation exists
"""
"""Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found"""
for e in self.select(type,set,True,default_ignore_annotations):
return e
raise NoSuchAnnotation() | Obtain a single annotation element.
A further restriction can be made based on set.
Arguments:
Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement`
Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned.
Returns:
An element (instance derived from :class:`AbstractElement`)
Example::
sense = word.annotation(folia.Sense, 'http://some/path/cornetto').cls
See also:
:meth:`AllowTokenAnnotation.annotations`
:meth:`AbstractElement.select`
Raises:
:class:`NoSuchAnnotation` if no such annotation exists | Below is the instruction that describes the task:
### Input:
Obtain a single annotation element.
A further restriction can be made based on set.
Arguments:
Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement`
Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned.
Returns:
An element (instance derived from :class:`AbstractElement`)
Example::
sense = word.annotation(folia.Sense, 'http://some/path/cornetto').cls
See also:
:meth:`AllowTokenAnnotation.annotations`
:meth:`AbstractElement.select`
Raises:
:class:`NoSuchAnnotation` if no such annotation exists
### Response:
def annotation(self, type, set=None):
"""Obtain a single annotation element.
A further restriction can be made based on set.
Arguments:
Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement`
Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned.
Returns:
An element (instance derived from :class:`AbstractElement`)
Example::
sense = word.annotation(folia.Sense, 'http://some/path/cornetto').cls
See also:
:meth:`AllowTokenAnnotation.annotations`
:meth:`AbstractElement.select`
Raises:
:class:`NoSuchAnnotation` if no such annotation exists
"""
"""Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found"""
for e in self.select(type,set,True,default_ignore_annotations):
return e
raise NoSuchAnnotation() |
def brightness(self, brightness):
"""
Build command for setting the brightness of the led.
:param brightness: Value to set (0.0-1.0).
:return: The command.
"""
return self._build_command(0x4E, self.convert_brightness(brightness),
select=True, select_command=self.on()) | Build command for setting the brightness of the led.
:param brightness: Value to set (0.0-1.0).
:return: The command. | Below is the instruction that describes the task:
### Input:
Build command for setting the brightness of the led.
:param brightness: Value to set (0.0-1.0).
:return: The command.
### Response:
def brightness(self, brightness):
"""
Build command for setting the brightness of the led.
:param brightness: Value to set (0.0-1.0).
:return: The command.
"""
return self._build_command(0x4E, self.convert_brightness(brightness),
select=True, select_command=self.on()) |
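A one-line usage sketch, assuming `led` is an instance of the command-builder class above:
```python
# Build (but do not send) a 50% brightness command; _build_command pairs the
# 0x4E brightness byte with a preceding on()/group-select command, as select=True.
cmd = led.brightness(0.5)
```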
def load_backends(config, callback, internal_attributes):
"""
Load all backend modules specified in the config
:type config: satosa.satosa_config.SATOSAConfig
:type callback:
(satosa.context.Context, satosa.internal.InternalData) -> satosa.response.Response
:type internal_attributes: dict[string, dict[str, str | list[str]]]
:rtype: Sequence[satosa.backends.base.BackendModule]
:param config: The configuration of the satosa proxy
:param callback: Function that will be called by the backend after the authentication is done.
:return: A list of backend modules
"""
backend_modules = _load_plugins(config.get("CUSTOM_PLUGIN_MODULE_PATHS"), config["BACKEND_MODULES"], backend_filter,
config["BASE"], internal_attributes, callback)
logger.info("Setup backends: %s" % [backend.name for backend in backend_modules])
return backend_modules | Load all backend modules specified in the config
:type config: satosa.satosa_config.SATOSAConfig
:type callback:
(satosa.context.Context, satosa.internal.InternalData) -> satosa.response.Response
:type internal_attributes: dict[string, dict[str, str | list[str]]]
:rtype: Sequence[satosa.backends.base.BackendModule]
:param config: The configuration of the satosa proxy
:param callback: Function that will be called by the backend after the authentication is done.
:return: A list of backend modules | Below is the instruction that describes the task:
### Input:
Load all backend modules specified in the config
:type config: satosa.satosa_config.SATOSAConfig
:type callback:
(satosa.context.Context, satosa.internal.InternalData) -> satosa.response.Response
:type internal_attributes: dict[string, dict[str, str | list[str]]]
:rtype: Sequence[satosa.backends.base.BackendModule]
:param config: The configuration of the satosa proxy
:param callback: Function that will be called by the backend after the authentication is done.
:return: A list of backend modules
### Response:
def load_backends(config, callback, internal_attributes):
"""
Load all backend modules specified in the config
:type config: satosa.satosa_config.SATOSAConfig
:type callback:
(satosa.context.Context, satosa.internal.InternalData) -> satosa.response.Response
:type internal_attributes: dict[string, dict[str, str | list[str]]]
:rtype: Sequence[satosa.backends.base.BackendModule]
:param config: The configuration of the satosa proxy
:param callback: Function that will be called by the backend after the authentication is done.
:return: A list of backend modules
"""
backend_modules = _load_plugins(config.get("CUSTOM_PLUGIN_MODULE_PATHS"), config["BACKEND_MODULES"], backend_filter,
config["BASE"], internal_attributes, callback)
logger.info("Setup backends: %s" % [backend.name for backend in backend_modules])
return backend_modules |
def hincrbyfloat(self, key, field, increment=1.0):
"""Increment the float value of a hash field by the given number."""
fut = self.execute(b'HINCRBYFLOAT', key, field, increment)
return wait_convert(fut, float) | Increment the float value of a hash field by the given number. | Below is the instruction that describes the task:
### Input:
Increment the float value of a hash field by the given number.
### Response:
def hincrbyfloat(self, key, field, increment=1.0):
"""Increment the float value of a hash field by the given number."""
fut = self.execute(b'HINCRBYFLOAT', key, field, increment)
return wait_convert(fut, float) |
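A coroutine sketch assuming an aioredis-style client object exposing this mixin; the key and field names are placeholders.
```python
async def bump_temperature(redis):
    # Sketch only: requires an already connected client.
    await redis.hset("sensor:1", "temp", 20.0)
    new_val = await redis.hincrbyfloat("sensor:1", "temp", 1.5)
    print(new_val)  # -> 21.5, converted to float by wait_convert
```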
def run_server(self):
"""
Runs the WebSocket server
"""
self.protocol = MeaseWebSocketServerProtocol
reactor.listenTCP(port=self.port, factory=self, interface=self.host)
logger.info("Websocket server listening on {address}".format(
address=self.address))
reactor.run() | Runs the WebSocket server | Below is the instruction that describes the task:
### Input:
Runs the WebSocket server
### Response:
def run_server(self):
"""
Runs the WebSocket server
"""
self.protocol = MeaseWebSocketServerProtocol
reactor.listenTCP(port=self.port, factory=self, interface=self.host)
logger.info("Websocket server listening on {address}".format(
address=self.address))
reactor.run() |
def _parse_string_host(host_str):
"""
Parse host string into a dictionary host
:param host_str:
:return:
"""
host_str = EsParser._fix_host_prefix(host_str)
parsed_url = urlparse(host_str)
host = {HostParsing.HOST: parsed_url.hostname}
if parsed_url.port:
host[HostParsing.PORT] = parsed_url.port
if parsed_url.scheme == HostParsing.HTTPS:
host[HostParsing.PORT] = parsed_url.port or EsParser.SSL_DEFAULT_PORT
host[HostParsing.USE_SSL] = True
host[HostParsing.SCHEME] = HostParsing.HTTPS
elif parsed_url.scheme:
host[HostParsing.SCHEME] = parsed_url.scheme
if parsed_url.username or parsed_url.password:
host[HostParsing.HTTP_AUTH] = '%s:%s' % (parsed_url.username, parsed_url.password)
if parsed_url.path and parsed_url.path != '/':
host[HostParsing.URL_PREFIX] = parsed_url.path
return host | Parse host string into a dictionary host
:param host_str:
:return: | Below is the instruction that describes the task:
### Input:
Parse host string into a dictionary host
:param host_str:
:return:
### Response:
def _parse_string_host(host_str):
"""
Parse host string into a dictionary host
:param host_str:
:return:
"""
host_str = EsParser._fix_host_prefix(host_str)
parsed_url = urlparse(host_str)
host = {HostParsing.HOST: parsed_url.hostname}
if parsed_url.port:
host[HostParsing.PORT] = parsed_url.port
if parsed_url.scheme == HostParsing.HTTPS:
host[HostParsing.PORT] = parsed_url.port or EsParser.SSL_DEFAULT_PORT
host[HostParsing.USE_SSL] = True
host[HostParsing.SCHEME] = HostParsing.HTTPS
elif parsed_url.scheme:
host[HostParsing.SCHEME] = parsed_url.scheme
if parsed_url.username or parsed_url.password:
host[HostParsing.HTTP_AUTH] = '%s:%s' % (parsed_url.username, parsed_url.password)
if parsed_url.path and parsed_url.path != '/':
host[HostParsing.URL_PREFIX] = parsed_url.path
return host |
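A sketch of the resulting dict for a fully qualified URL; this assumes the method is exposed as a static helper on `EsParser`, and the key strings shown are only a guess at the values of the `HostParsing` constants.
```python
# Sketch: parse an https URL carrying credentials, an explicit port and a prefix.
host = EsParser._parse_string_host("https://user:secret@es.example.com:9243/proxy")
# Roughly (assuming the HostParsing constants are the usual lowercase strings):
# {'host': 'es.example.com', 'port': 9243, 'use_ssl': True, 'scheme': 'https',
#  'http_auth': 'user:secret', 'url_prefix': '/proxy'}
```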
def _make_reserved_tokens_re(reserved_tokens):
"""Constructs compiled regex to parse out reserved tokens."""
if not reserved_tokens:
return None
escaped_tokens = [_re_escape(rt) for rt in reserved_tokens]
pattern = "(%s)" % "|".join(escaped_tokens)
reserved_tokens_re = _re_compile(pattern)
return reserved_tokens_re | Constructs compiled regex to parse out reserved tokens. | Below is the instruction that describes the task:
### Input:
Constructs compiled regex to parse out reserved tokens.
### Response:
def _make_reserved_tokens_re(reserved_tokens):
"""Constructs compiled regex to parse out reserved tokens."""
if not reserved_tokens:
return None
escaped_tokens = [_re_escape(rt) for rt in reserved_tokens]
pattern = "(%s)" % "|".join(escaped_tokens)
reserved_tokens_re = _re_compile(pattern)
return reserved_tokens_re |
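A small sketch of what the compiled pattern does, using the standard `re` module directly (the `_re_escape`/`_re_compile` helpers are assumed to be thin wrappers around it):
```python
import re

reserved = ["<pad>", "<EOS>"]
pattern = "(%s)" % "|".join(re.escape(t) for t in reserved)  # what the helper builds
print(re.split(pattern, "hello world<EOS>"))
# -> ['hello world', '<EOS>', '']
```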
def reduced_chi2(self, model, error_map=0):
"""
returns reduced chi2
:param model:
:param error_map:
:return:
"""
chi2 = self.reduced_residuals(model, error_map)
return np.sum(chi2**2) / self.num_data_evaluate() | returns reduced chi2
:param model:
:param error_map:
:return: | Below is the instruction that describes the task:
### Input:
returns reduced chi2
:param model:
:param error_map:
:return:
### Response:
def reduced_chi2(self, model, error_map=0):
"""
returns reduced chi2
:param model:
:param error_map:
:return:
"""
chi2 = self.reduced_residuals(model, error_map)
return np.sum(chi2**2) / self.num_data_evaluate() |
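The arithmetic being summarised is simply the sum of squared noise-normalised residuals divided by the number of data points; a toy sketch of that arithmetic only (the real call needs an imaging data instance):
```python
import numpy as np

r = np.array([0.5, -1.0, 2.0])       # toy normalised residuals
print(np.sum(r ** 2) / r.size)       # -> 1.75, i.e. sum(r_i**2) / N_data
```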
def _validate_and_get_value(options, options_name, key, _type):
"""
Check that `options` has a value for `key` with type
`_type`. Return that value. `options_name` is a string representing a
human-readable name for `options` to be used when printing errors.
"""
if isinstance(options, dict):
has = lambda k: k in options
get = lambda k: options[k]
elif isinstance(options, object):
has = lambda k: hasattr(options, k)
get = lambda k: getattr(options, k)
else:
raise ImproperlyConfigured(
'`{}` must be a dictionary-like object.'.format(options_name))
if not has(key):
raise ImproperlyConfigured(
'`{}` must be specified in `{}`'.format(key, options_name))
value = get(key)
if not isinstance(value, _type):
raise ImproperlyConfigured(
'`{}` in `{}` must be a {}'.format(key, options_name, repr(_type)))
return value | Check that `options` has a value for `key` with type
`_type`. Return that value. `options_name` is a string representing a
human-readable name for `options` to be used when printing errors. | Below is the instruction that describes the task:
### Input:
Check that `options` has a value for `key` with type
`_type`. Return that value. `options_name` is a string representing a
human-readable name for `options` to be used when printing errors.
### Response:
def _validate_and_get_value(options, options_name, key, _type):
"""
Check that `options` has a value for `key` with type
`_type`. Return that value. `options_name` is a string representing a
human-readable name for `options` to be used when printing errors.
"""
if isinstance(options, dict):
has = lambda k: k in options
get = lambda k: options[k]
elif isinstance(options, object):
has = lambda k: hasattr(options, k)
get = lambda k: getattr(options, k)
else:
raise ImproperlyConfigured(
'`{}` must be a dictionary-like object.'.format(options_name))
if not has(key):
raise ImproperlyConfigured(
'`{}` must be specified in `{}`'.format(key, options_name))
value = get(key)
if not isinstance(value, _type):
raise ImproperlyConfigured(
'`{}` in `{}` must be a {}'.format(key, options_name, repr(_type)))
return value |
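A sketch of the dict branch; the option names are placeholders, and a missing key or wrong type raises `ImproperlyConfigured` instead of returning.
```python
# Hypothetical options mapping; attribute-style objects work the same way.
opts = {"JWT_SECRET": "s3cr3t", "JWT_TTL": 3600}
ttl = _validate_and_get_value(opts, "MY_OPTIONS", "JWT_TTL", int)
print(ttl)  # -> 3600
```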
def mouseMoveEvent(self, ev):
"""Update line with last point and current coordinates."""
pos = self.transformPos(ev.pos())
# Update coordinates in status bar if image is opened
window = self.parent().window()
if window.filePath is not None:
self.parent().window().labelCoordinates.setText(
'X: %d; Y: %d' % (pos.x(), pos.y()))
# Polygon drawing.
if self.drawing():
self.overrideCursor(CURSOR_DRAW)
if self.current:
color = self.drawingLineColor
if self.outOfPixmap(pos):
# Don't allow the user to draw outside the pixmap.
# Project the point to the pixmap's edges.
pos = self.intersectionPoint(self.current[-1], pos)
elif len(self.current) > 1 and self.closeEnough(pos, self.current[0]):
# Attract line to starting point and colorise to alert the
# user:
pos = self.current[0]
color = self.current.line_color
self.overrideCursor(CURSOR_POINT)
self.current.highlightVertex(0, Shape.NEAR_VERTEX)
if self.drawSquare:
initPos = self.current[0]
minX = initPos.x()
minY = initPos.y()
min_size = min(abs(pos.x() - minX), abs(pos.y() - minY))
directionX = -1 if pos.x() - minX < 0 else 1
directionY = -1 if pos.y() - minY < 0 else 1
self.line[1] = QPointF(minX + directionX * min_size, minY + directionY * min_size)
else:
self.line[1] = pos
self.line.line_color = color
self.prevPoint = QPointF()
self.current.highlightClear()
else:
self.prevPoint = pos
self.repaint()
return
# Polygon copy moving.
if Qt.RightButton & ev.buttons():
if self.selectedShapeCopy and self.prevPoint:
self.overrideCursor(CURSOR_MOVE)
self.boundedMoveShape(self.selectedShapeCopy, pos)
self.repaint()
elif self.selectedShape:
self.selectedShapeCopy = self.selectedShape.copy()
self.repaint()
return
# Polygon/Vertex moving.
if Qt.LeftButton & ev.buttons():
if self.selectedVertex():
self.boundedMoveVertex(pos)
self.shapeMoved.emit()
self.repaint()
elif self.selectedShape and self.prevPoint:
self.overrideCursor(CURSOR_MOVE)
self.boundedMoveShape(self.selectedShape, pos)
self.shapeMoved.emit()
self.repaint()
return
# Just hovering over the canvas, 2 possibilities:
# - Highlight shapes
# - Highlight vertex
# Update shape/vertex fill and tooltip value accordingly.
self.setToolTip("Image")
for shape in reversed([s for s in self.shapes if self.isVisible(s)]):
# Look for a nearby vertex to highlight. If that fails,
# check if we happen to be inside a shape.
index = shape.nearestVertex(pos, self.epsilon)
if index is not None:
if self.selectedVertex():
self.hShape.highlightClear()
self.hVertex, self.hShape = index, shape
shape.highlightVertex(index, shape.MOVE_VERTEX)
self.overrideCursor(CURSOR_POINT)
self.setToolTip("Click & drag to move point")
self.setStatusTip(self.toolTip())
self.update()
break
elif shape.containsPoint(pos):
if self.selectedVertex():
self.hShape.highlightClear()
self.hVertex, self.hShape = None, shape
self.setToolTip(
"Click & drag to move shape '%s'" % shape.label)
self.setStatusTip(self.toolTip())
self.overrideCursor(CURSOR_GRAB)
self.update()
break
else: # Nothing found, clear highlights, reset state.
if self.hShape:
self.hShape.highlightClear()
self.update()
self.hVertex, self.hShape = None, None
self.overrideCursor(CURSOR_DEFAULT) | Update line with last point and current coordinates. | Below is the instruction that describes the task:
### Input:
Update line with last point and current coordinates.
### Response:
def mouseMoveEvent(self, ev):
"""Update line with last point and current coordinates."""
pos = self.transformPos(ev.pos())
# Update coordinates in status bar if image is opened
window = self.parent().window()
if window.filePath is not None:
self.parent().window().labelCoordinates.setText(
'X: %d; Y: %d' % (pos.x(), pos.y()))
# Polygon drawing.
if self.drawing():
self.overrideCursor(CURSOR_DRAW)
if self.current:
color = self.drawingLineColor
if self.outOfPixmap(pos):
# Don't allow the user to draw outside the pixmap.
# Project the point to the pixmap's edges.
pos = self.intersectionPoint(self.current[-1], pos)
elif len(self.current) > 1 and self.closeEnough(pos, self.current[0]):
# Attract line to starting point and colorise to alert the
# user:
pos = self.current[0]
color = self.current.line_color
self.overrideCursor(CURSOR_POINT)
self.current.highlightVertex(0, Shape.NEAR_VERTEX)
if self.drawSquare:
initPos = self.current[0]
minX = initPos.x()
minY = initPos.y()
min_size = min(abs(pos.x() - minX), abs(pos.y() - minY))
directionX = -1 if pos.x() - minX < 0 else 1
directionY = -1 if pos.y() - minY < 0 else 1
self.line[1] = QPointF(minX + directionX * min_size, minY + directionY * min_size)
else:
self.line[1] = pos
self.line.line_color = color
self.prevPoint = QPointF()
self.current.highlightClear()
else:
self.prevPoint = pos
self.repaint()
return
# Polygon copy moving.
if Qt.RightButton & ev.buttons():
if self.selectedShapeCopy and self.prevPoint:
self.overrideCursor(CURSOR_MOVE)
self.boundedMoveShape(self.selectedShapeCopy, pos)
self.repaint()
elif self.selectedShape:
self.selectedShapeCopy = self.selectedShape.copy()
self.repaint()
return
# Polygon/Vertex moving.
if Qt.LeftButton & ev.buttons():
if self.selectedVertex():
self.boundedMoveVertex(pos)
self.shapeMoved.emit()
self.repaint()
elif self.selectedShape and self.prevPoint:
self.overrideCursor(CURSOR_MOVE)
self.boundedMoveShape(self.selectedShape, pos)
self.shapeMoved.emit()
self.repaint()
return
# Just hovering over the canvas, 2 possibilities:
# - Highlight shapes
# - Highlight vertex
# Update shape/vertex fill and tooltip value accordingly.
self.setToolTip("Image")
for shape in reversed([s for s in self.shapes if self.isVisible(s)]):
# Look for a nearby vertex to highlight. If that fails,
# check if we happen to be inside a shape.
index = shape.nearestVertex(pos, self.epsilon)
if index is not None:
if self.selectedVertex():
self.hShape.highlightClear()
self.hVertex, self.hShape = index, shape
shape.highlightVertex(index, shape.MOVE_VERTEX)
self.overrideCursor(CURSOR_POINT)
self.setToolTip("Click & drag to move point")
self.setStatusTip(self.toolTip())
self.update()
break
elif shape.containsPoint(pos):
if self.selectedVertex():
self.hShape.highlightClear()
self.hVertex, self.hShape = None, shape
self.setToolTip(
"Click & drag to move shape '%s'" % shape.label)
self.setStatusTip(self.toolTip())
self.overrideCursor(CURSOR_GRAB)
self.update()
break
else: # Nothing found, clear highlights, reset state.
if self.hShape:
self.hShape.highlightClear()
self.update()
self.hVertex, self.hShape = None, None
self.overrideCursor(CURSOR_DEFAULT) |
def grating_coupler_period(wavelength,
n_eff,
n_clad,
incidence_angle_deg,
diffration_order=1):
'''
Calculate the period needed for a grating coupler.
Args:
wavelength (float): The target wavelength for the
grating coupler.
n_eff (float): The effective index of the mode
of a waveguide with the width of the grating
coupler.
n_clad (float): The refractive index of the cladding.
incidence_angle_deg (float): The incidence angle
the grating coupler should operate at [degrees].
diffration_order (int): The grating order the coupler
should work at. Default is 1st order (1).
Returns:
float: The period needed for the grating coupler
in the same units as the wavelength was given at.
'''
k0 = 2. * np.pi / wavelength
beta = n_eff.real * k0
n_inc = n_clad
grating_period = (2.*np.pi*diffration_order) \
/ (beta - k0*n_inc*np.sin(np.radians(incidence_angle_deg)))
return grating_period | Calculate the period needed for a grating coupler.
Args:
wavelength (float): The target wavelength for the
grating coupler.
n_eff (float): The effective index of the mode
of a waveguide with the width of the grating
coupler.
n_clad (float): The refractive index of the cladding.
incidence_angle_deg (float): The incidence angle
the grating coupler should operate at [degrees].
diffration_order (int): The grating order the coupler
should work at. Default is 1st order (1).
Returns:
float: The period needed for the grating coupler
in the same units as the wavelength was given at. | Below is the instruction that describes the task:
### Input:
Calculate the period needed for a grating coupler.
Args:
wavelength (float): The target wavelength for the
grating coupler.
n_eff (float): The effective index of the mode
of a waveguide with the width of the grating
coupler.
n_clad (float): The refractive index of the cladding.
incidence_angle_deg (float): The incidence angle
the grating coupler should operate at [degrees].
diffration_order (int): The grating order the coupler
should work at. Default is 1st order (1).
Returns:
float: The period needed for the grating coupler
in the same units as the wavelength was given at.
### Response:
def grating_coupler_period(wavelength,
n_eff,
n_clad,
incidence_angle_deg,
diffration_order=1):
'''
Calculate the period needed for a grating coupler.
Args:
wavelength (float): The target wavelength for the
grating coupler.
n_eff (float): The effective index of the mode
of a waveguide with the width of the grating
coupler.
n_clad (float): The refractive index of the cladding.
incidence_angle_deg (float): The incidence angle
the grating coupler should operate at [degrees].
diffration_order (int): The grating order the coupler
should work at. Default is 1st order (1).
Returns:
float: The period needed for the grating coupler
in the same units as the wavelength was given at.
'''
k0 = 2. * np.pi / wavelength
beta = n_eff.real * k0
n_inc = n_clad
grating_period = (2.*np.pi*diffration_order) \
/ (beta - k0*n_inc*np.sin(np.radians(incidence_angle_deg)))
return grating_period |
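A quick numeric check of the function above, as a minimal sketch: the wavelength, effective index, cladding index and incidence angle below are illustrative values chosen for this example, not values taken from the source.
# Illustrative inputs (assumed, not from the source): 1550 nm target wavelength,
# an effective index near 2.85, a silica cladding, and a 10 degree coupling angle.
wavelength_um = 1.55
n_eff = 2.85
n_clad = 1.444
angle_deg = 10.0

period = grating_coupler_period(wavelength_um, n_eff, n_clad, angle_deg)
# Closed form: period = m * wavelength / (n_eff - n_clad * sin(angle)), ~0.6 um here
print('grating period: %.3f um' % period)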
def dump_data(request):
"""Exports data from whole project.
"""
# Try to grab app_label data
app_label = request.GET.get('app_label', [])
if app_label:
app_label = app_label.split(',')
return dump_to_response(request, app_label=app_label,
exclude=settings.SMUGGLER_EXCLUDE_LIST) | Exports data from whole project. | Below is the instruction that describes the task:
### Input:
Exports data from whole project.
### Response:
def dump_data(request):
"""Exports data from whole project.
"""
# Try to grab app_label data
app_label = request.GET.get('app_label', [])
if app_label:
app_label = app_label.split(',')
return dump_to_response(request, app_label=app_label,
exclude=settings.SMUGGLER_EXCLUDE_LIST) |
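For orientation, a hedged sketch of how a view like this is usually wired into a URLconf; the URL pattern and module path below are assumptions for the example, and only the comma-separated app_label query convention comes from the code above.
# Hypothetical urls.py wiring for the view above.
from django.urls import path

from smuggler.views import dump_data  # module path assumed for this sketch

urlpatterns = [
    # GET /dump/?app_label=auth,sites -> fixture limited to those two apps
    path('dump/', dump_data, name='dump-data'),
]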
def send(self, event):
"""Convert a high-level event into bytes that can be sent to the peer,
while updating our internal state machine.
Args:
event: The :ref:`event <events>` to send.
Returns:
If ``type(event) is ConnectionClosed``, then returns
``None``. Otherwise, returns a :term:`bytes-like object`.
Raises:
LocalProtocolError:
Sending this event at this time would violate our
understanding of the HTTP/1.1 protocol.
If this method raises any exception then it also sets
:attr:`Connection.our_state` to :data:`ERROR` -- see
:ref:`error-handling` for discussion.
"""
data_list = self.send_with_data_passthrough(event)
if data_list is None:
return None
else:
return b"".join(data_list) | Convert a high-level event into bytes that can be sent to the peer,
while updating our internal state machine.
Args:
event: The :ref:`event <events>` to send.
Returns:
If ``type(event) is ConnectionClosed``, then returns
``None``. Otherwise, returns a :term:`bytes-like object`.
Raises:
LocalProtocolError:
Sending this event at this time would violate our
understanding of the HTTP/1.1 protocol.
If this method raises any exception then it also sets
:attr:`Connection.our_state` to :data:`ERROR` -- see
:ref:`error-handling` for discussion. | Below is the instruction that describes the task:
### Input:
Convert a high-level event into bytes that can be sent to the peer,
while updating our internal state machine.
Args:
event: The :ref:`event <events>` to send.
Returns:
If ``type(event) is ConnectionClosed``, then returns
``None``. Otherwise, returns a :term:`bytes-like object`.
Raises:
LocalProtocolError:
Sending this event at this time would violate our
understanding of the HTTP/1.1 protocol.
If this method raises any exception then it also sets
:attr:`Connection.our_state` to :data:`ERROR` -- see
:ref:`error-handling` for discussion.
### Response:
def send(self, event):
"""Convert a high-level event into bytes that can be sent to the peer,
while updating our internal state machine.
Args:
event: The :ref:`event <events>` to send.
Returns:
If ``type(event) is ConnectionClosed``, then returns
``None``. Otherwise, returns a :term:`bytes-like object`.
Raises:
LocalProtocolError:
Sending this event at this time would violate our
understanding of the HTTP/1.1 protocol.
If this method raises any exception then it also sets
:attr:`Connection.our_state` to :data:`ERROR` -- see
:ref:`error-handling` for discussion.
"""
data_list = self.send_with_data_passthrough(event)
if data_list is None:
return None
else:
return b"".join(data_list) |
def onConnsChanged(self, joined: Set[str], left: Set[str]):
"""
A series of operations to perform once a connection count has changed.
- Set f to max number of failures this system can handle.
- Set status to one of started, started_hungry or starting depending on
the number of protocol instances.
- Check protocol instances. See `checkInstances()`
"""
_prev_status = self.status
if self.isGoing():
if self.connectedNodeCount == self.totalNodes:
self.status = Status.started
elif self.connectedNodeCount >= self.minimumNodes:
self.status = Status.started_hungry
else:
self.status = Status.starting
self.elector.nodeCount = self.connectedNodeCount
if self.master_primary_name in joined:
self.primaries_disconnection_times[self.master_replica.instId] = None
if self.master_primary_name in left:
logger.display('{} lost connection to primary of master'.format(self))
self.lost_master_primary()
elif _prev_status == Status.starting and self.status == Status.started_hungry \
and self.primaries_disconnection_times[self.master_replica.instId] is not None \
and self.master_primary_name is not None:
"""
Such situation may occur if the pool has come back to reachable consensus but
primary is still disconnected, so view change proposal makes sense now.
"""
self._schedule_view_change()
for inst_id, replica in self.replicas.items():
if not replica.isMaster and replica.primaryName is not None:
primary_node_name = replica.primaryName.split(':')[0]
if primary_node_name in joined:
self.primaries_disconnection_times[inst_id] = None
elif primary_node_name in left:
self.primaries_disconnection_times[inst_id] = time.perf_counter()
self._schedule_replica_removal(inst_id)
if self.isReady():
self.checkInstances()
else:
logger.info("{} joined nodes {} but status is {}".format(self, joined, self.status))
# Send ledger status whether ready (connected to enough nodes) or not
for node in joined:
self.send_ledger_status_to_newly_connected_node(node)
for node in left:
self.network_i3pc_watcher.disconnect(node)
for node in joined:
self.network_i3pc_watcher.connect(node) | A series of operations to perform once a connection count has changed.
- Set f to max number of failures this system can handle.
- Set status to one of started, started_hungry or starting depending on
the number of protocol instances.
- Check protocol instances. See `checkInstances()` | Below is the instruction that describes the task:
### Input:
A series of operations to perform once a connection count has changed.
- Set f to max number of failures this system can handle.
- Set status to one of started, started_hungry or starting depending on
the number of protocol instances.
- Check protocol instances. See `checkInstances()`
### Response:
def onConnsChanged(self, joined: Set[str], left: Set[str]):
"""
A series of operations to perform once a connection count has changed.
- Set f to max number of failures this system can handle.
- Set status to one of started, started_hungry or starting depending on
the number of protocol instances.
- Check protocol instances. See `checkInstances()`
"""
_prev_status = self.status
if self.isGoing():
if self.connectedNodeCount == self.totalNodes:
self.status = Status.started
elif self.connectedNodeCount >= self.minimumNodes:
self.status = Status.started_hungry
else:
self.status = Status.starting
self.elector.nodeCount = self.connectedNodeCount
if self.master_primary_name in joined:
self.primaries_disconnection_times[self.master_replica.instId] = None
if self.master_primary_name in left:
logger.display('{} lost connection to primary of master'.format(self))
self.lost_master_primary()
elif _prev_status == Status.starting and self.status == Status.started_hungry \
and self.primaries_disconnection_times[self.master_replica.instId] is not None \
and self.master_primary_name is not None:
"""
Such situation may occur if the pool has come back to reachable consensus but
primary is still disconnected, so view change proposal makes sense now.
"""
self._schedule_view_change()
for inst_id, replica in self.replicas.items():
if not replica.isMaster and replica.primaryName is not None:
primary_node_name = replica.primaryName.split(':')[0]
if primary_node_name in joined:
self.primaries_disconnection_times[inst_id] = None
elif primary_node_name in left:
self.primaries_disconnection_times[inst_id] = time.perf_counter()
self._schedule_replica_removal(inst_id)
if self.isReady():
self.checkInstances()
else:
logger.info("{} joined nodes {} but status is {}".format(self, joined, self.status))
# Send ledger status whether ready (connected to enough nodes) or not
for node in joined:
self.send_ledger_status_to_newly_connected_node(node)
for node in left:
self.network_i3pc_watcher.disconnect(node)
for node in joined:
self.network_i3pc_watcher.connect(node) |
def add_methods(methods_to_add):
''' use this to bulk add new methods to Generator '''
for i in methods_to_add:
try:
Generator.add_method(*i)
except Exception as ex:
raise Exception('issue adding {} - {}'.format(repr(i), ex)) | use this to bulk add new methods to Generator | Below is the instruction that describes the task:
### Input:
use this to bulk add new methods to Generator
### Response:
def add_methods(methods_to_add):
''' use this to bulk add new methods to Generator '''
for i in methods_to_add:
try:
Generator.add_method(*i)
except Exception as ex:
raise Exception('issue adding {} - {}'.format(repr(i), ex)) |
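A usage sketch under an explicit assumption: the signature of Generator.add_method is not shown above, so the (name, callable) tuples below are placeholders for whatever argument tuples it actually expects.
def doubled(gen):
    return (x * 2 for x in gen)

def squared(gen):
    return (x ** 2 for x in gen)

# Each tuple is unpacked into Generator.add_method(*i); a bad entry surfaces as a
# single exception that names the offending tuple.
add_methods([
    ('doubled', doubled),
    ('squared', squared),
])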
def process_action(self, request, queryset):
"""
Publishes the selected objects by passing the value of \
'when' to the object's publish method. The object's \
`purge_archives` method is also called to limit the number \
of old items that we keep around. The action is logged as \
either 'published' or 'scheduled' depending on the value of \
'when', and the user is notified with a message.
Returns a 'render redirect' to the result of the \
`get_done_url` method.
"""
form = self.form(request.POST)
if form.is_valid():
when = form.cleaned_data.get('when')
count = 0
for obj in queryset:
count += 1
obj.publish(user=request.user, when=when)
obj.purge_archives()
object_url = self.get_object_url(obj)
if obj.state == obj.PUBLISHED:
self.log_action(
obj, CMSLog.PUBLISH, url=object_url)
else:
self.log_action(
obj, CMSLog.SCHEDULE, url=object_url)
message = "%s objects published." % count
self.write_message(message=message)
return self.render(request, redirect_url= self.get_done_url(),
message=message,
collect_render_data=False)
return self.render(request, queryset=queryset, publish_form=form, action='Publish') | Publishes the selected objects by passing the value of \
'when' to the object's publish method. The object's \
`purge_archives` method is also called to limit the number \
of old items that we keep around. The action is logged as \
either 'published' or 'scheduled' depending on the value of \
'when', and the user is notified with a message.
Returns a 'render redirect' to the result of the \
`get_done_url` method. | Below is the instruction that describes the task:
### Input:
Publishes the selected objects by passing the value of \
'when' to the object's publish method. The object's \
`purge_archives` method is also called to limit the number \
of old items that we keep around. The action is logged as \
either 'published' or 'scheduled' depending on the value of \
'when', and the user is notified with a message.
Returns a 'render redirect' to the result of the \
`get_done_url` method.
### Response:
def process_action(self, request, queryset):
"""
Publishes the selected objects by passing the value of \
'when' to the object's publish method. The object's \
`purge_archives` method is also called to limit the number \
of old items that we keep around. The action is logged as \
either 'published' or 'scheduled' depending on the value of \
'when', and the user is notified with a message.
Returns a 'render redirect' to the result of the \
`get_done_url` method.
"""
form = self.form(request.POST)
if form.is_valid():
when = form.cleaned_data.get('when')
count = 0
for obj in queryset:
count += 1
obj.publish(user=request.user, when=when)
obj.purge_archives()
object_url = self.get_object_url(obj)
if obj.state == obj.PUBLISHED:
self.log_action(
obj, CMSLog.PUBLISH, url=object_url)
else:
self.log_action(
obj, CMSLog.SCHEDULE, url=object_url)
message = "%s objects published." % count
self.write_message(message=message)
return self.render(request, redirect_url= self.get_done_url(),
message=message,
collect_render_data=False)
return self.render(request, queryset=queryset, publish_form=form, action='Publish') |
def discrete(self):
"""
Set sequence to be discrete.
:rtype: Column
:Example:
>>> # Table schema is create table test(f1 double, f2 string)
>>> # Original continuity: f1=CONTINUOUS, f2=CONTINUOUS
>>> # Now we want to set ``f1`` and ``f2`` into continuous
>>> new_ds = df.discrete('f1 f2')
"""
field_name = self.name
new_df = copy_df(self)
new_df._perform_operation(op.FieldContinuityOperation({field_name: False}))
return new_df | Set sequence to be discrete.
:rtype: Column
:Example:
>>> # Table schema is create table test(f1 double, f2 string)
>>> # Original continuity: f1=CONTINUOUS, f2=CONTINUOUS
>>> # Now we want to set ``f1`` and ``f2`` to be discrete
>>> new_ds = df.discrete('f1 f2') | Below is the instruction that describes the task:
### Input:
Set sequence to be discrete.
:rtype: Column
:Example:
>>> # Table schema is create table test(f1 double, f2 string)
>>> # Original continuity: f1=CONTINUOUS, f2=CONTINUOUS
>>> # Now we want to set ``f1`` and ``f2`` into continuous
>>> new_ds = df.discrete('f1 f2')
### Response:
def discrete(self):
"""
Set sequence to be discrete.
:rtype: Column
:Example:
>>> # Table schema is create table test(f1 double, f2 string)
>>> # Original continuity: f1=CONTINUOUS, f2=CONTINUOUS
>>> # Now we want to set ``f1`` and ``f2`` to be discrete
>>> new_ds = df.discrete('f1 f2')
"""
field_name = self.name
new_df = copy_df(self)
new_df._perform_operation(op.FieldContinuityOperation({field_name: False}))
return new_df |
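A hedged usage sketch, assuming an existing PyODPS-style DataFrame named df whose column f1 exposes this method; only the continuity flag changes, the underlying data does not.
# Assuming `df` already exists and exposes column access as df.f1
f1_discrete = df.f1.discrete()
# df itself is unchanged; the returned copy carries a FieldContinuityOperation
# that marks f1 as discrete (continuity flag set to False).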
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ExportConfigurationContext for this ExportConfigurationInstance
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationContext
"""
if self._context is None:
self._context = ExportConfigurationContext(
self._version,
resource_type=self._solution['resource_type'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ExportConfigurationContext for this ExportConfigurationInstance
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationContext | Below is the instruction that describes the task:
### Input:
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ExportConfigurationContext for this ExportConfigurationInstance
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationContext
### Response:
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ExportConfigurationContext for this ExportConfigurationInstance
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationContext
"""
if self._context is None:
self._context = ExportConfigurationContext(
self._version,
resource_type=self._solution['resource_type'],
)
return self._context |
def getPeersWithAttrValues(self, attrName, attrValues):
'''
getPeersWithAttrValues - Gets peers (elements on same level) whose attribute given by #attrName
are in the list of possible values #attrValues
@param attrName - Name of attribute
@param attrValues - List of possible values which will match
@return - None if no parent element (error condition), otherwise a TagCollection of peers that matched.
'''
peers = self.peers
if peers is None:
return None
return TagCollection([peer for peer in peers if peer.getAttribute(attrName) in attrValues]) | getPeersWithAttrValues - Gets peers (elements on same level) whose attribute given by #attrName
are in the list of possible values #attrValues
@param attrName - Name of attribute
@param attrValues - List of possible values which will match
@return - None if no parent element (error condition), otherwise a TagCollection of peers that matched. | Below is the instruction that describes the task:
### Input:
getPeersWithAttrValues - Gets peers (elements on same level) whose attribute given by #attrName
are in the list of possible values #attrValues
@param attrName - Name of attribute
@param attrValues - List of possible values which will match
@return - None if no parent element (error condition), otherwise a TagCollection of peers that matched.
### Response:
def getPeersWithAttrValues(self, attrName, attrValues):
'''
getPeersWithAttrValues - Gets peers (elements on same level) whose attribute given by #attrName
are in the list of possible values #attrValues
@param attrName - Name of attribute
@param attrValues - List of possible values which will match
@return - None if no parent element (error condition), otherwise a TagCollection of peers that matched.
'''
peers = self.peers
if peers is None:
return None
return TagCollection([peer for peer in peers if peer.getAttribute(attrName) in attrValues]) |
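A small end-to-end sketch with AdvancedHTMLParser, assuming the usual parseStr and getElementsByTagName entry points; the markup is invented for the example.
import AdvancedHTMLParser

parser = AdvancedHTMLParser.AdvancedHTMLParser()
parser.parseStr('<ul>'
                '<li class="red">one</li>'
                '<li class="green">two</li>'
                '<li class="blue">three</li>'
                '</ul>')

first_item = parser.getElementsByTagName('li')[0]
# Peers of the first <li> whose class attribute is either "green" or "blue"
peers = first_item.getPeersWithAttrValues('class', ['green', 'blue'])
print([p.getAttribute('class') for p in peers])   # ['green', 'blue']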
def checkout(self, revision, options):
"""
Checkout a specific revision.
:param revision: The revision identifier.
:type revision: :class:`Revision`
:param options: Any additional options.
:type options: ``dict``
"""
rev = revision.key
self.repo.git.checkout(rev) | Checkout a specific revision.
:param revision: The revision identifier.
:type revision: :class:`Revision`
:param options: Any additional options.
:type options: ``dict`` | Below is the instruction that describes the task:
### Input:
Checkout a specific revision.
:param revision: The revision identifier.
:type revision: :class:`Revision`
:param options: Any additional options.
:type options: ``dict``
### Response:
def checkout(self, revision, options):
"""
Checkout a specific revision.
:param revision: The revision identifier.
:type revision: :class:`Revision`
:param options: Any additional options.
:type options: ``dict``
"""
rev = revision.key
self.repo.git.checkout(rev) |
def add_argument(self, *args, **kwargs):
"""Add an argument.
This method adds a new argument to the current parser. The function is
same as ``argparse.ArgumentParser.add_argument``. However, this method
tries to determine help messages for the adding argument from some
docstrings.
If the new arguments belong to some sub commands, the docstring
of a function implements behavior of the sub command has ``Args:`` section,
and defines same name variable, this function sets such
definition to the help message.
Positional Args:
same positional arguments as argparse.ArgumentParser.add_argument.
Keyword Args:
same keyword arguments as argparse.ArgumentParser.add_argument.
"""
if _HELP not in kwargs:
for name in args:
name = name.replace("-", "")
if name in self.__argmap:
kwargs[_HELP] = self.__argmap[name]
break
return super(ArgumentParser, self).add_argument(*args, **kwargs) | Add an argument.
This method adds a new argument to the current parser. The function is
same as ``argparse.ArgumentParser.add_argument``. However, this method
tries to determine help messages for the adding argument from some
docstrings.
If the new arguments belong to some sub commands, the docstring
of a function implements behavior of the sub command has ``Args:`` section,
and defines same name variable, this function sets such
definition to the help message.
Positional Args:
same positional arguments as argparse.ArgumentParser.add_argument.
Keyword Args:
same keyword arguments as argparse.ArgumentParser.add_argument. | Below is the instruction that describes the task:
### Input:
Add an argument.
This method adds a new argument to the current parser. The function is
same as ``argparse.ArgumentParser.add_argument``. However, this method
tries to determine help messages for the adding argument from some
docstrings.
If the new arguments belong to some sub commands, the docstring
of a function implements behavior of the sub command has ``Args:`` section,
and defines same name variable, this function sets such
definition to the help message.
Positional Args:
same positional arguments as argparse.ArgumentParser.add_argument.
Keyword Args:
same keyword arguments as argparse.ArgumentParser.add_argument.
### Response:
def add_argument(self, *args, **kwargs):
"""Add an argument.
This method adds a new argument to the current parser. The function is
same as ``argparse.ArgumentParser.add_argument``. However, this method
tries to determine help messages for the adding argument from some
docstrings.
If the new arguments belong to some sub commands, the docstring
of a function implements behavior of the sub command has ``Args:`` section,
and defines same name variable, this function sets such
definition to the help message.
Positional Args:
same positional arguments as argparse.ArgumentParser.add_argument.
Keyword Args:
same keyword arguments as argparse.ArgumentParser.add_argument.
"""
if _HELP not in kwargs:
for name in args:
name = name.replace("-", "")
if name in self.__argmap:
kwargs[_HELP] = self.__argmap[name]
break
return super(ArgumentParser, self).add_argument(*args, **kwargs) |
def svd_moments(u, s, v, stachans, event_list, n_svs=2):
"""
Calculate relative moments/amplitudes using singular-value decomposition.
Convert basis vectors calculated by singular value \
decomposition (see the SVD functions in clustering) into relative \
moments.
For more information see the paper by \
`Rubinstein & Ellsworth (2010).
<http://www.bssaonline.org/content/100/5A/1952.short>`_
:type u: list
:param u:
List of the :class:`numpy.ndarray` input basis vectors from the SVD,
one array for each channel used.
:type s: list
:param s:
List of the :class:`numpy.ndarray` of singular values, one array for
each channel.
:type v: list
:param v:
List of :class:`numpy.ndarray` of output basis vectors from SVD, one
array per channel.
:type stachans: list
:param stachans: List of station.channel input
:type event_list: list
:param event_list: List of events for which you have data, such that \
event_list[i] corresponds to stachans[i], U[i] etc. and \
event_list[i][j] corresponds to event j in U[i]. These are a series \
of indexes that map the basis vectors to their relative events and \
channels - if you have every channel for every event generating these \
is trivial (see example).
:type n_svs: int
:param n_svs: Number of singular values to use, defaults to 2.
:returns: M, array of relative moments
:rtype: :class:`numpy.ndarray`
:returns: events_out, list of events that relate to M (in order), \
does not include the magnitude information in the events, see note.
:rtype: :class:`obspy.core.event.event.Event`
.. note:: M is an array of relative moments (or amplitudes), these cannot
be directly compared to true moments without calibration.
.. note:: When comparing this method with the method used for creation
of subspace detectors (Harris 2006) it is important to note that the
input `design set` matrix in Harris contains waveforms as columns,
whereas in Rubinstein & Ellsworth it contains waveforms as rows
(i.e. the transpose of the Harris data matrix). The U and V matrices
are therefore swapped between the two approaches. This is accounted
for in EQcorrscan but may lead to confusion when reviewing the code.
Here we use the Harris approach.
.. rubric:: Example
>>> from eqcorrscan.utils.mag_calc import svd_moments
>>> from obspy import read
>>> import glob
>>> import os
>>> from eqcorrscan.utils.clustering import svd
>>> import numpy as np
>>> # Do the set-up
>>> testing_path = 'eqcorrscan/tests/test_data/similar_events'
>>> stream_files = glob.glob(os.path.join(testing_path, '*'))
>>> stream_list = [read(stream_file) for stream_file in stream_files]
>>> event_list = []
>>> remove_list = [('WHAT2', 'SH1'), ('WV04', 'SHZ'), ('GCSZ', 'EHZ')]
>>> for i, stream in enumerate(stream_list):
... st_list = []
... for tr in stream:
... if (tr.stats.station, tr.stats.channel) not in remove_list:
... stream.remove(tr)
... continue
... tr.detrend('simple')
... tr.filter('bandpass', freqmin=5.0, freqmax=15.0)
... tr.trim(tr.stats.starttime + 40, tr.stats.endtime - 45)
... st_list.append(i)
... event_list.append(st_list) # doctest: +SKIP
>>> event_list = np.asarray(event_list).T.tolist()
>>> SVec, SVal, U, stachans = svd(stream_list=stream_list) # doctest: +SKIP
['GCSZ.EHZ', 'WV04.SHZ', 'WHAT2.SH1']
>>> M, events_out = svd_moments(u=U, s=SVal, v=SVec, stachans=stachans,
... event_list=event_list) # doctest: +SKIP
"""
# Define maximum number of events, will be the width of K
K_width = max([max(ev_list) for ev_list in event_list]) + 1
# Sometimes the randomisation generates a singular matrix - rather than
# attempting to regularize this matrix I propose undertaking the
# randomisation step a further time
if len(stachans) == 1:
print('Only provided data from one station-channel - '
'will not try to invert')
return u[0][:, 0], event_list[0]
for i, stachan in enumerate(stachans):
k = [] # Small kernel matrix for one station - channel
# Copy the relevant vectors so as not to destroy them
# Here we'll swap into the Rubinstein U and V matrices
U_working = copy.deepcopy(v[i].T)
V_working = copy.deepcopy(u[i])
s_working = copy.deepcopy(s[i].T)
ev_list = event_list[i]
if len(ev_list) > len(U_working):
print('U is : ' + str(U_working.shape))
print('ev_list is len %s' % str(len(ev_list)))
f_dump = open('mag_calc_U_working.pkl', 'wb')
pickle.dump(U_working, f_dump)
f_dump.close()
raise IOError('More events than represented in U')
# Set all non-important singular values to zero
s_working[n_svs:len(s_working)] = 0
s_working = np.diag(s_working)
# Convert to numpy matrices
U_working = np.matrix(U_working)
V_working = np.matrix(V_working)
s_working = np.matrix(s_working)
SVD_weights = U_working[:, 0]
# If all the weights are negative take the abs
if np.all(SVD_weights < 0):
warnings.warn('All weights are negative - flipping them')
SVD_weights = np.abs(SVD_weights)
SVD_weights = np.array(SVD_weights).reshape(-1).tolist()
# Shuffle the SVD_weights prior to pairing - will give one of multiple
# pairwise options - see p1956 of Rubinstein & Ellsworth 2010
# We need to keep the real indexes though, otherwise, if there are
# multiple events with the same weight we will end up with multiple
# -1 values
random_SVD_weights = np.copy(SVD_weights)
# Tack on the indexes
random_SVD_weights = random_SVD_weights.tolist()
random_SVD_weights = [(random_SVD_weights[_i], _i)
for _i in range(len(random_SVD_weights))]
random.shuffle(random_SVD_weights)
# Add the first element to the end so all elements will be paired twice
random_SVD_weights.append(random_SVD_weights[0])
# Take pairs of all the SVD_weights (each weight appears in 2 pairs)
pairs = []
for pair in _pairwise(random_SVD_weights):
pairs.append(pair)
# Deciding values for each place in kernel matrix using the pairs
for pairsIndex in range(len(pairs)):
# We will normalize by the minimum weight
_weights = list(zip(*list(pairs[pairsIndex])))[0]
_indeces = list(zip(*list(pairs[pairsIndex])))[1]
min_weight = min(np.abs(_weights))
max_weight = max(np.abs(_weights))
min_index = _indeces[np.argmin(np.abs(_weights))]
max_index = _indeces[np.argmax(np.abs(_weights))]
row = []
# Working out values for each row of kernel matrix
for j in range(len(SVD_weights)):
if j == max_index:
result = -1
elif j == min_index:
normalised = max_weight / min_weight
result = float(normalised)
else:
result = 0
row.append(result)
# Add each row to the K matrix
k.append(row)
# k is now a square matrix, we need to flesh it out to be K_width
k_filled = np.zeros([len(k), K_width])
for j in range(len(k)):
for l, ev in enumerate(ev_list):
k_filled[j, ev] = k[j][l]
if 'K' not in locals():
K = k_filled
else:
K = np.concatenate([K, k_filled])
# Remove any empty rows
K_nonempty = []
events_out = []
for i in range(0, K_width):
if not np.all(K[:, i] == 0):
K_nonempty.append(K[:, i])
events_out.append(i)
K = np.array(K_nonempty).T
K = K.tolist()
K_width = len(K[0])
# Add an extra row to K, so average moment = 1
K.append(np.ones(K_width) * (1. / K_width))
print("Created Kernel matrix: ")
del row
print('\n'.join([''.join([str(round(float(item), 3)).ljust(6)
for item in row]) for row in K]))
Krounded = np.around(K, decimals=4)
# Create a weighting matrix to put emphasis on the final row.
W = np.matrix(np.identity(len(K)))
# the final element of W = the number of stations*number of events
W[-1, -1] = len(K) - 1
# Make K into a matrix
K = np.matrix(K)
############
# Solve using the weighted least squares equation, K.T is K transpose
Kinv = np.array(np.linalg.inv(K.T * W * K) * K.T * W)
# M are the relative moments of the events
M = Kinv[:, -1]
# XXX TODO This still needs an outlier removal step
return M, events_out | Calculate relative moments/amplitudes using singular-value decomposition.
Convert basis vectors calculated by singular value \
decomposition (see the SVD functions in clustering) into relative \
moments.
For more information see the paper by \
`Rubinstein & Ellsworth (2010).
<http://www.bssaonline.org/content/100/5A/1952.short>`_
:type u: list
:param u:
List of the :class:`numpy.ndarray` input basis vectors from the SVD,
one array for each channel used.
:type s: list
:param s:
List of the :class:`numpy.ndarray` of singular values, one array for
each channel.
:type v: list
:param v:
List of :class:`numpy.ndarray` of output basis vectors from SVD, one
array per channel.
:type stachans: list
:param stachans: List of station.channel input
:type event_list: list
:param event_list: List of events for which you have data, such that \
event_list[i] corresponds to stachans[i], U[i] etc. and \
event_list[i][j] corresponds to event j in U[i]. These are a series \
of indexes that map the basis vectors to their relative events and \
channels - if you have every channel for every event generating these \
is trivial (see example).
:type n_svs: int
:param n_svs: Number of singular values to use, defaults to 2.
:returns: M, array of relative moments
:rtype: :class:`numpy.ndarray`
:returns: events_out, list of events that relate to M (in order), \
does not include the magnitude information in the events, see note.
:rtype: :class:`obspy.core.event.event.Event`
.. note:: M is an array of relative moments (or amplitudes), these cannot
be directly compared to true moments without calibration.
.. note:: When comparing this method with the method used for creation
of subspace detectors (Harris 2006) it is important to note that the
input `design set` matrix in Harris contains waveforms as columns,
whereas in Rubinstein & Ellsworth it contains waveforms as rows
(i.e. the transpose of the Harris data matrix). The U and V matrices
are therefore swapped between the two approaches. This is accounted
for in EQcorrscan but may lead to confusion when reviewing the code.
Here we use the Harris approach.
.. rubric:: Example
>>> from eqcorrscan.utils.mag_calc import svd_moments
>>> from obspy import read
>>> import glob
>>> import os
>>> from eqcorrscan.utils.clustering import svd
>>> import numpy as np
>>> # Do the set-up
>>> testing_path = 'eqcorrscan/tests/test_data/similar_events'
>>> stream_files = glob.glob(os.path.join(testing_path, '*'))
>>> stream_list = [read(stream_file) for stream_file in stream_files]
>>> event_list = []
>>> remove_list = [('WHAT2', 'SH1'), ('WV04', 'SHZ'), ('GCSZ', 'EHZ')]
>>> for i, stream in enumerate(stream_list):
... st_list = []
... for tr in stream:
... if (tr.stats.station, tr.stats.channel) not in remove_list:
... stream.remove(tr)
... continue
... tr.detrend('simple')
... tr.filter('bandpass', freqmin=5.0, freqmax=15.0)
... tr.trim(tr.stats.starttime + 40, tr.stats.endtime - 45)
... st_list.append(i)
... event_list.append(st_list) # doctest: +SKIP
>>> event_list = np.asarray(event_list).T.tolist()
>>> SVec, SVal, U, stachans = svd(stream_list=stream_list) # doctest: +SKIP
['GCSZ.EHZ', 'WV04.SHZ', 'WHAT2.SH1']
>>> M, events_out = svd_moments(u=U, s=SVal, v=SVec, stachans=stachans,
... event_list=event_list) # doctest: +SKIP | Below is the instruction that describes the task:
### Input:
Calculate relative moments/amplitudes using singular-value decomposition.
Convert basis vectors calculated by singular value \
decomposition (see the SVD functions in clustering) into relative \
moments.
For more information see the paper by \
`Rubinstein & Ellsworth (2010).
<http://www.bssaonline.org/content/100/5A/1952.short>`_
:type u: list
:param u:
List of the :class:`numpy.ndarray` input basis vectors from the SVD,
one array for each channel used.
:type s: list
:param s:
List of the :class:`numpy.ndarray` of singular values, one array for
each channel.
:type v: list
:param v:
List of :class:`numpy.ndarray` of output basis vectors from SVD, one
array per channel.
:type stachans: list
:param stachans: List of station.channel input
:type event_list: list
:param event_list: List of events for which you have data, such that \
event_list[i] corresponds to stachans[i], U[i] etc. and \
event_list[i][j] corresponds to event j in U[i]. These are a series \
of indexes that map the basis vectors to their relative events and \
channels - if you have every channel for every event generating these \
is trivial (see example).
:type n_svs: int
:param n_svs: Number of singular values to use, defaults to 2.
:returns: M, array of relative moments
:rtype: :class:`numpy.ndarray`
:returns: events_out, list of events that relate to M (in order), \
does not include the magnitude information in the events, see note.
:rtype: :class:`obspy.core.event.event.Event`
.. note:: M is an array of relative moments (or amplitudes), these cannot
be directly compared to true moments without calibration.
.. note:: When comparing this method with the method used for creation
of subspace detectors (Harris 2006) it is important to note that the
input `design set` matrix in Harris contains waveforms as columns,
whereas in Rubinstein & Ellsworth it contains waveforms as rows
(i.e. the transpose of the Harris data matrix). The U and V matrices
are therefore swapped between the two approaches. This is accounted
for in EQcorrscan but may lead to confusion when reviewing the code.
Here we use the Harris approach.
.. rubric:: Example
>>> from eqcorrscan.utils.mag_calc import svd_moments
>>> from obspy import read
>>> import glob
>>> import os
>>> from eqcorrscan.utils.clustering import svd
>>> import numpy as np
>>> # Do the set-up
>>> testing_path = 'eqcorrscan/tests/test_data/similar_events'
>>> stream_files = glob.glob(os.path.join(testing_path, '*'))
>>> stream_list = [read(stream_file) for stream_file in stream_files]
>>> event_list = []
>>> remove_list = [('WHAT2', 'SH1'), ('WV04', 'SHZ'), ('GCSZ', 'EHZ')]
>>> for i, stream in enumerate(stream_list):
... st_list = []
... for tr in stream:
... if (tr.stats.station, tr.stats.channel) not in remove_list:
... stream.remove(tr)
... continue
... tr.detrend('simple')
... tr.filter('bandpass', freqmin=5.0, freqmax=15.0)
... tr.trim(tr.stats.starttime + 40, tr.stats.endtime - 45)
... st_list.append(i)
... event_list.append(st_list) # doctest: +SKIP
>>> event_list = np.asarray(event_list).T.tolist()
>>> SVec, SVal, U, stachans = svd(stream_list=stream_list) # doctest: +SKIP
['GCSZ.EHZ', 'WV04.SHZ', 'WHAT2.SH1']
>>> M, events_out = svd_moments(u=U, s=SVal, v=SVec, stachans=stachans,
... event_list=event_list) # doctest: +SKIP
### Response:
def svd_moments(u, s, v, stachans, event_list, n_svs=2):
"""
Calculate relative moments/amplitudes using singular-value decomposition.
Convert basis vectors calculated by singular value \
decomposition (see the SVD functions in clustering) into relative \
moments.
For more information see the paper by \
`Rubinstein & Ellsworth (2010).
<http://www.bssaonline.org/content/100/5A/1952.short>`_
:type u: list
:param u:
List of the :class:`numpy.ndarray` input basis vectors from the SVD,
one array for each channel used.
:type s: list
:param s:
List of the :class:`numpy.ndarray` of singular values, one array for
each channel.
:type v: list
:param v:
List of :class:`numpy.ndarray` of output basis vectors from SVD, one
array per channel.
:type stachans: list
:param stachans: List of station.channel input
:type event_list: list
:param event_list: List of events for which you have data, such that \
event_list[i] corresponds to stachans[i], U[i] etc. and \
event_list[i][j] corresponds to event j in U[i]. These are a series \
of indexes that map the basis vectors to their relative events and \
channels - if you have every channel for every event generating these \
is trivial (see example).
:type n_svs: int
:param n_svs: Number of singular values to use, defaults to 2.
:returns: M, array of relative moments
:rtype: :class:`numpy.ndarray`
:returns: events_out, list of events that relate to M (in order), \
does not include the magnitude information in the events, see note.
:rtype: :class:`obspy.core.event.event.Event`
.. note:: M is an array of relative moments (or amplitudes), these cannot
be directly compared to true moments without calibration.
.. note:: When comparing this method with the method used for creation
of subspace detectors (Harris 2006) it is important to note that the
input `design set` matrix in Harris contains waveforms as columns,
whereas in Rubinstein & Ellsworth it contains waveforms as rows
(i.e. the transpose of the Harris data matrix). The U and V matrices
are therefore swapped between the two approaches. This is accounted
for in EQcorrscan but may lead to confusion when reviewing the code.
Here we use the Harris approach.
.. rubric:: Example
>>> from eqcorrscan.utils.mag_calc import svd_moments
>>> from obspy import read
>>> import glob
>>> import os
>>> from eqcorrscan.utils.clustering import svd
>>> import numpy as np
>>> # Do the set-up
>>> testing_path = 'eqcorrscan/tests/test_data/similar_events'
>>> stream_files = glob.glob(os.path.join(testing_path, '*'))
>>> stream_list = [read(stream_file) for stream_file in stream_files]
>>> event_list = []
>>> remove_list = [('WHAT2', 'SH1'), ('WV04', 'SHZ'), ('GCSZ', 'EHZ')]
>>> for i, stream in enumerate(stream_list):
... st_list = []
... for tr in stream:
... if (tr.stats.station, tr.stats.channel) not in remove_list:
... stream.remove(tr)
... continue
... tr.detrend('simple')
... tr.filter('bandpass', freqmin=5.0, freqmax=15.0)
... tr.trim(tr.stats.starttime + 40, tr.stats.endtime - 45)
... st_list.append(i)
... event_list.append(st_list) # doctest: +SKIP
>>> event_list = np.asarray(event_list).T.tolist()
>>> SVec, SVal, U, stachans = svd(stream_list=stream_list) # doctest: +SKIP
['GCSZ.EHZ', 'WV04.SHZ', 'WHAT2.SH1']
>>> M, events_out = svd_moments(u=U, s=SVal, v=SVec, stachans=stachans,
... event_list=event_list) # doctest: +SKIP
"""
# Define maximum number of events, will be the width of K
K_width = max([max(ev_list) for ev_list in event_list]) + 1
# Sometimes the randomisation generates a singular matrix - rather than
# attempting to regularize this matrix I propose undertaking the
# randomisation step a further time
if len(stachans) == 1:
print('Only provided data from one station-channel - '
'will not try to invert')
return u[0][:, 0], event_list[0]
for i, stachan in enumerate(stachans):
k = [] # Small kernel matrix for one station - channel
# Copy the relevant vectors so as not to destroy them
# Here we'll swap into the Rubinstein U and V matrices
U_working = copy.deepcopy(v[i].T)
V_working = copy.deepcopy(u[i])
s_working = copy.deepcopy(s[i].T)
ev_list = event_list[i]
if len(ev_list) > len(U_working):
print('U is : ' + str(U_working.shape))
print('ev_list is len %s' % str(len(ev_list)))
f_dump = open('mag_calc_U_working.pkl', 'wb')
pickle.dump(U_working, f_dump)
f_dump.close()
raise IOError('More events than represented in U')
# Set all non-important singular values to zero
s_working[n_svs:len(s_working)] = 0
s_working = np.diag(s_working)
# Convert to numpy matrices
U_working = np.matrix(U_working)
V_working = np.matrix(V_working)
s_working = np.matrix(s_working)
SVD_weights = U_working[:, 0]
# If all the weights are negative take the abs
if np.all(SVD_weights < 0):
warnings.warn('All weights are negative - flipping them')
SVD_weights = np.abs(SVD_weights)
SVD_weights = np.array(SVD_weights).reshape(-1).tolist()
# Shuffle the SVD_weights prior to pairing - will give one of multiple
# pairwise options - see p1956 of Rubinstein & Ellsworth 2010
# We need to keep the real indexes though, otherwise, if there are
# multiple events with the same weight we will end up with multiple
# -1 values
random_SVD_weights = np.copy(SVD_weights)
# Tack on the indexes
random_SVD_weights = random_SVD_weights.tolist()
random_SVD_weights = [(random_SVD_weights[_i], _i)
for _i in range(len(random_SVD_weights))]
random.shuffle(random_SVD_weights)
# Add the first element to the end so all elements will be paired twice
random_SVD_weights.append(random_SVD_weights[0])
# Take pairs of all the SVD_weights (each weight appears in 2 pairs)
pairs = []
for pair in _pairwise(random_SVD_weights):
pairs.append(pair)
# Deciding values for each place in kernel matrix using the pairs
for pairsIndex in range(len(pairs)):
# We will normalize by the minimum weight
_weights = list(zip(*list(pairs[pairsIndex])))[0]
_indeces = list(zip(*list(pairs[pairsIndex])))[1]
min_weight = min(np.abs(_weights))
max_weight = max(np.abs(_weights))
min_index = _indeces[np.argmin(np.abs(_weights))]
max_index = _indeces[np.argmax(np.abs(_weights))]
row = []
# Working out values for each row of kernel matrix
for j in range(len(SVD_weights)):
if j == max_index:
result = -1
elif j == min_index:
normalised = max_weight / min_weight
result = float(normalised)
else:
result = 0
row.append(result)
# Add each row to the K matrix
k.append(row)
# k is now a square matrix, we need to flesh it out to be K_width
k_filled = np.zeros([len(k), K_width])
for j in range(len(k)):
for l, ev in enumerate(ev_list):
k_filled[j, ev] = k[j][l]
if 'K' not in locals():
K = k_filled
else:
K = np.concatenate([K, k_filled])
# Remove any empty rows
K_nonempty = []
events_out = []
for i in range(0, K_width):
if not np.all(K[:, i] == 0):
K_nonempty.append(K[:, i])
events_out.append(i)
K = np.array(K_nonempty).T
K = K.tolist()
K_width = len(K[0])
# Add an extra row to K, so average moment = 1
K.append(np.ones(K_width) * (1. / K_width))
print("Created Kernel matrix: ")
del row
print('\n'.join([''.join([str(round(float(item), 3)).ljust(6)
for item in row]) for row in K]))
Krounded = np.around(K, decimals=4)
# Create a weighting matrix to put emphasis on the final row.
W = np.matrix(np.identity(len(K)))
# the final element of W = the number of stations*number of events
W[-1, -1] = len(K) - 1
# Make K into a matrix
K = np.matrix(K)
############
# Solve using the weighted least squares equation, K.T is K transpose
Kinv = np.array(np.linalg.inv(K.T * W * K) * K.T * W)
# M are the relative moments of the events
M = Kinv[:, -1]
# XXX TODO This still needs an outlier removal step
return M, events_out |
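To make the final solve concrete, here is a standalone sketch of just the weighted least-squares step, using a made-up two-event kernel; the numbers are illustrative, not from any real catalogue.
import numpy as np

K = np.array([[-1.0,  2.0],     # one pairwise amplitude-ratio row
              [ 1.5, -1.0],     # a second pairwise row
              [ 0.5,  0.5]])    # constraint row: average relative moment = 1
b = np.array([0.0, 0.0, 1.0])   # ratio rows target 0, the constraint row targets 1
W = np.eye(len(K))
W[-1, -1] = len(K) - 1          # emphasise the constraint row, as above

# Weighted least squares: M = (K^T W K)^-1 K^T W b
M = np.linalg.solve(K.T @ W @ K, K.T @ W @ b)
print(M)                        # relative (uncalibrated) moments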
def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs):
"""
Try and return page content in the requested format using selenium
"""
try:
# **TODO**: Find what exception this will throw and catch it and call
# self.driver.execute_script("window.stop()")
# Then still try and get the source from the page
self.driver.set_page_load_timeout(timeout)
self.driver.get(url)
header_data = self.get_selenium_header()
status_code = header_data['status-code']
# Set data to access from script
self.status_code = status_code
self.url = self.driver.current_url
except TimeoutException:
logger.warning("Page timeout: {}".format(url))
try:
scraper_monitor.failed_url(url, 'Timeout')
except (NameError, AttributeError):
# Happens when scraper_monitor is not being used/setup
pass
except Exception:
logger.exception("Unknown problem with scraper_monitor sending a failed url")
except Exception as e:
raise e.with_traceback(sys.exc_info()[2])
else:
# If an exception was not thrown then check the http status code
if status_code < 400:
# If the http status code is not an error
return self.driver.page_source
else:
# If http status code is 400 or greater
raise SeleniumHTTPError("Status code >= 400", status_code=status_code) | Try and return page content in the requested format using selenium | Below is the instruction that describes the task:
### Input:
Try and return page content in the requested format using selenium
### Response:
def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs):
"""
Try and return page content in the requested format using selenium
"""
try:
# **TODO**: Find what exception this will throw and catch it and call
# self.driver.execute_script("window.stop()")
# Then still try and get the source from the page
self.driver.set_page_load_timeout(timeout)
self.driver.get(url)
header_data = self.get_selenium_header()
status_code = header_data['status-code']
# Set data to access from script
self.status_code = status_code
self.url = self.driver.current_url
except TimeoutException:
logger.warning("Page timeout: {}".format(url))
try:
scraper_monitor.failed_url(url, 'Timeout')
except (NameError, AttributeError):
# Happens when scraper_monitor is not being used/setup
pass
except Exception:
logger.exception("Unknown problem with scraper_monitor sending a failed url")
except Exception as e:
raise e.with_traceback(sys.exc_info()[2])
else:
# If an exception was not thrown then check the http status code
if status_code < 400:
# If the http status code is not an error
return self.driver.page_source
else:
# If http status code is 400 or greater
raise SeleniumHTTPError("Status code >= 400", status_code=status_code) |
def main():
"""
Handle command line arguments to convert a file to a JSON stream as a
script.
"""
logging.basicConfig(level=logging.INFO)
import argparse
parser = argparse.ArgumentParser(
description="Translate CSV or JSON input to a JSON stream, or verify "
"something that is already a JSON stream."
)
parser.add_argument('input',
help='A CSV, JSON, or JSON stream file to read.')
parser.add_argument('output', nargs='?', default=None,
help="The filename to output to. Recommended extension is .jsons. "
"If omitted, use standard output.")
args = parser.parse_args()
transcode(args.input, args.output) | Handle command line arguments to convert a file to a JSON stream as a
script. | Below is the instruction that describes the task:
### Input:
Handle command line arguments to convert a file to a JSON stream as a
script.
### Response:
def main():
"""
Handle command line arguments to convert a file to a JSON stream as a
script.
"""
logging.basicConfig(level=logging.INFO)
import argparse
parser = argparse.ArgumentParser(
description="Translate CSV or JSON input to a JSON stream, or verify "
"something that is already a JSON stream."
)
parser.add_argument('input',
help='A CSV, JSON, or JSON stream file to read.')
parser.add_argument('output', nargs='?', default=None,
help="The filename to output to. Recommended extension is .jsons. "
"If omitted, use standard output.")
args = parser.parse_args()
transcode(args.input, args.output) |
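The wrapper above only parses arguments and delegates; a hedged sketch of the equivalent direct calls, assuming transcode (defined elsewhere in the same module, not shown here) takes an input path and an optional output path.
import logging

logging.basicConfig(level=logging.INFO)

# Hypothetical file names; None mirrors the omitted positional argument and
# sends the JSON stream to standard output instead of a file.
transcode('records.csv', 'records.jsons')
transcode('records.jsons', None)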
def set_intermediates(self, intermediates, betas=None, transition_states=None):
"""Sets up intermediates and specifies whether it's an electrochemical step.
Either provide individual contributions or net contributions. If both are given,
only the net contributions are used.
intermediate_list: list of basestrings
transition_states: list of True and False
electrochemical_steps: list of True and False
betas = list of charge transfer coefficients
net_corrections: A sum of all contributions per intermediate.
"""
self.intermediates = intermediates
self.betas = betas
self.transition_states = transition_states
if self.corrections is None:
self.net_corrections = [0.0 for _ in intermediates]
if not self.betas:
self.betas = [0.0 for _ in intermediates]
if not self.transition_states:
self.transition_states = [False for _ in intermediates]
# check if all lists have same length:
props = [len(self.intermediates), len(self.net_corrections), len(self.transition_states),
len(self.betas)]
if not len(set(props)) <= 1:
raise ValueError('intermediate, net_corrections, transition_states and , '
'betas all have to have the same length')
self.get_corrections()
return(True) | Sets up intermediates and specifies whether it's an electrochemical step.
Either provide individual contributions or net contributions. If both are given,
only the net contributions are used.
intermediate_list: list of basestrings
transition_states: list of True and False
electrochemical_steps: list of True and False
betas = list of charge transfer coefficients
net_corrections: A sum of all contributions per intermediate. | Below is the instruction that describes the task:
### Input:
Sets up intermediates and specifies whether it's an electrochemical step.
Either provide individual contributions or net contributions. If both are given,
only the net contributions are used.
intermediate_list: list of basestrings
transition_states: list of True and False
electrochemical_steps: list of True and False
betas = list of charge transfer coefficients
net_corrections: A sum of all contributions per intermediate.
### Response:
def set_intermediates(self, intermediates, betas=None, transition_states=None):
"""Sets up intermediates and specifies whether it's an electrochemical step.
Either provide individual contributions or net contributions. If both are given,
only the net contributions are used.
intermediate_list: list of basestrings
transition_states: list of True and False
electrochemical_steps: list of True and False
betas = list of charge transfer coefficients
net_corrections: A sum of all contributions per intermediate.
"""
self.intermediates = intermediates
self.betas = betas
self.transition_states = transition_states
if self.corrections is None:
self.net_corrections = [0.0 for _ in intermediates]
if not self.betas:
self.betas = [0.0 for _ in intermediates]
if not self.transition_states:
self.transition_states = [False for _ in intermediates]
# check if all lists have same length:
props = [len(self.intermediates), len(self.net_corrections), len(self.transition_states),
len(self.betas)]
if not len(set(props)) <= 1:
raise ValueError('intermediate, net_corrections, transition_states and , '
'betas all have to have the same length')
self.get_corrections()
return(True) |
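A hedged call sketch: diagram stands in for whatever object owns this method (its class is not shown above), and the species names and coefficients are placeholders. The firm contract from the code is simply that the lists line up element-wise.
intermediates = ['*', 'OH*', 'O-OH-TS', 'O*']         # placeholder species labels
betas = [0.0, 0.5, 0.5, 0.0]                          # charge-transfer coefficients
transition_states = [False, False, True, False]       # mark the TS entry

diagram.set_intermediates(intermediates,
                          betas=betas,
                          transition_states=transition_states)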
def _create_geonode_uploader_action(self):
"""Create action for Geonode uploader dialog."""
icon = resources_path('img', 'icons', 'geonode.png')
label = tr('Geonode Uploader')
self.action_geonode = QAction(
QIcon(icon), label, self.iface.mainWindow())
self.action_geonode.setStatusTip(label)
self.action_geonode.setWhatsThis(label)
self.action_geonode.triggered.connect(self.show_geonode_uploader)
self.add_action(self.action_geonode, add_to_toolbar=False) | Create action for Geonode uploader dialog. | Below is the instruction that describes the task:
### Input:
Create action for Geonode uploader dialog.
### Response:
def _create_geonode_uploader_action(self):
"""Create action for Geonode uploader dialog."""
icon = resources_path('img', 'icons', 'geonode.png')
label = tr('Geonode Uploader')
self.action_geonode = QAction(
QIcon(icon), label, self.iface.mainWindow())
self.action_geonode.setStatusTip(label)
self.action_geonode.setWhatsThis(label)
self.action_geonode.triggered.connect(self.show_geonode_uploader)
self.add_action(self.action_geonode, add_to_toolbar=False) |
def template(tem, queue=False, **kwargs):
'''
Execute the information stored in a template file on the minion.
This function does not ask a master for a SLS file to render but
instead directly processes the file at the provided path on the minion.
CLI Example:
.. code-block:: bash
salt '*' state.template '<Path to template on the minion>'
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(opts,
context=__context__,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError('Pillar failed to render', info=errors)
if not tem.endswith('.sls'):
tem = '{sls}.sls'.format(sls=tem)
high_state, errors = st_.render_state(tem,
kwargs.get('saltenv', ''),
'',
None,
local=True)
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
ret = st_.state.call_high(high_state)
_set_retcode(ret, highstate=high_state)
return ret | Execute the information stored in a template file on the minion.
This function does not ask a master for a SLS file to render but
instead directly processes the file at the provided path on the minion.
CLI Example:
.. code-block:: bash
salt '*' state.template '<Path to template on the minion>' | Below is the instruction that describes the task:
### Input:
Execute the information stored in a template file on the minion.
This function does not ask a master for a SLS file to render but
instead directly processes the file at the provided path on the minion.
CLI Example:
.. code-block:: bash
salt '*' state.template '<Path to template on the minion>'
### Response:
def template(tem, queue=False, **kwargs):
'''
Execute the information stored in a template file on the minion.
This function does not ask a master for a SLS file to render but
instead directly processes the file at the provided path on the minion.
CLI Example:
.. code-block:: bash
salt '*' state.template '<Path to template on the minion>'
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(opts,
context=__context__,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError('Pillar failed to render', info=errors)
if not tem.endswith('.sls'):
tem = '{sls}.sls'.format(sls=tem)
high_state, errors = st_.render_state(tem,
kwargs.get('saltenv', ''),
'',
None,
local=True)
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
ret = st_.state.call_high(high_state)
_set_retcode(ret, highstate=high_state)
return ret |
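Beyond the CLI shown in the docstring, the same execution-module function can be reached through Salt's Python client; a sketch that assumes a running master, an accepted minion with id 'web01', and a hypothetical template path.
import salt.client

local = salt.client.LocalClient()
# Renders and applies /srv/templates/app.sls directly on the minion 'web01'
result = local.cmd('web01', 'state.template', ['/srv/templates/app.sls'])
print(result['web01'])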
def shared_memory(attrs=None, where=None):
'''
Return shared_memory information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.shared_memory
'''
if __grains__['os_family'] in ['RedHat', 'Debian']:
return _osquery_cmd(table='shared_memory', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'} | Return shared_memory information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.shared_memory | Below is the instruction that describes the task:
### Input:
Return shared_memory information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.shared_memory
### Response:
def shared_memory(attrs=None, where=None):
'''
Return shared_memory information from osquery
CLI Example:
.. code-block:: bash
salt '*' osquery.shared_memory
'''
if __grains__['os_family'] in ['RedHat', 'Debian']:
return _osquery_cmd(table='shared_memory', attrs=attrs, where=where)
return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'} |
def prepare_ec(self, scaffolds, tour, weights):
"""
Prepare Evolutionary Computation. This converts scaffold names into
indices (integer) in the scaffolds array.
"""
scaffolds_ii = dict((s, i) for i, s in enumerate(scaffolds))
scfs = []
ww = []
for mlg in self.linkage_groups:
w = float(weights[mlg.mapname])
scf = {}
for s, o in tour:
si = scaffolds_ii[s]
scf[si] = self.get_series(mlg.lg, s, orientation=o)
scfs.append(scf)
ww.append(w)
tour = [scaffolds_ii[x] for x, o in tour]
return scfs, tour, ww | Prepare Evolutionary Computation. This converts scaffold names into
indices (integer) in the scaffolds array. | Below is the the instruction that describes the task:
### Input:
Prepare Evolutionary Computation. This converts scaffold names into
indices (integer) in the scaffolds array.
### Response:
def prepare_ec(self, scaffolds, tour, weights):
"""
Prepare Evolutionary Computation. This converts scaffold names into
indices (integer) in the scaffolds array.
"""
scaffolds_ii = dict((s, i) for i, s in enumerate(scaffolds))
scfs = []
ww = []
for mlg in self.linkage_groups:
w = float(weights[mlg.mapname])
scf = {}
for s, o in tour:
si = scaffolds_ii[s]
scf[si] = self.get_series(mlg.lg, s, orientation=o)
scfs.append(scf)
ww.append(w)
tour = [scaffolds_ii[x] for x, o in tour]
return scfs, tour, ww |
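A small standalone sketch of the name-to-index conversion performed above; the scaffold names and the (name, orientation) tour below are invented for illustration and do not come from the real linkage-map data structures.
# Illustration only: made-up scaffold names and a (name, orientation) tour.
scaffolds = ["scf_A", "scf_B", "scf_C"]
tour = [("scf_B", "+"), ("scf_A", "-")]
scaffolds_ii = dict((s, i) for i, s in enumerate(scaffolds))
tour_idx = [scaffolds_ii[x] for x, o in tour]
print(tour_idx)  # [1, 0] -- positions of scf_B and scf_A in `scaffolds`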
async def postback_send(msg: BaseMessage, platform: Platform) -> Response:
"""
Injects the POST body into the FSM as a Postback message.
"""
await platform.inject_message(msg)
return json_response({
'status': 'ok',
}) | Injects the POST body into the FSM as a Postback message. | Below is the the instruction that describes the task:
### Input:
Injects the POST body into the FSM as a Postback message.
### Response:
async def postback_send(msg: BaseMessage, platform: Platform) -> Response:
"""
Injects the POST body into the FSM as a Postback message.
"""
await platform.inject_message(msg)
return json_response({
'status': 'ok',
}) |
def _compute_magnitude(self, rup, C):
"""
Compute the third term of the equation 1:
e1 + b1 * (M-Mh) + b2 * (M-Mh)**2 for M<=Mh
e1 + b3 * (M-Mh) otherwise
"""
m_h = 6.75
b_3 = 0.0
if rup.mag <= m_h:
return C["e1"] + (C['b1'] * (rup.mag - m_h)) +\
(C['b2'] * (rup.mag - m_h) ** 2)
else:
return C["e1"] + (b_3 * (rup.mag - m_h)) | Compute the third term of the equation 1:
e1 + b1 * (M-Mh) + b2 * (M-Mh)**2 for M<=Mh
e1 + b3 * (M-Mh) otherwise | Below is the the instruction that describes the task:
### Input:
Compute the third term of the equation 1:
e1 + b1 * (M-Mh) + b2 * (M-Mh)**2 for M<=Mh
e1 + b3 * (M-Mh) otherwise
### Response:
def _compute_magnitude(self, rup, C):
"""
Compute the third term of the equation 1:
e1 + b1 * (M-Mh) + b2 * (M-Mh)**2 for M<=Mh
e1 + b3 * (M-Mh) otherwise
"""
m_h = 6.75
b_3 = 0.0
if rup.mag <= m_h:
return C["e1"] + (C['b1'] * (rup.mag - m_h)) +\
(C['b2'] * (rup.mag - m_h) ** 2)
else:
return C["e1"] + (b_3 * (rup.mag - m_h)) |
def schedule_hostgroup_host_downtime(self, hostgroup, start_time, end_time, fixed,
trigger_id, duration, author, comment):
"""Schedule a downtime for each host of a hostgroup
Format of the line that triggers function call::
SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;<end_time>;
<fixed>;<trigger_id>;<duration>;<author>;<comment>
:param hostgroup: hostgroup to schedule
:type hostgroup: alignak.objects.hostgroup.Hostgroup
:param start_time: downtime start time
:type start_time:
:param end_time: downtime end time
:type end_time:
:param fixed: is downtime fixed
:type fixed:
:param trigger_id: downtime id that triggered this one
:type trigger_id: str
:param duration: downtime duration
:type duration: int
:param author: downtime author
:type author: str
:param comment: downtime comment
:type comment: str
:return: None
"""
for host_id in hostgroup.get_hosts():
if host_id in self.daemon.hosts:
host = self.daemon.hosts[host_id]
self.schedule_host_downtime(host, start_time, end_time, fixed,
trigger_id, duration, author, comment) | Schedule a downtime for each host of a hostgroup
Format of the line that triggers function call::
SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;<end_time>;
<fixed>;<trigger_id>;<duration>;<author>;<comment>
:param hostgroup: hostgroup to schedule
:type hostgroup: alignak.objects.hostgroup.Hostgroup
:param start_time: downtime start time
:type start_time:
:param end_time: downtime end time
:type end_time:
:param fixed: is downtime fixed
:type fixed:
:param trigger_id: downtime id that triggered this one
:type trigger_id: str
:param duration: downtime duration
:type duration: int
:param author: downtime author
:type author: str
:param comment: downtime comment
:type comment: str
:return: None | Below is the the instruction that describes the task:
### Input:
Schedule a downtime for each host of a hostgroup
Format of the line that triggers function call::
SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;<end_time>;
<fixed>;<trigger_id>;<duration>;<author>;<comment>
:param hostgroup: hostgroup to schedule
:type hostgroup: alignak.objects.hostgroup.Hostgroup
:param start_time: downtime start time
:type start_time:
:param end_time: downtime end time
:type end_time:
:param fixed: is downtime fixed
:type fixed:
:param trigger_id: downtime id that triggered this one
:type trigger_id: str
:param duration: downtime duration
:type duration: int
:param author: downtime author
:type author: str
:param comment: downtime comment
:type comment: str
:return: None
### Response:
def schedule_hostgroup_host_downtime(self, hostgroup, start_time, end_time, fixed,
trigger_id, duration, author, comment):
"""Schedule a downtime for each host of a hostgroup
Format of the line that triggers function call::
SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;<end_time>;
<fixed>;<trigger_id>;<duration>;<author>;<comment>
:param hostgroup: hostgroup to schedule
:type hostgroup: alignak.objects.hostgroup.Hostgroup
:param start_time: downtime start time
:type start_time:
:param end_time: downtime end time
:type end_time:
:param fixed: is downtime fixed
:type fixed:
:param trigger_id: downtime id that triggered this one
:type trigger_id: str
:param duration: downtime duration
:type duration: int
:param author: downtime author
:type author: str
:param comment: downtime comment
:type comment: str
:return: None
"""
for host_id in hostgroup.get_hosts():
if host_id in self.daemon.hosts:
host = self.daemon.hosts[host_id]
self.schedule_host_downtime(host, start_time, end_time, fixed,
trigger_id, duration, author, comment) |
def _resolve_type(self, env, type_ref, enforce_fully_defined=False):
"""
Resolves the data type referenced by type_ref.
If `enforce_fully_defined` is True, then the referenced type must be
fully populated (fields, parent_type, ...), and not simply a forward
reference.
"""
loc = type_ref.lineno, type_ref.path
orig_namespace_name = env.namespace_name
if type_ref.ns:
# TODO(kelkabany): If a spec file imports a namespace, it is
# available to all spec files that are part of the same namespace.
# Might want to introduce the concept of an environment specific
# to a file.
if type_ref.ns not in env:
raise InvalidSpec(
'Namespace %s is not imported' % quote(type_ref.ns), *loc)
env = env[type_ref.ns]
if not isinstance(env, Environment):
raise InvalidSpec(
'%s is not a namespace.' % quote(type_ref.ns), *loc)
if type_ref.name not in env:
raise InvalidSpec(
'Symbol %s is undefined.' % quote(type_ref.name), *loc)
obj = env[type_ref.name]
if obj is Void and type_ref.nullable:
raise InvalidSpec('Void cannot be marked nullable.',
*loc)
elif inspect.isclass(obj):
resolved_data_type_args = self._resolve_args(env, type_ref.args)
data_type = self._instantiate_data_type(
obj, resolved_data_type_args, (type_ref.lineno, type_ref.path))
elif isinstance(obj, ApiRoutesByVersion):
raise InvalidSpec('A route cannot be referenced here.',
*loc)
elif type_ref.args[0] or type_ref.args[1]:
# An instance of a type cannot have any additional
# attributes specified.
raise InvalidSpec('Attributes cannot be specified for '
'instantiated type %s.' %
quote(type_ref.name),
*loc)
else:
data_type = env[type_ref.name]
if type_ref.ns:
# Add the source namespace as an import.
namespace = self.api.ensure_namespace(orig_namespace_name)
if isinstance(data_type, UserDefined):
namespace.add_imported_namespace(
self.api.ensure_namespace(type_ref.ns),
imported_data_type=True)
elif isinstance(data_type, Alias):
namespace.add_imported_namespace(
self.api.ensure_namespace(type_ref.ns),
imported_alias=True)
if (enforce_fully_defined and isinstance(data_type, UserDefined) and
data_type._is_forward_ref):
if data_type in self._resolution_in_progress:
raise InvalidSpec(
'Unresolvable circular reference for type %s.' %
quote(type_ref.name), *loc)
self._resolution_in_progress.add(data_type)
if isinstance(data_type, Struct):
self._populate_struct_type_attributes(env, data_type)
elif isinstance(data_type, Union):
self._populate_union_type_attributes(env, data_type)
self._resolution_in_progress.remove(data_type)
if type_ref.nullable:
unwrapped_dt, _ = unwrap_aliases(data_type)
if isinstance(unwrapped_dt, Nullable):
raise InvalidSpec(
'Cannot mark reference to nullable type as nullable.',
*loc)
data_type = Nullable(data_type)
return data_type | Resolves the data type referenced by type_ref.
If `enforce_fully_defined` is True, then the referenced type must be
fully populated (fields, parent_type, ...), and not simply a forward
reference. | Below is the the instruction that describes the task:
### Input:
Resolves the data type referenced by type_ref.
If `enforce_fully_defined` is True, then the referenced type must be
fully populated (fields, parent_type, ...), and not simply a forward
reference.
### Response:
def _resolve_type(self, env, type_ref, enforce_fully_defined=False):
"""
Resolves the data type referenced by type_ref.
If `enforce_fully_defined` is True, then the referenced type must be
fully populated (fields, parent_type, ...), and not simply a forward
reference.
"""
loc = type_ref.lineno, type_ref.path
orig_namespace_name = env.namespace_name
if type_ref.ns:
# TODO(kelkabany): If a spec file imports a namespace, it is
# available to all spec files that are part of the same namespace.
# Might want to introduce the concept of an environment specific
# to a file.
if type_ref.ns not in env:
raise InvalidSpec(
'Namespace %s is not imported' % quote(type_ref.ns), *loc)
env = env[type_ref.ns]
if not isinstance(env, Environment):
raise InvalidSpec(
'%s is not a namespace.' % quote(type_ref.ns), *loc)
if type_ref.name not in env:
raise InvalidSpec(
'Symbol %s is undefined.' % quote(type_ref.name), *loc)
obj = env[type_ref.name]
if obj is Void and type_ref.nullable:
raise InvalidSpec('Void cannot be marked nullable.',
*loc)
elif inspect.isclass(obj):
resolved_data_type_args = self._resolve_args(env, type_ref.args)
data_type = self._instantiate_data_type(
obj, resolved_data_type_args, (type_ref.lineno, type_ref.path))
elif isinstance(obj, ApiRoutesByVersion):
raise InvalidSpec('A route cannot be referenced here.',
*loc)
elif type_ref.args[0] or type_ref.args[1]:
# An instance of a type cannot have any additional
# attributes specified.
raise InvalidSpec('Attributes cannot be specified for '
'instantiated type %s.' %
quote(type_ref.name),
*loc)
else:
data_type = env[type_ref.name]
if type_ref.ns:
# Add the source namespace as an import.
namespace = self.api.ensure_namespace(orig_namespace_name)
if isinstance(data_type, UserDefined):
namespace.add_imported_namespace(
self.api.ensure_namespace(type_ref.ns),
imported_data_type=True)
elif isinstance(data_type, Alias):
namespace.add_imported_namespace(
self.api.ensure_namespace(type_ref.ns),
imported_alias=True)
if (enforce_fully_defined and isinstance(data_type, UserDefined) and
data_type._is_forward_ref):
if data_type in self._resolution_in_progress:
raise InvalidSpec(
'Unresolvable circular reference for type %s.' %
quote(type_ref.name), *loc)
self._resolution_in_progress.add(data_type)
if isinstance(data_type, Struct):
self._populate_struct_type_attributes(env, data_type)
elif isinstance(data_type, Union):
self._populate_union_type_attributes(env, data_type)
self._resolution_in_progress.remove(data_type)
if type_ref.nullable:
unwrapped_dt, _ = unwrap_aliases(data_type)
if isinstance(unwrapped_dt, Nullable):
raise InvalidSpec(
'Cannot mark reference to nullable type as nullable.',
*loc)
data_type = Nullable(data_type)
return data_type |
def _check_running_services(services):
"""Check that the services dict provided is actually running and provide
a list of (service, boolean) tuples for each service.
Returns both a zipped list of (service, boolean) and a list of booleans
in the same order as the services.
@param services: OrderedDict of strings: [ports], one for each service to
check.
@returns [(service, boolean), ...], : results for checks
[boolean] : just the result of the service checks
"""
services_running = [service_running(s) for s in services]
return list(zip(services, services_running)), services_running | Check that the services dict provided is actually running and provide
a list of (service, boolean) tuples for each service.
Returns both a zipped list of (service, boolean) and a list of booleans
in the same order as the services.
@param services: OrderedDict of strings: [ports], one for each service to
check.
@returns [(service, boolean), ...], : results for checks
[boolean] : just the result of the service checks | Below is the the instruction that describes the task:
### Input:
Check that the services dict provided is actually running and provide
a list of (service, boolean) tuples for each service.
Returns both a zipped list of (service, boolean) and a list of booleans
in the same order as the services.
@param services: OrderedDict of strings: [ports], one for each service to
check.
@returns [(service, boolean), ...], : results for checks
[boolean] : just the result of the service checks
### Response:
def _check_running_services(services):
"""Check that the services dict provided is actually running and provide
a list of (service, boolean) tuples for each service.
Returns both a zipped list of (service, boolean) and a list of booleans
in the same order as the services.
@param services: OrderedDict of strings: [ports], one for each service to
check.
@returns [(service, boolean), ...], : results for checks
[boolean] : just the result of the service checks
"""
services_running = [service_running(s) for s in services]
return list(zip(services, services_running)), services_running |
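A minimal sketch of the return shape, with service_running() stubbed out since the real helper queries the init system; the service names and ports are invented.
from collections import OrderedDict

RUNNING = {"apache2"}  # pretend only apache2 is up

def service_running(name):  # stand-in for the real charmhelpers call
    return name in RUNNING

services = OrderedDict([("apache2", [80]), ("haproxy", [443])])
running = [service_running(s) for s in services]
print(list(zip(services, running)))  # [('apache2', True), ('haproxy', False)]
print(running)                       # [True, False]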
def majorMinorVersion(sparkVersion):
"""
Given a Spark version string, return the (major version number, minor version number).
E.g., for 2.0.1-SNAPSHOT, return (2, 0).
>>> sparkVersion = "2.4.0"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 4)
>>> sparkVersion = "2.3.0-SNAPSHOT"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 3)
"""
m = re.search(r'^(\d+)\.(\d+)(\..*)?$', sparkVersion)
if m is not None:
return (int(m.group(1)), int(m.group(2)))
else:
raise ValueError("Spark tried to parse '%s' as a Spark" % sparkVersion +
" version string, but it could not find the major and minor" +
" version numbers.") | Given a Spark version string, return the (major version number, minor version number).
E.g., for 2.0.1-SNAPSHOT, return (2, 0).
>>> sparkVersion = "2.4.0"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 4)
>>> sparkVersion = "2.3.0-SNAPSHOT"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 3) | Below is the the instruction that describes the task:
### Input:
Given a Spark version string, return the (major version number, minor version number).
E.g., for 2.0.1-SNAPSHOT, return (2, 0).
>>> sparkVersion = "2.4.0"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 4)
>>> sparkVersion = "2.3.0-SNAPSHOT"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 3)
### Response:
def majorMinorVersion(sparkVersion):
"""
Given a Spark version string, return the (major version number, minor version number).
E.g., for 2.0.1-SNAPSHOT, return (2, 0).
>>> sparkVersion = "2.4.0"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 4)
>>> sparkVersion = "2.3.0-SNAPSHOT"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 3)
"""
m = re.search(r'^(\d+)\.(\d+)(\..*)?$', sparkVersion)
if m is not None:
return (int(m.group(1)), int(m.group(2)))
else:
raise ValueError("Spark tried to parse '%s' as a Spark" % sparkVersion +
" version string, but it could not find the major and minor" +
" version numbers.") |
def get_default_commands(self):
"""
Returns the default commands of this application.
:rtype: list[cleo.Command]
"""
commands = Application.get_default_commands(self)
self.add(ConstantsCommand())
self.add(LoaderCommand())
self.add(PyStratumCommand())
self.add(WrapperCommand())
return commands | Returns the default commands of this application.
:rtype: list[cleo.Command] | Below is the the instruction that describes the task:
### Input:
Returns the default commands of this application.
:rtype: list[cleo.Command]
### Response:
def get_default_commands(self):
"""
Returns the default commands of this application.
:rtype: list[cleo.Command]
"""
commands = Application.get_default_commands(self)
self.add(ConstantsCommand())
self.add(LoaderCommand())
self.add(PyStratumCommand())
self.add(WrapperCommand())
return commands |
def fromstr(cls, s, *, strict=True):
"""
Construct a JID out of a string containing it.
:param s: The string to parse.
:type s: :class:`str`
:param strict: Whether to enable strict parsing.
:type strict: :class:`bool`
:raises: See :class:`JID`
:return: The parsed JID
:rtype: :class:`JID`
See the :class:`JID` class level documentation for the semantics of
`strict`.
"""
nodedomain, sep, resource = s.partition("/")
if not sep:
resource = None
localpart, sep, domain = nodedomain.partition("@")
if not sep:
domain = localpart
localpart = None
return cls(localpart, domain, resource, strict=strict) | Construct a JID out of a string containing it.
:param s: The string to parse.
:type s: :class:`str`
:param strict: Whether to enable strict parsing.
:type strict: :class:`bool`
:raises: See :class:`JID`
:return: The parsed JID
:rtype: :class:`JID`
See the :class:`JID` class level documentation for the semantics of
`strict`. | Below is the the instruction that describes the task:
### Input:
Construct a JID out of a string containing it.
:param s: The string to parse.
:type s: :class:`str`
:param strict: Whether to enable strict parsing.
:type strict: :class:`bool`
:raises: See :class:`JID`
:return: The parsed JID
:rtype: :class:`JID`
See the :class:`JID` class level documentation for the semantics of
`strict`.
### Response:
def fromstr(cls, s, *, strict=True):
"""
Construct a JID out of a string containing it.
:param s: The string to parse.
:type s: :class:`str`
:param strict: Whether to enable strict parsing.
:type strict: :class:`bool`
:raises: See :class:`JID`
:return: The parsed JID
:rtype: :class:`JID`
See the :class:`JID` class level documentation for the semantics of
`strict`.
"""
nodedomain, sep, resource = s.partition("/")
if not sep:
resource = None
localpart, sep, domain = nodedomain.partition("@")
if not sep:
domain = localpart
localpart = None
return cls(localpart, domain, resource, strict=strict) |
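A standalone illustration of the partition-based splitting used above; it mirrors only the splitting step, not the validation that the JID constructor performs.
def split_jid(s):
    nodedomain, sep, resource = s.partition("/")
    if not sep:
        resource = None
    localpart, sep, domain = nodedomain.partition("@")
    if not sep:
        domain = localpart
        localpart = None
    return localpart, domain, resource

print(split_jid("romeo@montague.lit/balcony"))  # ('romeo', 'montague.lit', 'balcony')
print(split_jid("montague.lit"))                # (None, 'montague.lit', None)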
def copy(self):
"""
Return a copy of this ProtoFeed, that is, a feed with all the
same attributes.
"""
other = ProtoFeed()
for key in cs.PROTOFEED_ATTRS:
value = getattr(self, key)
if isinstance(value, pd.DataFrame):
# Pandas copy DataFrame
value = value.copy()
setattr(other, key, value)
return other | Return a copy of this ProtoFeed, that is, a feed with all the
same attributes. | Below is the the instruction that describes the task:
### Input:
Return a copy of this ProtoFeed, that is, a feed with all the
same attributes.
### Response:
def copy(self):
"""
Return a copy of this ProtoFeed, that is, a feed with all the
same attributes.
"""
other = ProtoFeed()
for key in cs.PROTOFEED_ATTRS:
value = getattr(self, key)
if isinstance(value, pd.DataFrame):
# Pandas copy DataFrame
value = value.copy()
setattr(other, key, value)
return other |
def bands(self):
"""
Bandpasses for which StarPopulation has magnitude data
"""
bands = []
for c in self.stars.columns:
if re.search('_mag',c):
bands.append(c)
return bands | Bandpasses for which StarPopulation has magnitude data | Below is the the instruction that describes the task:
### Input:
Bandpasses for which StarPopulation has magnitude data
### Response:
def bands(self):
"""
Bandpasses for which StarPopulation has magnitude data
"""
bands = []
for c in self.stars.columns:
if re.search('_mag',c):
bands.append(c)
return bands |
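A quick sketch of the column filtering with a toy DataFrame; the column names are invented, the real StarPopulation table defines its own photometric bands.
import re
import pandas as pd

stars = pd.DataFrame(columns=["mass", "J_mag", "K_mag", "age"])
bands = [c for c in stars.columns if re.search("_mag", c)]
print(bands)  # ['J_mag', 'K_mag']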
def changes(self, columns=None, recurse=True, flags=0, inflated=False):
"""
Returns a dictionary of changes that have been made
to the data from this record.
:return { <orb.Column>: ( <variant> old, <variant> new), .. }
"""
output = {}
is_record = self.isRecord()
schema = self.schema()
columns = [schema.column(c) for c in columns] if columns else \
schema.columns(recurse=recurse, flags=flags).values()
context = self.context(inflated=inflated)
with ReadLocker(self.__dataLock):
for col in columns:
old, curr = self.__values.get(col.name(), (None, None))
if col.testFlag(col.Flags.ReadOnly):
continue
elif not is_record:
old = None
check_old = col.restore(old, context)
check_curr = col.restore(curr, context)
try:
different = check_old != check_curr
except StandardError:
different = True
if different:
output[col] = (check_old, check_curr)
return output | Returns a dictionary of changes that have been made
to the data from this record.
:return { <orb.Column>: ( <variant> old, <variant> new), .. } | Below is the the instruction that describes the task:
### Input:
Returns a dictionary of changes that have been made
to the data from this record.
:return { <orb.Column>: ( <variant> old, <variant> new), .. }
### Response:
def changes(self, columns=None, recurse=True, flags=0, inflated=False):
"""
Returns a dictionary of changes that have been made
to the data from this record.
:return { <orb.Column>: ( <variant> old, <variant> new), .. }
"""
output = {}
is_record = self.isRecord()
schema = self.schema()
columns = [schema.column(c) for c in columns] if columns else \
schema.columns(recurse=recurse, flags=flags).values()
context = self.context(inflated=inflated)
with ReadLocker(self.__dataLock):
for col in columns:
old, curr = self.__values.get(col.name(), (None, None))
if col.testFlag(col.Flags.ReadOnly):
continue
elif not is_record:
old = None
check_old = col.restore(old, context)
check_curr = col.restore(curr, context)
try:
different = check_old != check_curr
except StandardError:
different = True
if different:
output[col] = (check_old, check_curr)
return output |
def im_set_topic(self, room_id, topic, **kwargs):
"""Sets the topic for the direct message"""
return self.__call_api_post('im.setTopic', roomId=room_id, topic=topic, kwargs=kwargs) | Sets the topic for the direct message | Below is the the instruction that describes the task:
### Input:
Sets the topic for the direct message
### Response:
def im_set_topic(self, room_id, topic, **kwargs):
"""Sets the topic for the direct message"""
return self.__call_api_post('im.setTopic', roomId=room_id, topic=topic, kwargs=kwargs) |
def _write_enums(self, entity_name, attributes):
""" This method writes the ouput for a particular specification.
"""
self.enum_attrs_for_locale[entity_name] = attributes;
for attribute in attributes:
enum_name = "%s%sEnum" % (entity_name, attribute.name[0].upper() + attribute.name[1:])
self.enum_list.append(enum_name)
filename = "%s%s.js" % (self._class_prefix, enum_name)
self.write(destination = self.enum_directory,
filename=filename,
template_name="enum.js.tpl",
class_prefix = self._class_prefix,
enum_name = enum_name,
allowed_choices = set(attribute.allowed_choices)) | This method writes the output for a particular specification.
### Input:
This method writes the output for a particular specification.
### Response:
def _write_enums(self, entity_name, attributes):
""" This method writes the ouput for a particular specification.
"""
self.enum_attrs_for_locale[entity_name] = attributes;
for attribute in attributes:
enum_name = "%s%sEnum" % (entity_name, attribute.name[0].upper() + attribute.name[1:])
self.enum_list.append(enum_name)
filename = "%s%s.js" % (self._class_prefix, enum_name)
self.write(destination = self.enum_directory,
filename=filename,
template_name="enum.js.tpl",
class_prefix = self._class_prefix,
enum_name = enum_name,
allowed_choices = set(attribute.allowed_choices)) |
def read_sql(sql, con, filePath, index_col=None, coerce_float=True,
params=None, parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrameModel.
Provide a filePath argument in addition to the *args/**kwargs from
pandas.read_sql and get a DataFrameModel.
NOTE: The chunksize option is overridden to None always (for now).
Reference:
http://pandas.pydata.org/pandas-docs/version/0.18.1/generated/pandas.read_sql.html
pandas.read_sql(sql, con, index_col=None, coerce_float=True,
params=None, parse_dates=None, columns=None, chunksize=None)
:return: DataFrameModel
"""
# TODO: Decide if chunksize is worth keeping and how to handle?
df = pandas.read_sql(sql, con, index_col, coerce_float,
params, parse_dates, columns, chunksize=None)
return DataFrameModel(df, filePath=filePath) | Read SQL query or database table into a DataFrameModel.
Provide a filePath argument in addition to the *args/**kwargs from
pandas.read_sql and get a DataFrameModel.
NOTE: The chunksize option is overridden to None always (for now).
Reference:
http://pandas.pydata.org/pandas-docs/version/0.18.1/generated/pandas.read_sql.html
pandas.read_sql(sql, con, index_col=None, coerce_float=True,
params=None, parse_dates=None, columns=None, chunksize=None)
:return: DataFrameModel | Below is the the instruction that describes the task:
### Input:
Read SQL query or database table into a DataFrameModel.
Provide a filePath argument in addition to the *args/**kwargs from
pandas.read_sql and get a DataFrameModel.
NOTE: The chunksize option is overridden to None always (for now).
Reference:
http://pandas.pydata.org/pandas-docs/version/0.18.1/generated/pandas.read_sql.html
pandas.read_sql(sql, con, index_col=None, coerce_float=True,
params=None, parse_dates=None, columns=None, chunksize=None)
:return: DataFrameModel
### Response:
def read_sql(sql, con, filePath, index_col=None, coerce_float=True,
params=None, parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrameModel.
Provide a filePath argument in addition to the *args/**kwargs from
pandas.read_sql and get a DataFrameModel.
NOTE: The chunksize option is overridden to None always (for now).
Reference:
http://pandas.pydata.org/pandas-docs/version/0.18.1/generated/pandas.read_sql.html
pandas.read_sql(sql, con, index_col=None, coerce_float=True,
params=None, parse_dates=None, columns=None, chunksize=None)
:return: DataFrameModel
"""
# TODO: Decide if chunksize is worth keeping and how to handle?
df = pandas.read_sql(sql, con, index_col, coerce_float,
params, parse_dates, columns, chunksize=None)
return DataFrameModel(df, filePath=filePath) |
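A hypothetical usage sketch with an in-memory SQLite table; DataFrameModel comes from the same package and is not redefined here, and the filePath argument is only a label attached to the model.
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE items (name TEXT, price REAL)")
con.execute("INSERT INTO items VALUES ('widget', 9.99)")

# Wraps the one-row result set in a DataFrameModel tied to 'items.sqlite'.
model = read_sql("SELECT * FROM items", con, filePath="items.sqlite")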
def print_raw_data_file(input_file, start_index=0, limit=200, flavor='fei4b', select=None, tdc_trig_dist=False, trigger_data_mode=0, meta_data_v2=True):
"""Printing FEI4 data from raw data file for debugging.
"""
with tb.open_file(input_file + '.h5', mode="r") as file_h5:
if meta_data_v2:
index_start = file_h5.root.meta_data.read(field='index_start')
index_stop = file_h5.root.meta_data.read(field='index_stop')
else:
index_start = file_h5.root.meta_data.read(field='start_index')
index_stop = file_h5.root.meta_data.read(field='stop_index')
total_words = 0
for read_out_index, (index_start, index_stop) in enumerate(np.column_stack((index_start, index_stop))):
if start_index < index_stop:
print "\nchunk %d with length %d (from index %d to %d)\n" % (read_out_index, (index_stop - index_start), index_start, index_stop)
raw_data = file_h5.root.raw_data.read(index_start, index_stop)
total_words += print_raw_data(raw_data=raw_data, start_index=max(start_index - index_start, 0), limit=limit - total_words, flavor=flavor, index_offset=index_start, select=select, tdc_trig_dist=tdc_trig_dist, trigger_data_mode=trigger_data_mode)
if limit and total_words >= limit:
break | Printing FEI4 data from raw data file for debugging. | Below is the the instruction that describes the task:
### Input:
Printing FEI4 data from raw data file for debugging.
### Response:
def print_raw_data_file(input_file, start_index=0, limit=200, flavor='fei4b', select=None, tdc_trig_dist=False, trigger_data_mode=0, meta_data_v2=True):
"""Printing FEI4 data from raw data file for debugging.
"""
with tb.open_file(input_file + '.h5', mode="r") as file_h5:
if meta_data_v2:
index_start = file_h5.root.meta_data.read(field='index_start')
index_stop = file_h5.root.meta_data.read(field='index_stop')
else:
index_start = file_h5.root.meta_data.read(field='start_index')
index_stop = file_h5.root.meta_data.read(field='stop_index')
total_words = 0
for read_out_index, (index_start, index_stop) in enumerate(np.column_stack((index_start, index_stop))):
if start_index < index_stop:
print "\nchunk %d with length %d (from index %d to %d)\n" % (read_out_index, (index_stop - index_start), index_start, index_stop)
raw_data = file_h5.root.raw_data.read(index_start, index_stop)
total_words += print_raw_data(raw_data=raw_data, start_index=max(start_index - index_start, 0), limit=limit - total_words, flavor=flavor, index_offset=index_start, select=select, tdc_trig_dist=tdc_trig_dist, trigger_data_mode=trigger_data_mode)
if limit and total_words >= limit:
break |
def select_delim(self, delim):
'''Select desired delimiter
Args:
delim: The delimiter character you want.
Returns:
None
Raises:
RuntimeError: Delimiter too long.
'''
size = len(delim)
if size > 20:
raise RuntimeError('Delimiter too long')
n1 = size/10
n2 = size%10
self.send('^SS'+chr(n1)+chr(n2)) | Select desired delimiter
Args:
delim: The delimiter character you want.
Returns:
None
Raises:
RuntimeError: Delimiter too long. | Below is the the instruction that describes the task:
### Input:
Select desired delimiter
Args:
delim: The delimiter character you want.
Returns:
None
Raises:
RuntimeError: Delimiter too long.
### Response:
def select_delim(self, delim):
'''Select desired delimiter
Args:
delim: The delimiter character you want.
Returns:
None
Raises:
RuntimeError: Delimiter too long.
'''
size = len(delim)
if size > 20:
raise RuntimeError('Delimiter too long')
n1 = size/10
n2 = size%10
self.send('^SS'+chr(n1)+chr(n2)) |
def checksum(value):
"""
Calculates the checksum char used for the 16th char.
Author: Vincenzo Palazzo
"""
return chr(65 + sum(CHECKSUM_TABLE[index % 2][ALPHANUMERICS_DICT[char]]
for index, char in enumerate(value)) % 26) | Calculates the checksum char used for the 16th char.
Author: Vincenzo Palazzo | Below is the the instruction that describes the task:
### Input:
Calculates the checksum char used for the 16th char.
Author: Vincenzo Palazzo
### Response:
def checksum(value):
"""
Calculates the checksum char used for the 16th char.
Author: Vincenzo Palazzo
"""
return chr(65 + sum(CHECKSUM_TABLE[index % 2][ALPHANUMERICS_DICT[char]]
for index, char in enumerate(value)) % 26) |
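A toy illustration of the parity-based table lookup only; the real CHECKSUM_TABLE and ALPHANUMERICS_DICT hold the official codice fiscale conversion values and are not reproduced here, so the tables below are placeholders.
ALPHANUMERICS_DICT = {"A": 0, "B": 1, "1": 1}  # placeholder values
CHECKSUM_TABLE = (
    [0, 1, 2],  # applied to characters at even indices (0, 2, ...)
    [1, 0, 2],  # applied to characters at odd indices (1, 3, ...)
)

def toy_checksum(value):
    total = sum(CHECKSUM_TABLE[index % 2][ALPHANUMERICS_DICT[char]]
                for index, char in enumerate(value))
    return chr(65 + total % 26)

print(toy_checksum("AB1"))  # 'B' with these toy tables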
def write_dot(build_context, conf: Config, out_f):
"""Write build graph in dot format to `out_f` file-like object."""
not_buildenv_targets = get_not_buildenv_targets(build_context)
prebuilt_targets = get_prebuilt_targets(build_context)
out_f.write('strict digraph {\n')
for node in build_context.target_graph.nodes:
if conf.show_buildenv_deps or node in not_buildenv_targets:
cached = node in prebuilt_targets
fillcolor = 'fillcolor="grey",style=filled' if cached else ''
color = TARGETS_COLORS.get(
build_context.targets[node].builder_name, 'black')
out_f.write(' "{}" [color="{}",{}];\n'.format(node, color,
fillcolor))
out_f.writelines(' "{}" -> "{}";\n'.format(u, v)
for u, v in build_context.target_graph.edges
if conf.show_buildenv_deps or
(u in not_buildenv_targets and v in not_buildenv_targets))
out_f.write('}\n\n') | Write build graph in dot format to `out_f` file-like object. | Below is the the instruction that describes the task:
### Input:
Write build graph in dot format to `out_f` file-like object.
### Response:
def write_dot(build_context, conf: Config, out_f):
"""Write build graph in dot format to `out_f` file-like object."""
not_buildenv_targets = get_not_buildenv_targets(build_context)
prebuilt_targets = get_prebuilt_targets(build_context)
out_f.write('strict digraph {\n')
for node in build_context.target_graph.nodes:
if conf.show_buildenv_deps or node in not_buildenv_targets:
cached = node in prebuilt_targets
fillcolor = 'fillcolor="grey",style=filled' if cached else ''
color = TARGETS_COLORS.get(
build_context.targets[node].builder_name, 'black')
out_f.write(' "{}" [color="{}",{}];\n'.format(node, color,
fillcolor))
out_f.writelines(' "{}" -> "{}";\n'.format(u, v)
for u, v in build_context.target_graph.edges
if conf.show_buildenv_deps or
(u in not_buildenv_targets and v in not_buildenv_targets))
out_f.write('}\n\n') |
def from_json(self, json: Map) -> Maybe[Project]:
''' Try to instantiate a Project from the given json object.
Convert the **type** key to **tpe** and its value to
Maybe.
Make sure **root** is a directory, fall back to resolution
by **tpe/name**.
Reinsert the root dir into the json dict, filter out all keys
that aren't contained in Project's fields.
Try to instantiate.
'''
root = json.get('root')\
.map(mkpath)\
.or_else(
json.get_all('type', 'name')
.flat_map2(self.resolver.type_name))
valid_fields = root\
.map(lambda a: json ** Map(root=a, tpe=json.get('type')))\
.map(lambda a: a.at(*Project._pclass_fields))
return Try(lambda: valid_fields.map(lambda kw: Project(**kw))) | Empty() | Try to instantiate a Project from the given json object.
Convert the **type** key to **tpe** and its value to
Maybe.
Make sure **root** is a directory, fall back to resolution
by **tpe/name**.
Reinsert the root dir into the json dict, filter out all keys
that aren't contained in Project's fields.
Try to instantiate. | Below is the the instruction that describes the task:
### Input:
Try to instantiate a Project from the given json object.
Convert the **type** key to **tpe** and its value to
Maybe.
Make sure **root** is a directory, fall back to resolution
by **tpe/name**.
Reinsert the root dir into the json dict, filter out all keys
that aren't contained in Project's fields.
Try to instantiate.
### Response:
def from_json(self, json: Map) -> Maybe[Project]:
''' Try to instantiate a Project from the given json object.
Convert the **type** key to **tpe** and its value to
Maybe.
Make sure **root** is a directory, fall back to resolution
by **tpe/name**.
Reinsert the root dir into the json dict, filter out all keys
that aren't contained in Project's fields.
Try to instantiate.
'''
root = json.get('root')\
.map(mkpath)\
.or_else(
json.get_all('type', 'name')
.flat_map2(self.resolver.type_name))
valid_fields = root\
.map(lambda a: json ** Map(root=a, tpe=json.get('type')))\
.map(lambda a: a.at(*Project._pclass_fields))
return Try(lambda: valid_fields.map(lambda kw: Project(**kw))) | Empty() |
def popitem(self, last=True): # pylint: disable=arguments-differ
u"""*x.popitem() → (k, v)*
Remove and return the most recently added item as a (key, value) pair
if *last* is True, else the least recently added item.
:raises KeyError: if *x* is empty.
"""
if not self:
raise KeyError('mapping is empty')
key = next((reversed if last else iter)(self))
val = self._pop(key)
return key, val | u"""*x.popitem() → (k, v)*
Remove and return the most recently added item as a (key, value) pair
if *last* is True, else the least recently added item.
:raises KeyError: if *x* is empty. | Below is the the instruction that describes the task:
### Input:
u"""*x.popitem() → (k, v)*
Remove and return the most recently added item as a (key, value) pair
if *last* is True, else the least recently added item.
:raises KeyError: if *x* is empty.
### Response:
def popitem(self, last=True): # pylint: disable=arguments-differ
u"""*x.popitem() → (k, v)*
Remove and return the most recently added item as a (key, value) pair
if *last* is True, else the least recently added item.
:raises KeyError: if *x* is empty.
"""
if not self:
raise KeyError('mapping is empty')
key = next((reversed if last else iter)(self))
val = self._pop(key)
return key, val |
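A behavioral sketch of the key selection using a plain dict (Python 3.8+ supports reversed() on dicts); the real class delegates removal to self._pop, which is not shown here.
d = {"a": 1, "b": 2, "c": 3}  # insertion order: a, b, c

last_key = next(reversed(d))  # most recently added -> 'c'
first_key = next(iter(d))     # least recently added -> 'a'
print(last_key, first_key)    # c a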
def map_attribute_to_seq(self,
attribute: str,
key_attribute: str,
value_attribute: Optional[str] = None) -> None:
"""Converts a mapping attribute to a sequence.
This function takes an attribute of this Node whose value \
is a mapping or a mapping of mappings and turns it into a \
sequence of mappings. Each entry in the original mapping is \
converted to an entry in the list. If only a key attribute is \
given, then each entry in the original mapping must map to a \
(sub)mapping. This submapping becomes the corresponding list \
entry, with the key added to it as an additional attribute. If a \
value attribute is also given, then an entry in the original \
mapping may map to any object. If the mapped-to object is a \
mapping, the conversion is as before, otherwise a new \
submapping is created, and key and value are added using the \
given key and value attribute names.
An example probably helps. If you have a Node representing \
this piece of YAML::
items:
item1:
description: Basic widget
price: 100.0
item2:
description: Premium quality widget
price: 200.0
and call map_attribute_to_seq('items', 'item_id'), then the \
Node will be modified to represent this::
items:
- item_id: item1
description: Basic widget
price: 100.0
- item_id: item2
description: Premium quality widget
price: 200.0
which once converted to an object is often easier to deal with \
in code.
Slightly more complicated, this YAML::
items:
item1: Basic widget
item2:
description: Premium quality widget
price: 200.0
when passed through map_attribute_to_seq('items', 'item_id', \
'description'), will result in the equivalent of::
items:
- item_id: item1
description: Basic widget
- item_id: item2
description: Premium quality widget
price: 200.0
If the attribute does not exist, or is not a mapping, this \
function will silently do nothing.
With thanks to the makers of the Common Workflow Language for \
the idea.
Args:
attribute: Name of the attribute whose value to modify.
key_attribute: Name of the new attribute in each item to \
add with the value of the key.
value_attribute: Name of the new attribute in each item to \
add with the original value.
"""
if not self.has_attribute(attribute):
return
attr_node = self.get_attribute(attribute)
if not attr_node.is_mapping():
return
start_mark = attr_node.yaml_node.start_mark
end_mark = attr_node.yaml_node.end_mark
object_list = []
for item_key, item_value in attr_node.yaml_node.value:
item_value_node = Node(item_value)
if not item_value_node.is_mapping():
if value_attribute is None:
return
ynode = item_value_node.yaml_node
item_value_node.make_mapping()
item_value_node.set_attribute(value_attribute, ynode)
item_value_node.set_attribute(key_attribute, item_key.value)
object_list.append(item_value_node.yaml_node)
seq_node = yaml.SequenceNode('tag:yaml.org,2002:seq', object_list,
start_mark, end_mark)
self.set_attribute(attribute, seq_node) | Converts a mapping attribute to a sequence.
This function takes an attribute of this Node whose value \
is a mapping or a mapping of mappings and turns it into a \
sequence of mappings. Each entry in the original mapping is \
converted to an entry in the list. If only a key attribute is \
given, then each entry in the original mapping must map to a \
(sub)mapping. This submapping becomes the corresponding list \
entry, with the key added to it as an additional attribute. If a \
value attribute is also given, then an entry in the original \
mapping may map to any object. If the mapped-to object is a \
mapping, the conversion is as before, otherwise a new \
submapping is created, and key and value are added using the \
given key and value attribute names.
An example probably helps. If you have a Node representing \
this piece of YAML::
items:
item1:
description: Basic widget
price: 100.0
item2:
description: Premium quality widget
price: 200.0
and call map_attribute_to_seq('items', 'item_id'), then the \
Node will be modified to represent this::
items:
- item_id: item1
description: Basic widget
price: 100.0
- item_id: item2
description: Premium quality widget
price: 200.0
which once converted to an object is often easier to deal with \
in code.
Slightly more complicated, this YAML::
items:
item1: Basic widget
item2:
description: Premium quality widget
price: 200.0
when passed through map_attribute_to_seq('items', 'item_id', \
'description'), will result in the equivalent of::
items:
- item_id: item1
description: Basic widget
- item_id: item2
description: Premium quality widget
price: 200.0
If the attribute does not exist, or is not a mapping, this \
function will silently do nothing.
With thanks to the makers of the Common Workflow Language for \
the idea.
Args:
attribute: Name of the attribute whose value to modify.
key_attribute: Name of the new attribute in each item to \
add with the value of the key.
value_attribute: Name of the new attribute in each item to \
add with the original value.
### Input:
Converts a mapping attribute to a sequence.
This function takes an attribute of this Node whose value \
is a mapping or a mapping of mappings and turns it into a \
sequence of mappings. Each entry in the original mapping is \
converted to an entry in the list. If only a key attribute is \
given, then each entry in the original mapping must map to a \
(sub)mapping. This submapping becomes the corresponding list \
entry, with the key added to it as an additional attribute. If a \
value attribute is also given, then an entry in the original \
mapping may map to any object. If the mapped-to object is a \
mapping, the conversion is as before, otherwise a new \
submapping is created, and key and value are added using the \
given key and value attribute names.
An example probably helps. If you have a Node representing \
this piece of YAML::
items:
item1:
description: Basic widget
price: 100.0
item2:
description: Premium quality widget
price: 200.0
and call map_attribute_to_seq('items', 'item_id'), then the \
Node will be modified to represent this::
items:
- item_id: item1
description: Basic widget
price: 100.0
- item_id: item2
description: Premium quality widget
price: 200.0
which once converted to an object is often easier to deal with \
in code.
Slightly more complicated, this YAML::
items:
item1: Basic widget
item2:
description: Premium quality widget
price: 200.0
when passed through map_attribute_to_seq('items', 'item_id', \
'description'), will result in the equivalent of::
items:
- item_id: item1
description: Basic widget
- item_id: item2
description: Premium quality widget
price: 200.0
If the attribute does not exist, or is not a mapping, this \
function will silently do nothing.
With thanks to the makers of the Common Workflow Language for \
the idea.
Args:
attribute: Name of the attribute whose value to modify.
key_attribute: Name of the new attribute in each item to \
add with the value of the key.
value_attribute: Name of the new attribute in each item to \
add with the original value.
### Response:
def map_attribute_to_seq(self,
attribute: str,
key_attribute: str,
value_attribute: Optional[str] = None) -> None:
"""Converts a mapping attribute to a sequence.
This function takes an attribute of this Node whose value \
is a mapping or a mapping of mappings and turns it into a \
sequence of mappings. Each entry in the original mapping is \
converted to an entry in the list. If only a key attribute is \
given, then each entry in the original mapping must map to a \
(sub)mapping. This submapping becomes the corresponding list \
entry, with the key added to it as an additional attribute. If a \
value attribute is also given, then an entry in the original \
mapping may map to any object. If the mapped-to object is a \
mapping, the conversion is as before, otherwise a new \
submapping is created, and key and value are added using the \
given key and value attribute names.
An example probably helps. If you have a Node representing \
this piece of YAML::
items:
item1:
description: Basic widget
price: 100.0
item2:
description: Premium quality widget
price: 200.0
and call map_attribute_to_seq('items', 'item_id'), then the \
Node will be modified to represent this::
items:
- item_id: item1
description: Basic widget
price: 100.0
- item_id: item2
description: Premium quality widget
price: 200.0
which once converted to an object is often easier to deal with \
in code.
Slightly more complicated, this YAML::
items:
item1: Basic widget
item2:
description: Premium quality widget
price: 200.0
when passed through map_attribute_to_seq('items', 'item_id', \
'description'), will result in the equivalent of::
items:
- item_id: item1
description: Basic widget
- item_id: item2
description: Premium quality widget
price: 200.0
If the attribute does not exist, or is not a mapping, this \
function will silently do nothing.
With thanks to the makers of the Common Workflow Language for \
the idea.
Args:
attribute: Name of the attribute whose value to modify.
key_attribute: Name of the new attribute in each item to \
add with the value of the key.
value_attribute: Name of the new attribute in each item to \
add with the original value.
"""
if not self.has_attribute(attribute):
return
attr_node = self.get_attribute(attribute)
if not attr_node.is_mapping():
return
start_mark = attr_node.yaml_node.start_mark
end_mark = attr_node.yaml_node.end_mark
object_list = []
for item_key, item_value in attr_node.yaml_node.value:
item_value_node = Node(item_value)
if not item_value_node.is_mapping():
if value_attribute is None:
return
ynode = item_value_node.yaml_node
item_value_node.make_mapping()
item_value_node.set_attribute(value_attribute, ynode)
item_value_node.set_attribute(key_attribute, item_key.value)
object_list.append(item_value_node.yaml_node)
seq_node = yaml.SequenceNode('tag:yaml.org,2002:seq', object_list,
start_mark, end_mark)
self.set_attribute(attribute, seq_node) |
def getlang_by_alpha2(code):
"""
Lookup a Language object for language code `code` based on these strategies:
- Special case rules for Hebrew and Chinese Hans/Hant scripts
- Using `alpha_2` lookup in `pycountry.languages` followed by lookup for a
language with the same `name` in the internal representaion
Returns `None` if no matching language is found.
"""
# Handle special cases for language codes returned by YouTube API
if code == 'iw': # handle old Hebrew code 'iw' and return modern code 'he'
return getlang('he')
elif 'zh-Hans' in code:
return getlang('zh-CN') # use code `zh-CN` for all simplified Chinese
elif 'zh-Hant' in code or re.match('zh(.*)?-HK', code):
return getlang('zh-TW') # use code `zh-TW` for all traditional Chinese
# extract prefix only if specified with subcode: e.g. zh-Hans --> zh
first_part = code.split('-')[0]
# See if pycountry can find this language
try:
pyc_lang = pycountry.languages.get(alpha_2=first_part)
if pyc_lang:
if hasattr(pyc_lang, 'inverted_name'):
lang_name = pyc_lang.inverted_name
else:
lang_name = pyc_lang.name
return getlang_by_name(lang_name)
else:
return None
except KeyError:
return None | Lookup a Language object for language code `code` based on these strategies:
- Special case rules for Hebrew and Chinese Hans/Hant scripts
- Using `alpha_2` lookup in `pycountry.languages` followed by lookup for a
language with the same `name` in the internal representaion
Returns `None` if no matching language is found. | Below is the the instruction that describes the task:
### Input:
Lookup a Language object for language code `code` based on these strategies:
- Special case rules for Hebrew and Chinese Hans/Hant scripts
- Using `alpha_2` lookup in `pycountry.languages` followed by lookup for a
language with the same `name` in the internal representaion
Returns `None` if no matching language is found.
### Response:
def getlang_by_alpha2(code):
"""
Lookup a Language object for language code `code` based on these strategies:
- Special case rules for Hebrew and Chinese Hans/Hant scripts
- Using `alpha_2` lookup in `pycountry.languages` followed by lookup for a
language with the same `name` in the internal representaion
Returns `None` if no matching language is found.
"""
# Handle special cases for language codes returned by YouTube API
if code == 'iw': # handle old Hebrew code 'iw' and return modern code 'he'
return getlang('he')
elif 'zh-Hans' in code:
return getlang('zh-CN') # use code `zh-CN` for all simplified Chinese
elif 'zh-Hant' in code or re.match('zh(.*)?-HK', code):
return getlang('zh-TW') # use code `zh-TW` for all traditional Chinese
# extract prefix only if specified with subcode: e.g. zh-Hans --> zh
first_part = code.split('-')[0]
# See if pycountry can find this language
try:
pyc_lang = pycountry.languages.get(alpha_2=first_part)
if pyc_lang:
if hasattr(pyc_lang, 'inverted_name'):
lang_name = pyc_lang.inverted_name
else:
lang_name = pyc_lang.name
return getlang_by_name(lang_name)
else:
return None
except KeyError:
return None |
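Hypothetical calls showing the dispatch above; the returned Language objects come from the package's internal language table, so only the special-case routing is illustrated.
getlang_by_alpha2("iw")          # legacy Hebrew code -> same as getlang("he")
getlang_by_alpha2("zh-Hans")     # simplified Chinese  -> getlang("zh-CN")
getlang_by_alpha2("zh-Hant-TW")  # traditional Chinese -> getlang("zh-TW")
getlang_by_alpha2("pt-BR")       # "pt" resolved via pycountry, then by name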
def get_package_id(self, name):
"""
Retrieve the smart package id given its English name
@param (str) name: the Aruba Smart package size name, i.e.: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size chosen.
"""
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId | Retrieve the smart package id given its English name
@param (str) name: the Aruba Smart package size name, i.e.: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size chosen. | Below is the the instruction that describes the task:
### Input:
Retrieve the smart package id given its English name
@param (str) name: the Aruba Smart package size name, i.e.: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size chosen.
### Response:
def get_package_id(self, name):
"""
Retrieve the smart package id given its English name
@param (str) name: the Aruba Smart package size name, i.e.: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size chosen.
"""
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId |
def _consolidate_auth(ssh_password=None,
ssh_pkey=None,
ssh_pkey_password=None,
allow_agent=True,
host_pkey_directories=None,
logger=None):
"""
Make sure authentication information is in place.
``ssh_pkey`` may be of classes:
- ``str`` - in this case it represents a private key file; public
key will be obtained from it
- ``paramiko.Pkey`` - it will be transparently added to loaded keys
"""
ssh_loaded_pkeys = SSHTunnelForwarder.get_keys(
logger=logger,
host_pkey_directories=host_pkey_directories,
allow_agent=allow_agent
)
if isinstance(ssh_pkey, string_types):
ssh_pkey_expanded = os.path.expanduser(ssh_pkey)
if os.path.exists(ssh_pkey_expanded):
ssh_pkey = SSHTunnelForwarder.read_private_key_file(
pkey_file=ssh_pkey_expanded,
pkey_password=ssh_pkey_password or ssh_password,
logger=logger
)
elif logger:
logger.warning('Private key file not found: {0}'
.format(ssh_pkey))
if isinstance(ssh_pkey, paramiko.pkey.PKey):
ssh_loaded_pkeys.insert(0, ssh_pkey)
if not ssh_password and not ssh_loaded_pkeys:
raise ValueError('No password or public key available!')
return (ssh_password, ssh_loaded_pkeys) | Make sure authentication information is in place.
``ssh_pkey`` may be of classes:
- ``str`` - in this case it represents a private key file; public
key will be obtained from it
- ``paramiko.Pkey`` - it will be transparently added to loaded keys | Below is the the instruction that describes the task:
### Input:
Make sure authentication information is in place.
``ssh_pkey`` may be of classes:
- ``str`` - in this case it represents a private key file; public
key will be obtained from it
- ``paramiko.Pkey`` - it will be transparently added to loaded keys
### Response:
def _consolidate_auth(ssh_password=None,
ssh_pkey=None,
ssh_pkey_password=None,
allow_agent=True,
host_pkey_directories=None,
logger=None):
"""
Make sure authentication information is in place.
``ssh_pkey`` may be of classes:
- ``str`` - in this case it represents a private key file; public
key will be obtained from it
- ``paramiko.Pkey`` - it will be transparently added to loaded keys
"""
ssh_loaded_pkeys = SSHTunnelForwarder.get_keys(
logger=logger,
host_pkey_directories=host_pkey_directories,
allow_agent=allow_agent
)
if isinstance(ssh_pkey, string_types):
ssh_pkey_expanded = os.path.expanduser(ssh_pkey)
if os.path.exists(ssh_pkey_expanded):
ssh_pkey = SSHTunnelForwarder.read_private_key_file(
pkey_file=ssh_pkey_expanded,
pkey_password=ssh_pkey_password or ssh_password,
logger=logger
)
elif logger:
logger.warning('Private key file not found: {0}'
.format(ssh_pkey))
if isinstance(ssh_pkey, paramiko.pkey.PKey):
ssh_loaded_pkeys.insert(0, ssh_pkey)
if not ssh_password and not ssh_loaded_pkeys:
raise ValueError('No password or public key available!')
return (ssh_password, ssh_loaded_pkeys) |
def _clear(self, node, left, right):
"""propagates the lazy updates for this node to the subtrees.
as a result the maxval, minval, sumval values for the node
are up to date.
"""
if self.lazyset[node] is not None: # first do the pending set
val = self.lazyset[node]
self.minval[node] = val
self.maxval[node] = val
self.sumval[node] = val * (right - left)
self.lazyset[node] = None
if left < right - 1: # not a leaf
self.lazyset[2 * node] = val # propagate to direct descendents
self.lazyadd[2 * node] = 0
self.lazyset[2 * node + 1] = val
self.lazyadd[2 * node + 1] = 0
if self.lazyadd[node] != 0: # then do the pending add
val = self.lazyadd[node]
self.minval[node] += val
self.maxval[node] += val
self.sumval[node] += val * (right - left)
self.lazyadd[node] = 0
if left < right - 1: # not at a leaf
self.lazyadd[2 * node] += val # propagate to direct descendents
self.lazyadd[2 * node + 1] += val | propagates the lazy updates for this node to the subtrees.
as a result the maxval, minval, sumval values for the node
are up to date. | Below is the the instruction that describes the task:
### Input:
propagates the lazy updates for this node to the subtrees.
as a result the maxval, minval, sumval values for the node
are up to date.
### Response:
def _clear(self, node, left, right):
"""propagates the lazy updates for this node to the subtrees.
as a result the maxval, minval, sumval values for the node
are up to date.
"""
if self.lazyset[node] is not None: # first do the pending set
val = self.lazyset[node]
self.minval[node] = val
self.maxval[node] = val
self.sumval[node] = val * (right - left)
self.lazyset[node] = None
if left < right - 1: # not a leaf
self.lazyset[2 * node] = val # propagate to direct descendents
self.lazyadd[2 * node] = 0
self.lazyset[2 * node + 1] = val
self.lazyadd[2 * node + 1] = 0
if self.lazyadd[node] != 0: # then do the pending add
val = self.lazyadd[node]
self.minval[node] += val
self.maxval[node] += val
self.sumval[node] += val * (right - left)
self.lazyadd[node] = 0
if left < right - 1: # not at a leaf
self.lazyadd[2 * node] += val # propagate to direct descendents
self.lazyadd[2 * node + 1] += val |
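A note on the ordering: a pending add stored next to a pending set was always requested after that set (assigning lazyset resets lazyadd to 0 above), so the set must be applied first and the add on top of it. A tiny numeric sketch of why the order matters:
pending_set, pending_add = 5, 2  # range-set to 5, then range-add of 2
leaf = 0
leaf = pending_set               # apply the set first ...
leaf += pending_add              # ... then the add
print(leaf)                      # 7; applying the add before the set would lose it and give 5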
def c14n_uri(sqluri):
"""
Ask the backend to c14n the uri. See register_uri_backend() for
details.
If no backend is found for this uri method, a NotImplementedError
will be raised.
"""
uri_c14n_method = _get_methods_by_uri(sqluri)[METHOD_C14N_URI]
if not uri_c14n_method:
return sqluri
return uri_c14n_method(sqluri) | Ask the backend to c14n the uri. See register_uri_backend() for
details.
If no backend is found for this uri method, a NotImplementedError
will be raised. | Below is the the instruction that describes the task:
### Input:
Ask the backend to c14n the uri. See register_uri_backend() for
details.
If no backend is found for this uri method, a NotImplementedError
will be raised.
### Response:
def c14n_uri(sqluri):
"""
Ask the backend to c14n the uri. See register_uri_backend() for
details.
If no backend is found for this uri method, a NotImplementedError
will be raised.
"""
uri_c14n_method = _get_methods_by_uri(sqluri)[METHOD_C14N_URI]
if not uri_c14n_method:
return sqluri
return uri_c14n_method(sqluri) |
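For context, a minimal sketch of the backend registry this function dispatches through. register_uri_backend, _get_methods_by_uri and METHOD_C14N_URI are names taken from the entry above; the toy implementation below, keyed on the URI scheme only, is an assumption rather than the library's actual registry.

METHOD_C14N_URI = 'c14n_uri'
_backends = {}

def register_uri_backend(scheme, c14n_method=None):
    # A backend may or may not provide a canonicalisation callable.
    _backends[scheme] = {METHOD_C14N_URI: c14n_method}

def _get_methods_by_uri(sqluri):
    scheme = sqluri.split(':', 1)[0]
    if scheme not in _backends:
        raise NotImplementedError('no backend registered for scheme %r' % scheme)
    return _backends[scheme]

# usage: a backend whose canonical form strips a trailing slash
register_uri_backend('sqlite', c14n_method=lambda uri: uri.rstrip('/'))
print(c14n_uri('sqlite:///tmp/db/'))   # -> sqlite:///tmp/db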
def age_to_date(age):
"""
    Converts an age into a year of birth. (For filtering by date of birth)
"""
today = datetime.date.today()
date = datetime.date(today.year - age - 1, today.month, today.day) + datetime.timedelta(days=1)
    return date | Converts an age into a year of birth. (For filtering by date of birth) | Below is the the instruction that describes the task:
### Input:
Converts an age into a year of birth. (For filtering by date of birth)
### Response:
def age_to_date(age):
"""
    Converts an age into a year of birth. (For filtering by date of birth)
"""
today = datetime.date.today()
date = datetime.date(today.year - age - 1, today.month, today.day) + datetime.timedelta(days=1)
return date |
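A short worked example of the arithmetic (the date is invented for illustration): if today() returns 2024-05-10, then age_to_date(30) is date(2024 - 30 - 1, 5, 10) + 1 day, i.e. datetime.date(1993, 5, 11), which can then serve as a birth-date bound when filtering by age.

# assuming today is 2024-05-10
print(age_to_date(30))   # -> 1993-05-11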
def _store_entry(self, entry):
"""
Stores a new log entry and notifies listeners
:param entry: A LogEntry object
"""
# Get the logger and log the message
self.__logs.append(entry)
# Notify listeners
for listener in self.__listeners.copy():
try:
listener.logged(entry)
except Exception as ex:
# Create a new log entry, without using logging nor notifying
# listener (to avoid a recursion)
err_entry = LogEntry(
logging.WARNING,
"Error notifying logging listener {0}: {1}".format(
listener, ex
),
sys.exc_info(),
self._context.get_bundle(),
None,
)
# Insert the new entry before the real one
self.__logs.pop()
self.__logs.append(err_entry)
self.__logs.append(entry) | Stores a new log entry and notifies listeners
:param entry: A LogEntry object | Below is the the instruction that describes the task:
### Input:
Stores a new log entry and notifies listeners
:param entry: A LogEntry object
### Response:
def _store_entry(self, entry):
"""
Stores a new log entry and notifies listeners
:param entry: A LogEntry object
"""
# Get the logger and log the message
self.__logs.append(entry)
# Notify listeners
for listener in self.__listeners.copy():
try:
listener.logged(entry)
except Exception as ex:
# Create a new log entry, without using logging nor notifying
# listener (to avoid a recursion)
err_entry = LogEntry(
logging.WARNING,
"Error notifying logging listener {0}: {1}".format(
listener, ex
),
sys.exc_info(),
self._context.get_bundle(),
None,
)
# Insert the new entry before the real one
self.__logs.pop()
self.__logs.append(err_entry)
self.__logs.append(entry) |
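A minimal sketch of the listener contract the method above relies on: a listener only needs a logged(entry) method, and a listener that raises does not break logging, since the exception is converted into a WARNING entry inserted just before the original one. How listeners are registered (e.g. an add_log_listener-style method) is assumed here, not shown in the entry.

class PrintListener:
    def logged(self, entry):
        # called once for every stored LogEntry
        print("log entry received:", entry)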
def phantomjs_fetch(self, url, task):
'''Fetch with phantomjs proxy'''
start_time = time.time()
self.on_fetch('phantomjs', task)
handle_error = lambda x: self.handle_error('phantomjs', url, task, start_time, x)
# check phantomjs proxy is enabled
if not self.phantomjs_proxy:
result = {
"orig_url": url,
"content": "phantomjs is not enabled.",
"headers": {},
"status_code": 501,
"url": url,
"time": time.time() - start_time,
"cookies": {},
"save": task.get('fetch', {}).get('save')
}
logger.warning("[501] %s:%s %s 0s", task.get('project'), task.get('taskid'), url)
raise gen.Return(result)
# setup request parameters
fetch = self.pack_tornado_request_parameters(url, task)
task_fetch = task.get('fetch', {})
for each in task_fetch:
if each not in fetch:
fetch[each] = task_fetch[each]
# robots.txt
if task_fetch.get('robots_txt', False):
user_agent = fetch['headers']['User-Agent']
can_fetch = yield self.can_fetch(user_agent, url)
if not can_fetch:
error = tornado.httpclient.HTTPError(403, 'Disallowed by robots.txt')
raise gen.Return(handle_error(error))
request_conf = {
'follow_redirects': False
}
request_conf['connect_timeout'] = fetch.get('connect_timeout', 20)
request_conf['request_timeout'] = fetch.get('request_timeout', 120) + 1
session = cookies.RequestsCookieJar()
if 'Cookie' in fetch['headers']:
c = http_cookies.SimpleCookie()
try:
c.load(fetch['headers']['Cookie'])
except AttributeError:
c.load(utils.utf8(fetch['headers']['Cookie']))
for key in c:
session.set(key, c[key])
del fetch['headers']['Cookie']
if 'cookies' in fetch:
session.update(fetch['cookies'])
del fetch['cookies']
request = tornado.httpclient.HTTPRequest(url=fetch['url'])
cookie_header = cookies.get_cookie_header(session, request)
if cookie_header:
fetch['headers']['Cookie'] = cookie_header
# making requests
fetch['headers'] = dict(fetch['headers'])
try:
request = tornado.httpclient.HTTPRequest(
url=self.phantomjs_proxy, method="POST",
body=json.dumps(fetch), **request_conf)
except Exception as e:
raise gen.Return(handle_error(e))
try:
response = yield gen.maybe_future(self.http_client.fetch(request))
except tornado.httpclient.HTTPError as e:
if e.response:
response = e.response
else:
raise gen.Return(handle_error(e))
if not response.body:
raise gen.Return(handle_error(Exception('no response from phantomjs: %r' % response)))
result = {}
try:
result = json.loads(utils.text(response.body))
assert 'status_code' in result, result
except Exception as e:
if response.error:
result['error'] = utils.text(response.error)
raise gen.Return(handle_error(e))
if result.get('status_code', 200):
logger.info("[%d] %s:%s %s %.2fs", result['status_code'],
task.get('project'), task.get('taskid'), url, result['time'])
else:
logger.error("[%d] %s:%s %s, %r %.2fs", result['status_code'],
task.get('project'), task.get('taskid'),
url, result['content'], result['time'])
raise gen.Return(result) | Fetch with phantomjs proxy | Below is the the instruction that describes the task:
### Input:
Fetch with phantomjs proxy
### Response:
def phantomjs_fetch(self, url, task):
'''Fetch with phantomjs proxy'''
start_time = time.time()
self.on_fetch('phantomjs', task)
handle_error = lambda x: self.handle_error('phantomjs', url, task, start_time, x)
# check phantomjs proxy is enabled
if not self.phantomjs_proxy:
result = {
"orig_url": url,
"content": "phantomjs is not enabled.",
"headers": {},
"status_code": 501,
"url": url,
"time": time.time() - start_time,
"cookies": {},
"save": task.get('fetch', {}).get('save')
}
logger.warning("[501] %s:%s %s 0s", task.get('project'), task.get('taskid'), url)
raise gen.Return(result)
# setup request parameters
fetch = self.pack_tornado_request_parameters(url, task)
task_fetch = task.get('fetch', {})
for each in task_fetch:
if each not in fetch:
fetch[each] = task_fetch[each]
# robots.txt
if task_fetch.get('robots_txt', False):
user_agent = fetch['headers']['User-Agent']
can_fetch = yield self.can_fetch(user_agent, url)
if not can_fetch:
error = tornado.httpclient.HTTPError(403, 'Disallowed by robots.txt')
raise gen.Return(handle_error(error))
request_conf = {
'follow_redirects': False
}
request_conf['connect_timeout'] = fetch.get('connect_timeout', 20)
request_conf['request_timeout'] = fetch.get('request_timeout', 120) + 1
session = cookies.RequestsCookieJar()
if 'Cookie' in fetch['headers']:
c = http_cookies.SimpleCookie()
try:
c.load(fetch['headers']['Cookie'])
except AttributeError:
c.load(utils.utf8(fetch['headers']['Cookie']))
for key in c:
session.set(key, c[key])
del fetch['headers']['Cookie']
if 'cookies' in fetch:
session.update(fetch['cookies'])
del fetch['cookies']
request = tornado.httpclient.HTTPRequest(url=fetch['url'])
cookie_header = cookies.get_cookie_header(session, request)
if cookie_header:
fetch['headers']['Cookie'] = cookie_header
# making requests
fetch['headers'] = dict(fetch['headers'])
try:
request = tornado.httpclient.HTTPRequest(
url=self.phantomjs_proxy, method="POST",
body=json.dumps(fetch), **request_conf)
except Exception as e:
raise gen.Return(handle_error(e))
try:
response = yield gen.maybe_future(self.http_client.fetch(request))
except tornado.httpclient.HTTPError as e:
if e.response:
response = e.response
else:
raise gen.Return(handle_error(e))
if not response.body:
raise gen.Return(handle_error(Exception('no response from phantomjs: %r' % response)))
result = {}
try:
result = json.loads(utils.text(response.body))
assert 'status_code' in result, result
except Exception as e:
if response.error:
result['error'] = utils.text(response.error)
raise gen.Return(handle_error(e))
if result.get('status_code', 200):
logger.info("[%d] %s:%s %s %.2fs", result['status_code'],
task.get('project'), task.get('taskid'), url, result['time'])
else:
logger.error("[%d] %s:%s %s, %r %.2fs", result['status_code'],
task.get('project'), task.get('taskid'),
url, result['content'], result['time'])
raise gen.Return(result) |
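A standalone sketch of the cookie folding done above before the request is forwarded to the phantomjs proxy: the incoming Cookie header is parsed with SimpleCookie, merged into a RequestsCookieJar, and re-emitted as the Cookie header of the outgoing request. The import paths below are assumptions for a plain Python 3 environment; the fetcher reaches the same objects through its own cookies, http_cookies and tornado imports.

from http import cookies as http_cookies
from requests import cookies
import tornado.httpclient

session = cookies.RequestsCookieJar()
c = http_cookies.SimpleCookie()
c.load("sessionid=abc123; theme=dark")
for key in c:
    session.set(key, c[key].value)

request = tornado.httpclient.HTTPRequest(url="http://example.com/")
print(cookies.get_cookie_header(session, request))   # e.g. "sessionid=abc123; theme=dark"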
def record(self):
# type: () -> bytes
'''
Generate a string representing the Rock Ridge Rock Ridge record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('RR record not yet initialized!')
return b'RR' + struct.pack('=BBB', RRRRRecord.length(), SU_ENTRY_VERSION, self.rr_flags) | Generate a string representing the Rock Ridge Rock Ridge record.
Parameters:
None.
Returns:
String containing the Rock Ridge record. | Below is the the instruction that describes the task:
### Input:
Generate a string representing the Rock Ridge Rock Ridge record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
### Response:
def record(self):
# type: () -> bytes
'''
Generate a string representing the Rock Ridge Rock Ridge record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('RR record not yet initialized!')
return b'RR' + struct.pack('=BBB', RRRRRecord.length(), SU_ENTRY_VERSION, self.rr_flags) |
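A quick illustration of the bytes record() produces, assuming RRRRRecord.length() is 5 and SU_ENTRY_VERSION is 1 (both values are assumed here rather than read from the library): the 'RR' signature, the entry length, the SUSP entry version, and the flags byte.

import struct
rr_flags = 0x89                                       # illustrative flag bits
print(b'RR' + struct.pack('=BBB', 5, 1, rr_flags))    # -> b'RR\x05\x01\x89'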
def list(ctx, show_hidden, oath_type, period):
"""
List all credentials.
List all credentials stored on your YubiKey.
"""
ensure_validated(ctx)
controller = ctx.obj['controller']
creds = [cred
for cred in controller.list()
if show_hidden or not cred.is_hidden
]
creds.sort()
for cred in creds:
click.echo(cred.printable_key, nl=False)
if oath_type:
click.echo(u', {}'.format(cred.oath_type.name), nl=False)
if period:
click.echo(', {}'.format(cred.period), nl=False)
click.echo() | List all credentials.
List all credentials stored on your YubiKey. | Below is the the instruction that describes the task:
### Input:
List all credentials.
List all credentials stored on your YubiKey.
### Response:
def list(ctx, show_hidden, oath_type, period):
"""
List all credentials.
List all credentials stored on your YubiKey.
"""
ensure_validated(ctx)
controller = ctx.obj['controller']
creds = [cred
for cred in controller.list()
if show_hidden or not cred.is_hidden
]
creds.sort()
for cred in creds:
click.echo(cred.printable_key, nl=False)
if oath_type:
click.echo(u', {}'.format(cred.oath_type.name), nl=False)
if period:
click.echo(', {}'.format(cred.period), nl=False)
click.echo() |
def _encode(self, obj, context):
"""Encodes a class to a lower-level object using the class' own
to_construct function.
If no such function is defined, returns the object unchanged.
"""
func = getattr(obj, 'to_construct', None)
if callable(func):
return func(context)
else:
return obj | Encodes a class to a lower-level object using the class' own
to_construct function.
If no such function is defined, returns the object unchanged. | Below is the the instruction that describes the task:
### Input:
Encodes a class to a lower-level object using the class' own
to_construct function.
If no such function is defined, returns the object unchanged.
### Response:
def _encode(self, obj, context):
"""Encodes a class to a lower-level object using the class' own
to_construct function.
If no such function is defined, returns the object unchanged.
"""
func = getattr(obj, 'to_construct', None)
if callable(func):
return func(context)
else:
return obj |
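A minimal sketch of the convention _encode supports: any object exposing a to_construct(context) method is converted to a lower-level container before building, while plain objects pass through unchanged. The class below is illustrative and not taken from the surrounding project.

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def to_construct(self, context):
        # return the plain mapping the underlying construct struct expects
        return {'x': self.x, 'y': self.y}

# adapter._encode(Point(1, 2), ctx)      -> {'x': 1, 'y': 2}
# adapter._encode({'x': 1, 'y': 2}, ctx) -> the dict, unchanged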
def daterange(start, end, delta=timedelta(days=1), lower=Interval.CLOSED, upper=Interval.OPEN):
"""Returns a generator which creates the next value in the range on demand"""
date_interval = Interval(lower=lower, lower_value=start, upper_value=end, upper=upper)
current = start if start in date_interval else start + delta
while current in date_interval:
yield current
current = current + delta | Returns a generator which creates the next value in the range on demand | Below is the the instruction that describes the task:
### Input:
Returns a generator which creates the next value in the range on demand
### Response:
def daterange(start, end, delta=timedelta(days=1), lower=Interval.CLOSED, upper=Interval.OPEN):
"""Returns a generator which creates the next value in the range on demand"""
date_interval = Interval(lower=lower, lower_value=start, upper_value=end, upper=upper)
current = start if start in date_interval else start + delta
while current in date_interval:
yield current
current = current + delta |
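Usage sketch: with the default closed lower bound and open upper bound this behaves like range() for dates, so the start is included and the end is not. Interval is assumed to be the helper class the surrounding module already imports.

import datetime
start = datetime.date(2020, 1, 1)
end = datetime.date(2020, 1, 4)
print(list(daterange(start, end)))
# -> [datetime.date(2020, 1, 1), datetime.date(2020, 1, 2), datetime.date(2020, 1, 3)]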
def open(self, filename, mode='r'):
"""Returns file object from given filename."""
self._raise_if_none()
fn = path_join(self.path, filename)
return open(fn, mode) | Returns file object from given filename. | Below is the the instruction that describes the task:
### Input:
Returns file object from given filename.
### Response:
def open(self, filename, mode='r'):
"""Returns file object from given filename."""
self._raise_if_none()
fn = path_join(self.path, filename)
return open(fn, mode) |
def commit(self, template_id, ext_json, version, description):
"""
        Upload mini program code for an authorized mini program account
        For details, please refer to
        https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1489140610_Uavc4
        :param template_id: ID of the code template in the code library
        :param ext_json: third-party custom configuration
        :param version: code version number, customizable by the developer
        :param description: code description, customizable by the developer
"""
return self._post(
'wxa/commit',
data={
'template_id': template_id,
'ext_json': ext_json,
'user_version': version,
'user_desc': description,
},
        ) | Upload mini program code for an authorized mini program account
        For details, please refer to
        https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1489140610_Uavc4
        :param template_id: ID of the code template in the code library
        :param ext_json: third-party custom configuration
        :param version: code version number, customizable by the developer
        :param description: code description, customizable by the developer | Below is the the instruction that describes the task:
### Input:
Upload mini program code for an authorized mini program account
For details, please refer to
https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1489140610_Uavc4
:param template_id: ID of the code template in the code library
:param ext_json: third-party custom configuration
:param version: code version number, customizable by the developer
:param description: code description, customizable by the developer
### Response:
def commit(self, template_id, ext_json, version, description):
"""
    Upload mini program code for an authorized mini program account
    For details, please refer to
    https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1489140610_Uavc4
    :param template_id: ID of the code template in the code library
    :param ext_json: third-party custom configuration
    :param version: code version number, customizable by the developer
    :param description: code description, customizable by the developer
"""
return self._post(
'wxa/commit',
data={
'template_id': template_id,
'ext_json': ext_json,
'user_version': version,
'user_desc': description,
},
) |
def file_create(filename, settings):
"""
Creates a file.
Args:
filename (str): Filename.
settings (dict): Must be {"content": actual_content}
"""
if len(settings) != 1:
raise ValueError("Settings must only contain one item with key "
"'content'.")
for k, v in settings.items():
if k == "content":
with open(filename, 'w') as f:
f.write(v) | Creates a file.
Args:
filename (str): Filename.
settings (dict): Must be {"content": actual_content} | Below is the the instruction that describes the task:
### Input:
Creates a file.
Args:
filename (str): Filename.
settings (dict): Must be {"content": actual_content}
### Response:
def file_create(filename, settings):
"""
Creates a file.
Args:
filename (str): Filename.
settings (dict): Must be {"content": actual_content}
"""
if len(settings) != 1:
raise ValueError("Settings must only contain one item with key "
"'content'.")
for k, v in settings.items():
if k == "content":
with open(filename, 'w') as f:
f.write(v) |
def create_virtualenv(venv=VENV):
"""Creates the virtual environment and installs PIP only into the
virtual environment
"""
print 'Creating venv...',
run_command(['virtualenv', '-q', '--no-site-packages', VENV])
print 'done.'
print 'Installing pip in virtualenv...',
if not run_command([WITH_VENV, 'easy_install', 'pip']).strip():
die("Failed to install pip.")
print 'done.'
print 'Installing distribute in virtualenv...'
pip_install('distribute>=0.6.24')
print 'done.' | Creates the virtual environment and installs PIP only into the
virtual environment | Below is the the instruction that describes the task:
### Input:
Creates the virtual environment and installs PIP only into the
virtual environment
### Response:
def create_virtualenv(venv=VENV):
"""Creates the virtual environment and installs PIP only into the
virtual environment
"""
print 'Creating venv...',
run_command(['virtualenv', '-q', '--no-site-packages', VENV])
print 'done.'
print 'Installing pip in virtualenv...',
if not run_command([WITH_VENV, 'easy_install', 'pip']).strip():
die("Failed to install pip.")
print 'done.'
print 'Installing distribute in virtualenv...'
pip_install('distribute>=0.6.24')
print 'done.' |
def diff(name_a, name_b=None, **kwargs):
'''
Display the difference between a snapshot of a given filesystem and
another snapshot of that filesystem from a later time or the current
contents of the filesystem.
name_a : string
name of snapshot
name_b : string
(optional) name of snapshot or filesystem
show_changetime : boolean
display the path's inode change time as the first column of output. (default = True)
show_indication : boolean
display an indication of the type of file. (default = True)
parsable : boolean
if true we don't parse the timestamp to a more readable date (default = True)
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.diff myzpool/mydataset@yesterday myzpool/mydataset
'''
## Configure command
# NOTE: initialize the defaults
flags = ['-H']
target = []
# NOTE: set extra config from kwargs
if kwargs.get('show_changetime', True):
flags.append('-t')
if kwargs.get('show_indication', True):
flags.append('-F')
# NOTE: update target
target.append(name_a)
if name_b:
target.append(name_b)
## Diff filesystem/snapshot
res = __salt__['cmd.run_all'](
__utils__['zfs.zfs_command'](
command='diff',
flags=flags,
target=target,
),
python_shell=False,
)
if res['retcode'] != 0:
return __utils__['zfs.parse_command_result'](res)
else:
if not kwargs.get('parsable', True) and kwargs.get('show_changetime', True):
ret = OrderedDict()
for entry in res['stdout'].splitlines():
entry = entry.split()
entry_timestamp = __utils__['dateutils.strftime'](entry[0], '%Y-%m-%d.%H:%M:%S.%f')
entry_data = "\t\t".join(entry[1:])
ret[entry_timestamp] = entry_data
else:
ret = res['stdout'].splitlines()
return ret | Display the difference between a snapshot of a given filesystem and
another snapshot of that filesystem from a later time or the current
contents of the filesystem.
name_a : string
name of snapshot
name_b : string
(optional) name of snapshot or filesystem
show_changetime : boolean
display the path's inode change time as the first column of output. (default = True)
show_indication : boolean
display an indication of the type of file. (default = True)
parsable : boolean
if true we don't parse the timestamp to a more readable date (default = True)
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.diff myzpool/mydataset@yesterday myzpool/mydataset | Below is the the instruction that describes the task:
### Input:
Display the difference between a snapshot of a given filesystem and
another snapshot of that filesystem from a later time or the current
contents of the filesystem.
name_a : string
name of snapshot
name_b : string
(optional) name of snapshot or filesystem
show_changetime : boolean
display the path's inode change time as the first column of output. (default = True)
show_indication : boolean
display an indication of the type of file. (default = True)
parsable : boolean
if true we don't parse the timestamp to a more readable date (default = True)
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.diff myzpool/mydataset@yesterday myzpool/mydataset
### Response:
def diff(name_a, name_b=None, **kwargs):
'''
Display the difference between a snapshot of a given filesystem and
another snapshot of that filesystem from a later time or the current
contents of the filesystem.
name_a : string
name of snapshot
name_b : string
(optional) name of snapshot or filesystem
show_changetime : boolean
display the path's inode change time as the first column of output. (default = True)
show_indication : boolean
display an indication of the type of file. (default = True)
parsable : boolean
if true we don't parse the timestamp to a more readable date (default = True)
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.diff myzpool/mydataset@yesterday myzpool/mydataset
'''
## Configure command
# NOTE: initialize the defaults
flags = ['-H']
target = []
# NOTE: set extra config from kwargs
if kwargs.get('show_changetime', True):
flags.append('-t')
if kwargs.get('show_indication', True):
flags.append('-F')
# NOTE: update target
target.append(name_a)
if name_b:
target.append(name_b)
## Diff filesystem/snapshot
res = __salt__['cmd.run_all'](
__utils__['zfs.zfs_command'](
command='diff',
flags=flags,
target=target,
),
python_shell=False,
)
if res['retcode'] != 0:
return __utils__['zfs.parse_command_result'](res)
else:
if not kwargs.get('parsable', True) and kwargs.get('show_changetime', True):
ret = OrderedDict()
for entry in res['stdout'].splitlines():
entry = entry.split()
entry_timestamp = __utils__['dateutils.strftime'](entry[0], '%Y-%m-%d.%H:%M:%S.%f')
entry_data = "\t\t".join(entry[1:])
ret[entry_timestamp] = entry_data
else:
ret = res['stdout'].splitlines()
return ret |
def _graphql_query_waittime(self, query_hash: str, current_time: float, untracked_queries: bool = False) -> int:
"""Calculate time needed to wait before GraphQL query can be executed."""
sliding_window = 660
if query_hash not in self._graphql_query_timestamps:
self._graphql_query_timestamps[query_hash] = []
self._graphql_query_timestamps[query_hash] = list(filter(lambda t: t > current_time - 60 * 60,
self._graphql_query_timestamps[query_hash]))
reqs_in_sliding_window = list(filter(lambda t: t > current_time - sliding_window,
self._graphql_query_timestamps[query_hash]))
count_per_sliding_window = self._graphql_request_count_per_sliding_window(query_hash)
if len(reqs_in_sliding_window) < count_per_sliding_window and not untracked_queries:
return max(0, self._graphql_earliest_next_request_time - current_time)
next_request_time = min(reqs_in_sliding_window) + sliding_window + 6
if untracked_queries:
self._graphql_earliest_next_request_time = next_request_time
return round(max(next_request_time, self._graphql_earliest_next_request_time) - current_time) | Calculate time needed to wait before GraphQL query can be executed. | Below is the the instruction that describes the task:
### Input:
Calculate time needed to wait before GraphQL query can be executed.
### Response:
def _graphql_query_waittime(self, query_hash: str, current_time: float, untracked_queries: bool = False) -> int:
"""Calculate time needed to wait before GraphQL query can be executed."""
sliding_window = 660
if query_hash not in self._graphql_query_timestamps:
self._graphql_query_timestamps[query_hash] = []
self._graphql_query_timestamps[query_hash] = list(filter(lambda t: t > current_time - 60 * 60,
self._graphql_query_timestamps[query_hash]))
reqs_in_sliding_window = list(filter(lambda t: t > current_time - sliding_window,
self._graphql_query_timestamps[query_hash]))
count_per_sliding_window = self._graphql_request_count_per_sliding_window(query_hash)
if len(reqs_in_sliding_window) < count_per_sliding_window and not untracked_queries:
return max(0, self._graphql_earliest_next_request_time - current_time)
next_request_time = min(reqs_in_sliding_window) + sliding_window + 6
if untracked_queries:
self._graphql_earliest_next_request_time = next_request_time
return round(max(next_request_time, self._graphql_earliest_next_request_time) - current_time) |
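A worked example with invented numbers: suppose the per-window budget returned by _graphql_request_count_per_sliding_window is 3, current_time is 1000.0, and the tracked timestamps for this query hash are 400, 600 and 900. All three fall inside the 660-second sliding window, so the window is full and the earliest permissible next request is min(400, 600, 900) + 660 + 6 = 1066, giving a returned wait of round(1066 - 1000) = 66 seconds (assuming no later untracked-query bound is already in force).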
def fmsin(N, fnormin=0.05, fnormax=0.45, period=None, t0=None, fnorm0=0.25, pm1=1):
"""
Signal with sinusoidal frequency modulation.
generates a frequency modulation with a sinusoidal frequency.
This sinusoidal modulation is designed such that the instantaneous
frequency at time T0 is equal to FNORM0, and the ambiguity between
increasing or decreasing frequency is solved by PM1.
N : number of points.
FNORMIN : smallest normalized frequency (default: 0.05)
FNORMAX : highest normalized frequency (default: 0.45)
PERIOD : period of the sinusoidal fm (default: N )
T0 : time reference for the phase (default: N/2 )
FNORM0 : normalized frequency at time T0 (default: 0.25)
PM1 : frequency direction at T0 (-1 or +1) (default: +1 )
Returns:
Y : signal
IFLAW : its instantaneous frequency law
Example:
z,i=fmsin(140,0.05,0.45,100,20,0.3,-1.0)
Original MATLAB code F. Auger, July 1995.
(note: Licensed under GPL; see main LICENSE file)
"""
if period==None:
period = N
if t0==None:
t0 = N/2
pm1 = nx.sign(pm1)
fnormid=0.5*(fnormax+fnormin);
delta =0.5*(fnormax-fnormin);
phi =-pm1*nx.arccos((fnorm0-fnormid)/delta);
time =nx.arange(1,N)-t0;
phase =2*nx.pi*fnormid*time+delta*period*(nx.sin(2*nx.pi*time/period+phi)-nx.sin(phi));
y =nx.exp(1j*phase)
iflaw =fnormid+delta*nx.cos(2*nx.pi*time/period+phi);
return y,iflaw | Signal with sinusoidal frequency modulation.
generates a frequency modulation with a sinusoidal frequency.
This sinusoidal modulation is designed such that the instantaneous
frequency at time T0 is equal to FNORM0, and the ambiguity between
increasing or decreasing frequency is solved by PM1.
N : number of points.
FNORMIN : smallest normalized frequency (default: 0.05)
FNORMAX : highest normalized frequency (default: 0.45)
PERIOD : period of the sinusoidal fm (default: N )
T0 : time reference for the phase (default: N/2 )
FNORM0 : normalized frequency at time T0 (default: 0.25)
PM1 : frequency direction at T0 (-1 or +1) (default: +1 )
Returns:
Y : signal
IFLAW : its instantaneous frequency law
Example:
z,i=fmsin(140,0.05,0.45,100,20,0.3,-1.0)
Original MATLAB code F. Auger, July 1995.
(note: Licensed under GPL; see main LICENSE file) | Below is the the instruction that describes the task:
### Input:
Signal with sinusoidal frequency modulation.
generates a frequency modulation with a sinusoidal frequency.
This sinusoidal modulation is designed such that the instantaneous
frequency at time T0 is equal to FNORM0, and the ambiguity between
increasing or decreasing frequency is solved by PM1.
N : number of points.
FNORMIN : smallest normalized frequency (default: 0.05)
FNORMAX : highest normalized frequency (default: 0.45)
PERIOD : period of the sinusoidal fm (default: N )
T0 : time reference for the phase (default: N/2 )
FNORM0 : normalized frequency at time T0 (default: 0.25)
PM1 : frequency direction at T0 (-1 or +1) (default: +1 )
Returns:
Y : signal
IFLAW : its instantaneous frequency law
Example:
z,i=fmsin(140,0.05,0.45,100,20,0.3,-1.0)
Original MATLAB code F. Auger, July 1995.
(note: Licensed under GPL; see main LICENSE file)
### Response:
def fmsin(N, fnormin=0.05, fnormax=0.45, period=None, t0=None, fnorm0=0.25, pm1=1):
"""
Signal with sinusoidal frequency modulation.
generates a frequency modulation with a sinusoidal frequency.
This sinusoidal modulation is designed such that the instantaneous
frequency at time T0 is equal to FNORM0, and the ambiguity between
increasing or decreasing frequency is solved by PM1.
N : number of points.
FNORMIN : smallest normalized frequency (default: 0.05)
FNORMAX : highest normalized frequency (default: 0.45)
PERIOD : period of the sinusoidal fm (default: N )
T0 : time reference for the phase (default: N/2 )
FNORM0 : normalized frequency at time T0 (default: 0.25)
PM1 : frequency direction at T0 (-1 or +1) (default: +1 )
Returns:
Y : signal
IFLAW : its instantaneous frequency law
Example:
z,i=fmsin(140,0.05,0.45,100,20,0.3,-1.0)
Original MATLAB code F. Auger, July 1995.
(note: Licensed under GPL; see main LICENSE file)
"""
if period==None:
period = N
if t0==None:
t0 = N/2
pm1 = nx.sign(pm1)
fnormid=0.5*(fnormax+fnormin);
delta =0.5*(fnormax-fnormin);
phi =-pm1*nx.arccos((fnorm0-fnormid)/delta);
time =nx.arange(1,N)-t0;
phase =2*nx.pi*fnormid*time+delta*period*(nx.sin(2*nx.pi*time/period+phi)-nx.sin(phi));
y =nx.exp(1j*phase)
iflaw =fnormid+delta*nx.cos(2*nx.pi*time/period+phi);
return y,iflaw |
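Usage sketch mirroring the docstring example; nx is assumed to be the numpy alias the entry above already uses.

import numpy as nx
y, iflaw = fmsin(140, 0.05, 0.45, 100, 20, 0.3, -1.0)
# y is the complex FM signal and iflaw its normalized instantaneous frequency,
# oscillating between roughly 0.05 and 0.45 with a period of 100 samples and
# passing through 0.3 with decreasing frequency around sample 20.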
def visit_FunctionBody(self, node):
"""Visitor for `FunctionBody` AST node."""
for child in node.children:
return_value = self.visit(child)
if isinstance(child, ReturnStatement):
return return_value
if isinstance(child, (IfStatement, WhileStatement)):
if return_value is not None:
return return_value
return NoneType() | Visitor for `FunctionBody` AST node. | Below is the the instruction that describes the task:
### Input:
Visitor for `FunctionBody` AST node.
### Response:
def visit_FunctionBody(self, node):
"""Visitor for `FunctionBody` AST node."""
for child in node.children:
return_value = self.visit(child)
if isinstance(child, ReturnStatement):
return return_value
if isinstance(child, (IfStatement, WhileStatement)):
if return_value is not None:
return return_value
return NoneType() |
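For context, a minimal sketch of the visit() dispatcher such visitor methods usually hang off; the real interpreter's version may differ, so treat the names below as assumptions.

class NodeVisitor:
    def visit(self, node):
        # route to visit_<ClassName>, falling back to generic_visit
        method = 'visit_' + type(node).__name__
        return getattr(self, method, self.generic_visit)(node)

    def generic_visit(self, node):
        raise RuntimeError('no visit_{} method defined'.format(type(node).__name__))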
def putmask(self, value, blc=(), trc=(), inc=()):
"""Put image mask.
Using the arguments blc (bottom left corner), trc (top right corner),
and inc (stride) it is possible to put a data slice. Not all axes
need to be specified. Missing values default to begin, end, and 1.
The data should be a numpy array. Its dimensionality must be the same
as the dimensionality of the image.
Note that the casacore images use the convention that a mask value
True means good and False means bad. However, numpy uses the opposite.
        Therefore the mask will be negated, so a numpy masked array can be given
directly.
The mask is not written if the image has no mask and if it the entire
mask is False. In that case the mask most likely comes from a getmask
operation on an image without a mask.
"""
# casa and numpy have opposite flags
return self._putmask(~value, self._adjustBlc(blc),
self._adjustInc(inc)) | Put image mask.
Using the arguments blc (bottom left corner), trc (top right corner),
and inc (stride) it is possible to put a data slice. Not all axes
need to be specified. Missing values default to begin, end, and 1.
The data should be a numpy array. Its dimensionality must be the same
as the dimensionality of the image.
Note that the casacore images use the convention that a mask value
True means good and False means bad. However, numpy uses the opposite.
        Therefore the mask will be negated, so a numpy masked array can be given
directly.
The mask is not written if the image has no mask and if it the entire
mask is False. In that case the mask most likely comes from a getmask
operation on an image without a mask. | Below is the the instruction that describes the task:
### Input:
Put image mask.
Using the arguments blc (bottom left corner), trc (top right corner),
and inc (stride) it is possible to put a data slice. Not all axes
need to be specified. Missing values default to begin, end, and 1.
The data should be a numpy array. Its dimensionality must be the same
as the dimensionality of the image.
Note that the casacore images use the convention that a mask value
True means good and False means bad. However, numpy uses the opposite.
        Therefore the mask will be negated, so a numpy masked array can be given
directly.
The mask is not written if the image has no mask and if it the entire
mask is False. In that case the mask most likely comes from a getmask
operation on an image without a mask.
### Response:
def putmask(self, value, blc=(), trc=(), inc=()):
"""Put image mask.
Using the arguments blc (bottom left corner), trc (top right corner),
and inc (stride) it is possible to put a data slice. Not all axes
need to be specified. Missing values default to begin, end, and 1.
The data should be a numpy array. Its dimensionality must be the same
as the dimensionality of the image.
Note that the casacore images use the convention that a mask value
True means good and False means bad. However, numpy uses the opposite.
        Therefore the mask will be negated, so a numpy masked array can be given
directly.
The mask is not written if the image has no mask and if it the entire
mask is False. In that case the mask most likely comes from a getmask
operation on an image without a mask.
"""
# casa and numpy have opposite flags
return self._putmask(~value, self._adjustBlc(blc),
self._adjustInc(inc)) |
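Usage sketch (python-casacore assumed available, image name hypothetical): because putmask negates the value, the mask of a numpy masked array, where True marks bad data, can be passed straight through.

import numpy as np
data = np.ma.masked_invalid(np.array([[1.0, np.nan], [3.0, 4.0]]))
# im = image('my.img')        # a 2x2 casacore image opened elsewhere
# im.putmask(data.mask)       # the NaN pixel ends up flagged as bad in the image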