def getprefixes(self):
"""Add prefixes for each namespace referenced by parameter types."""
namespaces = []
for l in (self.params, self.types):
for t,r in l:
ns = r.namespace()
if ns[1] is None: continue
if ns[1] in namespaces: continue
if Namespace.xs(ns) or Namespace.xsd(ns):
continue
namespaces.append(ns[1])
if t == r: continue
ns = t.namespace()
if ns[1] is None: continue
if ns[1] in namespaces: continue
namespaces.append(ns[1])
i = 0
namespaces.sort()
for u in namespaces:
p = self.nextprefix()
ns = (p, u)
        self.prefixes.append(ns)
def select(table, key='default'):
"""Select dialect
:param key: a key for your dabtabase you wanna use
"""
database = choice(__db[key + '.slave'])
    return database.select(table)
def dispatch(self, *args, **kwargs):
"""This decorator sets this view to have restricted permissions."""
    return super(AnimalDelete, self).dispatch(*args, **kwargs)
def evolve(self, new_date):
"""
evolve to the new process state at the next date, i.e. do one step in the simulation
:param date new_date: date of the new state
:return State:
"""
self.state = [p.evolve(new_date) for p in self.producers]
    return self.state
def run(self):
"""Lunch checks and triggers updates on BIRD configuration."""
# Lunch a thread for each configuration
if not self.services:
self.log.warning("no service checks are configured")
else:
self.log.info("going to lunch %s threads", len(self.services))
if self.config.has_option('daemon', 'splay_startup'):
splay_startup = self.config.getfloat('daemon', 'splay_startup')
else:
splay_startup = None
for service in self.services:
self.log.debug("lunching thread for %s", service)
_config = {}
for option, getter in SERVICE_OPTIONS_TYPE.items():
try:
_config[option] = getattr(self.config, getter)(service,
option)
except NoOptionError:
pass # for optional settings
_thread = ServiceCheck(service, _config, self.action,
splay_startup)
_thread.start()
# Stay running until we are stopped
while True:
# Fetch items from action queue
operation = self.action.get(block=True)
if isinstance(operation, ServiceCheckDiedError):
self.log.critical(operation)
self.log.critical("This is a fatal error and the only way to "
"recover is to restart, thus exiting with a "
"non-zero code and let systemd act by "
"triggering a restart")
sys.exit(1)
self.log.info("returned an item from the queue for %s with IP "
"prefix %s and action to %s Bird configuration",
operation.name,
operation.ip_prefix,
operation)
bird_updated = self._update_bird_conf_file(operation)
self.action.task_done()
if bird_updated:
ip_version = operation.ip_version
if operation.bird_reconfigure_cmd is None:
reconfigure_bird(
self.bird_configuration[ip_version]['reconfigure_cmd'])
else:
                run_custom_bird_reconfigure(operation)
def open_using_pefile(input_name, input_bytes):
    ''' Open the PE file using the Python pefile module. '''
    try:
        pef = pefile.PE(data=input_bytes, fast_load=False)
    except (AttributeError, pefile.PEFormatError) as error:
        print('warning: pe_fail (with exception from pefile module) on file: %s' % input_name)
        error_str = '(Exception): %s' % (str(error))
        return None, error_str
    # Now test whether the features are there/extractable; if not, return a FAIL flag
    if pef.PE_TYPE is None or pef.OPTIONAL_HEADER is None or len(pef.OPTIONAL_HEADER.DATA_DIRECTORY) < 7:
        print('warning: pe_fail on file: %s' % input_name)
        error_str = 'warning: pe_fail on file: %s' % input_name
        return None, error_str
    # Success
    return pef, None
def get(self):
"""Retrieve a formated text string"""
output = ''
for f in self.functions:
output += self.underline(f) + '\n\n'
if f in self.synopsises and isinstance(self.synopsises[f], str):
output += self.format(self.synopsises[f]) + '\n'
if f in self.descriptions and isinstance(
self.descriptions[f], str
):
output += self.format(self.descriptions[f], indent=8) + '\n'
if f in self.examples and isinstance(self.examples[f], str):
output += self.format(self.examples[f], indent=8) + '\n'
output += '\n'
    return output
def _space_delimited_list(value):
'''
validate that a value contains one or more space-delimited values
'''
if isinstance(value, six.string_types):
items = value.split(' ')
valid = items and all(items)
else:
valid = hasattr(value, '__iter__') and (value != [])
if valid:
return True, 'space-delimited string'
    return False, '{0} is not a valid list.\n'.format(value)
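# A quick usage sketch for the validator above (assuming it is importable from
# this module); note that a doubled space produces an empty item and fails.
print(_space_delimited_list('eth0 eth1'))       # (True, 'space-delimited string')
print(_space_delimited_list('eth0  eth1'))      # (False, '... is not a valid list.\n')
print(_space_delimited_list(['eth0', 'eth1']))  # (True, 'space-delimited string')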
def numpy_weighted_median(data, weights=None):
"""Calculate the weighted median of an array/list using numpy."""
import numpy as np
if weights is None:
return np.median(np.array(data).flatten())
data, weights = np.array(data).flatten(), np.array(weights).flatten()
if any(weights > 0):
sorted_data, sorted_weights = map(np.array, zip(*sorted(zip(data, weights))))
midpoint = 0.5 * sum(sorted_weights)
if any(weights > midpoint):
return (data[weights == np.max(weights)])[0]
cumulative_weight = np.cumsum(sorted_weights)
below_midpoint_index = np.where(cumulative_weight <= midpoint)[0][-1]
if cumulative_weight[below_midpoint_index] - midpoint < sys.float_info.epsilon:
return np.mean(sorted_data[below_midpoint_index:below_midpoint_index+2])
    return sorted_data[below_midpoint_index+1]
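# Sanity-check sketch for the weighted median above (assumes numpy is installed
# and `sys` is imported at module level for the epsilon comparison).
print(numpy_weighted_median([10, 20, 30]))                     # 20.0 (plain median)
print(numpy_weighted_median([10, 35, 20], weights=[1, 3, 1]))  # 35 -- one weight exceeds the midpoint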
def impersonate(self, user, enterprise):
""" Impersonate a user in a enterprise
Args:
user: the name of the user to impersonate
enterprise: the name of the enterprise where to use impersonation
"""
if not user or not enterprise:
        raise ValueError('You must set a user name and an enterprise name to begin impersonation')
self._is_impersonating = True
self._impersonation = "%s@%s" % (user, enterprise) | Impersonate a user in a enterprise
Args:
user: the name of the user to impersonate
enterprise: the name of the enterprise where to use impersonation |
def copy_foreign_keys(self, event):
"""Copies possible foreign key values from the object into the Event,
skipping common keys like modified and created.
Args:
event (Event): The Event instance to copy the FKs into
obj (fleaker.db.Model): The object to pull the values from
"""
event_keys = set(event._meta.fields.keys())
obj_keys = self._meta.fields.keys()
matching_keys = event_keys.intersection(obj_keys)
for key in matching_keys:
# Skip created_by because that will always be the current_user
# for the Event.
if key == 'created_by':
continue
# Skip anything that isn't a FK
if not isinstance(self._meta.fields[key], peewee.ForeignKeyField):
continue
setattr(event, key, getattr(self, key))
# Attempt to set the obj's ID in the correct FK field on Event, if it
# exists. If this conflicts with desired behavior, handle this in the
# respective callback. This does rely on the FK matching the lower case
# version of the class name and that the event isn't trying to delete
    # the current record, because that ends badly.
possible_key = self.__class__.__name__.lower()
if possible_key in event_keys and event.code != 'AUDIT_DELETE':
        setattr(event, possible_key, self)
def parse_source_file(file_name):
"""
Parses the AST of Python file for lines containing
references to the argparse module.
returns the collection of ast objects found.
Example client code:
1. parser = ArgumentParser(desc="My help Message")
2. parser.add_argument('filename', help="Name of the file to load")
    3. parser.add_argument('-f', '--format', help="Format of output \nOptions: ['md', 'html']")
4. args = parser.parse_args()
Variables:
* nodes Primary syntax tree object
* argparse_assignments The assignment of the ArgumentParser (line 1 in example code)
* add_arg_assignments Calls to add_argument() (lines 2-3 in example code)
* parser_var_name The instance variable of the ArgumentParser (line 1 in example code)
* ast_source The curated collection of all parser related nodes in the client code
"""
with open(file_name, 'r') as f:
s = f.read()
nodes = ast.parse(s)
module_imports = get_nodes_by_instance_type(nodes, _ast.Import)
specific_imports = get_nodes_by_instance_type(nodes, _ast.ImportFrom)
assignment_objs = get_nodes_by_instance_type(nodes, _ast.Assign)
call_objects = get_nodes_by_instance_type(nodes, _ast.Call)
argparse_assignments = get_nodes_by_containing_attr(assignment_objs, 'ArgumentParser')
group_arg_assignments = get_nodes_by_containing_attr(assignment_objs, 'add_argument_group')
add_arg_assignments = get_nodes_by_containing_attr(call_objects, 'add_argument')
parse_args_assignment = get_nodes_by_containing_attr(call_objects, 'parse_args')
# there are cases where we have custom argparsers, such as subclassing ArgumentParser. The above
# will fail on this. However, we can use the methods known to ArgumentParser to do a duck-type like
# approach to finding what is the arg parser
if not argparse_assignments:
aa_references = set([i.func.value.id for i in chain(add_arg_assignments, parse_args_assignment)])
argparse_like_objects = [getattr(i.value.func, 'id', None) for p_ref in aa_references for i in get_nodes_by_containing_attr(assignment_objs, p_ref)]
argparse_like_objects = filter(None, argparse_like_objects)
argparse_assignments = [get_nodes_by_containing_attr(assignment_objs, i) for i in argparse_like_objects]
# for now, we just choose one
try:
argparse_assignments = argparse_assignments[0]
except IndexError:
pass
# get things that are assigned inside ArgumentParser or its methods
argparse_assigned_variables = get_node_args_and_keywords(assignment_objs, argparse_assignments, 'ArgumentParser')
add_arg_assigned_variables = get_node_args_and_keywords(assignment_objs, add_arg_assignments, 'add_argument')
parse_args_assigned_variables = get_node_args_and_keywords(assignment_objs, parse_args_assignment, 'parse_args')
ast_argparse_source = chain(
module_imports,
specific_imports,
argparse_assigned_variables,
add_arg_assigned_variables,
parse_args_assigned_variables,
argparse_assignments,
group_arg_assignments,
add_arg_assignments,
)
    return ast_argparse_source
def _deactivate(self):
"""Remove the fetcher from cache and mark it not active."""
self.cache.remove_fetcher(self)
if self.active:
        self._deactivated()
def GetAttributeNames(self):
"""Retrieves the names of all attributes.
Returns:
list[str]: attribute names.
"""
attribute_names = []
for attribute_name in iter(self.__dict__.keys()):
# Not using startswith to improve performance.
if attribute_name[0] == '_':
continue
attribute_names.append(attribute_name)
    return attribute_names
def yaml_conf_as_dict(file_path, encoding=None):
"""
读入 yaml 配置文件,返回根据配置文件内容生成的字典类型变量
:param:
* file_path: (string) 需要读入的 yaml 配置文件长文件名
* encoding: (string) 文件编码
* msg: (string) 读取配置信息
:return:
* flag: (bool) 读取配置文件是否正确,正确返回 True,错误返回 False
* d: (dict) 如果读取配置文件正确返回的包含配置文件内容的字典,字典内容顺序与配置文件顺序保持一致
举例如下::
print('--- yaml_conf_as_dict demo---')
# 定义配置文件名
conf_filename = 'test_conf.yaml'
# 读取配置文件
ds = yaml_conf_as_dict(conf_filename, encoding='utf-8')
# 显示是否成功,所有 dict 的内容,dict 的 key 数量
print('flag:', ds[0])
print('dict length:', len(ds[1]))
print('msg:', len(ds[1]))
print('conf info: ', ds[1].get('tree'))
print('---')
执行结果::
--- yaml_conf_as_dict demo---
flag: True
dict length: 2
msg: Success
conf info: ['README.md', 'requirements.txt', {'hellopackage': ['__init__.py']},
{'test': ['__init__.py']}, {'doc': ['doc.rst']}]
---
"""
if not pathlib.Path(file_path).is_file():
return False, {}, 'File not exist'
try:
if sys.version > '3':
with open(file_path, 'r', encoding=encoding) as f:
d = OrderedDict(yaml.load(f.read()))
return True, d, 'Success'
else:
with open(file_path, 'r') as f:
d = OrderedDict(yaml.load(f.read()))
return True, d, 'Success'
    except Exception:
        return False, {}, 'Unknown error'
def crawl(plugin):
'''Performs a breadth-first crawl of all possible routes from the
starting path. Will only visit a URL once, even if it is referenced
multiple times in a plugin. Requires user interaction in between each
fetch.
'''
# TODO: use OrderedSet?
paths_visited = set()
paths_to_visit = set(item.get_path() for item in once(plugin))
while paths_to_visit and continue_or_quit():
path = paths_to_visit.pop()
paths_visited.add(path)
# Run the new listitem
patch_plugin(plugin, path)
new_paths = set(item.get_path() for item in once(plugin))
# Filter new items by checking against urls_visited and
# urls_tovisit
paths_to_visit.update(path for path in new_paths
                              if path not in paths_visited)
def _get_gos_upper(self, ntpltgo1, max_upper, go2parentids):
"""Plot a GO DAG for the upper portion of a single Group of user GOs."""
# Get GO IDs which are in the hdrgo path
goids_possible = ntpltgo1.gosubdag.go2obj.keys()
# Get upper GO IDs which have the most descendants
    return self._get_gosrcs_upper(goids_possible, max_upper, go2parentids)
def docker_fabric(*args, **kwargs):
"""
:param args: Positional arguments to Docker client.
:param kwargs: Keyword arguments to Docker client.
:return: Docker client.
:rtype: dockerfabric.apiclient.DockerFabricClient | dockerfabric.cli.DockerCliClient
"""
ci = kwargs.get('client_implementation') or env.get('docker_fabric_implementation') or CLIENT_API
if ci == CLIENT_API:
return docker_api(*args, **kwargs)
elif ci == CLIENT_CLI:
return docker_cli(*args, **kwargs)
raise ValueError("Invalid client implementation.", ci) | :param args: Positional arguments to Docker client.
:param kwargs: Keyword arguments to Docker client.
:return: Docker client.
:rtype: dockerfabric.apiclient.DockerFabricClient | dockerfabric.cli.DockerCliClient |
def _hashable_bytes(data):
"""
Coerce strings to hashable bytes.
"""
if isinstance(data, bytes):
return data
elif isinstance(data, str):
return data.encode('ascii') # Fail on anything non-ASCII.
else:
        raise TypeError(data)
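# Behaviour sketch for the coercion helper above.
assert _hashable_bytes(b'abc') == b'abc'  # bytes pass through untouched
assert _hashable_bytes('abc') == b'abc'   # str is ASCII-encoded
# _hashable_bytes('café') raises UnicodeEncodeError; _hashable_bytes(42) raises TypeError.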
def displacements(self):
"""Return displacements
Returns
-------
There are two types of displacement dataset. See the docstring
of set_displacement_dataset about types 1 and 2 for displacement
dataset format.
Type-1, List of list
    The internal list has 4 elements such as [32, 0.01, 0.0, 0.0].
The first element is the supercell atom index starting with 0.
The remaining three elements give the displacement in Cartesian
coordinates.
Type-2, array_like
Displacements of all atoms of all supercells in Cartesian
coordinates.
shape=(supercells, natom, 3)
dtype='double'
"""
disps = []
if 'first_atoms' in self._displacement_dataset:
for disp in self._displacement_dataset['first_atoms']:
x = disp['displacement']
disps.append([disp['number'], x[0], x[1], x[2]])
elif 'displacements' in self._displacement_dataset:
disps = self._displacement_dataset['displacements']
    return disps
def get_sync_binding_cmds(self, switch_bindings, expected_bindings):
"""Returns the list of commands required to synchronize ACL bindings
1. Delete any unexpected bindings
2. Add any missing bindings
"""
switch_cmds = list()
# Update any necessary switch interface ACLs
bindings_to_delete = switch_bindings - expected_bindings
bindings_to_add = expected_bindings - switch_bindings
for intf, acl, direction in bindings_to_delete:
switch_cmds.extend(['interface %s' % intf,
'no ip access-group %s %s' %
(acl, direction),
'exit'])
for intf, acl, direction in bindings_to_add:
switch_cmds.extend(['interface %s' % intf,
'ip access-group %s %s' % (acl, direction),
'exit'])
    return switch_cmds
def tcl_list_2_py_list(tcl_list, within_tcl_str=False):
""" Convert Tcl list to Python list using Tcl interpreter.
    :param tcl_list: string representing the Tcl list.
    :param within_tcl_str: True - Tcl list is embedded within a Tcl string. False - native Tcl string.
    :return: Python list equivalent to the Tcl list.
    :rtype: list
"""
if not within_tcl_str:
tcl_list = tcl_str(tcl_list)
    return tcl_interp_g.eval('join ' + tcl_list + ' LiStSeP').split('LiStSeP') if tcl_list else []
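# Standalone illustration of the join/split trick above, using tkinter's Tcl
# interpreter as a stand-in for this module's tcl_interp_g (an assumption).
from tkinter import Tcl

interp = Tcl()
print(interp.eval('join {{a b} c {d e f}} LiStSeP').split('LiStSeP'))
# ['a b', 'c', 'd e f'] -- nested Tcl elements survive as single Python items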
def select_image_layer(infiles, output_file, log, context):
"""Selects the image layer for the output page. If possible this is the
orientation-corrected input page, or an image of the whole page converted
to PDF."""
options = context.get_options()
page_pdf = next(ii for ii in infiles if ii.endswith('.ocr.oriented.pdf'))
image = next(ii for ii in infiles if ii.endswith('.image'))
if options.lossless_reconstruction:
log.debug(
f"{page_number(page_pdf):4d}: page eligible for lossless reconstruction"
)
re_symlink(page_pdf, output_file, log) # Still points to multipage
return
pageinfo = get_pageinfo(image, context)
# We rasterize a square DPI version of each page because most image
# processing tools don't support rectangular DPI. Use the square DPI as it
# accurately describes the image. It would be possible to resample the image
# at this stage back to non-square DPI to more closely resemble the input,
# except that the hocr renderer does not understand non-square DPI. The
# sandwich renderer would be fine.
dpi = get_page_square_dpi(pageinfo, options)
layout_fun = img2pdf.get_fixed_dpi_layout_fun((dpi, dpi))
    # This creates a single-page PDF
with open(image, 'rb') as imfile, open(output_file, 'wb') as pdf:
log.debug(f'{page_number(page_pdf):4d}: convert')
img2pdf.convert(
imfile, with_pdfrw=False, layout_fun=layout_fun, outputstream=pdf
)
        log.debug(f'{page_number(page_pdf):4d}: convert done')
def getSenderNumberMgtURL(self, CorpNum, UserID):
""" 팩스 전송내역 팝업 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException
"""
result = self._httpget('/FAX/?TG=SENDER', CorpNum, UserID)
    return result.url
def approximator(molecules, options, sort_order=None, frameworks=[], ensemble=[]):
"""
recursively rank queries
:param molecules:
:param options:
:param sort_order:
:param frameworks:
:param ensemble:
:return:
"""
# set variables
ensemble_size = options.ensemble_size
if not sort_order:
sort_order = classification.get_sort_order(molecules)
# construct ensemble
print("Performing calculations for ensemble size {s}".format(s=(len(ensemble) + 1)))
ensemble = rank_queries(molecules, ensemble, sort_order, options)
# write stats & ensemble
output.write_ensemble(list(ensemble), options)
if len(ensemble) == ensemble_size:
return 1
else:
        return approximator(molecules, options, sort_order, frameworks, ensemble)
def delete_option_value_by_id(cls, option_value_id, **kwargs):
"""Delete OptionValue
Delete an instance of OptionValue by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_option_value_by_id(option_value_id, async=True)
>>> result = thread.get()
:param async bool
:param str option_value_id: ID of optionValue to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_option_value_by_id_with_http_info(option_value_id, **kwargs)
else:
(data) = cls._delete_option_value_by_id_with_http_info(option_value_id, **kwargs)
        return data
def sort_canonical(keyword, stmts):
"""Sort all `stmts` in the canonical order defined by `keyword`.
Return the sorted list. The `stmt` list is not modified.
If `keyword` does not have a canonical order, the list is returned
as is.
"""
try:
(_arg_type, subspec) = stmt_map[keyword]
except KeyError:
return stmts
res = []
# keep the order of data definition statements and case
keep = [s[0] for s in data_def_stmts] + ['case']
for (kw, _spec) in flatten_spec(subspec):
# keep comments before a statement together with that statement
comments = []
for s in stmts:
if s.keyword == '_comment':
comments.append(s)
elif s.keyword == kw and kw not in keep:
res.extend(comments)
comments = []
res.append(s)
else:
comments = []
# then copy all other statements (extensions)
res.extend([stmt for stmt in stmts if stmt not in res])
    return res
def render_arrow(self, label, start, end, direction, i):
"""Render individual arrow.
label (unicode): Dependency label.
start (int): Index of start word.
end (int): Index of end word.
direction (unicode): Arrow direction, 'left' or 'right'.
i (int): Unique ID, typically arrow index.
RETURNS (unicode): Rendered SVG markup.
"""
level = self.levels.index(end - start) + 1
x_start = self.offset_x + start * self.distance + self.arrow_spacing
if self.direction == "rtl":
x_start = self.width - x_start
y = self.offset_y
x_end = (
self.offset_x
+ (end - start) * self.distance
+ start * self.distance
- self.arrow_spacing * (self.highest_level - level) / 4
)
if self.direction == "rtl":
x_end = self.width - x_end
y_curve = self.offset_y - level * self.distance / 2
if self.compact:
y_curve = self.offset_y - level * self.distance / 6
if y_curve == 0 and len(self.levels) > 5:
y_curve = -self.distance
arrowhead = self.get_arrowhead(direction, x_start, y, x_end)
arc = self.get_arc(x_start, y, y_curve, x_end)
label_side = "right" if self.direction == "rtl" else "left"
return TPL_DEP_ARCS.format(
id=self.id,
i=i,
stroke=self.arrow_stroke,
head=arrowhead,
label=label,
label_side=label_side,
arc=arc,
    )
def _get_top_level_secrets(self):
"""
Convert the top-level 'secrets' directive to the Docker format
:return: secrets dict
"""
top_level_secrets = dict()
if self.secrets:
for secret, secret_definition in iteritems(self.secrets):
if isinstance(secret_definition, dict):
for key, value in iteritems(secret_definition):
name = '{}_{}'.format(secret, key)
top_level_secrets[name] = dict(external=True)
elif isinstance(secret_definition, string_types):
top_level_secrets[secret] = dict(external=True)
    return top_level_secrets
def write_file(self, fileobject, skip_unknown=False):
"""Write the PKG-INFO format data to a file object."""
self.set_metadata_version()
for field in _version2fieldlist(self['Metadata-Version']):
values = self.get(field)
if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
continue
if field in _ELEMENTSFIELD:
self._write_field(fileobject, field, ','.join(values))
continue
if field not in _LISTFIELDS:
if field == 'Description':
if self.metadata_version in ('1.0', '1.1'):
values = values.replace('\n', '\n ')
else:
values = values.replace('\n', '\n |')
values = [values]
if field in _LISTTUPLEFIELDS:
values = [','.join(value) for value in values]
for value in values:
            self._write_field(fileobject, field, value)
def _execute_comprehension(self, node: Union[ast.ListComp, ast.SetComp, ast.GeneratorExp, ast.DictComp]) -> Any:
"""Compile the generator or comprehension from the node and execute the compiled code."""
args = [ast.arg(arg=name) for name in sorted(self._name_to_value.keys())]
func_def_node = ast.FunctionDef(
name="generator_expr",
args=ast.arguments(args=args, kwonlyargs=[], kw_defaults=[], defaults=[]),
decorator_list=[],
body=[ast.Return(node)])
module_node = ast.Module(body=[func_def_node])
ast.fix_missing_locations(module_node)
code = compile(source=module_node, filename='<ast>', mode='exec')
module_locals = {} # type: Dict[str, Any]
module_globals = {} # type: Dict[str, Any]
exec(code, module_globals, module_locals) # pylint: disable=exec-used
generator_expr_func = module_locals["generator_expr"]
    return generator_expr_func(**self._name_to_value)
def date(name=None):
"""
Creates the grammar for a Date (D) field, accepting only numbers in a
certain pattern.
:param name: name for the field
:return: grammar for the date field
"""
if name is None:
name = 'Date Field'
# Basic field
# This regex allows values from 00000101 to 99991231
field = pp.Regex('[0-9][0-9][0-9][0-9](0[1-9]|1[0-2])'
'(0[1-9]|[1-2][0-9]|3[0-1])')
# Parse action
field.setParseAction(lambda d: datetime.datetime.strptime(d[0], '%Y%m%d')
.date())
# Name
field.setName(name)
# White spaces are not removed
field.leaveWhitespace()
    return field
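# Usage sketch for the grammar factory above (relies on the module's pyparsing
# and datetime imports).
date_field = date()
parsed = date_field.parseString('20240229')
print(parsed[0])  # 2024-02-29, a datetime.date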
def set_color_temperature(self, temperature, effect=EFFECT_SUDDEN, transition_time=MIN_TRANSITION_TIME):
"""
Set the white color temperature. The bulb must be switched on.
:param temperature: color temperature to set. It can be between 1700 and 6500 K
:param effect: if the change is made suddenly or smoothly
    :param transition_time: if the change is made smoothly, the time in ms the change lasts
    :type temperature: int
    :type effect: str
    :type transition_time: int
"""
# Check bulb state
if self.is_off():
raise Exception("set_color_temperature can't be used if the bulb is off. Turn it on first")
# Input validation
schema = Schema({'temperature': All(int, Range(min=1700, max=6500)),
'effect': Any(self.EFFECT_SUDDEN, self.EFFECT_SMOOTH),
'transition_time': All(int, Range(min=30))})
schema({'temperature': temperature, 'effect': effect, 'transition_time': transition_time})
# Send command
params = [temperature, effect, transition_time]
self.api_call.operate_on_bulb("set_ct_abx", params)
# Update property
    self.property[self.PROPERTY_NAME_COLOR_TEMPERATURE] = temperature
def output_to_terminal(sources):
"""Print statistics to the terminal"""
results = OrderedDict()
for source in sources:
if source.get_is_available():
source.update()
results.update(source.get_summary())
for key, value in results.items():
sys.stdout.write(str(key) + ": " + str(value) + ", ")
sys.stdout.write("\n")
    sys.exit()
def reverse_transform(self, column):
"""Applies the natural logarithm function to turn positive values into real ranged values.
Args:
column (pandas.DataFrame): Data to transform.
Returns:
pd.DataFrame
"""
self.check_data_type()
    return pd.DataFrame({self.col_name: np.log(column[self.col_name])})
def link(self, mu, dist):
"""
    GLM link function
    this is useful for going from mu to the linear prediction
    Parameters
    ----------
    mu : array-like of length n
dist : Distribution instance
Returns
-------
lp : np.array of length n
"""
    return np.log(mu) - np.log(dist.levels - mu)
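# Numeric sketch of the link above: with a hypothetical distribution whose
# `levels` is 1, it reduces to the familiar logit transform.
import numpy as np

class _FakeDist:   # stand-in for a Distribution instance (assumption)
    levels = 1

mu = np.array([0.25, 0.5, 0.75])
print(np.log(mu) - np.log(_FakeDist.levels - mu))  # [-1.0986  0.  1.0986]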
def node2geoff(node_name, properties, encoder):
"""converts a NetworkX node into a Geoff string.
Parameters
----------
node_name : str or int
the ID of a NetworkX node
properties : dict
a dictionary of node attributes
encoder : json.JSONEncoder
an instance of a JSON encoder (e.g. `json.JSONEncoder`)
Returns
-------
geoff : str
a Geoff string
"""
if properties:
return '({0} {1})'.format(node_name,
encoder.encode(properties))
else:
        return '({0})'.format(node_name)
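# Usage sketch for the converter above.
import json

enc = json.JSONEncoder()
print(node2geoff('alice', {'age': 30}, enc))  # (alice {"age": 30})
print(node2geoff('bob', {}, enc))             # (bob) -- empty properties are omitted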
def data(self):
"""load and cache data in json format
"""
if self.is_obsolete():
data = self.get_data()
for datum in data:
if 'published_parsed' in datum:
datum['published_parsed'] = \
self.parse_time(datum['published_parsed'])
try:
dumped_data = json.dumps(data)
        except (TypeError, ValueError):
self.update_cache(data)
else:
self.update_cache(dumped_data)
return data
    try:
        return json.loads(self.cache_data)
    except (TypeError, ValueError):
        return self.cache_data
def enabled(self, value):
"""
Setter for **self.__enabled** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("enabled", value)
    self.__enabled = value
def set_target(self, target):
'''
:param target: target object
'''
self.target = target
if target:
self.target.set_fuzzer(self)
    return self
def _create_hashes(self,count):
"""
Breaks up our hash into slots, so we can pull them out later.
Essentially, it splits our SHA/MD5/etc into X parts.
"""
for i in range(0,count):
#Get 1/numblocks of the hash
blocksize = int(len(self.hexdigest) / count)
currentstart = (1 + i) * blocksize - blocksize
currentend = (1 +i) * blocksize
self.hasharray.append(int(self.hexdigest[currentstart:currentend],16))
# Workaround for adding more sets in 2019.
# We run out of blocks, because we use some for each set, whether it's called or not.
# I can't easily change this without invalidating every hash so far :/
# This shouldn't reduce the security since it should only draw from one set of these in practice.
    self.hasharray = self.hasharray + self.hasharray
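# The slicing logic above, shown outside the class: split a hex digest into
# `count` equal integer blocks (a sketch, not the class's full behaviour).
import hashlib

hexdigest = hashlib.md5(b'example').hexdigest()  # 32 hex characters
count = 4
blocksize = len(hexdigest) // count
blocks = [int(hexdigest[i * blocksize:(i + 1) * blocksize], 16) for i in range(count)]
print(blocks)  # four integers, one per 8-character slice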
def render_tag(tag, attrs=None, content=None, close=True):
"""
    Render an HTML tag
"""
builder = "<{tag}{attrs}>{content}"
if content or close:
builder += "</{tag}>"
return format_html(
builder,
tag=tag,
attrs=mark_safe(flatatt(attrs)) if attrs else "",
content=text_value(content),
    )
def extract_data(self, page):
"""Extract the AppNexus object or list of objects from the response"""
response_keys = set(page.keys())
uncommon_keys = response_keys - self.common_keys
for possible_data_key in uncommon_keys:
element = page[possible_data_key]
if isinstance(element, dict):
return [self.representation(self.client, self.service_name,
element)]
if isinstance(element, list):
return [self.representation(self.client, self.service_name, x)
                for x in element]
def balance(address):
"""
Takes a single address and returns the current balance.
"""
txhistory = Address.transactions(address)
balance = 0
for i in txhistory:
if i.recipientId == address:
balance += i.amount
if i.senderId == address:
balance -= (i.amount + i.fee)
delegates = Delegate.delegates()
for i in delegates:
if address == i.address:
forged_blocks = Delegate.blocks(i.pubkey)
for block in forged_blocks:
balance += (block.reward + block.totalFee)
if balance < 0:
height = Node.height()
        logger.fatal('Negative balance for address {0}, Nodeheight: {1}'.format(address, height))
        raise NegativeBalanceError('Negative balance for address {0}, Nodeheight: {1}'.format(address, height))
    return balance
def set(self, section, key, value):
"""
    Creates the section if it does not exist and sets the value.
Use write_config to actually set the value.
"""
    if section not in self.config:
self.config.add_section(section)
    self.config.set(section, key, value)
def form_valid(self, form):
"""
Processes a valid form submittal.
:param form: the form instance.
:rtype: django.http.HttpResponse.
"""
#noinspection PyAttributeOutsideInit
self.object = form.save()
meta = getattr(self.object, '_meta')
# Index the object.
for backend in get_search_backends():
        backend.add(self.object)
#noinspection PyUnresolvedReferences
messages.success(
self.request,
_(u'{0} "{1}" saved.').format(
meta.verbose_name,
str(self.object)
),
buttons=[messages.button(
reverse(
'{0}:edit'.format(self.url_namespace),
args=(self.object.id,)
),
_(u'Edit')
)]
)
    return redirect(self.get_success_url())
def profile_form_factory():
"""Create a profile form."""
if current_app.config['USERPROFILES_EMAIL_ENABLED']:
return EmailProfileForm(
formdata=None,
username=current_userprofile.username,
full_name=current_userprofile.full_name,
email=current_user.email,
email_repeat=current_user.email,
prefix='profile', )
else:
return ProfileForm(
formdata=None,
obj=current_userprofile,
            prefix='profile', )
def remove_properties_containing_None(properties_dict):
"""
    removes keys from a dict whose values are None
json schema validation might fail if they are set and
the type or format of the property does not match
"""
# remove empty properties - as validations may fail
new_dict = dict()
for key in properties_dict.keys():
value = properties_dict[key]
if value is not None:
new_dict[key] = value
    return new_dict
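# Behaviour sketch: only None is dropped; falsy-but-set values survive.
props = {'name': 'demo', 'size': None, 'count': 0, 'tags': []}
print(remove_properties_containing_None(props))
# {'name': 'demo', 'count': 0, 'tags': []}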
def replace_in_files(search, replace, depth=0, paths=None, confirm=True):
"""
Does a line-by-line search and replace, but only up to the "depth" line.
"""
# have the user select some files
    if paths is None:
paths = _s.dialogs.MultipleFiles('DIS AND DAT|*.*')
if paths == []: return
for path in paths:
lines = read_lines(path)
if depth: N=min(len(lines),depth)
else: N=len(lines)
for n in range(0,N):
if lines[n].find(search) >= 0:
lines[n] = lines[n].replace(search,replace)
print(path.split(_os.path.pathsep)[-1]+ ': "'+lines[n]+'"')
# only write if we're not confirming
if not confirm:
_os.rename(path, path+".backup")
write_to_file(path, join(lines, ''))
if confirm:
if input("yes? ")=="yes":
replace_in_files(search,replace,depth,paths,False)
    return
def host_create(host, groups, interfaces, **kwargs):
'''
.. versionadded:: 2016.3.0
Create new host
.. note::
This function accepts all standard host properties: keyword argument
names differ depending on your zabbix version, see here__.
.. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/host/object#host
:param host: technical name of the host
:param groups: groupids of host groups to add the host to
:param interfaces: interfaces to be created for the host
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:param visible_name: string with visible name of the host, use
'visible_name' instead of 'name' parameter to not mess with value
supplied from Salt sls file.
    :return: ID of the created host.
CLI Example:
.. code-block:: bash
salt '*' zabbix.host_create technicalname 4
interfaces='{type: 1, main: 1, useip: 1, ip: "192.168.3.1", dns: "", port: 10050}'
visible_name='Host Visible Name' inventory_mode=0 inventory='{"alias": "something"}'
'''
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
method = 'host.create'
params = {"host": host}
# Groups
if not isinstance(groups, list):
groups = [groups]
grps = []
for group in groups:
grps.append({"groupid": group})
params['groups'] = grps
# Interfaces
if not isinstance(interfaces, list):
interfaces = [interfaces]
params['interfaces'] = interfaces
params = _params_extend(params, _ignore_name=True, **kwargs)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return ret['result']['hostids']
else:
raise KeyError
except KeyError:
        return ret
def create_app():
""" Flask application factory """
# Setup Flask and load app.config
app = Flask(__name__)
app.config.from_object(__name__+'.ConfigClass')
# Setup Flask-MongoEngine
db = MongoEngine(app)
# Define the User document.
# NB: Make sure to add flask_user UserMixin !!!
class User(db.Document, UserMixin):
active = db.BooleanField(default=True)
# User authentication information
username = db.StringField(default='')
password = db.StringField()
# User information
first_name = db.StringField(default='')
last_name = db.StringField(default='')
# Relationships
roles = db.ListField(db.StringField(), default=[])
# Setup Flask-User and specify the User data-model
user_manager = UserManager(app, db, User)
# The Home page is accessible to anyone
@app.route('/')
def home_page():
# String-based templates
return render_template_string("""
{% extends "flask_user_layout.html" %}
{% block content %}
<h2>Home page</h2>
<p><a href={{ url_for('user.register') }}>Register</a></p>
<p><a href={{ url_for('user.login') }}>Sign in</a></p>
<p><a href={{ url_for('home_page') }}>Home page</a> (accessible to anyone)</p>
<p><a href={{ url_for('member_page') }}>Member page</a> (login required)</p>
<p><a href={{ url_for('user.logout') }}>Sign out</a></p>
{% endblock %}
""")
# The Members page is only accessible to authenticated users via the @login_required decorator
@app.route('/members')
@login_required # User must be authenticated
def member_page():
# String-based templates
return render_template_string("""
{% extends "flask_user_layout.html" %}
{% block content %}
<h2>Members page</h2>
<p><a href={{ url_for('user.register') }}>Register</a></p>
<p><a href={{ url_for('user.login') }}>Sign in</a></p>
<p><a href={{ url_for('home_page') }}>Home page</a> (accessible to anyone)</p>
<p><a href={{ url_for('member_page') }}>Member page</a> (login required)</p>
<p><a href={{ url_for('user.logout') }}>Sign out</a></p>
{% endblock %}
""")
    return app
def _app_exec(self, package, action, params=None):
"""
meta method for all interactions with apps
:param package: name of package/app
:type package: str
:param action: the action to be executed
:type action: str
:param params: optional parameters for this action
:type params: dict
:return: None
:rtype: None
"""
# get list of possible commands from app.actions
allowed_commands = []
for app in self.get_apps_list():
if app.package == package:
allowed_commands = list(app.actions.keys())
break
# check if action is in this list
assert(action in allowed_commands)
cmd, url = DEVICE_URLS["do_action"]
# get widget id for the package
widget_id = self._get_widget_id(package)
url = url.format('{}', package, widget_id)
json_data = {"id": action}
if params is not None:
json_data["params"] = params
    self.result = self._exec(cmd, url, json_data=json_data)
def copy(self, tx_ins=None, tx_outs=None, lock_time=None,
expiry_height=None, tx_joinsplits=None, joinsplit_pubkey=None,
joinsplit_sig=None):
'''
OverwinterTx, ... -> OverwinterTx
Makes a copy. Allows over-writing specific pieces.
'''
return OverwinterTx(
tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
lock_time=(lock_time if lock_time is not None
else self.lock_time),
expiry_height=(expiry_height if expiry_height is not None
else self.expiry_height),
tx_joinsplits=(tx_joinsplits if tx_joinsplits is not None
else self.tx_joinsplits),
joinsplit_pubkey=(joinsplit_pubkey if joinsplit_pubkey is not None
else self.joinsplit_pubkey),
joinsplit_sig=(joinsplit_sig if joinsplit_sig is not None
                           else self.joinsplit_sig))
def _merge_patches(self):
"""Injects object patches into their original object definitions."""
for patched_item, patched_namespace in self._patch_data_by_canonical_name.values():
patched_item_base_name = self._get_base_name(patched_item.name, patched_namespace.name)
if patched_item_base_name not in self._item_by_canonical_name:
raise InvalidSpec('Patch {} must correspond to a pre-existing data_type.'.format(
quote(patched_item.name)), patched_item.lineno, patched_item.path)
existing_item = self._item_by_canonical_name[patched_item_base_name]
self._check_patch_type_mismatch(patched_item, existing_item)
if isinstance(patched_item, (AstStructPatch, AstUnionPatch)):
self._check_field_names_unique(existing_item, patched_item)
existing_item.fields += patched_item.fields
self._inject_patched_examples(existing_item, patched_item)
else:
raise AssertionError('Unknown Patch Object Type {}'.format(
                patched_item.__class__.__name__))
def from_pyfile(cls: Type["Config"], filename: FilePath) -> "Config":
"""Create a configuration from a Python file.
.. code-block:: python
Config.from_pyfile('hypercorn_config.py')
Arguments:
filename: The filename which gives the path to the file.
"""
file_path = os.fspath(filename)
spec = importlib.util.spec_from_file_location("module.name", file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
    return cls.from_object(module)
def _get_existing_report(self, mask, report):
"""Returns the aggregated report that matches report"""
for existing_report in self._reports:
if existing_report['namespace'] == report['namespace']:
if mask == existing_report['queryMask']:
return existing_report
    return None
def _marker(self, lat, long, text, xmap, color=None, icon=None,
text_mark=False, style=None):
"""
Adds a marker to the default map
"""
kwargs = {}
if icon is not None:
kwargs["icon"] = icon
if color is not None:
kwargs["color"] = color
if style is None:
style = "font-size:18pt;font-weight:bold;" + \
"color:black;border-radius:0.5"
try:
xicon1 = folium.Icon(**kwargs)
if text_mark is True:
xicon = DivIcon(
icon_size=(150, 36),
icon_anchor=(0, 0),
html='<div style="' + style + '">' + text + '</div>',
)
folium.Marker([lat, long], popup=text,
icon=xicon).add_to(xmap)
folium.Marker([lat, long], popup=text,
icon=xicon1).add_to(xmap)
return xmap
except Exception as e:
        self.err(e, self._marker, "Can not get marker")
def tokenize(self, data):
'''
Tokenize sentence.
    Args:
        data:   [n-gram, n-gram, n-gram, ...]
'''
super().tokenize(data)
token_tuple_zip = self.n_gram.generate_tuple_zip(self.token, self.n)
    self.token = ["".join(list(token_tuple)) for token_tuple in token_tuple_zip]
def _cleaned(_pipeline_objects):
"""Return standardized pipeline objects to be used for comparing
Remove year, month, and day components of the startDateTime so that data
pipelines with the same time of day but different days are considered
equal.
"""
pipeline_objects = copy.deepcopy(_pipeline_objects)
for pipeline_object in pipeline_objects:
if pipeline_object['id'] == 'DefaultSchedule':
for field_object in pipeline_object['fields']:
if field_object['key'] == 'startDateTime':
start_date_time_string = field_object['stringValue']
start_date_time = datetime.datetime.strptime(start_date_time_string,
"%Y-%m-%dT%H:%M:%S")
field_object['stringValue'] = start_date_time.strftime("%H:%M:%S")
    return pipeline_objects
def simulate(self, steps, stimulus):
"""!
    @brief Simulates the chaotic neural network with external stimulus during the specified steps.
    @details Stimuli are considered as coordinates of neurons, and weights
    are initialized in line with that.
@param[in] steps (uint): Amount of steps for simulation.
@param[in] stimulus (list): Stimulus that are used for simulation.
@return (cnn_dynamic) Output dynamic of the chaotic neural network.
"""
self.__create_weights(stimulus)
self.__location = stimulus
dynamic = cnn_dynamic([], [])
dynamic.output.append(self.__output)
dynamic.time.append(0)
for step in range(1, steps, 1):
self.__output = self.__calculate_states()
dynamic.output.append(self.__output)
dynamic.time.append(step)
    return dynamic
def length(min=None, max=None):
"""
Validates that a field value's length is between the bounds given to this
validator.
"""
def validate(value):
if min and len(value) < min:
return e("{} does not have a length of at least {}", value, min)
if max and len(value) > max:
return e("{} does not have a length of at most {}", value, max)
    return validate
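# Usage sketch for the validator factory above (assumes the module's `e` error
# helper); the inner function returns nothing on success.
check = length(min=2, max=5)
print(check('abc'))  # None -- within bounds
print(check('a'))    # an error value built by e(...), citing the minimum length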
def reset(self, clear=False):
"""
Overridden to customize the order that the banners are printed
"""
if self._executing:
self._executing = False
self._request_info['execute'] = {}
self._reading = False
self._highlighter.highlighting_on = False
if clear:
self._control.clear()
if self._display_banner:
if self.kernel_banner:
self._append_plain_text(self.kernel_banner)
self._append_plain_text(self.banner)
# update output marker for stdout/stderr, so that startup
# messages appear after banner:
    self._show_interpreter_prompt()
def handle_request(self, environ, start_response):
"""Handle an HTTP request from the client.
This is the entry point of the Engine.IO application, using the same
interface as a WSGI application. For the typical usage, this function
is invoked by the :class:`Middleware` instance, but it can be invoked
directly when the middleware is not used.
:param environ: The WSGI environment.
:param start_response: The WSGI ``start_response`` function.
This function returns the HTTP response body to deliver to the client
as a byte sequence.
"""
method = environ['REQUEST_METHOD']
query = urllib.parse.parse_qs(environ.get('QUERY_STRING', ''))
if 'j' in query:
self.logger.warning('JSONP requests are not supported')
r = self._bad_request()
else:
sid = query['sid'][0] if 'sid' in query else None
b64 = False
if 'b64' in query:
if query['b64'][0] == "1" or query['b64'][0].lower() == "true":
b64 = True
if method == 'GET':
if sid is None:
transport = query.get('transport', ['polling'])[0]
if transport != 'polling' and transport != 'websocket':
self.logger.warning('Invalid transport %s', transport)
r = self._bad_request()
else:
r = self._handle_connect(environ, start_response,
transport, b64)
else:
if sid not in self.sockets:
self.logger.warning('Invalid session %s', sid)
r = self._bad_request()
else:
socket = self._get_socket(sid)
try:
packets = socket.handle_get_request(
environ, start_response)
if isinstance(packets, list):
r = self._ok(packets, b64=b64)
else:
r = packets
except exceptions.EngineIOError:
if sid in self.sockets: # pragma: no cover
self.disconnect(sid)
r = self._bad_request()
if sid in self.sockets and self.sockets[sid].closed:
del self.sockets[sid]
elif method == 'POST':
if sid is None or sid not in self.sockets:
self.logger.warning('Invalid session %s', sid)
r = self._bad_request()
else:
socket = self._get_socket(sid)
try:
socket.handle_post_request(environ)
r = self._ok()
except exceptions.EngineIOError:
if sid in self.sockets: # pragma: no cover
self.disconnect(sid)
r = self._bad_request()
except: # pragma: no cover
# for any other unexpected errors, we log the error
# and keep going
self.logger.exception('post request handler error')
r = self._ok()
elif method == 'OPTIONS':
r = self._ok()
else:
self.logger.warning('Method %s not supported', method)
r = self._method_not_found()
if not isinstance(r, dict):
return r or []
if self.http_compression and \
len(r['response']) >= self.compression_threshold:
encodings = [e.split(';')[0].strip() for e in
environ.get('HTTP_ACCEPT_ENCODING', '').split(',')]
for encoding in encodings:
if encoding in self.compression_methods:
r['response'] = \
getattr(self, '_' + encoding)(r['response'])
r['headers'] += [('Content-Encoding', encoding)]
break
cors_headers = self._cors_headers(environ)
start_response(r['status'], r['headers'] + cors_headers)
    return [r['response']]
def delete_policy(self, scaling_group, policy):
"""
Deletes the specified policy from the scaling group.
"""
uri = "/%s/%s/policies/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy))
    resp, resp_body = self.api.method_delete(uri)
def songs(self):
"""Get a listing of library songs.
Returns:
list: Song dicts.
"""
song_list = []
for chunk in self.songs_iter(page_size=49995):
song_list.extend(chunk)
    return song_list
def models_descriptive(self):
""" list all stored models in given file.
Returns
-------
    dict: {model_name: {'repr': 'string representation', 'created': 'human readable date', ...}
"""
f = self._parent
return {name: {a: f[name].attrs[a]
for a in H5File.stored_attributes}
            for name in f.keys()}
def set_mode(self, controlmode, drivemode):
"""Higher level abstraction for setting the mode register. This will
    set the mode according to the @controlmode and @drivemode you specify.
@controlmode and @drivemode should come from the ControlMode and DriveMode
class respectively."""
    self.set_register(Addr.Mode, [0x01 | controlmode | drivemode])
def parameters(self, parameters):
"""Setter method; for a description see the getter method."""
# pylint: disable=attribute-defined-outside-init
self._parameters = NocaseDict()
if parameters:
try:
                # This is used for dictionaries:
iterator = parameters.items()
except AttributeError:
                # This is used for iterables:
iterator = parameters
for item in iterator:
if isinstance(item, CIMParameter):
key = item.name
value = item
elif isinstance(item, tuple):
key, value = item
else:
raise TypeError(
_format("Input object for parameters has invalid item "
"in iterable: {0!A}", item))
self.parameters[key] = value | Setter method; for a description see the getter method. |
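The setter accepts either a mapping or an iterable mixing CIMParameter objects and (name, value) tuples; a sketch of both call styles (obj stands for the owning CIM object, and CIMParameter(name, type) follows pywbem's constructor):

from pywbem import CIMParameter

p1 = CIMParameter('InstanceID', 'string')
p2 = CIMParameter('Timeout', 'uint32')

obj.parameters = {'InstanceID': p1, 'Timeout': p2}   # mapping form: iterated via .items()
obj.parameters = [p1, ('Timeout', p2)]               # iterable form: CIMParameter or tuple items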
def validate_url(url):
"""Validate URL is valid
NOTE: only support http & https
"""
schemes = ['http', 'https']
netloc_re = re.compile(
r'^'
r'(?:\S+(?::\S*)?@)?' # user:pass auth
r'(?:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])'
r'(?:\.(?:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9]))*' # host
r'(?::[0-9]{2,5})?' # port
r'$', re.IGNORECASE
)
try:
scheme, netloc, path, query, fragment = urlsplit(url)
except ValueError:
raise Invalid('Invalid URL')
if scheme not in schemes:
raise Invalid('Missing URL scheme')
if not netloc_re.search(netloc):
raise Invalid('Invalid URL')
    return url | Validate that the URL is well-formed
NOTE: only http and https schemes are supported |
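A usage sketch; Invalid is the exception class defined by the surrounding validation framework:

validate_url('https://example.com:8080')   # returns the URL unchanged
try:
    validate_url('ftp://example.com')      # 'ftp' is not in ['http', 'https']
except Invalid:
    pass                                   # rejected with 'Missing URL scheme'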
def from_geo(geo, level):
"""
    Constructs a quadkey representation from geo and level
geo => (lat, lon)
If lat or lon are outside of bounds, they will be clipped
If level is outside of bounds, an AssertionError is raised
"""
pixel = TileSystem.geo_to_pixel(geo, level)
tile = TileSystem.pixel_to_tile(pixel)
key = TileSystem.tile_to_quadkey(tile, level)
    return QuadKey(key) | Constructs a quadkey representation from geo and level
geo => (lat, lon)
If lat or lon are outside of bounds, they will be clipped
If level is outside of bounds, an AssertionError is raised |
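A usage sketch, assuming the returned QuadKey exposes its base-4 string as .key:

qk = from_geo((47.6062, -122.3321), 10)   # (lat, lon) at zoom level 10
print(qk.key)                             # a 10-character base-4 quadkey string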
def _server_error_handler(self, code: int):
"""处理500~599段状态码,抛出对应警告.
Parameters:
(code): - 响应的状态码
Return:
(bool): - 已知的警告类型则返回True,否则返回False
Raise:
(ServerException): - 当返回为服务异常时则抛出对应异常
"""
if code == 501:
self._login_fut.set_result(False)
else:
self.clean()
raise abort(code)
        return True | Handle status codes in the 500-599 range and raise the corresponding warnings.
Parameters:
    (code): - the response status code
Return:
    (bool): - True if the warning type is known, otherwise False
Raise:
    (ServerException): - raised when the response indicates a server exception |
def cli(file1, file2, comments) -> int:
""" Compare file1 to file2 using a filter """
sys.exit(compare_files(file1, file2, comments)) | Compare file1 to file2 using a filter |
def _generate_packets(file_h, header, layers=0):
"""
Read packets one by one from the capture file. Expects the file
handle to point to the location immediately after the header (24
bytes).
"""
hdrp = ctypes.pointer(header)
while True:
pkt = _read_a_packet(file_h, hdrp, layers)
if pkt:
yield pkt
else:
break | Read packets one by one from the capture file. Expects the file
handle to point to the location immediately after the header (24
bytes). |
def init_blueprint(self, blueprint):
"""Initialize a Flask Blueprint, similar to init_app, but without the access
to the application config.
Keyword Arguments:
blueprint {Flask Blueprint} -- Flask Blueprint instance to initialize (Default: {None})
"""
if self._route is not None:
raise TypeError("route cannot be set when using blueprints!")
blueprint.rak = self
blueprint.add_url_rule("", view_func=getattr(self, self._view_name), methods=['POST']) | Initialize a Flask Blueprint, similar to init_app, but without the access
to the application config.
Keyword Arguments:
blueprint {Flask Blueprint} -- Flask Blueprint instance to initialize (Default: {None}) |
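Typical wiring with Flask; Ask() is an illustrative name for whatever class owns init_blueprint:

from flask import Flask, Blueprint

bp = Blueprint('skill', __name__, url_prefix='/skill')
ask = Ask()             # must be constructed without a route for blueprint use
ask.init_blueprint(bp)  # registers the POST view at the blueprint root

app = Flask(__name__)
app.register_blueprint(bp)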
def get_info_content(go_id, termcounts):
'''
Calculates the information content of a GO term.
'''
# Get the observed frequency of the GO term
freq = termcounts.get_term_freq(go_id)
    # Calculate the information content, i.e., -log(freq of GO term)
return -1.0 * math.log(freq) if freq else 0 | Calculates the information content of a GO term. |
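Because the information content is just the negative natural log of the observed term frequency, a worked example with an assumed frequency of 1%:

import math

freq = 0.01                   # assumed: the GO term annotates 1% of the corpus
ic = -1.0 * math.log(freq)    # ~4.605; rarer terms carry more information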
def create_capitan_images(self, raw_data_directory: str,
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
"""
Creates a visual representation of the Capitan strokes by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
:param raw_data_directory: The directory, that contains the raw capitan dataset
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
symbols = self.load_capitan_symbols(raw_data_directory)
self.draw_capitan_stroke_images(symbols, destination_directory, stroke_thicknesses)
self.draw_capitan_score_images(symbols, destination_directory) | Creates a visual representation of the Capitan strokes by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
:param raw_data_directory: The directory, that contains the raw capitan dataset
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16 |
def __make_response(self, data, default_renderer=None):
"""
Creates a Flask response object from the specified data.
The appropriated encoder is taken based on the request header Accept.
If there is not data to be serialized the response status code is 204.
:param data: The Python object to be serialized.
:return: A Flask response object.
"""
status = headers = None
if isinstance(data, tuple):
data, status, headers = unpack(data)
if data is None:
data = self.__app.response_class(status=204)
elif not isinstance(data, self.__app.response_class):
renderer, mimetype = self.content_negotiation.select_renderer(request, self.default_renderers)
if not renderer:
if not default_renderer:
raise NotAcceptable()
renderer = default_renderer
mimetype = default_renderer.mimetype
data_bytes = renderer.render(data, mimetype)
data = self.__app.response_class(data_bytes, mimetype=str(mimetype))
if status is not None:
data.status_code = status
if headers:
data.headers.extend(headers)
return data | Creates a Flask response object from the specified data.
The appropriated encoder is taken based on the request header Accept.
If there is not data to be serialized the response status code is 204.
:param data: The Python object to be serialized.
:return: A Flask response object. |
def update_variant(self, variant_obj):
"""Update one variant document in the database.
This means that the variant in the database will be replaced by variant_obj.
Args:
variant_obj(dict)
Returns:
new_variant(dict)
"""
LOG.debug('Updating variant %s', variant_obj.get('simple_id'))
new_variant = self.variant_collection.find_one_and_replace(
{'_id': variant_obj['_id']},
variant_obj,
return_document=pymongo.ReturnDocument.AFTER
)
return new_variant | Update one variant document in the database.
This means that the variant in the database will be replaced by variant_obj.
Args:
variant_obj(dict)
Returns:
new_variant(dict) |
def _get_or_create_service_key(self):
"""
Get a service key or create one if needed.
"""
keys = self.service._get_service_keys(self.name)
for key in keys['resources']:
if key['entity']['name'] == self.service_name:
return self.service.get_service_key(self.name,
self.service_name)
self.service.create_service_key(self.name, self.service_name)
return self.service.get_service_key(self.name, self.service_name) | Get a service key or create one if needed. |
def check_var_coverage_content_type(self, ds):
'''
Check coverage content type against valid ISO-19115-1 codes
:param netCDF4.Dataset ds: An open netCDF dataset
'''
results = []
for variable in cfutil.get_geophysical_variables(ds):
msgs = []
ctype = getattr(ds.variables[variable],
'coverage_content_type', None)
check = ctype is not None
if not check:
msgs.append("coverage_content_type")
results.append(Result(BaseCheck.HIGH, check,
self._var_header.format(variable), msgs))
continue
# ISO 19115-1 codes
valid_ctypes = {
'image',
'thematicClassification',
'physicalMeasurement',
'auxiliaryInformation',
'qualityInformation',
'referenceInformation',
'modelResult',
'coordinate'
}
if ctype not in valid_ctypes:
msgs.append("coverage_content_type in \"%s\""
% (variable, sorted(valid_ctypes)))
results.append(Result(BaseCheck.HIGH, check, # append to list
self._var_header.format(variable), msgs))
return results | Check coverage content type against valid ISO-19115-1 codes
:param netCDF4.Dataset ds: An open netCDF dataset |
def QA_fetch_stock_full(date, format='numpy', collections=DATABASE.stock_day):
    'Fetch one trading day of data for the entire market'
Date = str(date)[0:10]
if QA_util_date_valid(Date) is True:
__data = []
for item in collections.find({
"date_stamp": QA_util_date_stamp(Date)}, batch_size=10000):
__data.append([str(item['code']), float(item['open']), float(item['high']), float(
item['low']), float(item['close']), float(item['vol']), item['date']])
# 多种数据格式
if format in ['n', 'N', 'numpy']:
__data = numpy.asarray(__data)
elif format in ['list', 'l', 'L']:
__data = __data
elif format in ['P', 'p', 'pandas', 'pd']:
__data = DataFrame(__data, columns=[
'code', 'open', 'high', 'low', 'close', 'volume', 'date'])
__data['date'] = pd.to_datetime(__data['date'])
__data = __data.set_index('date', drop=False)
else:
print("QA Error QA_fetch_stock_full format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
return __data
else:
QA_util_log_info(
            'QA Error QA_fetch_stock_full data parameter date=%s not right' % date) | Fetch one trading day of data for the entire market |
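A usage sketch (the date is illustrative):

df = QA_fetch_stock_full('2019-01-04', format='pd')   # whole market as a pandas DataFrame
arr = QA_fetch_stock_full('2019-01-04')               # default 'numpy': an ndarray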
def export(self, directory):
"""Exports the ConnectorDB user into the given directory.
        The resulting export can be imported with the import command (cdb.import(directory)).
Note that Python cannot export passwords, since the REST API does
not expose password hashes. Therefore, the imported user will have
password same as username.
        The user export function is different from the device and stream exports because
        it outputs a format directly compatible with ConnectorDB's import functionality:
        connectordb import <mydatabase> <directory>
This also means that you can export multiple users into the same directory without issue
"""
exportInfoFile = os.path.join(directory, "connectordb.json")
if os.path.exists(directory):
# Ensure that there is an export there already, and it is version 1
if not os.path.exists(exportInfoFile):
raise FileExistsError(
"The export directory already exsits, and is not a ConnectorDB export.")
with open(exportInfoFile) as f:
exportInfo = json.load(f)
if exportInfo["Version"] != 1:
raise ValueError(
"Could not export to directory: incompatible export versions.")
else:
# The folder doesn't exist. Make it.
os.mkdir(directory)
with open(exportInfoFile, "w") as f:
json.dump(
{"Version": 1, "ConnectorDB": self.db.get("meta/version").text}, f)
# Now we create the user directory
udir = os.path.join(directory, self.name)
os.mkdir(udir)
# Write the user's info
with open(os.path.join(udir, "user.json"), "w") as f:
json.dump(self.data, f)
# Now export the devices one by one
for d in self.devices():
d.export(os.path.join(udir, d.name)) | Exports the ConnectorDB user into the given directory.
The resulting export can be imported with the import command (cdb.import(directory)).
Note that Python cannot export passwords, since the REST API does
not expose password hashes. Therefore, the imported user will have
password same as username.
The user export function is different from the device and stream exports because
it outputs a format directly compatible with ConnectorDB's import functionality:
connectordb import <mydatabase> <directory>
This also means that you can export multiple users into the same directory without issue |
def pop(self, index=-1):
"""
Remove and return item at *index* (default last). Raises IndexError if
set is empty or index is out of range. Negative indexes are supported,
as for slice indices.
"""
# pylint: disable=arguments-differ
value = self._list.pop(index)
self._set.remove(value)
return value | Remove and return item at *index* (default last). Raises IndexError if
set is empty or index is out of range. Negative indexes are supported,
as for slice indices. |
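The same semantics as sortedcontainers' SortedSet, shown here as a short sketch:

from sortedcontainers import SortedSet

s = SortedSet([1, 5, 3])
s.pop()      # 5 -- the default index -1 removes the largest element
s.pop(0)     # 1 -- the smallest
s.pop()      # 3 -- a further pop() on the empty set raises IndexError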
def learnTransitions(self):
"""
Train the location layer to do path integration. For every location, teach
it each previous-location + motor command pair.
"""
print "Learning transitions"
for (i, j), locationSDR in self.locations.iteritems():
print "i, j", (i, j)
for (di, dj), transitionSDR in self.transitions.iteritems():
i2 = i + di
j2 = j + dj
if (0 <= i2 < self.diameter and
0 <= j2 < self.diameter):
for _ in xrange(5):
self.locationLayer.reset()
self.locationLayer.compute(newLocation=self.locations[(i,j)])
self.locationLayer.compute(deltaLocation=transitionSDR,
newLocation=self.locations[(i2, j2)])
self.locationLayer.reset() | Train the location layer to do path integration. For every location, teach
it each previous-location + motor command pair. |
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(GraphitePickleHandler, self).get_default_config()
config.update({
'port': 2004,
})
return config | Return the default config for the handler |
def company_vat(self):
"""
Returns 10 character tax identification number,
Polish: Numer identyfikacji podatkowej.
https://pl.wikipedia.org/wiki/NIP
"""
vat_digits = []
for _ in range(3):
vat_digits.append(self.random_digit_not_null())
for _ in range(6):
vat_digits.append(self.random_digit())
check_digit = company_vat_checksum(vat_digits)
# in this case we must generate a tax number again, because check_digit
# cannot be 10
if check_digit == 10:
return self.company_vat()
vat_digits.append(check_digit)
return ''.join(str(digit) for digit in vat_digits) | Returns 10 character tax identification number,
Polish: Numer identyfikacji podatkowej.
https://pl.wikipedia.org/wiki/NIP |
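The checksum helper is not shown above; a sketch of the standard NIP check-digit rule it presumably implements (weights taken from the public NIP specification, so treat the details as an assumption):

def company_vat_checksum(digits):
    # Weighted sum of the first nine digits, modulo 11. A result of 10 is
    # invalid, which is why the generator above retries in that case.
    weights = (6, 5, 7, 2, 3, 4, 5, 6, 7)
    return sum(w * d for w, d in zip(weights, digits)) % 11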
def read(self, len=1024, buffer=None):
"""Read data from connection
Read up to len bytes and return them.
Arguments:
len -- maximum number of bytes to read
Return value:
string containing read bytes
"""
try:
return self._wrap_socket_library_call(
lambda: SSL_read(self._ssl.value, len, buffer), ERR_READ_TIMEOUT)
except openssl_error() as err:
if err.ssl_error == SSL_ERROR_SYSCALL and err.result == -1:
raise_ssl_error(ERR_PORT_UNREACHABLE, err)
raise | Read data from connection
Read up to len bytes and return them.
Arguments:
len -- maximum number of bytes to read
Return value:
string containing read bytes |
def list(self, url_components=()):
"""
Send list request for all members of a collection
"""
resp = self.get(url_components)
return resp.get(self.result_key, []) | Send list request for all members of a collection |
def getFoundIn(self, foundin_name, projectarea_id=None,
projectarea_name=None, archived=False):
"""Get :class:`rtcclient.models.FoundIn` object by its name
:param foundin_name: the foundin name
:param projectarea_id: the :class:`rtcclient.project_area.ProjectArea`
id
:param projectarea_name: the project area name
:param archived: (default is False) whether the foundin is archived
:return: the :class:`rtcclient.models.FoundIn` object
:rtype: rtcclient.models.FoundIn
"""
self.log.debug("Try to get <FoundIn %s>", foundin_name)
if not isinstance(foundin_name,
six.string_types) or not foundin_name:
excp_msg = "Please specify a valid PlannedFor name"
self.log.error(excp_msg)
raise exception.BadValue(excp_msg)
foundins = self._getFoundIns(projectarea_id=projectarea_id,
projectarea_name=projectarea_name,
archived=archived,
foundin_name=foundin_name)
if foundins is not None:
foundin = foundins[0]
self.log.info("Find <FoundIn %s>", foundin)
return foundin
self.log.error("No FoundIn named %s", foundin_name)
raise exception.NotFound("No FoundIn named %s" % foundin_name) | Get :class:`rtcclient.models.FoundIn` object by its name
:param foundin_name: the foundin name
:param projectarea_id: the :class:`rtcclient.project_area.ProjectArea`
id
:param projectarea_name: the project area name
:param archived: (default is False) whether the foundin is archived
:return: the :class:`rtcclient.models.FoundIn` object
:rtype: rtcclient.models.FoundIn |
def group_audit_ranks(filenames, measurer, similarity_bound=0.05):
"""
Given a list of audit files, rank them using the `measurer` and
return the features that never deviate more than `similarity_bound`
across repairs.
"""
def _partition_groups(feature_scores):
groups = []
for feature, score in feature_scores:
added_to_group = False
# Check to see if the feature belongs in a group with any other features.
for i, group in enumerate(groups):
mean_score, group_feature_scores = group
if abs(mean_score - score) < similarity_bound:
groups[i][1].append( (feature, score) )
# Recalculate the representative mean.
groups[i][0] = sum([s for _, s in group_feature_scores])/len(group_feature_scores)
added_to_group = True
break
            # If this feature did not match any of the current groups, create another group.
if not added_to_group:
groups.append( [score, [(feature,score)]] )
# Return just the features.
return [[feature for feature, score in group] for _, group in groups]
score_dict = {}
features = []
for filename in filenames:
with open(filename) as audit_file:
header_line = audit_file.readline()[:-1] # Remove the trailing endline.
feature = header_line[header_line.index(":")+1:]
features.append(feature)
confusion_matrices = load_audit_confusion_matrices(filename)
for rep_level, matrix in confusion_matrices:
score = measurer(matrix)
if rep_level not in score_dict:
score_dict[rep_level] = {}
score_dict[rep_level][feature] = score
# Sort by repair level increasing repair level.
score_keys = sorted(score_dict.keys())
groups = [features]
while score_keys:
key = score_keys.pop()
new_groups = []
for group in groups:
group_features = [(f, score_dict[key][f]) for f in group]
sub_groups = _partition_groups(group_features)
new_groups.extend(sub_groups)
groups = new_groups
return groups | Given a list of audit files, rank them using the `measurer` and
return the features that never deviate more than `similarity_bound`
across repairs. |
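A small illustration of the partitioning step, with invented scores and similarity_bound = 0.05:

#   [('age', 0.90), ('zip', 0.93), ('race', 0.70)]
# 'zip' joins 'age' because |0.90 - 0.93| < 0.05; 'race' is too far from that
# group's mean, so _partition_groups returns [['age', 'zip'], ['race']].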
def key_press(self, key, x, y):
"Close the application when the player presses ESCAPE"
if ord(key) == 27:
# print "Escape!"
if bool(glutLeaveMainLoop):
glutLeaveMainLoop()
else:
raise Exception("Application quit") | Close the application when the player presses ESCAPE |
def parseAnchorName(
anchorName,
markPrefix=MARK_PREFIX,
ligaSeparator=LIGA_SEPARATOR,
ligaNumRE=LIGA_NUM_RE,
ignoreRE=None,
):
"""Parse anchor name and return a tuple that specifies:
1) whether the anchor is a "mark" anchor (bool);
2) the "key" name of the anchor, i.e. the name after stripping all the
prefixes and suffixes, which identifies the class it belongs to (str);
3) An optional number (int), starting from 1, which identifies that index
of the ligature component the anchor refers to.
The 'ignoreRE' argument is an optional regex pattern (str) identifying
sub-strings in the anchor name that should be ignored when parsing the
three elements above.
"""
number = None
if ignoreRE is not None:
anchorName = re.sub(ignoreRE, "", anchorName)
m = ligaNumRE.match(anchorName)
if not m:
key = anchorName
else:
number = m.group(1)
key = anchorName.rstrip(number)
separator = ligaSeparator
if key.endswith(separator):
assert separator
key = key[: -len(separator)]
number = int(number)
else:
# not a valid ligature anchor name
key = anchorName
number = None
if anchorName.startswith(markPrefix) and key:
if number is not None:
raise ValueError("mark anchor cannot be numbered: %r" % anchorName)
isMark = True
key = key[len(markPrefix) :]
if not key:
raise ValueError("mark anchor key is nil: %r" % anchorName)
else:
isMark = False
return isMark, key, number | Parse anchor name and return a tuple that specifies:
1) whether the anchor is a "mark" anchor (bool);
2) the "key" name of the anchor, i.e. the name after stripping all the
prefixes and suffixes, which identifies the class it belongs to (str);
3) An optional number (int), starting from 1, which identifies that index
of the ligature component the anchor refers to.
The 'ignoreRE' argument is an optional regex pattern (str) identifying
sub-strings in the anchor name that should be ignored when parsing the
three elements above. |
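A usage sketch, assuming the common defaults of '_' for both the mark prefix and the ligature separator:

parseAnchorName('_top')    # (True,  'top', None) -- a mark anchor
parseAnchorName('top')     # (False, 'top', None) -- a plain base anchor
parseAnchorName('top_2')   # (False, 'top', 2)    -- second ligature component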
def arp_ip(opcode, src_mac, src_ip, dst_mac, dst_ip):
"""A convenient wrapper for IPv4 ARP for Ethernet.
This is an equivalent of the following code.
arp(ARP_HW_TYPE_ETHERNET, ether.ETH_TYPE_IP, \
6, 4, opcode, src_mac, src_ip, dst_mac, dst_ip)
"""
return arp(ARP_HW_TYPE_ETHERNET, ether.ETH_TYPE_IP,
6, # ether mac address length
               4,  # ipv4 address length
opcode, src_mac, src_ip, dst_mac, dst_ip) | A convenient wrapper for IPv4 ARP for Ethernet.
This is an equivalent of the following code.
arp(ARP_HW_TYPE_ETHERNET, ether.ETH_TYPE_IP, \
6, 4, opcode, src_mac, src_ip, dst_mac, dst_ip) |
def get_yes_no(self, question):
"""Checks if question is yes (True) or no (False)
:param question: Question to ask user
:return: User answer
"""
user_answer = self.get_answer(question).lower()
if user_answer in self.yes_input:
return True
if user_answer in self.no_input:
return False
is_yes = self.is_yes(user_answer) # check if similar to yes/no choices
is_no = self.is_no(user_answer)
if is_yes and not is_no:
return True
if is_no and not is_yes:
return False
if self.interactive:
self.show_help()
return self.get_yes_no(self.last_question)
        return False | Asks the question and checks whether the answer is yes (True) or no (False)
:param question: Question to ask user
:return: User answer |
def request(self, name, *args, **kwargs):
"""Wrapper for nvim.request."""
return self._session.request(name, self, *args, **kwargs) | Wrapper for nvim.request. |
def __retry_session(self, retries=10, backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None):
"""
Retry the connection using requests if it fails. Use this as a wrapper
to request from datapoint
"""
# requests.Session allows finer control, which is needed to use the
# retrying code
the_session = session or requests.Session()
# The Retry object manages the actual retrying
retry = Retry(total=retries, read=retries, connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist)
adapter = HTTPAdapter(max_retries=retry)
the_session.mount('http://', adapter)
the_session.mount('https://', adapter)
return the_session | Retry the connection using requests if it fails. Use this as a wrapper
to request from datapoint |
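The same retry pattern as a self-contained sketch (the URL is illustrative): Retry decides when to retry, and HTTPAdapter mounts that policy onto the session:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
retry = Retry(total=5, read=5, connect=5, backoff_factor=0.3,
              status_forcelist=(500, 502, 504))
session.mount('http://', HTTPAdapter(max_retries=retry))
session.mount('https://', HTTPAdapter(max_retries=retry))
resp = session.get('https://example.com/api')   # transparently retried on 5xx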
def open_url_in_browser(url, browsername=None, fallback=False):
r"""
Opens a url in the specified or default browser
Args:
url (str): web url
CommandLine:
python -m utool.util_grabdata --test-open_url_in_browser
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_grabdata import * # NOQA
>>> url = 'http://www.jrsoftware.org/isdl.php'
>>> open_url_in_browser(url, 'chrome')
"""
import webbrowser
print('[utool] Opening url=%r in browser' % (url,))
if browsername is None:
browser = webbrowser.open(url)
else:
browser = get_prefered_browser(pref_list=[browsername], fallback=fallback)
return browser.open(url) | r"""
Opens a url in the specified or default browser
Args:
url (str): web url
CommandLine:
python -m utool.util_grabdata --test-open_url_in_browser
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_grabdata import * # NOQA
>>> url = 'http://www.jrsoftware.org/isdl.php'
>>> open_url_in_browser(url, 'chrome') |
def putcell(self, rownr, value):
"""Put a value into one or more table cells.
(see :func:`table.putcell`)"""
return self._table.putcell(self._column, rownr, value) | Put a value into one or more table cells.
(see :func:`table.putcell`) |
def array(self):
"""Get all resources and return the result as an array
Returns:
array of str: Array of resources
"""
url = "{}/{}".format(__endpoint__, self.type.RESOURCE)
return RestClient.get(url, self.params)[self.type.RESOURCE] | Get all resources and return the result as an array
Returns:
array of str: Array of resources |
def search(self, filters=None, fields=None, limit=None, page=1):
"""
        Retrieve the order list by options using the search API. The result can
        be paginated via the limit and page options.
:param options: Dictionary of options.
:param filters: `{<attribute>:{<operator>:<value>}}`
:param fields: [<String: magento field names>, ...]
:param limit: `page limit`
:param page: `current page`
:return: `list` of `dict`
"""
options = {
'imported': False,
'filters': filters or {},
'fields': fields or [],
'limit': limit or 1000,
'page': page,
}
        return self.call('sales_order.search', [options]) | Retrieve the order list by options using the search API. The result can
be paginated via the limit and page options.
:param options: Dictionary of options.
:param filters: `{<attribute>:{<operator>:<value>}}`
:param fields: [<String: magento field names>, ...]
:param limit: `page limit`
:param page: `current page`
:return: `list` of `dict` |
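A usage sketch following the documented filter shape (api stands for the client instance; the attribute names are illustrative Magento fields):

orders = api.search(
    filters={'status': {'eq': 'pending'}},   # {<attribute>: {<operator>: <value>}}
    fields=['increment_id', 'grand_total'],
    limit=50,
    page=1,
)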