Unnamed: 0 (int64, 0–10k) | repository_name (string, length 7–54) | func_path_in_repository (string, length 5–223) | func_name (string, length 1–134) | whole_func_string (string, length 100–30.3k) | language (string, 1 class) | func_code_string (string, length 100–30.3k) | func_code_tokens (string, length 138–33.2k) | func_documentation_string (string, length 1–15k) | func_documentation_tokens (string, length 5–5.14k) | split_name (string, 1 class) | func_code_url (string, length 91–315)
---|---|---|---|---|---|---|---|---|---|---|---
2,800 | pyusb/pyusb | usb/core.py | show_devices | def show_devices(verbose=False, **kwargs):
"""Show information about connected devices.
The verbose flag controls whether verbose output is shown.
**kwargs are passed directly to the find() function.
"""
kwargs["find_all"] = True
devices = find(**kwargs)
strings = ""
for device in devices:
if not verbose:
strings += "%s, %s\n" % (device._str(), _try_lookup(
_lu.device_classes, device.bDeviceClass))
else:
strings += "%s\n\n" % str(device)
return _DescriptorInfo(strings) | python | def show_devices(verbose=False, **kwargs):
"""Show information about connected devices.
The verbose flag controls whether verbose output is shown.
**kwargs are passed directly to the find() function.
"""
kwargs["find_all"] = True
devices = find(**kwargs)
strings = ""
for device in devices:
if not verbose:
strings += "%s, %s\n" % (device._str(), _try_lookup(
_lu.device_classes, device.bDeviceClass))
else:
strings += "%s\n\n" % str(device)
return _DescriptorInfo(strings) | ['def', 'show_devices', '(', 'verbose', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', '"find_all"', ']', '=', 'True', 'devices', '=', 'find', '(', '*', '*', 'kwargs', ')', 'strings', '=', '""', 'for', 'device', 'in', 'devices', ':', 'if', 'not', 'verbose', ':', 'strings', '+=', '"%s, %s\\n"', '%', '(', 'device', '.', '_str', '(', ')', ',', '_try_lookup', '(', '_lu', '.', 'device_classes', ',', 'device', '.', 'bDeviceClass', ')', ')', 'else', ':', 'strings', '+=', '"%s\\n\\n"', '%', 'str', '(', 'device', ')', 'return', '_DescriptorInfo', '(', 'strings', ')'] | Show information about connected devices.
The verbose flag controls whether verbose output is shown.
**kwargs are passed directly to the find() function. | ['Show', 'information', 'about', 'connected', 'devices', '.'] | train | https://github.com/pyusb/pyusb/blob/ffe6faf42c6ad273880b0b464b9bbf44c1d4b2e9/usb/core.py#L1275-L1291 |
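A brief, hedged usage sketch for the `show_devices` row above. It assumes pyusb is installed with a working libusb backend; the function returns a descriptor-info object that prints as plain text.

```python
import usb.core

# One summary line per connected device (class name looked up from bDeviceClass);
# pass verbose=True to dump the full descriptors instead.
print(usb.core.show_devices(verbose=False))
```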
2,801 | base4sistemas/satcomum | satcomum/br.py | as_cnpj | def as_cnpj(numero):
"""Formata um número de CNPJ. Se o número não for um CNPJ válido apenas
retorna o argumento sem qualquer modificação.
"""
_num = digitos(numero)
if is_cnpj(_num):
return '{}.{}.{}/{}-{}'.format(
_num[:2], _num[2:5], _num[5:8], _num[8:12], _num[12:])
return numero | python | def as_cnpj(numero):
"""Formata um número de CNPJ. Se o número não for um CNPJ válido apenas
retorna o argumento sem qualquer modificação.
"""
_num = digitos(numero)
if is_cnpj(_num):
return '{}.{}.{}/{}-{}'.format(
_num[:2], _num[2:5], _num[5:8], _num[8:12], _num[12:])
return numero | ['def', 'as_cnpj', '(', 'numero', ')', ':', '_num', '=', 'digitos', '(', 'numero', ')', 'if', 'is_cnpj', '(', '_num', ')', ':', 'return', "'{}.{}.{}/{}-{}'", '.', 'format', '(', '_num', '[', ':', '2', ']', ',', '_num', '[', '2', ':', '5', ']', ',', '_num', '[', '5', ':', '8', ']', ',', '_num', '[', '8', ':', '12', ']', ',', '_num', '[', '12', ':', ']', ')', 'return', 'numero'] | Formata um número de CNPJ. Se o número não for um CNPJ válido apenas
retorna o argumento sem qualquer modificação. | ['Formata', 'um', 'número', 'de', 'CNPJ', '.', 'Se', 'o', 'número', 'não', 'for', 'um', 'CNPJ', 'válido', 'apenas', 'retorna', 'o', 'argumento', 'sem', 'qualquer', 'modificação', '.'] | train | https://github.com/base4sistemas/satcomum/blob/b42bec06cb0fb0ad2f6b1a2644a1e8fc8403f2c3/satcomum/br.py#L171-L179 |
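A short usage sketch for `as_cnpj` from the row above, assuming the `satcomum` package is importable; the sample digits are a commonly used illustrative CNPJ, not a real registration.

```python
from satcomum import br

# A valid 14-digit CNPJ is reformatted with the standard punctuation;
# input that fails validation is returned unchanged.
print(br.as_cnpj('11222333000181'))  # -> '11.222.333/0001-81' (illustrative digits)
print(br.as_cnpj('123'))             # not a valid CNPJ, passed through as-is
```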
2,802 | AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | lx4num | def lx4num(string, first):
"""
Scan a string from a specified starting position for the
end of a number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lx4num_c.html
:param string: Any character string.
:type string: str
:param first: First character to scan from in string.
:type first: int
:return: last and nchar
:rtype: tuple
"""
string = stypes.stringToCharP(string)
first = ctypes.c_int(first)
last = ctypes.c_int()
nchar = ctypes.c_int()
libspice.lx4num_c(string, first, ctypes.byref(last), ctypes.byref(nchar))
return last.value, nchar.value | python | def lx4num(string, first):
"""
Scan a string from a specified starting position for the
end of a number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lx4num_c.html
:param string: Any character string.
:type string: str
:param first: First character to scan from in string.
:type first: int
:return: last and nchar
:rtype: tuple
"""
string = stypes.stringToCharP(string)
first = ctypes.c_int(first)
last = ctypes.c_int()
nchar = ctypes.c_int()
libspice.lx4num_c(string, first, ctypes.byref(last), ctypes.byref(nchar))
return last.value, nchar.value | ['def', 'lx4num', '(', 'string', ',', 'first', ')', ':', 'string', '=', 'stypes', '.', 'stringToCharP', '(', 'string', ')', 'first', '=', 'ctypes', '.', 'c_int', '(', 'first', ')', 'last', '=', 'ctypes', '.', 'c_int', '(', ')', 'nchar', '=', 'ctypes', '.', 'c_int', '(', ')', 'libspice', '.', 'lx4num_c', '(', 'string', ',', 'first', ',', 'ctypes', '.', 'byref', '(', 'last', ')', ',', 'ctypes', '.', 'byref', '(', 'nchar', ')', ')', 'return', 'last', '.', 'value', ',', 'nchar', '.', 'value'] | Scan a string from a specified starting position for the
end of a number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lx4num_c.html
:param string: Any character string.
:type string: str
:param first: First character to scan from in string.
:type first: int
:return: last and nchar
:rtype: tuple | ['Scan', 'a', 'string', 'from', 'a', 'specified', 'starting', 'position', 'for', 'the', 'end', 'of', 'a', 'number', '.'] | train | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L8282-L8301 |
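A minimal sketch of calling `lx4num` through SpiceyPy, assuming the package is installed; the lexer routines do not require any kernels to be loaded. The indices in the comment follow from the docstring above and are expectations, not guaranteed output.

```python
import spiceypy as spice

# Scan "12.3 deg" for a number starting at index 0: the token "12.3"
# should end at index 3 and span 4 characters.
last, nchar = spice.lx4num("12.3 deg", 0)
print(last, nchar)  # expected: 3 4
```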
2,803 | openvax/topiary | topiary/filters.py | apply_effect_expression_filters | def apply_effect_expression_filters(
effects,
gene_expression_dict,
gene_expression_threshold,
transcript_expression_dict,
transcript_expression_threshold):
"""
Filter collection of varcode effects by given gene
and transcript expression thresholds.
Parameters
----------
effects : varcode.EffectCollection
gene_expression_dict : dict
gene_expression_threshold : float
transcript_expression_dict : dict
transcript_expression_threshold : float
"""
if gene_expression_dict:
effects = apply_filter(
lambda effect: (
gene_expression_dict.get(effect.gene_id, 0.0) >=
gene_expression_threshold),
effects,
result_fn=effects.clone_with_new_elements,
filter_name="Effect gene expression (min = %0.4f)" % gene_expression_threshold)
if transcript_expression_dict:
effects = apply_filter(
lambda effect: (
transcript_expression_dict.get(effect.transcript_id, 0.0) >=
transcript_expression_threshold
),
effects,
result_fn=effects.clone_with_new_elements,
filter_name=(
"Effect transcript expression (min=%0.4f)" % (
transcript_expression_threshold,)))
return effects | python | def apply_effect_expression_filters(
effects,
gene_expression_dict,
gene_expression_threshold,
transcript_expression_dict,
transcript_expression_threshold):
"""
Filter collection of varcode effects by given gene
and transcript expression thresholds.
Parameters
----------
effects : varcode.EffectCollection
gene_expression_dict : dict
gene_expression_threshold : float
transcript_expression_dict : dict
transcript_expression_threshold : float
"""
if gene_expression_dict:
effects = apply_filter(
lambda effect: (
gene_expression_dict.get(effect.gene_id, 0.0) >=
gene_expression_threshold),
effects,
result_fn=effects.clone_with_new_elements,
filter_name="Effect gene expression (min = %0.4f)" % gene_expression_threshold)
if transcript_expression_dict:
effects = apply_filter(
lambda effect: (
transcript_expression_dict.get(effect.transcript_id, 0.0) >=
transcript_expression_threshold
),
effects,
result_fn=effects.clone_with_new_elements,
filter_name=(
"Effect transcript expression (min=%0.4f)" % (
transcript_expression_threshold,)))
return effects | ['def', 'apply_effect_expression_filters', '(', 'effects', ',', 'gene_expression_dict', ',', 'gene_expression_threshold', ',', 'transcript_expression_dict', ',', 'transcript_expression_threshold', ')', ':', 'if', 'gene_expression_dict', ':', 'effects', '=', 'apply_filter', '(', 'lambda', 'effect', ':', '(', 'gene_expression_dict', '.', 'get', '(', 'effect', '.', 'gene_id', ',', '0.0', ')', '>=', 'gene_expression_threshold', ')', ',', 'effects', ',', 'result_fn', '=', 'effects', '.', 'clone_with_new_elements', ',', 'filter_name', '=', '"Effect gene expression (min = %0.4f)"', '%', 'gene_expression_threshold', ')', 'if', 'transcript_expression_dict', ':', 'effects', '=', 'apply_filter', '(', 'lambda', 'effect', ':', '(', 'transcript_expression_dict', '.', 'get', '(', 'effect', '.', 'transcript_id', ',', '0.0', ')', '>=', 'transcript_expression_threshold', ')', ',', 'effects', ',', 'result_fn', '=', 'effects', '.', 'clone_with_new_elements', ',', 'filter_name', '=', '(', '"Effect transcript expression (min=%0.4f)"', '%', '(', 'transcript_expression_threshold', ',', ')', ')', ')', 'return', 'effects'] | Filter collection of varcode effects by given gene
and transcript expression thresholds.
Parameters
----------
effects : varcode.EffectCollection
gene_expression_dict : dict
gene_expression_threshold : float
transcript_expression_dict : dict
transcript_expression_threshold : float | ['Filter', 'collection', 'of', 'varcode', 'effects', 'by', 'given', 'gene', 'and', 'transcript', 'expression', 'thresholds', '.'] | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/filters.py#L109-L151 |
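A small sketch of how the filter above might be wired up, assuming `topiary.filters` is importable and that a `varcode.EffectCollection` has already been built elsewhere; the parameter names mirror the signature shown in the row, while the wrapper itself is hypothetical.

```python
from topiary.filters import apply_effect_expression_filters

def keep_expressed_effects(effects, gene_fpkm, transcript_fpkm, min_fpkm=1.0):
    """Drop effects whose gene or transcript expression falls below `min_fpkm`.

    `effects` is a varcode.EffectCollection; the dicts map Ensembl gene and
    transcript IDs (e.g. "ENSG...", "ENST...") to expression estimates.
    """
    return apply_effect_expression_filters(
        effects,
        gene_expression_dict=gene_fpkm,
        gene_expression_threshold=min_fpkm,
        transcript_expression_dict=transcript_fpkm,
        transcript_expression_threshold=min_fpkm)
```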
2,804 | svenevs/exhale | exhale/configs.py | apply_sphinx_configurations | def apply_sphinx_configurations(app):
'''
This method applies the various configurations users place in their ``conf.py``, in
the dictionary ``exhale_args``. The error checking seems to be robust, and
borderline obsessive, but there may very well be some glaring flaws.
When the user requests for the ``treeView`` to be created, this method is also
responsible for adding the various CSS / JavaScript to the Sphinx Application
to support the hierarchical views.
.. danger::
This method is **not** supposed to be called directly. See
``exhale/__init__.py`` for how this function is called indirectly via the Sphinx
API.
**Parameters**
``app`` (:class:`sphinx.application.Sphinx`)
The Sphinx Application running the documentation build.
'''
# Import local to function to prevent circular imports elsewhere in the framework.
from . import deploy
from . import utils
####################################################################################
# Make sure they have the `breathe` configs setup in a way that we can use them. #
####################################################################################
# Breathe allows users to have multiple projects to configure in one `conf.py`
# A dictionary of keys := project names, values := path to Doxygen xml output dir
breathe_projects = app.config.breathe_projects
if not breathe_projects:
raise ConfigError("You must set the `breathe_projects` in `conf.py`.")
elif type(breathe_projects) is not dict:
raise ConfigError("The type of `breathe_projects` in `conf.py` must be a dictionary.")
# The breathe_default_project is required by `exhale` to determine where to look for
# the doxygen xml.
#
# TODO: figure out how to allow multiple breathe projects?
breathe_default_project = app.config.breathe_default_project
if not breathe_default_project:
raise ConfigError("You must set the `breathe_default_project` in `conf.py`.")
elif not isinstance(breathe_default_project, six.string_types):
raise ConfigError("The type of `breathe_default_project` must be a string.")
if breathe_default_project not in breathe_projects:
raise ConfigError(
"The given breathe_default_project='{0}' was not a valid key in `breathe_projects`:\n{1}".format(
breathe_default_project, breathe_projects
)
)
# Grab where the Doxygen xml output is supposed to go, make sure it is a string,
# defer validation of existence until after potentially running Doxygen based on
# the configs given to exhale
doxy_xml_dir = breathe_projects[breathe_default_project]
if not isinstance(doxy_xml_dir, six.string_types):
raise ConfigError(
"The type of `breathe_projects[breathe_default_project]` from `conf.py` was not a string."
)
# Make doxy_xml_dir relative to confdir (where conf.py is)
if not os.path.isabs(doxy_xml_dir):
doxy_xml_dir = os.path.abspath(os.path.join(app.confdir, doxy_xml_dir))
####################################################################################
# Initial sanity-check that we have the arguments needed. #
####################################################################################
exhale_args = app.config.exhale_args
if not exhale_args:
raise ConfigError("You must set the `exhale_args` dictionary in `conf.py`.")
elif type(exhale_args) is not dict:
raise ConfigError("The type of `exhale_args` in `conf.py` must be a dictionary.")
####################################################################################
# In order to be able to loop through things below, we want to grab the globals #
# dictionary (rather than needing to do `global containmentFolder` etc for every #
# setting that is being changed). #
####################################################################################
configs_globals = globals()
# Used for internal verification of available keys
keys_available = []
# At the end of input processing, fail out if unrecognized keys were found.
keys_processed = []
####################################################################################
# Gather the mandatory input for exhale. #
####################################################################################
key_error = "Did not find required key `{key}` in `exhale_args`."
val_error = "The type of the value for key `{key}` must be `{exp}`, but was `{got}`."
req_kv = [
("containmentFolder", six.string_types, True),
("rootFileName", six.string_types, False),
("rootFileTitle", six.string_types, False),
("doxygenStripFromPath", six.string_types, True)
]
for key, expected_type, make_absolute in req_kv:
# Used in error checking later
keys_available.append(key)
# Make sure we have the key
if key not in exhale_args:
raise ConfigError(key_error.format(key=key))
# Make sure the value is at the very least the correct type
val = exhale_args[key]
if not isinstance(val, expected_type):
val_t = type(val)
raise ConfigError(val_error.format(key=key, exp=expected_type, got=val_t))
# Make sure that a value was provided (e.g. no empty strings)
if not val:
raise ConfigError("Non-empty value for key [{0}] required.".format(key))
# If the string represents a path, make it absolute
if make_absolute:
# Directories are made absolute relative to app.confdir (where conf.py is)
if not os.path.isabs(val):
val = os.path.abspath(os.path.join(os.path.abspath(app.confdir), val))
# Set the config for use later
try:
configs_globals[key] = val
keys_processed.append(key)
except Exception as e:
raise ExtensionError(
"Critical error: unable to set `global {0}` to `{1}` in exhale.configs:\n{2}".format(
key, val, e
)
)
####################################################################################
# Validate what can be checked from the required arguments at this time. #
####################################################################################
global _the_app
_the_app = app
# Make sure they know this is a bad idea. The order of these checks is important.
# This assumes the path given was not the empty string (3 will break if it is).
#
# 1. If containmentFolder and app.srcdir are the same, problem.
# 2. If app.srcdir is not at the beginning of containmentFolder, problem.
# 3. If the first two checks have not raised a problem, the final check is to make
# sure that a subdirectory was actually used, as opposed to something that just
# starts with the same path.
#
# Note for the third check lazy evaluation is the only thing that makes checking
# _parts[1] acceptable ;)
_one = containmentFolder == app.srcdir
_two = not containmentFolder.startswith(app.srcdir)
_parts = containmentFolder.split(app.srcdir)
_three = _parts[0] != "" or len(_parts[1].split(os.path.sep)) > 2 or \
os.path.join(app.srcdir, _parts[1].replace(os.path.sep, "", 1)) != containmentFolder # noqa
# If they are equal, containmentFolder points somewhere entirely differently, or the
# relative path (made absolute again) does not have the srcdir
if _one or _two or _three:
raise ConfigError(
"The given `containmentFolder` [{0}] must be a *SUBDIRECTORY* of [{1}].".format(
containmentFolder, app.srcdir
)
)
global _app_src_dir
_app_src_dir = os.path.abspath(app.srcdir)
# We *ONLY* generate reStructuredText, make sure Sphinx is expecting this as well as
# the to-be-generated library root file is correctly suffixed.
if not rootFileName.endswith(".rst"):
raise ConfigError(
"The given `rootFileName` ({0}) did not end with '.rst'; Exhale is reStructuredText only.".format(
rootFileName
)
)
if ".rst" not in app.config.source_suffix:
raise ConfigError(
"Exhale is reStructuredText only, but '.rst' was not found in `source_suffix` list of `conf.py`."
)
# Make sure the doxygen strip path is an exclude-able path
if not os.path.exists(doxygenStripFromPath):
raise ConfigError(
"The path given as `doxygenStripFromPath` ({0}) does not exist!".format(doxygenStripFromPath)
)
####################################################################################
# Gather the optional input for exhale. #
####################################################################################
# TODO: `list` -> `(list, tuple)`, update docs too.
opt_kv = [
# Build Process Logging, Colors, and Debugging
("verboseBuild", bool),
("alwaysColorize", bool),
("generateBreatheFileDirectives", bool),
# Root API Document Customization and Treeview
("afterTitleDescription", six.string_types),
("afterHierarchyDescription", six.string_types),
("fullApiSubSectionTitle", six.string_types),
("afterBodySummary", six.string_types),
("fullToctreeMaxDepth", int),
("listingExclude", list),
# Clickable Hierarchies <3
("createTreeView", bool),
("minifyTreeView", bool),
("treeViewIsBootstrap", bool),
("treeViewBootstrapTextSpanClass", six.string_types),
("treeViewBootstrapIconMimicColor", six.string_types),
("treeViewBootstrapOnhoverColor", six.string_types),
("treeViewBootstrapUseBadgeTags", bool),
("treeViewBootstrapExpandIcon", six.string_types),
("treeViewBootstrapCollapseIcon", six.string_types),
("treeViewBootstrapLevels", int),
# Page Level Customization
("includeTemplateParamOrderList", bool),
("pageLevelConfigMeta", six.string_types),
("repoRedirectURL", six.string_types),
("contentsDirectives", bool),
("contentsTitle", six.string_types),
("contentsSpecifiers", list),
("kindsWithContentsDirectives", list),
# Breathe Customization
("customSpecificationsMapping", dict),
# Doxygen Execution and Customization
("exhaleExecutesDoxygen", bool),
("exhaleUseDoxyfile", bool),
("exhaleDoxygenStdin", six.string_types),
("exhaleSilentDoxygen", bool),
# Programlisting Customization
("lexerMapping", dict)
]
for key, expected_type in opt_kv:
# Used in error checking later
keys_available.append(key)
# Override the default settings if the key was provided
if key in exhale_args:
# Make sure the value is at the very least the correct type
val = exhale_args[key]
if not isinstance(val, expected_type):
val_t = type(val)
raise ConfigError(val_error.format(key=key, exp=expected_type, got=val_t))
# Set the config for use later
try:
configs_globals[key] = val
keys_processed.append(key)
except Exception as e:
raise ExtensionError(
"Critical error: unable to set `global {0}` to `{1}` in exhale.configs:\n{2}".format(
key, val, e
)
)
# These two need to be lists of strings, check to make sure
def _list_of_strings(lst, title):
for spec in lst:
if not isinstance(spec, six.string_types):
raise ConfigError(
"`{title}` must be a list of strings. `{spec}` was of type `{spec_t}`".format(
title=title,
spec=spec,
spec_t=type(spec)
)
)
_list_of_strings( contentsSpecifiers, "contentsSpecifiers")
_list_of_strings(kindsWithContentsDirectives, "kindsWithContentsDirectives")
# Make sure the kinds they specified are valid
for kind in kindsWithContentsDirectives:
if kind not in utils.AVAILABLE_KINDS:
raise ConfigError(
"Unknown `{kind}` given in `kindsWithContentsDirectives`. See utils.AVAILABLE_KINDS.".format(
kind=kind
)
)
# Make sure the listingExclude is usable
if "listingExclude" in exhale_args:
import re
# TODO: remove this once config objects are in. Reset needed for testing suite.
configs_globals["_compiled_listing_exclude"] = []
# used for error printing, tries to create string out of item otherwise
# returns 'at index {idx}'
def item_or_index(item, idx):
try:
return "`{item}`".format(item=item)
except:
return "at index {idx}".format(idx=idx)
exclusions = exhale_args["listingExclude"]
for idx in range(len(exclusions)):
# Gather the `pattern` and `flags` parameters for `re.compile`
item = exclusions[idx]
if isinstance(item, six.string_types):
pattern = item
flags = 0
else:
try:
pattern, flags = item
except Exception as e:
raise ConfigError(
"listingExclude item {0} cannot be unpacked as `pattern, flags = item`:\n{1}".format(
item_or_index(item, idx), e
)
)
# Compile the regular expression object.
try:
regex = re.compile(pattern, flags)
except Exception as e:
raise ConfigError(
"Unable to compile specified listingExclude {0}:\n{1}".format(
item_or_index(item, idx), e
)
)
configs_globals["_compiled_listing_exclude"].append(regex)
# Make sure the lexerMapping is usable
if "lexerMapping" in exhale_args:
from pygments import lexers
import re
# TODO: remove this once config objects are in. Reset needed for testing suite.
configs_globals["_compiled_lexer_mapping"] = {}
lexer_mapping = exhale_args["lexerMapping"]
for key in lexer_mapping:
val = lexer_mapping[key]
# Make sure both are strings
if not isinstance(key, six.string_types) or not isinstance(val, six.string_types):
raise ConfigError("All keys and values in `lexerMapping` must be strings.")
# Make sure the key is a valid regular expression
try:
regex = re.compile(key)
except Exception as e:
raise ConfigError(
"The `lexerMapping` key [{0}] is not a valid regular expression: {1}".format(key, e)
)
# Make sure the provided lexer is available
try:
lex = lexers.find_lexer_class_by_name(val)
except Exception as e:
raise ConfigError(
"The `lexerMapping` value of [{0}] for key [{1}] is not a valid Pygments lexer.".format(
val, key
)
)
# Everything works, stash for later processing
configs_globals["_compiled_lexer_mapping"][regex] = val
####################################################################################
# Internal consistency check to make sure available keys are accurate. #
####################################################################################
# See naming conventions described at top of file for why this is ok!
keys_expected = []
for key in configs_globals.keys():
val = configs_globals[key]
# Ignore modules and functions
if not isinstance(val, FunctionType) and not isinstance(val, ModuleType):
if key != "logger": # band-aid for logging api with Sphinx prior to config objects
# Ignore specials like __name__ and internal variables like _the_app
if "_" not in key and len(key) > 0: # don't think there can be zero length ones...
first = key[0]
if first.isalpha() and first.islower():
keys_expected.append(key)
keys_expected = set(keys_expected)
keys_available = set(keys_available)
if keys_expected != keys_available:
err = StringIO()
err.write(textwrap.dedent('''
CRITICAL: Exhale encountered an internal error, please raise an Issue on GitHub:
https://github.com/svenevs/exhale/issues
Please paste the following in the issue report:
Expected keys:
'''))
for key in keys_expected:
err.write("- {0}\n".format(key))
err.write(textwrap.dedent('''
Available keys:
'''))
for key in keys_available:
err.write("- {0}\n".format(key))
err.write(textwrap.dedent('''
The Mismatch(es):
'''))
for key in (keys_available ^ keys_expected):
err.write("- {0}\n".format(key))
err_msg = err.getvalue()
err.close()
raise ExtensionError(err_msg)
####################################################################################
# See if unexpected keys were presented. #
####################################################################################
all_keys = set(exhale_args.keys())
keys_processed = set(keys_processed)
if all_keys != keys_processed:
# Much love: https://stackoverflow.com/a/17388505/3814202
from difflib import SequenceMatcher
def similar(a, b):
return SequenceMatcher(None, a, b).ratio() * 100.0
# If there are keys left over after taking the differences of keys_processed
# (which is all keys Exhale expects to see), inform the user of keys they might
# have been trying to provide.
#
# Convert everything to lower case for better matching success
potential_keys = keys_available - keys_processed
potential_keys_lower = {key.lower(): key for key in potential_keys}
extras = all_keys - keys_processed
extra_error = StringIO()
extra_error.write("Exhale found unexpected keys in `exhale_args`:\n")
for key in extras:
extra_error.write(" - Extra key: {0}\n".format(key))
potentials = []
for mate in potential_keys_lower:
similarity = similar(key, mate)
if similarity > 50.0:
# Output results with the non-lower version they should put in exhale_args
potentials.append((similarity, potential_keys_lower[mate]))
if potentials:
potentials = reversed(sorted(potentials))
for rank, mate in potentials:
extra_error.write(" - {0:2.2f}% match with: {1}\n".format(rank, mate))
extra_error_str = extra_error.getvalue()
extra_error.close()
raise ConfigError(extra_error_str)
####################################################################################
# Verify some potentially inconsistent or ignored settings. #
####################################################################################
# treeViewIsBootstrap only takes meaning when createTreeView is True
if not createTreeView and treeViewIsBootstrap:
logger.warning("Exhale: `treeViewIsBootstrap=True` ignored since `createTreeView=False`")
# fullToctreeMaxDepth > 5 may produce other sphinx issues unrelated to exhale
if fullToctreeMaxDepth > 5:
logger.warning(
"Exhale: `fullToctreeMaxDepth={0}` is greater than 5 and may produce build errors for non-html.".format(
fullToctreeMaxDepth
)
)
# Make sure that we received a valid mapping created by utils.makeCustomSpecificationsMapping
sanity = _closure_map_sanity_check
insane = "`customSpecificationsMapping` *MUST* be made using exhale.utils.makeCustomSpecificationsMapping"
if customSpecificationsMapping:
# Sanity check to make sure exhale made this mapping
if sanity not in customSpecificationsMapping:
raise ConfigError(insane)
elif customSpecificationsMapping[sanity] != sanity: # LOL
raise ConfigError(insane)
# Sanity check #2: enforce no new additions were made
expected_keys = set([sanity]) | set(utils.AVAILABLE_KINDS)
provided_keys = set(customSpecificationsMapping.keys())
diff = provided_keys - expected_keys
if diff:
raise ConfigError("Found extra keys in `customSpecificationsMapping`: {0}".format(diff))
# Sanity check #3: make sure the return values are all strings
for key in customSpecificationsMapping:
val_t = type(customSpecificationsMapping[key])
if not isinstance(key, six.string_types):
raise ConfigError(
"`customSpecificationsMapping` key `{key}` gave value type `{val_t}` (need `str`).".format(
key=key, val_t=val_t
)
)
# Specify where the doxygen output should be going
global _doxygen_xml_output_directory
_doxygen_xml_output_directory = doxy_xml_dir
# If requested, the time is nigh for executing doxygen. The strategy:
# 1. Execute doxygen if requested
# 2. Verify that the expected doxy_xml_dir (specified to `breathe`) was created
# 3. Assuming everything went to plan, let exhale take over and create all of the .rst docs
if exhaleExecutesDoxygen:
# Cannot use both, only one or the other
if exhaleUseDoxyfile and (exhaleDoxygenStdin is not None):
raise ConfigError("You must choose one of `exhaleUseDoxyfile` or `exhaleDoxygenStdin`, not both.")
# The Doxyfile *must* be at the same level as conf.py
# This is done so that when separate source / build directories are being used,
# we can guarantee where the Doxyfile is.
if exhaleUseDoxyfile:
doxyfile_path = os.path.abspath(os.path.join(app.confdir, "Doxyfile"))
if not os.path.exists(doxyfile_path):
raise ConfigError("The file [{0}] does not exist".format(doxyfile_path))
here = os.path.abspath(os.curdir)
if here == app.confdir:
returnPath = None
else:
returnPath = here
# All necessary information ready, go to where the Doxyfile is, run Doxygen
# and then return back (where applicable) so sphinx can continue
start = utils.get_time()
if returnPath:
logger.info(utils.info(
"Exhale: changing directories to [{0}] to execute Doxygen.".format(app.confdir)
))
os.chdir(app.confdir)
logger.info(utils.info("Exhale: executing doxygen."))
status = deploy.generateDoxygenXML()
# Being overly-careful to put sphinx back where it was before potentially erroring out
if returnPath:
logger.info(utils.info(
"Exhale: changing directories back to [{0}] after Doxygen.".format(returnPath)
))
os.chdir(returnPath)
if status:
raise ExtensionError(status)
else:
end = utils.get_time()
logger.info(utils.progress(
"Exhale: doxygen ran successfully in {0}.".format(utils.time_string(start, end))
))
else:
if exhaleUseDoxyfile:
logger.warning("Exhale: `exhaleUseDoxyfile` ignored since `exhaleExecutesDoxygen=False`")
if exhaleDoxygenStdin is not None:
logger.warning("Exhale: `exhaleDoxygenStdin` ignored since `exhaleExecutesDoxygen=False`")
if exhaleSilentDoxygen:
logger.warning("Exhale: `exhaleSilentDoxygen=True` ignored since `exhaleExecutesDoxygen=False`")
# Either Doxygen was run prior to this being called, or we just finished running it.
# Make sure that the files we need are actually there.
if not os.path.isdir(doxy_xml_dir):
raise ConfigError(
"Exhale: the specified folder [{0}] does not exist. Has Doxygen been run?".format(doxy_xml_dir)
)
index = os.path.join(doxy_xml_dir, "index.xml")
if not os.path.isfile(index):
raise ConfigError("Exhale: the file [{0}] does not exist. Has Doxygen been run?".format(index))
# Legacy / debugging feature, warn of its purpose
if generateBreatheFileDirectives:
logger.warning("Exhale: `generateBreatheFileDirectives` is a debugging feature not intended for production.")
####################################################################################
# If using a fancy treeView, add the necessary frontend files. #
####################################################################################
if createTreeView:
if treeViewIsBootstrap:
tree_data_static_base = "treeView-bootstrap"
tree_data_css = [os.path.join("bootstrap-treeview", "bootstrap-treeview.min.css")]
tree_data_js = [
os.path.join("bootstrap-treeview", "bootstrap-treeview.min.js"),
# os.path.join("bootstrap-treeview", "apply-bootstrap-treview.js")
]
tree_data_ext = []
else:
tree_data_static_base = "treeView"
tree_data_css = [os.path.join("collapsible-lists", "css", "tree_view.css")]
tree_data_js = [
os.path.join("collapsible-lists", "js", "CollapsibleLists.compressed.js"),
os.path.join("collapsible-lists", "js", "apply-collapsible-lists.js")
]
# The tree_view.css file uses these
tree_data_ext = [
os.path.join("collapsible-lists", "css", "button-closed.png"),
os.path.join("collapsible-lists", "css", "button-open.png"),
os.path.join("collapsible-lists", "css", "button.png"),
os.path.join("collapsible-lists", "css", "list-item-contents.png"),
os.path.join("collapsible-lists", "css", "list-item-last-open.png"),
os.path.join("collapsible-lists", "css", "list-item-last.png"),
os.path.join("collapsible-lists", "css", "list-item-open.png"),
os.path.join("collapsible-lists", "css", "list-item.png"),
os.path.join("collapsible-lists", "css", "list-item-root.png"),
]
# Make sure we have everything we need
collapse_data = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data", tree_data_static_base)
if not os.path.isdir(collapse_data):
raise ExtensionError(
"Exhale: the path to [{0}] was not found, possible installation error.".format(collapse_data)
)
else:
all_files = tree_data_css + tree_data_js + tree_data_ext
missing = []
for file in all_files:
path = os.path.join(collapse_data, file)
if not os.path.isfile(path):
missing.append(path)
if missing:
raise ExtensionError(
"Exhale: the path(s) {0} were not found, possible installation error.".format(missing)
)
# We have all the files we need, the extra files will be copied automatically by
# sphinx to the correct _static/ location, but stylesheets and javascript need
# to be added explicitly
logger.info(utils.info("Exhale: adding tree view css / javascript."))
app.config.html_static_path.append(collapse_data)
# In Sphinx 1.8+ these have been renamed.
# - app.add_stylesheet -> app.add_css_file
# - app.add_javascript -> app.add_js_file
#
# RemovedInSphinx40Warning:
# - The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
# - The app.add_javascript() is deprecated. Please use app.add_js_file() instead.
#
# So we'll need to keep this funky `getattr` chain for a little while ;)
# Or else pin min sphinx version to 1.8 or higher. Probably when 2.0 is out?
add_css_file = getattr(app, "add_css_file", getattr(app, "add_stylesheet", None))
add_js_file = getattr(app, "add_js_file", getattr(app, "add_javascript", None))
# Add the stylesheets
for css in tree_data_css:
add_css_file(css)
# Add the javascript
for js in tree_data_js:
add_js_file(js)
logger.info(utils.progress("Exhale: added tree view css / javascript.")) | python | def apply_sphinx_configurations(app):
'''
This method applies the various configurations users place in their ``conf.py``, in
the dictionary ``exhale_args``. The error checking seems to be robust, and
borderline obsessive, but there may very well be some glaring flaws.
When the user requests for the ``treeView`` to be created, this method is also
responsible for adding the various CSS / JavaScript to the Sphinx Application
to support the hierarchical views.
.. danger::
This method is **not** supposed to be called directly. See
``exhale/__init__.py`` for how this function is called indirectly via the Sphinx
API.
**Parameters**
``app`` (:class:`sphinx.application.Sphinx`)
The Sphinx Application running the documentation build.
'''
# Import local to function to prevent circular imports elsewhere in the framework.
from . import deploy
from . import utils
####################################################################################
# Make sure they have the `breathe` configs setup in a way that we can use them. #
####################################################################################
# Breathe allows users to have multiple projects to configure in one `conf.py`
# A dictionary of keys := project names, values := path to Doxygen xml output dir
breathe_projects = app.config.breathe_projects
if not breathe_projects:
raise ConfigError("You must set the `breathe_projects` in `conf.py`.")
elif type(breathe_projects) is not dict:
raise ConfigError("The type of `breathe_projects` in `conf.py` must be a dictionary.")
# The breathe_default_project is required by `exhale` to determine where to look for
# the doxygen xml.
#
# TODO: figure out how to allow multiple breathe projects?
breathe_default_project = app.config.breathe_default_project
if not breathe_default_project:
raise ConfigError("You must set the `breathe_default_project` in `conf.py`.")
elif not isinstance(breathe_default_project, six.string_types):
raise ConfigError("The type of `breathe_default_project` must be a string.")
if breathe_default_project not in breathe_projects:
raise ConfigError(
"The given breathe_default_project='{0}' was not a valid key in `breathe_projects`:\n{1}".format(
breathe_default_project, breathe_projects
)
)
# Grab where the Doxygen xml output is supposed to go, make sure it is a string,
# defer validation of existence until after potentially running Doxygen based on
# the configs given to exhale
doxy_xml_dir = breathe_projects[breathe_default_project]
if not isinstance(doxy_xml_dir, six.string_types):
raise ConfigError(
"The type of `breathe_projects[breathe_default_project]` from `conf.py` was not a string."
)
# Make doxy_xml_dir relative to confdir (where conf.py is)
if not os.path.isabs(doxy_xml_dir):
doxy_xml_dir = os.path.abspath(os.path.join(app.confdir, doxy_xml_dir))
####################################################################################
# Initial sanity-check that we have the arguments needed. #
####################################################################################
exhale_args = app.config.exhale_args
if not exhale_args:
raise ConfigError("You must set the `exhale_args` dictionary in `conf.py`.")
elif type(exhale_args) is not dict:
raise ConfigError("The type of `exhale_args` in `conf.py` must be a dictionary.")
####################################################################################
# In order to be able to loop through things below, we want to grab the globals #
# dictionary (rather than needing to do `global containmentFolder` etc for every #
# setting that is being changed). #
####################################################################################
configs_globals = globals()
# Used for internal verification of available keys
keys_available = []
# At the end of input processing, fail out if unrecognized keys were found.
keys_processed = []
####################################################################################
# Gather the mandatory input for exhale. #
####################################################################################
key_error = "Did not find required key `{key}` in `exhale_args`."
val_error = "The type of the value for key `{key}` must be `{exp}`, but was `{got}`."
req_kv = [
("containmentFolder", six.string_types, True),
("rootFileName", six.string_types, False),
("rootFileTitle", six.string_types, False),
("doxygenStripFromPath", six.string_types, True)
]
for key, expected_type, make_absolute in req_kv:
# Used in error checking later
keys_available.append(key)
# Make sure we have the key
if key not in exhale_args:
raise ConfigError(key_error.format(key=key))
# Make sure the value is at the very least the correct type
val = exhale_args[key]
if not isinstance(val, expected_type):
val_t = type(val)
raise ConfigError(val_error.format(key=key, exp=expected_type, got=val_t))
# Make sure that a value was provided (e.g. no empty strings)
if not val:
raise ConfigError("Non-empty value for key [{0}] required.".format(key))
# If the string represents a path, make it absolute
if make_absolute:
# Directories are made absolute relative to app.confdir (where conf.py is)
if not os.path.isabs(val):
val = os.path.abspath(os.path.join(os.path.abspath(app.confdir), val))
# Set the config for use later
try:
configs_globals[key] = val
keys_processed.append(key)
except Exception as e:
raise ExtensionError(
"Critical error: unable to set `global {0}` to `{1}` in exhale.configs:\n{2}".format(
key, val, e
)
)
####################################################################################
# Validate what can be checked from the required arguments at this time. #
####################################################################################
global _the_app
_the_app = app
# Make sure they know this is a bad idea. The order of these checks is important.
# This assumes the path given was not the empty string (3 will break if it is).
#
# 1. If containmentFolder and app.srcdir are the same, problem.
# 2. If app.srcdir is not at the beginning of containmentFolder, problem.
# 3. If the first two checks have not raised a problem, the final check is to make
# sure that a subdirectory was actually used, as opposed to something that just
# starts with the same path.
#
# Note for the third check lazy evaluation is the only thing that makes checking
# _parts[1] acceptable ;)
_one = containmentFolder == app.srcdir
_two = not containmentFolder.startswith(app.srcdir)
_parts = containmentFolder.split(app.srcdir)
_three = _parts[0] != "" or len(_parts[1].split(os.path.sep)) > 2 or \
os.path.join(app.srcdir, _parts[1].replace(os.path.sep, "", 1)) != containmentFolder # noqa
# If they are equal, containmentFolder points somewhere entirely differently, or the
# relative path (made absolute again) does not have the srcdir
if _one or _two or _three:
raise ConfigError(
"The given `containmentFolder` [{0}] must be a *SUBDIRECTORY* of [{1}].".format(
containmentFolder, app.srcdir
)
)
global _app_src_dir
_app_src_dir = os.path.abspath(app.srcdir)
# We *ONLY* generate reStructuredText, make sure Sphinx is expecting this as well as
# the to-be-generated library root file is correctly suffixed.
if not rootFileName.endswith(".rst"):
raise ConfigError(
"The given `rootFileName` ({0}) did not end with '.rst'; Exhale is reStructuredText only.".format(
rootFileName
)
)
if ".rst" not in app.config.source_suffix:
raise ConfigError(
"Exhale is reStructuredText only, but '.rst' was not found in `source_suffix` list of `conf.py`."
)
# Make sure the doxygen strip path is an exclude-able path
if not os.path.exists(doxygenStripFromPath):
raise ConfigError(
"The path given as `doxygenStripFromPath` ({0}) does not exist!".format(doxygenStripFromPath)
)
####################################################################################
# Gather the optional input for exhale. #
####################################################################################
# TODO: `list` -> `(list, tuple)`, update docs too.
opt_kv = [
# Build Process Logging, Colors, and Debugging
("verboseBuild", bool),
("alwaysColorize", bool),
("generateBreatheFileDirectives", bool),
# Root API Document Customization and Treeview
("afterTitleDescription", six.string_types),
("afterHierarchyDescription", six.string_types),
("fullApiSubSectionTitle", six.string_types),
("afterBodySummary", six.string_types),
("fullToctreeMaxDepth", int),
("listingExclude", list),
# Clickable Hierarchies <3
("createTreeView", bool),
("minifyTreeView", bool),
("treeViewIsBootstrap", bool),
("treeViewBootstrapTextSpanClass", six.string_types),
("treeViewBootstrapIconMimicColor", six.string_types),
("treeViewBootstrapOnhoverColor", six.string_types),
("treeViewBootstrapUseBadgeTags", bool),
("treeViewBootstrapExpandIcon", six.string_types),
("treeViewBootstrapCollapseIcon", six.string_types),
("treeViewBootstrapLevels", int),
# Page Level Customization
("includeTemplateParamOrderList", bool),
("pageLevelConfigMeta", six.string_types),
("repoRedirectURL", six.string_types),
("contentsDirectives", bool),
("contentsTitle", six.string_types),
("contentsSpecifiers", list),
("kindsWithContentsDirectives", list),
# Breathe Customization
("customSpecificationsMapping", dict),
# Doxygen Execution and Customization
("exhaleExecutesDoxygen", bool),
("exhaleUseDoxyfile", bool),
("exhaleDoxygenStdin", six.string_types),
("exhaleSilentDoxygen", bool),
# Programlisting Customization
("lexerMapping", dict)
]
for key, expected_type in opt_kv:
# Used in error checking later
keys_available.append(key)
# Override the default settings if the key was provided
if key in exhale_args:
# Make sure the value is at the very least the correct type
val = exhale_args[key]
if not isinstance(val, expected_type):
val_t = type(val)
raise ConfigError(val_error.format(key=key, exp=expected_type, got=val_t))
# Set the config for use later
try:
configs_globals[key] = val
keys_processed.append(key)
except Exception as e:
raise ExtensionError(
"Critical error: unable to set `global {0}` to `{1}` in exhale.configs:\n{2}".format(
key, val, e
)
)
# These two need to be lists of strings, check to make sure
def _list_of_strings(lst, title):
for spec in lst:
if not isinstance(spec, six.string_types):
raise ConfigError(
"`{title}` must be a list of strings. `{spec}` was of type `{spec_t}`".format(
title=title,
spec=spec,
spec_t=type(spec)
)
)
_list_of_strings( contentsSpecifiers, "contentsSpecifiers")
_list_of_strings(kindsWithContentsDirectives, "kindsWithContentsDirectives")
# Make sure the kinds they specified are valid
for kind in kindsWithContentsDirectives:
if kind not in utils.AVAILABLE_KINDS:
raise ConfigError(
"Unknown `{kind}` given in `kindsWithContentsDirectives`. See utils.AVAILABLE_KINDS.".format(
kind=kind
)
)
# Make sure the listingExclude is usable
if "listingExclude" in exhale_args:
import re
# TODO: remove this once config objects are in. Reset needed for testing suite.
configs_globals["_compiled_listing_exclude"] = []
# used for error printing, tries to create string out of item otherwise
# returns 'at index {idx}'
def item_or_index(item, idx):
try:
return "`{item}`".format(item=item)
except:
return "at index {idx}".format(idx=idx)
exclusions = exhale_args["listingExclude"]
for idx in range(len(exclusions)):
# Gather the `pattern` and `flags` parameters for `re.compile`
item = exclusions[idx]
if isinstance(item, six.string_types):
pattern = item
flags = 0
else:
try:
pattern, flags = item
except Exception as e:
raise ConfigError(
"listingExclude item {0} cannot be unpacked as `pattern, flags = item`:\n{1}".format(
item_or_index(item, idx), e
)
)
# Compile the regular expression object.
try:
regex = re.compile(pattern, flags)
except Exception as e:
raise ConfigError(
"Unable to compile specified listingExclude {0}:\n{1}".format(
item_or_index(item, idx), e
)
)
configs_globals["_compiled_listing_exclude"].append(regex)
# Make sure the lexerMapping is usable
if "lexerMapping" in exhale_args:
from pygments import lexers
import re
# TODO: remove this once config objects are in. Reset needed for testing suite.
configs_globals["_compiled_lexer_mapping"] = {}
lexer_mapping = exhale_args["lexerMapping"]
for key in lexer_mapping:
val = lexer_mapping[key]
# Make sure both are strings
if not isinstance(key, six.string_types) or not isinstance(val, six.string_types):
raise ConfigError("All keys and values in `lexerMapping` must be strings.")
# Make sure the key is a valid regular expression
try:
regex = re.compile(key)
except Exception as e:
raise ConfigError(
"The `lexerMapping` key [{0}] is not a valid regular expression: {1}".format(key, e)
)
# Make sure the provided lexer is available
try:
lex = lexers.find_lexer_class_by_name(val)
except Exception as e:
raise ConfigError(
"The `lexerMapping` value of [{0}] for key [{1}] is not a valid Pygments lexer.".format(
val, key
)
)
# Everything works, stash for later processing
configs_globals["_compiled_lexer_mapping"][regex] = val
####################################################################################
# Internal consistency check to make sure available keys are accurate. #
####################################################################################
# See naming conventions described at top of file for why this is ok!
keys_expected = []
for key in configs_globals.keys():
val = configs_globals[key]
# Ignore modules and functions
if not isinstance(val, FunctionType) and not isinstance(val, ModuleType):
if key != "logger": # band-aid for logging api with Sphinx prior to config objects
# Ignore specials like __name__ and internal variables like _the_app
if "_" not in key and len(key) > 0: # don't think there can be zero length ones...
first = key[0]
if first.isalpha() and first.islower():
keys_expected.append(key)
keys_expected = set(keys_expected)
keys_available = set(keys_available)
if keys_expected != keys_available:
err = StringIO()
err.write(textwrap.dedent('''
CRITICAL: Exhale encountered an internal error, please raise an Issue on GitHub:
https://github.com/svenevs/exhale/issues
Please paste the following in the issue report:
Expected keys:
'''))
for key in keys_expected:
err.write("- {0}\n".format(key))
err.write(textwrap.dedent('''
Available keys:
'''))
for key in keys_available:
err.write("- {0}\n".format(key))
err.write(textwrap.dedent('''
The Mismatch(es):
'''))
for key in (keys_available ^ keys_expected):
err.write("- {0}\n".format(key))
err_msg = err.getvalue()
err.close()
raise ExtensionError(err_msg)
####################################################################################
# See if unexpected keys were presented. #
####################################################################################
all_keys = set(exhale_args.keys())
keys_processed = set(keys_processed)
if all_keys != keys_processed:
# Much love: https://stackoverflow.com/a/17388505/3814202
from difflib import SequenceMatcher
def similar(a, b):
return SequenceMatcher(None, a, b).ratio() * 100.0
# If there are keys left over after taking the differences of keys_processed
# (which is all keys Exhale expects to see), inform the user of keys they might
# have been trying to provide.
#
# Convert everything to lower case for better matching success
potential_keys = keys_available - keys_processed
potential_keys_lower = {key.lower(): key for key in potential_keys}
extras = all_keys - keys_processed
extra_error = StringIO()
extra_error.write("Exhale found unexpected keys in `exhale_args`:\n")
for key in extras:
extra_error.write(" - Extra key: {0}\n".format(key))
potentials = []
for mate in potential_keys_lower:
similarity = similar(key, mate)
if similarity > 50.0:
# Output results with the non-lower version they should put in exhale_args
potentials.append((similarity, potential_keys_lower[mate]))
if potentials:
potentials = reversed(sorted(potentials))
for rank, mate in potentials:
extra_error.write(" - {0:2.2f}% match with: {1}\n".format(rank, mate))
extra_error_str = extra_error.getvalue()
extra_error.close()
raise ConfigError(extra_error_str)
####################################################################################
# Verify some potentially inconsistent or ignored settings. #
####################################################################################
# treeViewIsBootstrap only takes meaning when createTreeView is True
if not createTreeView and treeViewIsBootstrap:
logger.warning("Exhale: `treeViewIsBootstrap=True` ignored since `createTreeView=False`")
# fullToctreeMaxDepth > 5 may produce other sphinx issues unrelated to exhale
if fullToctreeMaxDepth > 5:
logger.warning(
"Exhale: `fullToctreeMaxDepth={0}` is greater than 5 and may produce build errors for non-html.".format(
fullToctreeMaxDepth
)
)
# Make sure that we received a valid mapping created by utils.makeCustomSpecificationsMapping
sanity = _closure_map_sanity_check
insane = "`customSpecificationsMapping` *MUST* be made using exhale.utils.makeCustomSpecificationsMapping"
if customSpecificationsMapping:
# Sanity check to make sure exhale made this mapping
if sanity not in customSpecificationsMapping:
raise ConfigError(insane)
elif customSpecificationsMapping[sanity] != sanity: # LOL
raise ConfigError(insane)
# Sanity check #2: enforce no new additions were made
expected_keys = set([sanity]) | set(utils.AVAILABLE_KINDS)
provided_keys = set(customSpecificationsMapping.keys())
diff = provided_keys - expected_keys
if diff:
raise ConfigError("Found extra keys in `customSpecificationsMapping`: {0}".format(diff))
# Sanity check #3: make sure the return values are all strings
for key in customSpecificationsMapping:
val_t = type(customSpecificationsMapping[key])
if not isinstance(key, six.string_types):
raise ConfigError(
"`customSpecificationsMapping` key `{key}` gave value type `{val_t}` (need `str`).".format(
key=key, val_t=val_t
)
)
# Specify where the doxygen output should be going
global _doxygen_xml_output_directory
_doxygen_xml_output_directory = doxy_xml_dir
# If requested, the time is nigh for executing doxygen. The strategy:
# 1. Execute doxygen if requested
# 2. Verify that the expected doxy_xml_dir (specified to `breathe`) was created
# 3. Assuming everything went to plan, let exhale take over and create all of the .rst docs
if exhaleExecutesDoxygen:
# Cannot use both, only one or the other
if exhaleUseDoxyfile and (exhaleDoxygenStdin is not None):
raise ConfigError("You must choose one of `exhaleUseDoxyfile` or `exhaleDoxygenStdin`, not both.")
# The Doxyfile *must* be at the same level as conf.py
# This is done so that when separate source / build directories are being used,
# we can guarantee where the Doxyfile is.
if exhaleUseDoxyfile:
doxyfile_path = os.path.abspath(os.path.join(app.confdir, "Doxyfile"))
if not os.path.exists(doxyfile_path):
raise ConfigError("The file [{0}] does not exist".format(doxyfile_path))
here = os.path.abspath(os.curdir)
if here == app.confdir:
returnPath = None
else:
returnPath = here
# All necessary information ready, go to where the Doxyfile is, run Doxygen
# and then return back (where applicable) so sphinx can continue
start = utils.get_time()
if returnPath:
logger.info(utils.info(
"Exhale: changing directories to [{0}] to execute Doxygen.".format(app.confdir)
))
os.chdir(app.confdir)
logger.info(utils.info("Exhale: executing doxygen."))
status = deploy.generateDoxygenXML()
# Being overly-careful to put sphinx back where it was before potentially erroring out
if returnPath:
logger.info(utils.info(
"Exhale: changing directories back to [{0}] after Doxygen.".format(returnPath)
))
os.chdir(returnPath)
if status:
raise ExtensionError(status)
else:
end = utils.get_time()
logger.info(utils.progress(
"Exhale: doxygen ran successfully in {0}.".format(utils.time_string(start, end))
))
else:
if exhaleUseDoxyfile:
logger.warning("Exhale: `exhaleUseDoxyfile` ignored since `exhaleExecutesDoxygen=False`")
if exhaleDoxygenStdin is not None:
logger.warning("Exhale: `exhaleDoxygenStdin` ignored since `exhaleExecutesDoxygen=False`")
if exhaleSilentDoxygen:
logger.warning("Exhale: `exhaleSilentDoxygen=True` ignored since `exhaleExecutesDoxygen=False`")
# Either Doxygen was run prior to this being called, or we just finished running it.
# Make sure that the files we need are actually there.
if not os.path.isdir(doxy_xml_dir):
raise ConfigError(
"Exhale: the specified folder [{0}] does not exist. Has Doxygen been run?".format(doxy_xml_dir)
)
index = os.path.join(doxy_xml_dir, "index.xml")
if not os.path.isfile(index):
raise ConfigError("Exhale: the file [{0}] does not exist. Has Doxygen been run?".format(index))
# Legacy / debugging feature, warn of its purpose
if generateBreatheFileDirectives:
logger.warning("Exhale: `generateBreatheFileDirectives` is a debugging feature not intended for production.")
####################################################################################
# If using a fancy treeView, add the necessary frontend files. #
####################################################################################
if createTreeView:
if treeViewIsBootstrap:
tree_data_static_base = "treeView-bootstrap"
tree_data_css = [os.path.join("bootstrap-treeview", "bootstrap-treeview.min.css")]
tree_data_js = [
os.path.join("bootstrap-treeview", "bootstrap-treeview.min.js"),
# os.path.join("bootstrap-treeview", "apply-bootstrap-treview.js")
]
tree_data_ext = []
else:
tree_data_static_base = "treeView"
tree_data_css = [os.path.join("collapsible-lists", "css", "tree_view.css")]
tree_data_js = [
os.path.join("collapsible-lists", "js", "CollapsibleLists.compressed.js"),
os.path.join("collapsible-lists", "js", "apply-collapsible-lists.js")
]
# The tree_view.css file uses these
tree_data_ext = [
os.path.join("collapsible-lists", "css", "button-closed.png"),
os.path.join("collapsible-lists", "css", "button-open.png"),
os.path.join("collapsible-lists", "css", "button.png"),
os.path.join("collapsible-lists", "css", "list-item-contents.png"),
os.path.join("collapsible-lists", "css", "list-item-last-open.png"),
os.path.join("collapsible-lists", "css", "list-item-last.png"),
os.path.join("collapsible-lists", "css", "list-item-open.png"),
os.path.join("collapsible-lists", "css", "list-item.png"),
os.path.join("collapsible-lists", "css", "list-item-root.png"),
]
# Make sure we have everything we need
collapse_data = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data", tree_data_static_base)
if not os.path.isdir(collapse_data):
raise ExtensionError(
"Exhale: the path to [{0}] was not found, possible installation error.".format(collapse_data)
)
else:
all_files = tree_data_css + tree_data_js + tree_data_ext
missing = []
for file in all_files:
path = os.path.join(collapse_data, file)
if not os.path.isfile(path):
missing.append(path)
if missing:
raise ExtensionError(
"Exhale: the path(s) {0} were not found, possible installation error.".format(missing)
)
# We have all the files we need, the extra files will be copied automatically by
# sphinx to the correct _static/ location, but stylesheets and javascript need
# to be added explicitly
logger.info(utils.info("Exhale: adding tree view css / javascript."))
app.config.html_static_path.append(collapse_data)
# In Sphinx 1.8+ these have been renamed.
# - app.add_stylesheet -> app.add_css_file
# - app.add_javascript -> app.add_js_file
#
# RemovedInSphinx40Warning:
# - The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
# - The app.add_javascript() is deprecated. Please use app.add_js_file() instead.
#
# So we'll need to keep this funky `getattr` chain for a little while ;)
# Or else pin min sphinx version to 1.8 or higher. Probably when 2.0 is out?
add_css_file = getattr(app, "add_css_file", getattr(app, "add_stylesheet", None))
add_js_file = getattr(app, "add_js_file", getattr(app, "add_javascript", None))
# Add the stylesheets
for css in tree_data_css:
add_css_file(css)
# Add the javascript
for js in tree_data_js:
add_js_file(js)
logger.info(utils.progress("Exhale: added tree view css / javascript.")) | ['def', 'apply_sphinx_configurations', '(', 'app', ')', ':', '# Import local to function to prevent circular imports elsewhere in the framework.', 'from', '.', 'import', 'deploy', 'from', '.', 'import', 'utils', '####################################################################################', '# Make sure they have the `breathe` configs setup in a way that we can use them. #', '####################################################################################', '# Breathe allows users to have multiple projects to configure in one `conf.py`', '# A dictionary of keys := project names, values := path to Doxygen xml output dir', 'breathe_projects', '=', 'app', '.', 'config', '.', 'breathe_projects', 'if', 'not', 'breathe_projects', ':', 'raise', 'ConfigError', '(', '"You must set the `breathe_projects` in `conf.py`."', ')', 'elif', 'type', '(', 'breathe_projects', ')', 'is', 'not', 'dict', ':', 'raise', 'ConfigError', '(', '"The type of `breathe_projects` in `conf.py` must be a dictionary."', ')', '# The breathe_default_project is required by `exhale` to determine where to look for', '# the doxygen xml.', '#', '# TODO: figure out how to allow multiple breathe projects?', 'breathe_default_project', '=', 'app', '.', 'config', '.', 'breathe_default_project', 'if', 'not', 'breathe_default_project', ':', 'raise', 'ConfigError', '(', '"You must set the `breathe_default_project` in `conf.py`."', ')', 'elif', 'not', 'isinstance', '(', 'breathe_default_project', ',', 'six', '.', 'string_types', ')', ':', 'raise', 'ConfigError', '(', '"The type of `breathe_default_project` must be a string."', ')', 'if', 'breathe_default_project', 'not', 'in', 'breathe_projects', ':', 'raise', 'ConfigError', '(', '"The given breathe_default_project=\'{0}\' was not a valid key in `breathe_projects`:\\n{1}"', '.', 'format', '(', 'breathe_default_project', ',', 'breathe_projects', ')', ')', '# Grab where the Doxygen xml output is supposed to go, make sure it is a string,', '# defer validation of existence until after potentially running Doxygen based on', '# the configs given to exhale', 'doxy_xml_dir', '=', 'breathe_projects', '[', 'breathe_default_project', ']', 'if', 'not', 'isinstance', '(', 'doxy_xml_dir', ',', 'six', '.', 'string_types', ')', ':', 'raise', 'ConfigError', '(', '"The type of `breathe_projects[breathe_default_project]` from `conf.py` was not a string."', ')', '# Make doxy_xml_dir relative to confdir (where conf.py is)', 'if', 'not', 'os', '.', 'path', '.', 'isabs', '(', 'doxy_xml_dir', ')', ':', 'doxy_xml_dir', '=', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'path', '.', 'join', '(', 'app', '.', 'confdir', ',', 'doxy_xml_dir', ')', ')', '####################################################################################', '# Initial sanity-check that we have the arguments needed. 
#', '####################################################################################', 'exhale_args', '=', 'app', '.', 'config', '.', 'exhale_args', 'if', 'not', 'exhale_args', ':', 'raise', 'ConfigError', '(', '"You must set the `exhale_args` dictionary in `conf.py`."', ')', 'elif', 'type', '(', 'exhale_args', ')', 'is', 'not', 'dict', ':', 'raise', 'ConfigError', '(', '"The type of `exhale_args` in `conf.py` must be a dictionary."', ')', '####################################################################################', '# In order to be able to loop through things below, we want to grab the globals #', '# dictionary (rather than needing to do `global containmentFolder` etc for every #', '# setting that is being changed). #', '####################################################################################', 'configs_globals', '=', 'globals', '(', ')', '# Used for internal verification of available keys', 'keys_available', '=', '[', ']', '# At the end of input processing, fail out if unrecognized keys were found.', 'keys_processed', '=', '[', ']', '####################################################################################', '# Gather the mandatory input for exhale. #', '####################################################################################', 'key_error', '=', '"Did not find required key `{key}` in `exhale_args`."', 'val_error', '=', '"The type of the value for key `{key}` must be `{exp}`, but was `{got}`."', 'req_kv', '=', '[', '(', '"containmentFolder"', ',', 'six', '.', 'string_types', ',', 'True', ')', ',', '(', '"rootFileName"', ',', 'six', '.', 'string_types', ',', 'False', ')', ',', '(', '"rootFileTitle"', ',', 'six', '.', 'string_types', ',', 'False', ')', ',', '(', '"doxygenStripFromPath"', ',', 'six', '.', 'string_types', ',', 'True', ')', ']', 'for', 'key', ',', 'expected_type', ',', 'make_absolute', 'in', 'req_kv', ':', '# Used in error checking later', 'keys_available', '.', 'append', '(', 'key', ')', '# Make sure we have the key', 'if', 'key', 'not', 'in', 'exhale_args', ':', 'raise', 'ConfigError', '(', 'key_error', '.', 'format', '(', 'key', '=', 'key', ')', ')', '# Make sure the value is at the very least the correct type', 'val', '=', 'exhale_args', '[', 'key', ']', 'if', 'not', 'isinstance', '(', 'val', ',', 'expected_type', ')', ':', 'val_t', '=', 'type', '(', 'val', ')', 'raise', 'ConfigError', '(', 'val_error', '.', 'format', '(', 'key', '=', 'key', ',', 'exp', '=', 'expected_type', ',', 'got', '=', 'val_t', ')', ')', '# Make sure that a value was provided (e.g. 
no empty strings)', 'if', 'not', 'val', ':', 'raise', 'ConfigError', '(', '"Non-empty value for key [{0}] required."', '.', 'format', '(', 'key', ')', ')', '# If the string represents a path, make it absolute', 'if', 'make_absolute', ':', '# Directories are made absolute relative to app.confdir (where conf.py is)', 'if', 'not', 'os', '.', 'path', '.', 'isabs', '(', 'val', ')', ':', 'val', '=', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'abspath', '(', 'app', '.', 'confdir', ')', ',', 'val', ')', ')', '# Set the config for use later', 'try', ':', 'configs_globals', '[', 'key', ']', '=', 'val', 'keys_processed', '.', 'append', '(', 'key', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'ExtensionError', '(', '"Critical error: unable to set `global {0}` to `{1}` in exhale.configs:\\n{2}"', '.', 'format', '(', 'key', ',', 'val', ',', 'e', ')', ')', '####################################################################################', '# Validate what can be checked from the required arguments at this time. #', '####################################################################################', 'global', '_the_app', '_the_app', '=', 'app', '# Make sure they know this is a bad idea. The order of these checks is important.', '# This assumes the path given was not the empty string (3 will break if it is).', '#', '# 1. If containmentFolder and app.srcdir are the same, problem.', '# 2. If app.srcdir is not at the beginning of containmentFolder, problem.', '# 3. If the first two checks have not raised a problem, the final check is to make', '# sure that a subdirectory was actually used, as opposed to something that just', '# starts with the same path.', '#', '# Note for the third check lazy evaluation is the only thing that makes checking', '# _parts[1] acceptable ;)', '_one', '=', 'containmentFolder', '==', 'app', '.', 'srcdir', '_two', '=', 'not', 'containmentFolder', '.', 'startswith', '(', 'app', '.', 'srcdir', ')', '_parts', '=', 'containmentFolder', '.', 'split', '(', 'app', '.', 'srcdir', ')', '_three', '=', '_parts', '[', '0', ']', '!=', '""', 'or', 'len', '(', '_parts', '[', '1', ']', '.', 'split', '(', 'os', '.', 'path', '.', 'sep', ')', ')', '>', '2', 'or', 'os', '.', 'path', '.', 'join', '(', 'app', '.', 'srcdir', ',', '_parts', '[', '1', ']', '.', 'replace', '(', 'os', '.', 'path', '.', 'sep', ',', '""', ',', '1', ')', ')', '!=', 'containmentFolder', '# noqa', '# If they are equal, containmentFolder points somewhere entirely differently, or the', '# relative path (made absolute again) does not have the srcdir', 'if', '_one', 'or', '_two', 'or', '_three', ':', 'raise', 'ConfigError', '(', '"The given `containmentFolder` [{0}] must be a *SUBDIRECTORY* of [{1}]."', '.', 'format', '(', 'containmentFolder', ',', 'app', '.', 'srcdir', ')', ')', 'global', '_app_src_dir', '_app_src_dir', '=', 'os', '.', 'path', '.', 'abspath', '(', 'app', '.', 'srcdir', ')', '# We *ONLY* generate reStructuredText, make sure Sphinx is expecting this as well as', '# the to-be-generated library root file is correctly suffixed.', 'if', 'not', 'rootFileName', '.', 'endswith', '(', '".rst"', ')', ':', 'raise', 'ConfigError', '(', '"The given `rootFileName` ({0}) did not end with \'.rst\'; Exhale is reStructuredText only."', '.', 'format', '(', 'rootFileName', ')', ')', 'if', '".rst"', 'not', 'in', 'app', '.', 'config', '.', 'source_suffix', ':', 'raise', 'ConfigError', '(', '"Exhale is reStructuredText only, but \'.rst\' was not found in 
`source_suffix` list of `conf.py`."', ')', '# Make sure the doxygen strip path is an exclude-able path', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'doxygenStripFromPath', ')', ':', 'raise', 'ConfigError', '(', '"The path given as `doxygenStripFromPath` ({0}) does not exist!"', '.', 'format', '(', 'doxygenStripFromPath', ')', ')', '####################################################################################', '# Gather the optional input for exhale. #', '####################################################################################', '# TODO: `list` -> `(list, tuple)`, update docs too.', 'opt_kv', '=', '[', '# Build Process Logging, Colors, and Debugging', '(', '"verboseBuild"', ',', 'bool', ')', ',', '(', '"alwaysColorize"', ',', 'bool', ')', ',', '(', '"generateBreatheFileDirectives"', ',', 'bool', ')', ',', '# Root API Document Customization and Treeview', '(', '"afterTitleDescription"', ',', 'six', '.', 'string_types', ')', ',', '(', '"afterHierarchyDescription"', ',', 'six', '.', 'string_types', ')', ',', '(', '"fullApiSubSectionTitle"', ',', 'six', '.', 'string_types', ')', ',', '(', '"afterBodySummary"', ',', 'six', '.', 'string_types', ')', ',', '(', '"fullToctreeMaxDepth"', ',', 'int', ')', ',', '(', '"listingExclude"', ',', 'list', ')', ',', '# Clickable Hierarchies <3', '(', '"createTreeView"', ',', 'bool', ')', ',', '(', '"minifyTreeView"', ',', 'bool', ')', ',', '(', '"treeViewIsBootstrap"', ',', 'bool', ')', ',', '(', '"treeViewBootstrapTextSpanClass"', ',', 'six', '.', 'string_types', ')', ',', '(', '"treeViewBootstrapIconMimicColor"', ',', 'six', '.', 'string_types', ')', ',', '(', '"treeViewBootstrapOnhoverColor"', ',', 'six', '.', 'string_types', ')', ',', '(', '"treeViewBootstrapUseBadgeTags"', ',', 'bool', ')', ',', '(', '"treeViewBootstrapExpandIcon"', ',', 'six', '.', 'string_types', ')', ',', '(', '"treeViewBootstrapCollapseIcon"', ',', 'six', '.', 'string_types', ')', ',', '(', '"treeViewBootstrapLevels"', ',', 'int', ')', ',', '# Page Level Customization', '(', '"includeTemplateParamOrderList"', ',', 'bool', ')', ',', '(', '"pageLevelConfigMeta"', ',', 'six', '.', 'string_types', ')', ',', '(', '"repoRedirectURL"', ',', 'six', '.', 'string_types', ')', ',', '(', '"contentsDirectives"', ',', 'bool', ')', ',', '(', '"contentsTitle"', ',', 'six', '.', 'string_types', ')', ',', '(', '"contentsSpecifiers"', ',', 'list', ')', ',', '(', '"kindsWithContentsDirectives"', ',', 'list', ')', ',', '# Breathe Customization', '(', '"customSpecificationsMapping"', ',', 'dict', ')', ',', '# Doxygen Execution and Customization', '(', '"exhaleExecutesDoxygen"', ',', 'bool', ')', ',', '(', '"exhaleUseDoxyfile"', ',', 'bool', ')', ',', '(', '"exhaleDoxygenStdin"', ',', 'six', '.', 'string_types', ')', ',', '(', '"exhaleSilentDoxygen"', ',', 'bool', ')', ',', '# Programlisting Customization', '(', '"lexerMapping"', ',', 'dict', ')', ']', 'for', 'key', ',', 'expected_type', 'in', 'opt_kv', ':', '# Used in error checking later', 'keys_available', '.', 'append', '(', 'key', ')', '# Override the default settings if the key was provided', 'if', 'key', 'in', 'exhale_args', ':', '# Make sure the value is at the very least the correct type', 'val', '=', 'exhale_args', '[', 'key', ']', 'if', 'not', 'isinstance', '(', 'val', ',', 'expected_type', ')', ':', 'val_t', '=', 'type', '(', 'val', ')', 'raise', 'ConfigError', '(', 'val_error', '.', 'format', '(', 'key', '=', 'key', ',', 'exp', '=', 'expected_type', ',', 'got', '=', 'val_t', ')', ')', '# Set the config for use 
later', 'try', ':', 'configs_globals', '[', 'key', ']', '=', 'val', 'keys_processed', '.', 'append', '(', 'key', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'ExtensionError', '(', '"Critical error: unable to set `global {0}` to `{1}` in exhale.configs:\\n{2}"', '.', 'format', '(', 'key', ',', 'val', ',', 'e', ')', ')', '# These two need to be lists of strings, check to make sure', 'def', '_list_of_strings', '(', 'lst', ',', 'title', ')', ':', 'for', 'spec', 'in', 'lst', ':', 'if', 'not', 'isinstance', '(', 'spec', ',', 'six', '.', 'string_types', ')', ':', 'raise', 'ConfigError', '(', '"`{title}` must be a list of strings. `{spec}` was of type `{spec_t}`"', '.', 'format', '(', 'title', '=', 'title', ',', 'spec', '=', 'spec', ',', 'spec_t', '=', 'type', '(', 'spec', ')', ')', ')', '_list_of_strings', '(', 'contentsSpecifiers', ',', '"contentsSpecifiers"', ')', '_list_of_strings', '(', 'kindsWithContentsDirectives', ',', '"kindsWithContentsDirectives"', ')', '# Make sure the kinds they specified are valid', 'for', 'kind', 'in', 'kindsWithContentsDirectives', ':', 'if', 'kind', 'not', 'in', 'utils', '.', 'AVAILABLE_KINDS', ':', 'raise', 'ConfigError', '(', '"Unknown `{kind}` given in `kindsWithContentsDirectives`. See utils.AVAILABLE_KINDS."', '.', 'format', '(', 'kind', '=', 'kind', ')', ')', '# Make sure the listingExlcude is usable', 'if', '"listingExclude"', 'in', 'exhale_args', ':', 'import', 're', '# TODO: remove this once config objects are in. Reset needed for testing suite.', 'configs_globals', '[', '"_compiled_listing_exclude"', ']', '=', '[', ']', '# used for error printing, tries to create string out of item otherwise', "# returns 'at index {idx}'", 'def', 'item_or_index', '(', 'item', ',', 'idx', ')', ':', 'try', ':', 'return', '"`{item}`"', '.', 'format', '(', 'item', '=', 'item', ')', 'except', ':', 'return', '"at index {idx}"', '.', 'format', '(', 'idx', '=', 'idx', ')', 'exclusions', '=', 'exhale_args', '[', '"listingExclude"', ']', 'for', 'idx', 'in', 'range', '(', 'len', '(', 'exclusions', ')', ')', ':', '# Gather the `pattern` and `flags` parameters for `re.compile`', 'item', '=', 'exclusions', '[', 'idx', ']', 'if', 'isinstance', '(', 'item', ',', 'six', '.', 'string_types', ')', ':', 'pattern', '=', 'item', 'flags', '=', '0', 'else', ':', 'try', ':', 'pattern', ',', 'flags', '=', 'item', 'except', 'Exception', 'as', 'e', ':', 'raise', 'ConfigError', '(', '"listingExclude item {0} cannot be unpacked as `pattern, flags = item`:\\n{1}"', '.', 'format', '(', 'item_or_index', '(', 'item', ',', 'idx', ')', ',', 'e', ')', ')', '# Compile the regular expression object.', 'try', ':', 'regex', '=', 're', '.', 'compile', '(', 'pattern', ',', 'flags', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'ConfigError', '(', '"Unable to compile specified listingExclude {0}:\\n{1}"', '.', 'format', '(', 'item_or_index', '(', 'item', ',', 'idx', ')', ',', 'e', ')', ')', 'configs_globals', '[', '"_compiled_listing_exclude"', ']', '.', 'append', '(', 'regex', ')', '# Make sure the lexerMapping is usable', 'if', '"lexerMapping"', 'in', 'exhale_args', ':', 'from', 'pygments', 'import', 'lexers', 'import', 're', '# TODO: remove this once config objects are in. 
Reset needed for testing suite.', 'configs_globals', '[', '"_compiled_lexer_mapping"', ']', '=', '{', '}', 'lexer_mapping', '=', 'exhale_args', '[', '"lexerMapping"', ']', 'for', 'key', 'in', 'lexer_mapping', ':', 'val', '=', 'lexer_mapping', '[', 'key', ']', '# Make sure both are strings', 'if', 'not', 'isinstance', '(', 'key', ',', 'six', '.', 'string_types', ')', 'or', 'not', 'isinstance', '(', 'val', ',', 'six', '.', 'string_types', ')', ':', 'raise', 'ConfigError', '(', '"All keys and values in `lexerMapping` must be strings."', ')', '# Make sure the key is a valid regular expression', 'try', ':', 'regex', '=', 're', '.', 'compile', '(', 'key', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'ConfigError', '(', '"The `lexerMapping` key [{0}] is not a valid regular expression: {1}"', '.', 'format', '(', 'key', ',', 'e', ')', ')', '# Make sure the provided lexer is available', 'try', ':', 'lex', '=', 'lexers', '.', 'find_lexer_class_by_name', '(', 'val', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'ConfigError', '(', '"The `lexerMapping` value of [{0}] for key [{1}] is not a valid Pygments lexer."', '.', 'format', '(', 'val', ',', 'key', ')', ')', '# Everything works, stash for later processing', 'configs_globals', '[', '"_compiled_lexer_mapping"', ']', '[', 'regex', ']', '=', 'val', '####################################################################################', '# Internal consistency check to make sure available keys are accurate. #', '####################################################################################', '# See naming conventions described at top of file for why this is ok!', 'keys_expected', '=', '[', ']', 'for', 'key', 'in', 'configs_globals', '.', 'keys', '(', ')', ':', 'val', '=', 'configs_globals', '[', 'key', ']', '# Ignore modules and functions', 'if', 'not', 'isinstance', '(', 'val', ',', 'FunctionType', ')', 'and', 'not', 'isinstance', '(', 'val', ',', 'ModuleType', ')', ':', 'if', 'key', '!=', '"logger"', ':', '# band-aid for logging api with Sphinx prior to config objects', '# Ignore specials like __name__ and internal variables like _the_app', 'if', '"_"', 'not', 'in', 'key', 'and', 'len', '(', 'key', ')', '>', '0', ':', "# don't think there can be zero length ones...", 'first', '=', 'key', '[', '0', ']', 'if', 'first', '.', 'isalpha', '(', ')', 'and', 'first', '.', 'islower', '(', ')', ':', 'keys_expected', '.', 'append', '(', 'key', ')', 'keys_expected', '=', 'set', '(', 'keys_expected', ')', 'keys_available', '=', 'set', '(', 'keys_available', ')', 'if', 'keys_expected', '!=', 'keys_available', ':', 'err', '=', 'StringIO', '(', ')', 'err', '.', 'write', '(', 'textwrap', '.', 'dedent', '(', "'''\n CRITICAL: Exhale encountered an internal error, please raise an Issue on GitHub:\n\n https://github.com/svenevs/exhale/issues\n\n Please paste the following in the issue report:\n\n Expected keys:\n\n '''", ')', ')', 'for', 'key', 'in', 'keys_expected', ':', 'err', '.', 'write', '(', '"- {0}\\n"', '.', 'format', '(', 'key', ')', ')', 'err', '.', 'write', '(', 'textwrap', '.', 'dedent', '(', "'''\n Available keys:\n\n '''", ')', ')', 'for', 'key', 'in', 'keys_available', ':', 'err', '.', 'write', '(', '"- {0}\\n"', '.', 'format', '(', 'key', ')', ')', 'err', '.', 'write', '(', 'textwrap', '.', 'dedent', '(', "'''\n The Mismatch(es):\n\n '''", ')', ')', 'for', 'key', 'in', '(', 'keys_available', '^', 'keys_expected', ')', ':', 'err', '.', 'write', '(', '"- {0}\\n"', '.', 'format', '(', 'key', ')', ')', 'err_msg', '=', 'err', '.', 'getvalue', 
'(', ')', 'err', '.', 'close', '(', ')', 'raise', 'ExtensionError', '(', 'err_msg', ')', '####################################################################################', '# See if unexpected keys were presented. #', '####################################################################################', 'all_keys', '=', 'set', '(', 'exhale_args', '.', 'keys', '(', ')', ')', 'keys_processed', '=', 'set', '(', 'keys_processed', ')', 'if', 'all_keys', '!=', 'keys_processed', ':', '# Much love: https://stackoverflow.com/a/17388505/3814202', 'from', 'difflib', 'import', 'SequenceMatcher', 'def', 'similar', '(', 'a', ',', 'b', ')', ':', 'return', 'SequenceMatcher', '(', 'None', ',', 'a', ',', 'b', ')', '.', 'ratio', '(', ')', '*', '100.0', '# If there are keys left over after taking the differences of keys_processed', '# (which is all keys Exhale expects to see), inform the user of keys they might', '# have been trying to provide.', '#', '# Convert everything to lower case for better matching success', 'potential_keys', '=', 'keys_available', '-', 'keys_processed', 'potential_keys_lower', '=', '{', 'key', '.', 'lower', '(', ')', ':', 'key', 'for', 'key', 'in', 'potential_keys', '}', 'extras', '=', 'all_keys', '-', 'keys_processed', 'extra_error', '=', 'StringIO', '(', ')', 'extra_error', '.', 'write', '(', '"Exhale found unexpected keys in `exhale_args`:\\n"', ')', 'for', 'key', 'in', 'extras', ':', 'extra_error', '.', 'write', '(', '" - Extra key: {0}\\n"', '.', 'format', '(', 'key', ')', ')', 'potentials', '=', '[', ']', 'for', 'mate', 'in', 'potential_keys_lower', ':', 'similarity', '=', 'similar', '(', 'key', ',', 'mate', ')', 'if', 'similarity', '>', '50.0', ':', '# Output results with the non-lower version they should put in exhale_args', 'potentials', '.', 'append', '(', '(', 'similarity', ',', 'potential_keys_lower', '[', 'mate', ']', ')', ')', 'if', 'potentials', ':', 'potentials', '=', 'reversed', '(', 'sorted', '(', 'potentials', ')', ')', 'for', 'rank', ',', 'mate', 'in', 'potentials', ':', 'extra_error', '.', 'write', '(', '" - {0:2.2f}% match with: {1}\\n"', '.', 'format', '(', 'rank', ',', 'mate', ')', ')', 'extra_error_str', '=', 'extra_error', '.', 'getvalue', '(', ')', 'extra_error', '.', 'close', '(', ')', 'raise', 'ConfigError', '(', 'extra_error_str', ')', '####################################################################################', '# Verify some potentially inconsistent or ignored settings. 
#', '####################################################################################', '# treeViewIsBootstrap only takes meaning when createTreeView is True', 'if', 'not', 'createTreeView', 'and', 'treeViewIsBootstrap', ':', 'logger', '.', 'warning', '(', '"Exhale: `treeViewIsBootstrap=True` ignored since `createTreeView=False`"', ')', '# fullToctreeMaxDepth > 5 may produce other sphinx issues unrelated to exhale', 'if', 'fullToctreeMaxDepth', '>', '5', ':', 'logger', '.', 'ingwarn', '(', '"Exhale: `fullToctreeMaxDepth={0}` is greater than 5 and may build errors for non-html."', '.', 'format', '(', 'fullToctreeMaxDepth', ')', ')', '# Make sure that we received a valid mapping created by utils.makeCustomSpecificationsMapping', 'sanity', '=', '_closure_map_sanity_check', 'insane', '=', '"`customSpecificationsMapping` *MUST* be made using exhale.utils.makeCustomSpecificationsMapping"', 'if', 'customSpecificationsMapping', ':', '# Sanity check to make sure exhale made this mapping', 'if', 'sanity', 'not', 'in', 'customSpecificationsMapping', ':', 'raise', 'ConfigError', '(', 'insane', ')', 'elif', 'customSpecificationsMapping', '[', 'sanity', ']', '!=', 'sanity', ':', '# LOL', 'raise', 'ConfigError', '(', 'insane', ')', '# Sanity check #2: enforce no new additions were made', 'expected_keys', '=', 'set', '(', '[', 'sanity', ']', ')', '|', 'set', '(', 'utils', '.', 'AVAILABLE_KINDS', ')', 'provided_keys', '=', 'set', '(', 'customSpecificationsMapping', '.', 'keys', '(', ')', ')', 'diff', '=', 'provided_keys', '-', 'expected_keys', 'if', 'diff', ':', 'raise', 'ConfigError', '(', '"Found extra keys in `customSpecificationsMapping`: {0}"', '.', 'format', '(', 'diff', ')', ')', '# Sanity check #3: make sure the return values are all strings', 'for', 'key', 'in', 'customSpecificationsMapping', ':', 'val_t', '=', 'type', '(', 'customSpecificationsMapping', '[', 'key', ']', ')', 'if', 'not', 'isinstance', '(', 'key', ',', 'six', '.', 'string_types', ')', ':', 'raise', 'ConfigError', '(', '"`customSpecificationsMapping` key `{key}` gave value type `{val_t}` (need `str`)."', '.', 'format', '(', 'key', '=', 'key', ',', 'val_t', '=', 'val_t', ')', ')', '# Specify where the doxygen output should be going', 'global', '_doxygen_xml_output_directory', '_doxygen_xml_output_directory', '=', 'doxy_xml_dir', '# If requested, the time is nigh for executing doxygen. The strategy:', '# 1. Execute doxygen if requested', '# 2. Verify that the expected doxy_xml_dir (specified to `breathe`) was created', '# 3. 
Assuming everything went to plan, let exhale take over and create all of the .rst docs', 'if', 'exhaleExecutesDoxygen', ':', '# Cannot use both, only one or the other', 'if', 'exhaleUseDoxyfile', 'and', '(', 'exhaleDoxygenStdin', 'is', 'not', 'None', ')', ':', 'raise', 'ConfigError', '(', '"You must choose one of `exhaleUseDoxyfile` or `exhaleDoxygenStdin`, not both."', ')', '# The Doxyfile *must* be at the same level as conf.py', '# This is done so that when separate source / build directories are being used,', '# we can guarantee where the Doxyfile is.', 'if', 'exhaleUseDoxyfile', ':', 'doxyfile_path', '=', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'path', '.', 'join', '(', 'app', '.', 'confdir', ',', '"Doxyfile"', ')', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'doxyfile_path', ')', ':', 'raise', 'ConfigError', '(', '"The file [{0}] does not exist"', '.', 'format', '(', 'doxyfile_path', ')', ')', 'here', '=', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'curdir', ')', 'if', 'here', '==', 'app', '.', 'confdir', ':', 'returnPath', '=', 'None', 'else', ':', 'returnPath', '=', 'here', '# All necessary information ready, go to where the Doxyfile is, run Doxygen', '# and then return back (where applicable) so sphinx can continue', 'start', '=', 'utils', '.', 'get_time', '(', ')', 'if', 'returnPath', ':', 'logger', '.', 'info', '(', 'utils', '.', 'info', '(', '"Exhale: changing directories to [{0}] to execute Doxygen."', '.', 'format', '(', 'app', '.', 'confdir', ')', ')', ')', 'os', '.', 'chdir', '(', 'app', '.', 'confdir', ')', 'logger', '.', 'info', '(', 'utils', '.', 'info', '(', '"Exhale: executing doxygen."', ')', ')', 'status', '=', 'deploy', '.', 'generateDoxygenXML', '(', ')', '# Being overly-careful to put sphinx back where it was before potentially erroring out', 'if', 'returnPath', ':', 'logger', '.', 'info', '(', 'utils', '.', 'info', '(', '"Exhale: changing directories back to [{0}] after Doxygen."', '.', 'format', '(', 'returnPath', ')', ')', ')', 'os', '.', 'chdir', '(', 'returnPath', ')', 'if', 'status', ':', 'raise', 'ExtensionError', '(', 'status', ')', 'else', ':', 'end', '=', 'utils', '.', 'get_time', '(', ')', 'logger', '.', 'info', '(', 'utils', '.', 'progress', '(', '"Exhale: doxygen ran successfully in {0}."', '.', 'format', '(', 'utils', '.', 'time_string', '(', 'start', ',', 'end', ')', ')', ')', ')', 'else', ':', 'if', 'exhaleUseDoxyfile', ':', 'logger', '.', 'warning', '(', '"Exhale: `exhaleUseDoxyfile` ignored since `exhaleExecutesDoxygen=False`"', ')', 'if', 'exhaleDoxygenStdin', 'is', 'not', 'None', ':', 'logger', '.', 'warning', '(', '"Exhale: `exhaleDoxygenStdin` ignored since `exhaleExecutesDoxygen=False`"', ')', 'if', 'exhaleSilentDoxygen', ':', 'logger', '.', 'warning', '(', '"Exhale: `exhaleSilentDoxygen=True` ignored since `exhaleExecutesDoxygen=False`"', ')', '# Either Doxygen was run prior to this being called, or we just finished running it.', '# Make sure that the files we need are actually there.', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'doxy_xml_dir', ')', ':', 'raise', 'ConfigError', '(', '"Exhale: the specified folder [{0}] does not exist. Has Doxygen been run?"', '.', 'format', '(', 'doxy_xml_dir', ')', ')', 'index', '=', 'os', '.', 'path', '.', 'join', '(', 'doxy_xml_dir', ',', '"index.xml"', ')', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'index', ')', ':', 'raise', 'ConfigError', '(', '"Exhale: the file [{0}] does not exist. 
Has Doxygen been run?"', '.', 'format', '(', 'index', ')', ')', '# Legacy / debugging feature, warn of its purpose', 'if', 'generateBreatheFileDirectives', ':', 'logger', '.', 'warning', '(', '"Exhale: `generateBreatheFileDirectives` is a debugging feature not intended for production."', ')', '####################################################################################', '# If using a fancy treeView, add the necessary frontend files. #', '####################################################################################', 'if', 'createTreeView', ':', 'if', 'treeViewIsBootstrap', ':', 'tree_data_static_base', '=', '"treeView-bootstrap"', 'tree_data_css', '=', '[', 'os', '.', 'path', '.', 'join', '(', '"bootstrap-treeview"', ',', '"bootstrap-treeview.min.css"', ')', ']', 'tree_data_js', '=', '[', 'os', '.', 'path', '.', 'join', '(', '"bootstrap-treeview"', ',', '"bootstrap-treeview.min.js"', ')', ',', '# os.path.join("bootstrap-treeview", "apply-bootstrap-treview.js")', ']', 'tree_data_ext', '=', '[', ']', 'else', ':', 'tree_data_static_base', '=', '"treeView"', 'tree_data_css', '=', '[', 'os', '.', 'path', '.', 'join', '(', '"collapsible-lists"', ',', '"css"', ',', '"tree_view.css"', ')', ']', 'tree_data_js', '=', '[', 'os', '.', 'path', '.', 'join', '(', '"collapsible-lists"', ',', '"js"', ',', '"CollapsibleLists.compressed.js"', ')', ',', 'os', '.', 'path', '.', 'join', '(', '"collapsible-lists"', ',', '"js"', ',', '"apply-collapsible-lists.js"', ')', ']', '# The tree_view.css file uses these', 'tree_data_ext', '=', '[', 'os', '.', 'path', '.', 'join', '(', '"collapsible-lists"', ',', '"css"', ',', '"button-closed.png"', ')', ',', 'os', '.', 'path', '.', 'join', '(', '"collapsible-lists"', ',', '"css"', ',', '"button-open.png"', ')', ',', 'os', '.', 'path', '.', 'join', '(', '"collapsible-lists"', ',', '"css"', ',', '"button.png"', ')', ',', 'os', '.', 'path', '.', 'join', '(', '"collapsible-lists"', ',', '"css"', ',', '"list-item-contents.png"', ')', ',', 'os', '.', 'path', '.', 'join', '(', '"collapsible-lists"', ',', '"css"', ',', '"list-item-last-open.png"', ')', ',', 'os', '.', 'path', '.', 'join', '(', '"collapsible-lists"', ',', '"css"', ',', '"list-item-last.png"', ')', ',', 'os', '.', 'path', '.', 'join', '(', '"collapsible-lists"', ',', '"css"', ',', '"list-item-open.png"', ')', ',', 'os', '.', 'path', '.', 'join', '(', '"collapsible-lists"', ',', '"css"', ',', '"list-item.png"', ')', ',', 'os', '.', 'path', '.', 'join', '(', '"collapsible-lists"', ',', '"css"', ',', '"list-item-root.png"', ')', ',', ']', '# Make sure we have everything we need', 'collapse_data', '=', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'path', '.', 'dirname', '(', '__file__', ')', ')', ',', '"data"', ',', 'tree_data_static_base', ')', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'collapse_data', ')', ':', 'raise', 'ExtensionError', '(', '"Exhale: the path to [{0}] was not found, possible installation error."', '.', 'format', '(', 'collapse_data', ')', ')', 'else', ':', 'all_files', '=', 'tree_data_css', '+', 'tree_data_js', '+', 'tree_data_ext', 'missing', '=', '[', ']', 'for', 'file', 'in', 'all_files', ':', 'path', '=', 'os', '.', 'path', '.', 'join', '(', 'collapse_data', ',', 'file', ')', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'path', ')', ':', 'missing', '.', 'append', '(', 'path', ')', 'if', 'missing', ':', 'raise', 'ExtensionError', '(', '"Exhale: the path(s) {0} were not found, possible installation error."', '.', 'format', 
'(', 'missing', ')', ')', '# We have all the files we need, the extra files will be copied automatically by', '# sphinx to the correct _static/ location, but stylesheets and javascript need', '# to be added explicitly', 'logger', '.', 'info', '(', 'utils', '.', 'info', '(', '"Exhale: adding tree view css / javascript."', ')', ')', 'app', '.', 'config', '.', 'html_static_path', '.', 'append', '(', 'collapse_data', ')', '# In Sphinx 1.8+ these have been renamed.', '# - app.add_stylesheet -> app.add_css_file', '# - app.add_javascript -> app.add_js_file', '#', '# RemovedInSphinx40Warning:', '# - The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.', '# - The app.add_javascript() is deprecated. Please use app.add_js_file() instead.', '#', "# So we'll need to keep this funky `getattr` chain for a little while ;)", '# Or else pin min sphinx version to 1.8 or higher. Probably when 2.0 is out?', 'add_css_file', '=', 'getattr', '(', 'app', ',', '"add_css_file"', ',', 'getattr', '(', 'app', ',', '"add_stylesheet"', ',', 'None', ')', ')', 'add_js_file', '=', 'getattr', '(', 'app', ',', '"add_js_file"', ',', 'getattr', '(', 'app', ',', '"add_javascript"', ',', 'None', ')', ')', '# Add the stylesheets', 'for', 'css', 'in', 'tree_data_css', ':', 'add_css_file', '(', 'css', ')', '# Add the javascript', 'for', 'js', 'in', 'tree_data_js', ':', 'add_js_file', '(', 'js', ')', 'logger', '.', 'info', '(', 'utils', '.', 'progress', '(', '"Exhale: added tree view css / javascript."', ')', ')'] | This method applies the various configurations users place in their ``conf.py``, in
the dictionary ``exhale_args``. The error checking seems to be robust, and
borderline obsessive, but there may very well be some glaring flaws.
When the user requests that the ``treeView`` be created, this method is also
responsible for adding the various CSS / JavaScript to the Sphinx Application
to support the hierarchical views.
.. danger::
This method is **not** supposed to be called directly. See
``exhale/__init__.py`` for how this function is called indirectly via the Sphinx
API.
**Parameters**
``app`` (:class:`sphinx.application.Sphinx`)
The Sphinx Application running the documentation build. | ['This', 'method', 'applies', 'the', 'various', 'configurations', 'users', 'place', 'in', 'their', 'conf', '.', 'py', 'in', 'the', 'dictionary', 'exhale_args', '.', 'The', 'error', 'checking', 'seems', 'to', 'be', 'robust', 'and', 'borderline', 'obsessive', 'but', 'there', 'may', 'very', 'well', 'be', 'some', 'glaring', 'flaws', '.'] | train | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/configs.py#L1190-L1810 |
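A minimal sketch of the ``conf.py`` values this validator expects is shown below; only the key names come from the record above, while the project name, Doxygen XML directory, and the Doxygen INPUT line are placeholder assumptions.

# Hypothetical conf.py fragment satisfying apply_sphinx_configurations;
# "MyProject", "./_doxygen/xml" and the INPUT line are placeholders.
extensions = ["breathe", "exhale"]

breathe_projects = {"MyProject": "./_doxygen/xml"}
breathe_default_project = "MyProject"

exhale_args = {
    # the four required keys checked by req_kv above
    "containmentFolder":     "./api",             # must be a subdirectory of the Sphinx source dir
    "rootFileName":          "library_root.rst",  # must end in '.rst'
    "rootFileTitle":         "Library API",
    "doxygenStripFromPath":  "..",
    # optional keys, e.g. let Exhale run Doxygen itself
    "createTreeView":        True,
    "exhaleExecutesDoxygen": True,
    "exhaleDoxygenStdin":    "INPUT = ../include",
}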
2,805 | bcbio/bcbio-nextgen | bcbio/variation/vcfanno.py | run | def run(vcf, conf_fns, lua_fns, data, basepath=None, decomposed=False):
"""Annotate a VCF file using vcfanno (https://github.com/brentp/vcfanno)
decomposed -- if set to true we'll convert allele based output into single values
to match alleles and make compatible with vcf2db
(https://github.com/quinlan-lab/vcf2db/issues/14)
"""
conf_fns.sort(key=lambda x: os.path.basename(x) if x else "")
lua_fns.sort(key=lambda x: os.path.basename(x) if x else "")
ext = "-annotated-%s" % utils.splitext_plus(os.path.basename(conf_fns[0]))[0]
if vcf.find(ext) > 0:
out_file = vcf
else:
out_file = "%s%s.vcf.gz" % (utils.splitext_plus(vcf)[0], ext)
if not utils.file_exists(out_file):
vcfanno = config_utils.get_program("vcfanno", data)
with file_transaction(out_file) as tx_out_file:
conffn = _combine_files(conf_fns, out_file, data, basepath is None)
luafn = _combine_files(lua_fns, out_file, data, False)
luaflag = "-lua {0}".format(luafn) if luafn and utils.file_exists(luafn) else ""
basepathflag = "-base-path {0}".format(basepath) if basepath else ""
cores = dd.get_num_cores(data)
post_ann = "sed -e 's/Number=A/Number=1/g' |" if decomposed else ""
cmd = ("{vcfanno} -p {cores} {luaflag} {basepathflag} {conffn} {vcf} "
"| {post_ann} bgzip -c > {tx_out_file}")
message = "Annotating {vcf} with vcfanno, using {conffn}".format(**locals())
do.run(cmd.format(**locals()), message)
return vcfutils.bgzip_and_index(out_file, data["config"]) | python | def run(vcf, conf_fns, lua_fns, data, basepath=None, decomposed=False):
"""Annotate a VCF file using vcfanno (https://github.com/brentp/vcfanno)
decomposed -- if set to true we'll convert allele based output into single values
to match alleles and make compatible with vcf2db
(https://github.com/quinlan-lab/vcf2db/issues/14)
"""
conf_fns.sort(key=lambda x: os.path.basename(x) if x else "")
lua_fns.sort(key=lambda x: os.path.basename(x) if x else "")
ext = "-annotated-%s" % utils.splitext_plus(os.path.basename(conf_fns[0]))[0]
if vcf.find(ext) > 0:
out_file = vcf
else:
out_file = "%s%s.vcf.gz" % (utils.splitext_plus(vcf)[0], ext)
if not utils.file_exists(out_file):
vcfanno = config_utils.get_program("vcfanno", data)
with file_transaction(out_file) as tx_out_file:
conffn = _combine_files(conf_fns, out_file, data, basepath is None)
luafn = _combine_files(lua_fns, out_file, data, False)
luaflag = "-lua {0}".format(luafn) if luafn and utils.file_exists(luafn) else ""
basepathflag = "-base-path {0}".format(basepath) if basepath else ""
cores = dd.get_num_cores(data)
post_ann = "sed -e 's/Number=A/Number=1/g' |" if decomposed else ""
cmd = ("{vcfanno} -p {cores} {luaflag} {basepathflag} {conffn} {vcf} "
"| {post_ann} bgzip -c > {tx_out_file}")
message = "Annotating {vcf} with vcfanno, using {conffn}".format(**locals())
do.run(cmd.format(**locals()), message)
return vcfutils.bgzip_and_index(out_file, data["config"]) | ['def', 'run', '(', 'vcf', ',', 'conf_fns', ',', 'lua_fns', ',', 'data', ',', 'basepath', '=', 'None', ',', 'decomposed', '=', 'False', ')', ':', 'conf_fns', '.', 'sort', '(', 'key', '=', 'lambda', 'x', ':', 'os', '.', 'path', '.', 'basename', '(', 'x', ')', 'if', 'x', 'else', '""', ')', 'lua_fns', '.', 'sort', '(', 'key', '=', 'lambda', 'x', ':', 'os', '.', 'path', '.', 'basename', '(', 'x', ')', 'if', 'x', 'else', '""', ')', 'ext', '=', '"-annotated-%s"', '%', 'utils', '.', 'splitext_plus', '(', 'os', '.', 'path', '.', 'basename', '(', 'conf_fns', '[', '0', ']', ')', ')', '[', '0', ']', 'if', 'vcf', '.', 'find', '(', 'ext', ')', '>', '0', ':', 'out_file', '=', 'vcf', 'else', ':', 'out_file', '=', '"%s%s.vcf.gz"', '%', '(', 'utils', '.', 'splitext_plus', '(', 'vcf', ')', '[', '0', ']', ',', 'ext', ')', 'if', 'not', 'utils', '.', 'file_exists', '(', 'out_file', ')', ':', 'vcfanno', '=', 'config_utils', '.', 'get_program', '(', '"vcfanno"', ',', 'data', ')', 'with', 'file_transaction', '(', 'out_file', ')', 'as', 'tx_out_file', ':', 'conffn', '=', '_combine_files', '(', 'conf_fns', ',', 'out_file', ',', 'data', ',', 'basepath', 'is', 'None', ')', 'luafn', '=', '_combine_files', '(', 'lua_fns', ',', 'out_file', ',', 'data', ',', 'False', ')', 'luaflag', '=', '"-lua {0}"', '.', 'format', '(', 'luafn', ')', 'if', 'luafn', 'and', 'utils', '.', 'file_exists', '(', 'luafn', ')', 'else', '""', 'basepathflag', '=', '"-base-path {0}"', '.', 'format', '(', 'basepath', ')', 'if', 'basepath', 'else', '""', 'cores', '=', 'dd', '.', 'get_num_cores', '(', 'data', ')', 'post_ann', '=', '"sed -e \'s/Number=A/Number=1/g\' |"', 'if', 'decomposed', 'else', '""', 'cmd', '=', '(', '"{vcfanno} -p {cores} {luaflag} {basepathflag} {conffn} {vcf} "', '"| {post_ann} bgzip -c > {tx_out_file}"', ')', 'message', '=', '"Annotating {vcf} with vcfanno, using {conffn}"', '.', 'format', '(', '*', '*', 'locals', '(', ')', ')', 'do', '.', 'run', '(', 'cmd', '.', 'format', '(', '*', '*', 'locals', '(', ')', ')', ',', 'message', ')', 'return', 'vcfutils', '.', 'bgzip_and_index', '(', 'out_file', ',', 'data', '[', '"config"', ']', ')'] | Annotate a VCF file using vcfanno (https://github.com/brentp/vcfanno)
decomposed -- if set to true we'll convert allele based output into single values
to match alleles and make compatible with vcf2db
(https://github.com/quinlan-lab/vcf2db/issues/14) | ['Annotate', 'a', 'VCF', 'file', 'using', 'vcfanno', '(', 'https', ':', '//', 'github', '.', 'com', '/', 'brentp', '/', 'vcfanno', ')'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfanno.py#L18-L45 |
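A hedged call sketch follows; the VCF, conf, and lua file names are placeholders, and ``data`` stands for bcbio's per-sample dictionary, which is assumed rather than shown here.

# Hypothetical invocation from a bcbio pipeline step.
conf_fns = ["gemini.conf"]          # vcfanno TOML configuration(s)
lua_fns = ["gemini.lua"]            # optional custom lua helpers (may be empty)
annotated_vcf = run(
    "sample-decomposed.vcf.gz",
    conf_fns, lua_fns, data,
    basepath=None,                  # conf paths are combined via _combine_files when no base path is given
    decomposed=True,                # rewrites Number=A INFO fields to Number=1 for vcf2db
)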
2,806 | project-rig/rig | rig/geometry.py | spinn5_eth_coords | def spinn5_eth_coords(width, height, root_x=0, root_y=0):
"""Generate a list of board coordinates with Ethernet connectivity in a
SpiNNaker machine.
Specifically, generates the coordinates for the Ethernet connected chips of
SpiNN-5 boards arranged in a standard torus topology.
.. warning::
In general, applications should use
:py:class:`rig.machine_control.MachineController.get_system_info` and
:py:meth:`~rig.machine_control.machine_controller.SystemInfo.ethernet_connected_chips`
to gather the coordinates of Ethernet connected chips which are
actually functioning. For example::
>> from rig.machine_control import MachineController
>> mc = MachineController("my-machine")
>> si = mc.get_system_info()
>> print(list(si.ethernet_connected_chips()))
[((0, 0), "1.2.3.4"), ((4, 8), "1.2.3.5"), ((8, 4), "1.2.3.6")]
Parameters
----------
width, height : int
Width and height of the system in chips.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`.
"""
# In oddly-shaped machines where chip (0, 0) does not exist, we must offset
# the coordinates returned in accordance with the root chip's location.
root_x %= 12
root_y %= 12
# Internally, work with the width and height rounded up to the next
# multiple of 12
w = ((width + 11) // 12) * 12
h = ((height + 11) // 12) * 12
for x in range(0, w, 12):
for y in range(0, h, 12):
for dx, dy in ((0, 0), (4, 8), (8, 4)):
nx = (x + dx + root_x) % w
ny = (y + dy + root_y) % h
# Skip points which are outside the range available
if nx < width and ny < height:
yield (nx, ny) | python | def spinn5_eth_coords(width, height, root_x=0, root_y=0):
"""Generate a list of board coordinates with Ethernet connectivity in a
SpiNNaker machine.
Specifically, generates the coordinates for the Ethernet connected chips of
SpiNN-5 boards arranged in a standard torus topology.
.. warning::
In general, applications should use
:py:class:`rig.machine_control.MachineController.get_system_info` and
:py:meth:`~rig.machine_control.machine_controller.SystemInfo.ethernet_connected_chips`
to gather the coordinates of Ethernet connected chips which are
actually functioning. For example::
>> from rig.machine_control import MachineController
>> mc = MachineController("my-machine")
>> si = mc.get_system_info()
>> print(list(si.ethernet_connected_chips()))
[((0, 0), "1.2.3.4"), ((4, 8), "1.2.3.5"), ((8, 4), "1.2.3.6")]
Parameters
----------
width, height : int
Width and height of the system in chips.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`.
"""
# In oddly-shaped machines where chip (0, 0) does not exist, we must offset
# the coordinates returned in accordance with the root chip's location.
root_x %= 12
root_y %= 12
# Internally, work with the width and height rounded up to the next
# multiple of 12
w = ((width + 11) // 12) * 12
h = ((height + 11) // 12) * 12
for x in range(0, w, 12):
for y in range(0, h, 12):
for dx, dy in ((0, 0), (4, 8), (8, 4)):
nx = (x + dx + root_x) % w
ny = (y + dy + root_y) % h
# Skip points which are outside the range available
if nx < width and ny < height:
yield (nx, ny) | ['def', 'spinn5_eth_coords', '(', 'width', ',', 'height', ',', 'root_x', '=', '0', ',', 'root_y', '=', '0', ')', ':', '# In oddly-shaped machines where chip (0, 0) does not exist, we must offset', "# the coordinates returned in accordance with the root chip's location.", 'root_x', '%=', '12', 'root_x', '%=', '12', '# Internally, work with the width and height rounded up to the next', '# multiple of 12', 'w', '=', '(', '(', 'width', '+', '11', ')', '//', '12', ')', '*', '12', 'h', '=', '(', '(', 'height', '+', '11', ')', '//', '12', ')', '*', '12', 'for', 'x', 'in', 'range', '(', '0', ',', 'w', ',', '12', ')', ':', 'for', 'y', 'in', 'range', '(', '0', ',', 'h', ',', '12', ')', ':', 'for', 'dx', ',', 'dy', 'in', '(', '(', '0', ',', '0', ')', ',', '(', '4', ',', '8', ')', ',', '(', '8', ',', '4', ')', ')', ':', 'nx', '=', '(', 'x', '+', 'dx', '+', 'root_x', ')', '%', 'w', 'ny', '=', '(', 'y', '+', 'dy', '+', 'root_y', ')', '%', 'h', '# Skip points which are outside the range available', 'if', 'nx', '<', 'width', 'and', 'ny', '<', 'height', ':', 'yield', '(', 'nx', ',', 'ny', ')'] | Generate a list of board coordinates with Ethernet connectivity in a
SpiNNaker machine.
Specifically, generates the coordinates for the Ethernet connected chips of
SpiNN-5 boards arranged in a standard torus topology.
.. warning::
In general, applications should use
:py:class:`rig.machine_control.MachineController.get_system_info` and
:py:meth:`~rig.machine_control.machine_controller.SystemInfo.ethernet_connected_chips`
to gather the coordinates of Ethernet connected chips which are
actually functioning. For example::
>> from rig.machine_control import MachineController
>> mc = MachineController("my-machine")
>> si = mc.get_system_info()
>> print(list(si.ethernet_connected_chips()))
[((0, 0), "1.2.3.4"), ((4, 8), "1.2.3.5"), ((8, 4), "1.2.3.6")]
Parameters
----------
width, height : int
Width and height of the system in chips.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`. | ['Generate', 'a', 'list', 'of', 'board', 'coordinates', 'with', 'Ethernet', 'connectivity', 'in', 'a', 'SpiNNaker', 'machine', '.'] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/geometry.py#L281-L328 |
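A small usage sketch, derived directly from the generator above, for a single 12x12 torus (three SpiNN-5 boards) booted from chip (0, 0):

print(list(spinn5_eth_coords(12, 12)))
# [(0, 0), (4, 8), (8, 4)]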
2,807 | ev3dev/ev3dev-lang-python | ev3dev2/sensor/lego.py | UltrasonicSensor.distance_inches_continuous | def distance_inches_continuous(self):
"""
Measurement of the distance detected by the sensor,
in inches.
The sensor will continue to take measurements so
they are available for future reads.
Prefer using the equivalent :meth:`UltrasonicSensor.distance_inches` property.
"""
self._ensure_mode(self.MODE_US_DIST_IN)
return self.value(0) * self._scale('US_DIST_IN') | python | def distance_inches_continuous(self):
"""
Measurement of the distance detected by the sensor,
in inches.
The sensor will continue to take measurements so
they are available for future reads.
Prefer using the equivalent :meth:`UltrasonicSensor.distance_inches` property.
"""
self._ensure_mode(self.MODE_US_DIST_IN)
return self.value(0) * self._scale('US_DIST_IN') | ['def', 'distance_inches_continuous', '(', 'self', ')', ':', 'self', '.', '_ensure_mode', '(', 'self', '.', 'MODE_US_DIST_IN', ')', 'return', 'self', '.', 'value', '(', '0', ')', '*', 'self', '.', '_scale', '(', "'US_DIST_IN'", ')'] | Measurement of the distance detected by the sensor,
in inches.
The sensor will continue to take measurements so
they are available for future reads.
Prefer using the equivalent :meth:`UltrasonicSensor.distance_inches` property. | ['Measurement', 'of', 'the', 'distance', 'detected', 'by', 'the', 'sensor', 'in', 'inches', '.'] | train | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/sensor/lego.py#L494-L505 |
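A brief usage sketch; it assumes an EV3 ultrasonic sensor is attached to any input port (the constructor auto-detects it) and that this method is exposed as a read-only property, as the docstring implies.

from time import sleep
from ev3dev2.sensor.lego import UltrasonicSensor

us = UltrasonicSensor()
for _ in range(10):
    print(us.distance_inches_continuous)  # sensor keeps sampling between reads
    sleep(0.5)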
2,808 | tanghaibao/jcvi | jcvi/formats/sam.py | ace | def ace(args):
"""
%prog ace bamfile fastafile
convert bam format to ace format. This often allows the remapping to be
assessed as a denovo assembly format. bam file needs to be indexed. also
creates a .mates file to be used in amos/bambus, and .astat file to mark
whether the contig is unique or repetitive based on A-statistics in Celera
assembler.
"""
p = OptionParser(ace.__doc__)
p.add_option("--splitdir", dest="splitdir", default="outRoot",
help="split the ace per contig to dir [default: %default]")
p.add_option("--unpaired", dest="unpaired", default=False,
help="remove read pairs on the same contig [default: %default]")
p.add_option("--minreadno", dest="minreadno", default=3, type="int",
help="minimum read numbers per contig [default: %default]")
p.add_option("--minctgsize", dest="minctgsize", default=100, type="int",
help="minimum contig size per contig [default: %default]")
p.add_option("--astat", default=False, action="store_true",
help="create .astat to list repetitiveness [default: %default]")
p.add_option("--readids", default=False, action="store_true",
help="create file of mapped and unmapped ids [default: %default]")
from pysam import Samfile
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, fastafile = args
astat = opts.astat
readids = opts.readids
f = Fasta(fastafile)
prefix = bamfile.split(".")[0]
acefile = prefix + ".ace"
readsfile = prefix + ".reads"
astatfile = prefix + ".astat"
logging.debug("Load {0}".format(bamfile))
s = Samfile(bamfile, "rb")
ncontigs = s.nreferences
genomesize = sum(x for a, x in f.itersizes())
logging.debug("Total {0} contigs with size {1} base".format(ncontigs,
genomesize))
qual = "20" # default qual
totalreads = sum(s.count(x) for x in s.references)
logging.debug("Total {0} reads mapped".format(totalreads))
fw = open(acefile, "w")
if astat:
astatfw = open(astatfile, "w")
if readids:
readsfw = open(readsfile, "w")
print("AS {0} {1}".format(ncontigs, totalreads), file=fw)
print(file=fw)
for i, contig in enumerate(s.references):
cseq = f[contig]
nbases = len(cseq)
mapped_reads = [x for x in s.fetch(contig) if not x.is_unmapped]
nreads = len(mapped_reads)
nsegments = 0
print("CO {0} {1} {2} {3} U".format(contig, nbases, nreads,
nsegments), file=fw)
print(fill(str(cseq.seq)), file=fw)
print(file=fw)
if astat:
astat = Astat(nbases, nreads, genomesize, totalreads)
print("{0}\t{1:.1f}".format(contig, astat), file=astatfw)
text = fill([qual] * nbases, delimiter=" ", width=30)
print("BQ\n{0}".format(text), file=fw)
print(file=fw)
rnames = []
for a in mapped_reads:
readname = a.qname
rname = readname
if readids:
print(readname, file=readsfw)
rnames.append(rname)
strand = "C" if a.is_reverse else "U"
paddedstart = a.pos + 1 # 0-based to 1-based
af = "AF {0} {1} {2}".format(rname, strand, paddedstart)
print(af, file=fw)
print(file=fw)
for a, rname in zip(mapped_reads, rnames):
aseq, npadded = cigar_to_seq(a)
if aseq is None:
continue
ninfos = 0
ntags = 0
alen = len(aseq)
rd = "RD {0} {1} {2} {3}\n{4}".format(rname, alen, ninfos, ntags,
fill(aseq))
qs = "QA 1 {0} 1 {0}".format(alen)
print(rd, file=fw)
print(file=fw)
print(qs, file=fw)
print(file=fw) | python | def ace(args):
"""
%prog ace bamfile fastafile
convert bam format to ace format. This often allows the remapping to be
assessed as a denovo assembly format. bam file needs to be indexed. also
creates a .mates file to be used in amos/bambus, and .astat file to mark
whether the contig is unique or repetitive based on A-statistics in Celera
assembler.
"""
p = OptionParser(ace.__doc__)
p.add_option("--splitdir", dest="splitdir", default="outRoot",
help="split the ace per contig to dir [default: %default]")
p.add_option("--unpaired", dest="unpaired", default=False,
help="remove read pairs on the same contig [default: %default]")
p.add_option("--minreadno", dest="minreadno", default=3, type="int",
help="minimum read numbers per contig [default: %default]")
p.add_option("--minctgsize", dest="minctgsize", default=100, type="int",
help="minimum contig size per contig [default: %default]")
p.add_option("--astat", default=False, action="store_true",
help="create .astat to list repetitiveness [default: %default]")
p.add_option("--readids", default=False, action="store_true",
help="create file of mapped and unmapped ids [default: %default]")
from pysam import Samfile
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, fastafile = args
astat = opts.astat
readids = opts.readids
f = Fasta(fastafile)
prefix = bamfile.split(".")[0]
acefile = prefix + ".ace"
readsfile = prefix + ".reads"
astatfile = prefix + ".astat"
logging.debug("Load {0}".format(bamfile))
s = Samfile(bamfile, "rb")
ncontigs = s.nreferences
genomesize = sum(x for a, x in f.itersizes())
logging.debug("Total {0} contigs with size {1} base".format(ncontigs,
genomesize))
qual = "20" # default qual
totalreads = sum(s.count(x) for x in s.references)
logging.debug("Total {0} reads mapped".format(totalreads))
fw = open(acefile, "w")
if astat:
astatfw = open(astatfile, "w")
if readids:
readsfw = open(readsfile, "w")
print("AS {0} {1}".format(ncontigs, totalreads), file=fw)
print(file=fw)
for i, contig in enumerate(s.references):
cseq = f[contig]
nbases = len(cseq)
mapped_reads = [x for x in s.fetch(contig) if not x.is_unmapped]
nreads = len(mapped_reads)
nsegments = 0
print("CO {0} {1} {2} {3} U".format(contig, nbases, nreads,
nsegments), file=fw)
print(fill(str(cseq.seq)), file=fw)
print(file=fw)
if astat:
astat = Astat(nbases, nreads, genomesize, totalreads)
print("{0}\t{1:.1f}".format(contig, astat), file=astatfw)
text = fill([qual] * nbases, delimiter=" ", width=30)
print("BQ\n{0}".format(text), file=fw)
print(file=fw)
rnames = []
for a in mapped_reads:
readname = a.qname
rname = readname
if readids:
print(readname, file=readsfw)
rnames.append(rname)
strand = "C" if a.is_reverse else "U"
paddedstart = a.pos + 1 # 0-based to 1-based
af = "AF {0} {1} {2}".format(rname, strand, paddedstart)
print(af, file=fw)
print(file=fw)
for a, rname in zip(mapped_reads, rnames):
aseq, npadded = cigar_to_seq(a)
if aseq is None:
continue
ninfos = 0
ntags = 0
alen = len(aseq)
rd = "RD {0} {1} {2} {3}\n{4}".format(rname, alen, ninfos, ntags,
fill(aseq))
qs = "QA 1 {0} 1 {0}".format(alen)
print(rd, file=fw)
print(file=fw)
print(qs, file=fw)
print(file=fw) | ['def', 'ace', '(', 'args', ')', ':', 'p', '=', 'OptionParser', '(', 'ace', '.', '__doc__', ')', 'p', '.', 'add_option', '(', '"--splitdir"', ',', 'dest', '=', '"splitdir"', ',', 'default', '=', '"outRoot"', ',', 'help', '=', '"split the ace per contig to dir [default: %default]"', ')', 'p', '.', 'add_option', '(', '"--unpaired"', ',', 'dest', '=', '"unpaired"', ',', 'default', '=', 'False', ',', 'help', '=', '"remove read pairs on the same contig [default: %default]"', ')', 'p', '.', 'add_option', '(', '"--minreadno"', ',', 'dest', '=', '"minreadno"', ',', 'default', '=', '3', ',', 'type', '=', '"int"', ',', 'help', '=', '"minimum read numbers per contig [default: %default]"', ')', 'p', '.', 'add_option', '(', '"--minctgsize"', ',', 'dest', '=', '"minctgsize"', ',', 'default', '=', '100', ',', 'type', '=', '"int"', ',', 'help', '=', '"minimum contig size per contig [default: %default]"', ')', 'p', '.', 'add_option', '(', '"--astat"', ',', 'default', '=', 'False', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"create .astat to list repetitiveness [default: %default]"', ')', 'p', '.', 'add_option', '(', '"--readids"', ',', 'default', '=', 'False', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"create file of mapped and unmapped ids [default: %default]"', ')', 'from', 'pysam', 'import', 'Samfile', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'if', 'len', '(', 'args', ')', '!=', '2', ':', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'bamfile', ',', 'fastafile', '=', 'args', 'astat', '=', 'opts', '.', 'astat', 'readids', '=', 'opts', '.', 'readids', 'f', '=', 'Fasta', '(', 'fastafile', ')', 'prefix', '=', 'bamfile', '.', 'split', '(', '"."', ')', '[', '0', ']', 'acefile', '=', 'prefix', '+', '".ace"', 'readsfile', '=', 'prefix', '+', '".reads"', 'astatfile', '=', 'prefix', '+', '".astat"', 'logging', '.', 'debug', '(', '"Load {0}"', '.', 'format', '(', 'bamfile', ')', ')', 's', '=', 'Samfile', '(', 'bamfile', ',', '"rb"', ')', 'ncontigs', '=', 's', '.', 'nreferences', 'genomesize', '=', 'sum', '(', 'x', 'for', 'a', ',', 'x', 'in', 'f', '.', 'itersizes', '(', ')', ')', 'logging', '.', 'debug', '(', '"Total {0} contigs with size {1} base"', '.', 'format', '(', 'ncontigs', ',', 'genomesize', ')', ')', 'qual', '=', '"20"', '# default qual', 'totalreads', '=', 'sum', '(', 's', '.', 'count', '(', 'x', ')', 'for', 'x', 'in', 's', '.', 'references', ')', 'logging', '.', 'debug', '(', '"Total {0} reads mapped"', '.', 'format', '(', 'totalreads', ')', ')', 'fw', '=', 'open', '(', 'acefile', ',', '"w"', ')', 'if', 'astat', ':', 'astatfw', '=', 'open', '(', 'astatfile', ',', '"w"', ')', 'if', 'readids', ':', 'readsfw', '=', 'open', '(', 'readsfile', ',', '"w"', ')', 'print', '(', '"AS {0} {1}"', '.', 'format', '(', 'ncontigs', ',', 'totalreads', ')', ',', 'file', '=', 'fw', ')', 'print', '(', 'file', '=', 'fw', ')', 'for', 'i', ',', 'contig', 'in', 'enumerate', '(', 's', '.', 'references', ')', ':', 'cseq', '=', 'f', '[', 'contig', ']', 'nbases', '=', 'len', '(', 'cseq', ')', 'mapped_reads', '=', '[', 'x', 'for', 'x', 'in', 's', '.', 'fetch', '(', 'contig', ')', 'if', 'not', 'x', '.', 'is_unmapped', ']', 'nreads', '=', 'len', '(', 'mapped_reads', ')', 'nsegments', '=', '0', 'print', '(', '"CO {0} {1} {2} {3} U"', '.', 'format', '(', 'contig', ',', 'nbases', ',', 'nreads', ',', 'nsegments', ')', ',', 'file', '=', 'fw', ')', 'print', '(', 'fill', '(', 'str', '(', 'cseq', '.', 'seq', ')', ')', ',', 'file', '=', 'fw', ')', 
'print', '(', 'file', '=', 'fw', ')', 'if', 'astat', ':', 'astat', '=', 'Astat', '(', 'nbases', ',', 'nreads', ',', 'genomesize', ',', 'totalreads', ')', 'print', '(', '"{0}\\t{1:.1f}"', '.', 'format', '(', 'contig', ',', 'astat', ')', ',', 'file', '=', 'astatfw', ')', 'text', '=', 'fill', '(', '[', 'qual', ']', '*', 'nbases', ',', 'delimiter', '=', '" "', ',', 'width', '=', '30', ')', 'print', '(', '"BQ\\n{0}"', '.', 'format', '(', 'text', ')', ',', 'file', '=', 'fw', ')', 'print', '(', 'file', '=', 'fw', ')', 'rnames', '=', '[', ']', 'for', 'a', 'in', 'mapped_reads', ':', 'readname', '=', 'a', '.', 'qname', 'rname', '=', 'readname', 'if', 'readids', ':', 'print', '(', 'readname', ',', 'file', '=', 'readsfw', ')', 'rnames', '.', 'append', '(', 'rname', ')', 'strand', '=', '"C"', 'if', 'a', '.', 'is_reverse', 'else', '"U"', 'paddedstart', '=', 'a', '.', 'pos', '+', '1', '# 0-based to 1-based', 'af', '=', '"AF {0} {1} {2}"', '.', 'format', '(', 'rname', ',', 'strand', ',', 'paddedstart', ')', 'print', '(', 'af', ',', 'file', '=', 'fw', ')', 'print', '(', 'file', '=', 'fw', ')', 'for', 'a', ',', 'rname', 'in', 'zip', '(', 'mapped_reads', ',', 'rnames', ')', ':', 'aseq', ',', 'npadded', '=', 'cigar_to_seq', '(', 'a', ')', 'if', 'aseq', 'is', 'None', ':', 'continue', 'ninfos', '=', '0', 'ntags', '=', '0', 'alen', '=', 'len', '(', 'aseq', ')', 'rd', '=', '"RD {0} {1} {2} {3}\\n{4}"', '.', 'format', '(', 'rname', ',', 'alen', ',', 'ninfos', ',', 'ntags', ',', 'fill', '(', 'aseq', ')', ')', 'qs', '=', '"QA 1 {0} 1 {0}"', '.', 'format', '(', 'alen', ')', 'print', '(', 'rd', ',', 'file', '=', 'fw', ')', 'print', '(', 'file', '=', 'fw', ')', 'print', '(', 'qs', ',', 'file', '=', 'fw', ')', 'print', '(', 'file', '=', 'fw', ')'] | %prog ace bamfile fastafile
convert bam format to ace format. This often allows the remapping to be
assessed as a denovo assembly format. bam file needs to be indexed. also
creates a .mates file to be used in amos/bambus, and .astat file to mark
whether the contig is unique or repetitive based on A-statistics in Celera
assembler. | ['%prog', 'ace', 'bamfile', 'fastafile'] | train | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L823-L937 |
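A minimal usage sketch for the ace() entry point above. The import path follows the func_path_in_repository field; the BAM/FASTA file names and the --astat flag are illustrative, pysam must be installed, and the BAM must already be indexed as the docstring notes.

from jcvi.formats.sam import ace

# Convert an indexed BAM plus its reference FASTA into ACE format,
# also writing the .astat repetitiveness report. File names are placeholders.
ace(["aln.bam", "ref.fasta", "--astat"])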
2,809 | Kozea/pygal | pygal/graph/radar.py | Radar._compute | def _compute(self):
"""Compute r min max and labels position"""
delta = 2 * pi / self._len if self._len else 0
self._x_pos = [.5 * pi + i * delta for i in range(self._len + 1)]
for serie in self.all_series:
serie.points = [(v, self._x_pos[i])
for i, v in enumerate(serie.values)]
if self.interpolate:
extended_x_pos = ([.5 * pi - delta] + self._x_pos)
extended_vals = (serie.values[-1:] + serie.values)
serie.interpolated = list(
map(
tuple,
map(
reversed,
self._interpolate(extended_x_pos, extended_vals)
)
)
)
# x labels space
self._box.margin *= 2
self._rmin = self.zero
self._rmax = self._max or 1
self._box.set_polar_box(self._rmin, self._rmax)
self._self_close = True | python | def _compute(self):
"""Compute r min max and labels position"""
delta = 2 * pi / self._len if self._len else 0
self._x_pos = [.5 * pi + i * delta for i in range(self._len + 1)]
for serie in self.all_series:
serie.points = [(v, self._x_pos[i])
for i, v in enumerate(serie.values)]
if self.interpolate:
extended_x_pos = ([.5 * pi - delta] + self._x_pos)
extended_vals = (serie.values[-1:] + serie.values)
serie.interpolated = list(
map(
tuple,
map(
reversed,
self._interpolate(extended_x_pos, extended_vals)
)
)
)
# x labels space
self._box.margin *= 2
self._rmin = self.zero
self._rmax = self._max or 1
self._box.set_polar_box(self._rmin, self._rmax)
self._self_close = True | ['def', '_compute', '(', 'self', ')', ':', 'delta', '=', '2', '*', 'pi', '/', 'self', '.', '_len', 'if', 'self', '.', '_len', 'else', '0', 'self', '.', '_x_pos', '=', '[', '.5', '*', 'pi', '+', 'i', '*', 'delta', 'for', 'i', 'in', 'range', '(', 'self', '.', '_len', '+', '1', ')', ']', 'for', 'serie', 'in', 'self', '.', 'all_series', ':', 'serie', '.', 'points', '=', '[', '(', 'v', ',', 'self', '.', '_x_pos', '[', 'i', ']', ')', 'for', 'i', ',', 'v', 'in', 'enumerate', '(', 'serie', '.', 'values', ')', ']', 'if', 'self', '.', 'interpolate', ':', 'extended_x_pos', '=', '(', '[', '.5', '*', 'pi', '-', 'delta', ']', '+', 'self', '.', '_x_pos', ')', 'extended_vals', '=', '(', 'serie', '.', 'values', '[', '-', '1', ':', ']', '+', 'serie', '.', 'values', ')', 'serie', '.', 'interpolated', '=', 'list', '(', 'map', '(', 'tuple', ',', 'map', '(', 'reversed', ',', 'self', '.', '_interpolate', '(', 'extended_x_pos', ',', 'extended_vals', ')', ')', ')', ')', '# x labels space', 'self', '.', '_box', '.', 'margin', '*=', '2', 'self', '.', '_rmin', '=', 'self', '.', 'zero', 'self', '.', '_rmax', '=', 'self', '.', '_max', 'or', '1', 'self', '.', '_box', '.', 'set_polar_box', '(', 'self', '.', '_rmin', ',', 'self', '.', '_rmax', ')', 'self', '.', '_self_close', '=', 'True'] | Compute r min max and labels position | ['Compute', 'r', 'min', 'max', 'and', 'labels', 'position'] | train | https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/radar.py#L167-L192 |
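Radar._compute() above is internal and runs during rendering. A short sketch of the public pygal API that exercises it; the series names and values are made up.

import pygal

# Rendering a Radar chart calls _compute(), which places value i at angle .5*pi + i*delta.
radar = pygal.Radar(interpolate='cubic')      # exercises the interpolation branch shown above
radar.x_labels = ['speed', 'memory', 'io', 'network']
radar.add('run A', [7, 4, 5, 6])
radar.add('run B', [3, 8, 2, 9])
svg = radar.render()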
2,810 | materialsproject/pymatgen | pymatgen/analysis/defects/utils.py | ChargeDensityAnalyzer.get_local_extrema | def get_local_extrema(self, find_min=True, threshold_frac=None,
threshold_abs=None):
"""
Get all local extrema fractional coordinates in charge density,
searching for local minimum by default. Note that sites are NOT grouped
symmetrically.
Args:
            find_min (bool): if True, search for local minima; if False,
                search for local maxima.
threshold_frac (float): optional fraction of extrema shown, which
returns `threshold_frac * tot_num_extrema` extrema fractional
coordinates based on highest/lowest intensity.
E.g. set 0.2 to show the extrema with 20% highest or lowest
intensity. Value range: 0 <= threshold_frac <= 1
Note that threshold_abs and threshold_frac should not set in the
same time.
threshold_abs (float): optional filter. When searching for local
minima, intensity <= threshold_abs returns; when searching for
local maxima, intensity >= threshold_abs returns.
Note that threshold_abs and threshold_frac should not set in the
same time.
Returns:
extrema_coords (list): list of fractional coordinates corresponding
to local extrema.
"""
sign, extrema_type = 1, "local maxima"
if find_min:
sign, extrema_type = -1, "local minima"
# Make 3x3x3 supercell
# This is a trick to resolve the periodical boundary issue.
total_chg = sign * self.chgcar.data["total"]
total_chg = np.tile(total_chg, reps=(3, 3, 3))
coordinates = peak_local_max(total_chg, min_distance=1)
# Remove duplicated sites introduced by supercell.
f_coords = [coord / total_chg.shape * 3 for coord in coordinates]
f_coords = [f - 1 for f in f_coords if
all(np.array(f) < 2) and all(np.array(f) >= 1)]
# Update information
self._update_extrema(f_coords, extrema_type,
threshold_frac=threshold_frac,
threshold_abs=threshold_abs)
return self.extrema_coords | python | def get_local_extrema(self, find_min=True, threshold_frac=None,
threshold_abs=None):
"""
Get all local extrema fractional coordinates in charge density,
searching for local minimum by default. Note that sites are NOT grouped
symmetrically.
Args:
            find_min (bool): if True, search for local minima; if False,
                search for local maxima.
threshold_frac (float): optional fraction of extrema shown, which
returns `threshold_frac * tot_num_extrema` extrema fractional
coordinates based on highest/lowest intensity.
E.g. set 0.2 to show the extrema with 20% highest or lowest
intensity. Value range: 0 <= threshold_frac <= 1
Note that threshold_abs and threshold_frac should not set in the
same time.
threshold_abs (float): optional filter. When searching for local
minima, intensity <= threshold_abs returns; when searching for
local maxima, intensity >= threshold_abs returns.
Note that threshold_abs and threshold_frac should not set in the
same time.
Returns:
extrema_coords (list): list of fractional coordinates corresponding
to local extrema.
"""
sign, extrema_type = 1, "local maxima"
if find_min:
sign, extrema_type = -1, "local minima"
# Make 3x3x3 supercell
# This is a trick to resolve the periodical boundary issue.
total_chg = sign * self.chgcar.data["total"]
total_chg = np.tile(total_chg, reps=(3, 3, 3))
coordinates = peak_local_max(total_chg, min_distance=1)
# Remove duplicated sites introduced by supercell.
f_coords = [coord / total_chg.shape * 3 for coord in coordinates]
f_coords = [f - 1 for f in f_coords if
all(np.array(f) < 2) and all(np.array(f) >= 1)]
# Update information
self._update_extrema(f_coords, extrema_type,
threshold_frac=threshold_frac,
threshold_abs=threshold_abs)
return self.extrema_coords | ['def', 'get_local_extrema', '(', 'self', ',', 'find_min', '=', 'True', ',', 'threshold_frac', '=', 'None', ',', 'threshold_abs', '=', 'None', ')', ':', 'sign', ',', 'extrema_type', '=', '1', ',', '"local maxima"', 'if', 'find_min', ':', 'sign', ',', 'extrema_type', '=', '-', '1', ',', '"local minima"', '# Make 3x3x3 supercell', '# This is a trick to resolve the periodical boundary issue.', 'total_chg', '=', 'sign', '*', 'self', '.', 'chgcar', '.', 'data', '[', '"total"', ']', 'total_chg', '=', 'np', '.', 'tile', '(', 'total_chg', ',', 'reps', '=', '(', '3', ',', '3', ',', '3', ')', ')', 'coordinates', '=', 'peak_local_max', '(', 'total_chg', ',', 'min_distance', '=', '1', ')', '# Remove duplicated sites introduced by supercell.', 'f_coords', '=', '[', 'coord', '/', 'total_chg', '.', 'shape', '*', '3', 'for', 'coord', 'in', 'coordinates', ']', 'f_coords', '=', '[', 'f', '-', '1', 'for', 'f', 'in', 'f_coords', 'if', 'all', '(', 'np', '.', 'array', '(', 'f', ')', '<', '2', ')', 'and', 'all', '(', 'np', '.', 'array', '(', 'f', ')', '>=', '1', ')', ']', '# Update information', 'self', '.', '_update_extrema', '(', 'f_coords', ',', 'extrema_type', ',', 'threshold_frac', '=', 'threshold_frac', ',', 'threshold_abs', '=', 'threshold_abs', ')', 'return', 'self', '.', 'extrema_coords'] | Get all local extrema fractional coordinates in charge density,
searching for local minimum by default. Note that sites are NOT grouped
symmetrically.
Args:
            find_min (bool): if True, search for local minima; if False,
                search for local maxima.
threshold_frac (float): optional fraction of extrema shown, which
returns `threshold_frac * tot_num_extrema` extrema fractional
coordinates based on highest/lowest intensity.
E.g. set 0.2 to show the extrema with 20% highest or lowest
intensity. Value range: 0 <= threshold_frac <= 1
Note that threshold_abs and threshold_frac should not set in the
same time.
threshold_abs (float): optional filter. When searching for local
minima, intensity <= threshold_abs returns; when searching for
local maxima, intensity >= threshold_abs returns.
Note that threshold_abs and threshold_frac should not set in the
same time.
Returns:
extrema_coords (list): list of fractional coordinates corresponding
to local extrema. | ['Get', 'all', 'local', 'extrema', 'fractional', 'coordinates', 'in', 'charge', 'density', 'searching', 'for', 'local', 'minimum', 'by', 'default', '.', 'Note', 'that', 'sites', 'are', 'NOT', 'grouped', 'symmetrically', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/defects/utils.py#L1030-L1083 |
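A hedged usage sketch for get_local_extrema(); it assumes ChargeDensityAnalyzer is constructed directly from a Chgcar object (the constructor is not shown in this row) and that a CHGCAR file is available.

from pymatgen.io.vasp.outputs import Chgcar
from pymatgen.analysis.defects.utils import ChargeDensityAnalyzer

chgcar = Chgcar.from_file("CHGCAR")      # placeholder path
cda = ChargeDensityAnalyzer(chgcar)      # constructor use assumed
# Keep only the 20% lowest-intensity local minima, e.g. as candidate interstitial sites.
minima = cda.get_local_extrema(find_min=True, threshold_frac=0.2)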
2,811 | sdispater/eloquent | eloquent/support/collection.py | Collection.lists | def lists(self, value, key=None):
"""
Get a list with the values of a given key
:rtype: list
"""
results = map(lambda x: x[value], self._items)
return list(results) | python | def lists(self, value, key=None):
"""
Get a list with the values of a given key
:rtype: list
"""
results = map(lambda x: x[value], self._items)
return list(results) | ['def', 'lists', '(', 'self', ',', 'value', ',', 'key', '=', 'None', ')', ':', 'results', '=', 'map', '(', 'lambda', 'x', ':', 'x', '[', 'value', ']', ',', 'self', '.', '_items', ')', 'return', 'list', '(', 'results', ')'] | Get a list with the values of a given key
:rtype: list | ['Get', 'a', 'list', 'with', 'the', 'values', 'of', 'a', 'given', 'key'] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/support/collection.py#L118-L126 |
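A short sketch of Collection.lists(); note that the key parameter is accepted but unused in the code above. Building the collection directly from a list of dicts is an assumption about the constructor.

from eloquent.support.collection import Collection

users = Collection([{'name': 'Ada'}, {'name': 'Linus'}])   # constructor use assumed
names = users.lists('name')                                # -> ['Ada', 'Linus']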
2,812 | chaoss/grimoirelab-sortinghat | sortinghat/matcher.py | _match_with_pandas | def _match_with_pandas(filtered, matcher):
"""Find matches in a set using Pandas' library."""
import pandas
data = [fl.to_dict() for fl in filtered]
if not data:
return []
df = pandas.DataFrame(data)
df = df.sort_values(['uuid'])
cdfs = []
criteria = matcher.matching_criteria()
for c in criteria:
cdf = df[['id', 'uuid', c]]
cdf = cdf.dropna(subset=[c])
cdf = pandas.merge(cdf, cdf, on=c, how='left')
cdf = cdf[['uuid_x', 'uuid_y']]
cdfs.append(cdf)
result = pandas.concat(cdfs)
result = result.drop_duplicates()
groups = result.groupby(by=['uuid_x'],
as_index=True, sort=True)
matched = _calculate_matches_closures(groups)
return matched | python | def _match_with_pandas(filtered, matcher):
"""Find matches in a set using Pandas' library."""
import pandas
data = [fl.to_dict() for fl in filtered]
if not data:
return []
df = pandas.DataFrame(data)
df = df.sort_values(['uuid'])
cdfs = []
criteria = matcher.matching_criteria()
for c in criteria:
cdf = df[['id', 'uuid', c]]
cdf = cdf.dropna(subset=[c])
cdf = pandas.merge(cdf, cdf, on=c, how='left')
cdf = cdf[['uuid_x', 'uuid_y']]
cdfs.append(cdf)
result = pandas.concat(cdfs)
result = result.drop_duplicates()
groups = result.groupby(by=['uuid_x'],
as_index=True, sort=True)
matched = _calculate_matches_closures(groups)
return matched | ['def', '_match_with_pandas', '(', 'filtered', ',', 'matcher', ')', ':', 'import', 'pandas', 'data', '=', '[', 'fl', '.', 'to_dict', '(', ')', 'for', 'fl', 'in', 'filtered', ']', 'if', 'not', 'data', ':', 'return', '[', ']', 'df', '=', 'pandas', '.', 'DataFrame', '(', 'data', ')', 'df', '=', 'df', '.', 'sort_values', '(', '[', "'uuid'", ']', ')', 'cdfs', '=', '[', ']', 'criteria', '=', 'matcher', '.', 'matching_criteria', '(', ')', 'for', 'c', 'in', 'criteria', ':', 'cdf', '=', 'df', '[', '[', "'id'", ',', "'uuid'", ',', 'c', ']', ']', 'cdf', '=', 'cdf', '.', 'dropna', '(', 'subset', '=', '[', 'c', ']', ')', 'cdf', '=', 'pandas', '.', 'merge', '(', 'cdf', ',', 'cdf', ',', 'on', '=', 'c', ',', 'how', '=', "'left'", ')', 'cdf', '=', 'cdf', '[', '[', "'uuid_x'", ',', "'uuid_y'", ']', ']', 'cdfs', '.', 'append', '(', 'cdf', ')', 'result', '=', 'pandas', '.', 'concat', '(', 'cdfs', ')', 'result', '=', 'result', '.', 'drop_duplicates', '(', ')', 'groups', '=', 'result', '.', 'groupby', '(', 'by', '=', '[', "'uuid_x'", ']', ',', 'as_index', '=', 'True', ',', 'sort', '=', 'True', ')', 'matched', '=', '_calculate_matches_closures', '(', 'groups', ')', 'return', 'matched'] | Find matches in a set using Pandas' library. | ['Find', 'matches', 'in', 'a', 'set', 'using', 'Pandas', 'library', '.'] | train | https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/matcher.py#L237-L267 |
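The core of _match_with_pandas() is a per-criterion self-merge followed by de-duplication. A standalone pandas sketch of that single step; the column values are invented for illustration.

import pandas as pd

df = pd.DataFrame([
    {'id': 1, 'uuid': 'a', 'email': 'x@example.com'},
    {'id': 2, 'uuid': 'b', 'email': 'x@example.com'},
    {'id': 3, 'uuid': 'c', 'email': None},
])
cdf = df[['id', 'uuid', 'email']].dropna(subset=['email'])
# Self-merge on the criterion: every pair of uuids sharing an email lands in one row.
pairs = pd.merge(cdf, cdf, on='email', how='left')[['uuid_x', 'uuid_y']].drop_duplicates()
print(pairs.groupby('uuid_x').groups)   # uuids 'a' and 'b' end up grouped together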
2,813 | mitsei/dlkit | dlkit/json_/grading/objects.py | GradeForm.get_input_score_end_range_metadata | def get_input_score_end_range_metadata(self):
"""Gets the metadata for the input score start range.
return: (osid.Metadata) - metadata for the input score start
range
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['input_score_end_range'])
metadata.update({'existing_decimal_values': self._my_map['inputScoreEndRange']})
return Metadata(**metadata) | python | def get_input_score_end_range_metadata(self):
"""Gets the metadata for the input score start range.
return: (osid.Metadata) - metadata for the input score start
range
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['input_score_end_range'])
metadata.update({'existing_decimal_values': self._my_map['inputScoreEndRange']})
return Metadata(**metadata) | ['def', 'get_input_score_end_range_metadata', '(', 'self', ')', ':', '# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template', 'metadata', '=', 'dict', '(', 'self', '.', '_mdata', '[', "'input_score_end_range'", ']', ')', 'metadata', '.', 'update', '(', '{', "'existing_decimal_values'", ':', 'self', '.', '_my_map', '[', "'inputScoreEndRange'", ']', '}', ')', 'return', 'Metadata', '(', '*', '*', 'metadata', ')'] | Gets the metadata for the input score start range.
return: (osid.Metadata) - metadata for the input score start
range
*compliance: mandatory -- This method must be implemented.* | ['Gets', 'the', 'metadata', 'for', 'the', 'input', 'score', 'start', 'range', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/objects.py#L239-L250 |
2,814 | pytorch/text | torchtext/data/field.py | NestedField.preprocess | def preprocess(self, xs):
"""Preprocess a single example.
Firstly, tokenization and the supplied preprocessing pipeline is applied. Since
this field is always sequential, the result is a list. Then, each element of
the list is preprocessed using ``self.nesting_field.preprocess`` and the resulting
list is returned.
Arguments:
xs (list or str): The input to preprocess.
Returns:
list: The preprocessed list.
"""
return [self.nesting_field.preprocess(x)
for x in super(NestedField, self).preprocess(xs)] | python | def preprocess(self, xs):
"""Preprocess a single example.
Firstly, tokenization and the supplied preprocessing pipeline is applied. Since
this field is always sequential, the result is a list. Then, each element of
the list is preprocessed using ``self.nesting_field.preprocess`` and the resulting
list is returned.
Arguments:
xs (list or str): The input to preprocess.
Returns:
list: The preprocessed list.
"""
return [self.nesting_field.preprocess(x)
for x in super(NestedField, self).preprocess(xs)] | ['def', 'preprocess', '(', 'self', ',', 'xs', ')', ':', 'return', '[', 'self', '.', 'nesting_field', '.', 'preprocess', '(', 'x', ')', 'for', 'x', 'in', 'super', '(', 'NestedField', ',', 'self', ')', '.', 'preprocess', '(', 'xs', ')', ']'] | Preprocess a single example.
Firstly, tokenization and the supplied preprocessing pipeline is applied. Since
this field is always sequential, the result is a list. Then, each element of
the list is preprocessed using ``self.nesting_field.preprocess`` and the resulting
list is returned.
Arguments:
xs (list or str): The input to preprocess.
Returns:
list: The preprocessed list. | ['Preprocess', 'a', 'single', 'example', '.'] | train | https://github.com/pytorch/text/blob/26bfce6869dc704f1d86792f9a681d453d7e7bb8/torchtext/data/field.py#L528-L543 |
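A hedged sketch against the legacy torchtext.data API (the layout used at this commit): a character-level Field nested inside a NestedField, so preprocess() first splits on whitespace and then splits each token into characters.

from torchtext.data import Field, NestedField   # legacy torchtext API

nesting = Field(tokenize=list)    # split each outer token into characters
chars = NestedField(nesting)      # outer field uses the default whitespace split
print(chars.preprocess("hello world"))
# [['h', 'e', 'l', 'l', 'o'], ['w', 'o', 'r', 'l', 'd']]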
2,815 | bfontaine/term2048 | term2048/game.py | Game.boardToString | def boardToString(self, margins=None):
"""
return a string representation of the current board.
"""
if margins is None:
margins = {}
b = self.board
rg = range(b.size())
left = ' '*margins.get('left', 0)
s = '\n'.join(
[left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])
return s | python | def boardToString(self, margins=None):
"""
return a string representation of the current board.
"""
if margins is None:
margins = {}
b = self.board
rg = range(b.size())
left = ' '*margins.get('left', 0)
s = '\n'.join(
[left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])
return s | ['def', 'boardToString', '(', 'self', ',', 'margins', '=', 'None', ')', ':', 'if', 'margins', 'is', 'None', ':', 'margins', '=', '{', '}', 'b', '=', 'self', '.', 'board', 'rg', '=', 'range', '(', 'b', '.', 'size', '(', ')', ')', 'left', '=', "' '", '*', 'margins', '.', 'get', '(', "'left'", ',', '0', ')', 's', '=', "'\\n'", '.', 'join', '(', '[', 'left', '+', "' '", '.', 'join', '(', '[', 'self', '.', 'getCellStr', '(', 'x', ',', 'y', ')', 'for', 'x', 'in', 'rg', ']', ')', 'for', 'y', 'in', 'rg', ']', ')', 'return', 's'] | return a string representation of the current board. | ['return', 'a', 'string', 'representation', 'of', 'the', 'current', 'board', '.'] | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/game.py#L280-L292 |
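A small sketch of boardToString(); default construction of Game (and its score-file side effects) is an assumption.

from term2048.game import Game

game = Game()                                   # fresh board with starting tiles
print(game.boardToString(margins={'left': 4}))  # indent every row by four spaces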
2,816 | kstaniek/condoor | condoor/connection.py | Connection.resume_session_logging | def resume_session_logging(self):
"""Resume session logging."""
self._chain.ctrl.set_session_log(self.session_fd)
self.log("Session logging resumed") | python | def resume_session_logging(self):
"""Resume session logging."""
self._chain.ctrl.set_session_log(self.session_fd)
self.log("Session logging resumed") | ['def', 'resume_session_logging', '(', 'self', ')', ':', 'self', '.', '_chain', '.', 'ctrl', '.', 'set_session_log', '(', 'self', '.', 'session_fd', ')', 'self', '.', 'log', '(', '"Session logging resumed"', ')'] | Resume session logging. | ['Resume', 'session', 'logging', '.'] | train | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/connection.py#L371-L374 |
2,817 | vtkiorg/vtki | vtki/pointset.py | UnstructuredGrid._from_arrays | def _from_arrays(self, offset, cells, cell_type, points, deep=True):
"""
Create VTK unstructured grid from numpy arrays
Parameters
----------
offset : np.ndarray dtype=np.int64
Array indicating the start location of each cell in the cells
array.
cells : np.ndarray dtype=np.int64
Array of cells. Each cell contains the number of points in the
cell and the node numbers of the cell.
cell_type : np.uint8
Cell types of each cell. Each cell type numbers can be found from
vtk documentation. See example below.
points : np.ndarray
Numpy array containing point locations.
Examples
--------
        >>> import numpy as np
>>> import vtk
>>> import vtki
>>> offset = np.array([0, 9])
>>> cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])
>>> cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int8)
>>> cell1 = np.array([[0, 0, 0],
... [1, 0, 0],
... [1, 1, 0],
... [0, 1, 0],
... [0, 0, 1],
... [1, 0, 1],
... [1, 1, 1],
... [0, 1, 1]])
>>> cell2 = np.array([[0, 0, 2],
... [1, 0, 2],
... [1, 1, 2],
... [0, 1, 2],
... [0, 0, 3],
... [1, 0, 3],
... [1, 1, 3],
... [0, 1, 3]])
>>> points = np.vstack((cell1, cell2))
>>> grid = vtki.UnstructuredGrid(offset, cells, cell_type, points)
"""
if offset.dtype != vtki.ID_TYPE:
offset = offset.astype(vtki.ID_TYPE)
if cells.dtype != vtki.ID_TYPE:
cells = cells.astype(vtki.ID_TYPE)
if not cells.flags['C_CONTIGUOUS']:
cells = np.ascontiguousarray(cells)
# if cells.ndim != 1:
# cells = cells.ravel()
if cell_type.dtype != np.uint8:
cell_type = cell_type.astype(np.uint8)
# Get number of cells
ncells = cell_type.size
# Convert to vtk arrays
cell_type = numpy_to_vtk(cell_type, deep=deep)
offset = numpy_to_vtkIdTypeArray(offset, deep=deep)
vtkcells = vtk.vtkCellArray()
vtkcells.SetCells(ncells, numpy_to_vtkIdTypeArray(cells.ravel(), deep=deep))
# Convert points to vtkPoints object
points = vtki.vtk_points(points, deep=deep)
# Create unstructured grid
self.SetPoints(points)
self.SetCells(cell_type, offset, vtkcells) | python | def _from_arrays(self, offset, cells, cell_type, points, deep=True):
"""
Create VTK unstructured grid from numpy arrays
Parameters
----------
offset : np.ndarray dtype=np.int64
Array indicating the start location of each cell in the cells
array.
cells : np.ndarray dtype=np.int64
Array of cells. Each cell contains the number of points in the
cell and the node numbers of the cell.
cell_type : np.uint8
Cell types of each cell. Each cell type numbers can be found from
vtk documentation. See example below.
points : np.ndarray
Numpy array containing point locations.
Examples
--------
        >>> import numpy as np
>>> import vtk
>>> import vtki
>>> offset = np.array([0, 9])
>>> cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])
>>> cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int8)
>>> cell1 = np.array([[0, 0, 0],
... [1, 0, 0],
... [1, 1, 0],
... [0, 1, 0],
... [0, 0, 1],
... [1, 0, 1],
... [1, 1, 1],
... [0, 1, 1]])
>>> cell2 = np.array([[0, 0, 2],
... [1, 0, 2],
... [1, 1, 2],
... [0, 1, 2],
... [0, 0, 3],
... [1, 0, 3],
... [1, 1, 3],
... [0, 1, 3]])
>>> points = np.vstack((cell1, cell2))
>>> grid = vtki.UnstructuredGrid(offset, cells, cell_type, points)
"""
if offset.dtype != vtki.ID_TYPE:
offset = offset.astype(vtki.ID_TYPE)
if cells.dtype != vtki.ID_TYPE:
cells = cells.astype(vtki.ID_TYPE)
if not cells.flags['C_CONTIGUOUS']:
cells = np.ascontiguousarray(cells)
# if cells.ndim != 1:
# cells = cells.ravel()
if cell_type.dtype != np.uint8:
cell_type = cell_type.astype(np.uint8)
# Get number of cells
ncells = cell_type.size
# Convert to vtk arrays
cell_type = numpy_to_vtk(cell_type, deep=deep)
offset = numpy_to_vtkIdTypeArray(offset, deep=deep)
vtkcells = vtk.vtkCellArray()
vtkcells.SetCells(ncells, numpy_to_vtkIdTypeArray(cells.ravel(), deep=deep))
# Convert points to vtkPoints object
points = vtki.vtk_points(points, deep=deep)
# Create unstructured grid
self.SetPoints(points)
self.SetCells(cell_type, offset, vtkcells) | ['def', '_from_arrays', '(', 'self', ',', 'offset', ',', 'cells', ',', 'cell_type', ',', 'points', ',', 'deep', '=', 'True', ')', ':', 'if', 'offset', '.', 'dtype', '!=', 'vtki', '.', 'ID_TYPE', ':', 'offset', '=', 'offset', '.', 'astype', '(', 'vtki', '.', 'ID_TYPE', ')', 'if', 'cells', '.', 'dtype', '!=', 'vtki', '.', 'ID_TYPE', ':', 'cells', '=', 'cells', '.', 'astype', '(', 'vtki', '.', 'ID_TYPE', ')', 'if', 'not', 'cells', '.', 'flags', '[', "'C_CONTIGUOUS'", ']', ':', 'cells', '=', 'np', '.', 'ascontiguousarray', '(', 'cells', ')', '# if cells.ndim != 1:', '# cells = cells.ravel()', 'if', 'cell_type', '.', 'dtype', '!=', 'np', '.', 'uint8', ':', 'cell_type', '=', 'cell_type', '.', 'astype', '(', 'np', '.', 'uint8', ')', '# Get number of cells', 'ncells', '=', 'cell_type', '.', 'size', '# Convert to vtk arrays', 'cell_type', '=', 'numpy_to_vtk', '(', 'cell_type', ',', 'deep', '=', 'deep', ')', 'offset', '=', 'numpy_to_vtkIdTypeArray', '(', 'offset', ',', 'deep', '=', 'deep', ')', 'vtkcells', '=', 'vtk', '.', 'vtkCellArray', '(', ')', 'vtkcells', '.', 'SetCells', '(', 'ncells', ',', 'numpy_to_vtkIdTypeArray', '(', 'cells', '.', 'ravel', '(', ')', ',', 'deep', '=', 'deep', ')', ')', '# Convert points to vtkPoints object', 'points', '=', 'vtki', '.', 'vtk_points', '(', 'points', ',', 'deep', '=', 'deep', ')', '# Create unstructured grid', 'self', '.', 'SetPoints', '(', 'points', ')', 'self', '.', 'SetCells', '(', 'cell_type', ',', 'offset', ',', 'vtkcells', ')'] | Create VTK unstructured grid from numpy arrays
Parameters
----------
offset : np.ndarray dtype=np.int64
Array indicating the start location of each cell in the cells
array.
cells : np.ndarray dtype=np.int64
Array of cells. Each cell contains the number of points in the
cell and the node numbers of the cell.
cell_type : np.uint8
Cell types of each cell. Each cell type numbers can be found from
vtk documentation. See example below.
points : np.ndarray
Numpy array containing point locations.
Examples
--------
        >>> import numpy as np
>>> import vtk
>>> import vtki
>>> offset = np.array([0, 9])
>>> cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])
>>> cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int8)
>>> cell1 = np.array([[0, 0, 0],
... [1, 0, 0],
... [1, 1, 0],
... [0, 1, 0],
... [0, 0, 1],
... [1, 0, 1],
... [1, 1, 1],
... [0, 1, 1]])
>>> cell2 = np.array([[0, 0, 2],
... [1, 0, 2],
... [1, 1, 2],
... [0, 1, 2],
... [0, 0, 3],
... [1, 0, 3],
... [1, 1, 3],
... [0, 1, 3]])
>>> points = np.vstack((cell1, cell2))
>>> grid = vtki.UnstructuredGrid(offset, cells, cell_type, points) | ['Create', 'VTK', 'unstructured', 'grid', 'from', 'numpy', 'arrays'] | train | https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/pointset.py#L1839-L1923 |
2,818 | aiogram/aiogram | aiogram/bot/bot.py | Bot.kick_chat_member | async def kick_chat_member(self, chat_id: typing.Union[base.Integer, base.String], user_id: base.Integer,
until_date: typing.Union[base.Integer, None] = None) -> base.Boolean:
"""
Use this method to kick a user from a group, a supergroup or a channel.
In the case of supergroups and channels, the user will not be able to return to the group
on their own using invite links, etc., unless unbanned first.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’ setting
is off in the target group.
Otherwise members may only be removed by the group's creator or by the member that added them.
Source: https://core.telegram.org/bots/api#kickchatmember
:param chat_id: Unique identifier for the target group or username of the target supergroup or channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:param until_date: Date when the user will be unbanned, unix time
:type until_date: :obj:`typing.Union[base.Integer, None]`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
"""
until_date = prepare_arg(until_date)
payload = generate_payload(**locals())
result = await self.request(api.Methods.KICK_CHAT_MEMBER, payload)
return result | python | async def kick_chat_member(self, chat_id: typing.Union[base.Integer, base.String], user_id: base.Integer,
until_date: typing.Union[base.Integer, None] = None) -> base.Boolean:
"""
Use this method to kick a user from a group, a supergroup or a channel.
In the case of supergroups and channels, the user will not be able to return to the group
on their own using invite links, etc., unless unbanned first.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’ setting
is off in the target group.
Otherwise members may only be removed by the group's creator or by the member that added them.
Source: https://core.telegram.org/bots/api#kickchatmember
:param chat_id: Unique identifier for the target group or username of the target supergroup or channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:param until_date: Date when the user will be unbanned, unix time
:type until_date: :obj:`typing.Union[base.Integer, None]`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
"""
until_date = prepare_arg(until_date)
payload = generate_payload(**locals())
result = await self.request(api.Methods.KICK_CHAT_MEMBER, payload)
return result | ['async', 'def', 'kick_chat_member', '(', 'self', ',', 'chat_id', ':', 'typing', '.', 'Union', '[', 'base', '.', 'Integer', ',', 'base', '.', 'String', ']', ',', 'user_id', ':', 'base', '.', 'Integer', ',', 'until_date', ':', 'typing', '.', 'Union', '[', 'base', '.', 'Integer', ',', 'None', ']', '=', 'None', ')', '->', 'base', '.', 'Boolean', ':', 'until_date', '=', 'prepare_arg', '(', 'until_date', ')', 'payload', '=', 'generate_payload', '(', '*', '*', 'locals', '(', ')', ')', 'result', '=', 'await', 'self', '.', 'request', '(', 'api', '.', 'Methods', '.', 'KICK_CHAT_MEMBER', ',', 'payload', ')', 'return', 'result'] | Use this method to kick a user from a group, a supergroup or a channel.
In the case of supergroups and channels, the user will not be able to return to the group
on their own using invite links, etc., unless unbanned first.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’ setting
is off in the target group.
Otherwise members may only be removed by the group's creator or by the member that added them.
Source: https://core.telegram.org/bots/api#kickchatmember
:param chat_id: Unique identifier for the target group or username of the target supergroup or channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:param until_date: Date when the user will be unbanned, unix time
:type until_date: :obj:`typing.Union[base.Integer, None]`
:return: Returns True on success
:rtype: :obj:`base.Boolean` | ['Use', 'this', 'method', 'to', 'kick', 'a', 'user', 'from', 'a', 'group', 'a', 'supergroup', 'or', 'a', 'channel', '.', 'In', 'the', 'case', 'of', 'supergroups', 'and', 'channels', 'the', 'user', 'will', 'not', 'be', 'able', 'to', 'return', 'to', 'the', 'group', 'on', 'their', 'own', 'using', 'invite', 'links', 'etc', '.', 'unless', 'unbanned', 'first', '.'] | train | https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/bot/bot.py#L952-L980 |
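A hedged async usage sketch for kick_chat_member(); the bot token, chat id and user id are placeholders, and the ban length is 24 hours expressed as a unix timestamp.

import asyncio
import time
from aiogram import Bot

async def ban_for_a_day(chat_id: int, user_id: int) -> None:
    bot = Bot(token="123456:TEST-TOKEN")        # placeholder token
    ok = await bot.kick_chat_member(chat_id, user_id,
                                    until_date=int(time.time()) + 86400)
    print(ok)

# asyncio.run(ban_for_a_day(-1001234567890, 42))  # example identifiers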
2,819 | apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | xmlDoc.newDtd | def newDtd(self, name, ExternalID, SystemID):
"""Creation of a new DTD for the external subset. To create an
internal subset, use xmlCreateIntSubset(). """
ret = libxml2mod.xmlNewDtd(self._o, name, ExternalID, SystemID)
if ret is None:raise treeError('xmlNewDtd() failed')
__tmp = xmlDtd(_obj=ret)
return __tmp | python | def newDtd(self, name, ExternalID, SystemID):
"""Creation of a new DTD for the external subset. To create an
internal subset, use xmlCreateIntSubset(). """
ret = libxml2mod.xmlNewDtd(self._o, name, ExternalID, SystemID)
if ret is None:raise treeError('xmlNewDtd() failed')
__tmp = xmlDtd(_obj=ret)
return __tmp | ['def', 'newDtd', '(', 'self', ',', 'name', ',', 'ExternalID', ',', 'SystemID', ')', ':', 'ret', '=', 'libxml2mod', '.', 'xmlNewDtd', '(', 'self', '.', '_o', ',', 'name', ',', 'ExternalID', ',', 'SystemID', ')', 'if', 'ret', 'is', 'None', ':', 'raise', 'treeError', '(', "'xmlNewDtd() failed'", ')', '__tmp', '=', 'xmlDtd', '(', '_obj', '=', 'ret', ')', 'return', '__tmp'] | Creation of a new DTD for the external subset. To create an
internal subset, use xmlCreateIntSubset(). | ['Creation', 'of', 'a', 'new', 'DTD', 'for', 'the', 'external', 'subset', '.', 'To', 'create', 'an', 'internal', 'subset', 'use', 'xmlCreateIntSubset', '()', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L4395-L4401 |
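A minimal sketch of newDtd() with the same binding; libxml2.newDoc and freeDoc are standard calls, while the DTD identifiers are illustrative.

import libxml2

doc = libxml2.newDoc("1.0")
# Attach an external-subset DTD node to the document.
dtd = doc.newDtd("html", "-//W3C//DTD XHTML 1.0 Strict//EN",
                 "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd")
print(dtd.name)
doc.freeDoc()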
2,820 | yunojuno/elasticsearch-django | elasticsearch_django/models.py | SearchDocumentMixin.index_search_document | def index_search_document(self, *, index):
"""
Create or replace search document in named index.
Checks the local cache to see if the document has changed,
and if not aborts the update, else pushes to ES, and then
resets the local cache. Cache timeout is set as "cache_expiry"
in the settings, and defaults to 60s.
"""
cache_key = self.search_document_cache_key
new_doc = self.as_search_document(index=index)
cached_doc = cache.get(cache_key)
if new_doc == cached_doc:
logger.debug("Search document for %r is unchanged, ignoring update.", self)
return []
cache.set(cache_key, new_doc, timeout=get_setting("cache_expiry", 60))
get_client().index(
index=index, doc_type=self.search_doc_type, body=new_doc, id=self.pk
) | python | def index_search_document(self, *, index):
"""
Create or replace search document in named index.
Checks the local cache to see if the document has changed,
and if not aborts the update, else pushes to ES, and then
resets the local cache. Cache timeout is set as "cache_expiry"
in the settings, and defaults to 60s.
"""
cache_key = self.search_document_cache_key
new_doc = self.as_search_document(index=index)
cached_doc = cache.get(cache_key)
if new_doc == cached_doc:
logger.debug("Search document for %r is unchanged, ignoring update.", self)
return []
cache.set(cache_key, new_doc, timeout=get_setting("cache_expiry", 60))
get_client().index(
index=index, doc_type=self.search_doc_type, body=new_doc, id=self.pk
) | ['def', 'index_search_document', '(', 'self', ',', '*', ',', 'index', ')', ':', 'cache_key', '=', 'self', '.', 'search_document_cache_key', 'new_doc', '=', 'self', '.', 'as_search_document', '(', 'index', '=', 'index', ')', 'cached_doc', '=', 'cache', '.', 'get', '(', 'cache_key', ')', 'if', 'new_doc', '==', 'cached_doc', ':', 'logger', '.', 'debug', '(', '"Search document for %r is unchanged, ignoring update."', ',', 'self', ')', 'return', '[', ']', 'cache', '.', 'set', '(', 'cache_key', ',', 'new_doc', ',', 'timeout', '=', 'get_setting', '(', '"cache_expiry"', ',', '60', ')', ')', 'get_client', '(', ')', '.', 'index', '(', 'index', '=', 'index', ',', 'doc_type', '=', 'self', '.', 'search_doc_type', ',', 'body', '=', 'new_doc', ',', 'id', '=', 'self', '.', 'pk', ')'] | Create or replace search document in named index.
Checks the local cache to see if the document has changed,
and if not aborts the update, else pushes to ES, and then
resets the local cache. Cache timeout is set as "cache_expiry"
in the settings, and defaults to 60s. | ['Create', 'or', 'replace', 'search', 'document', 'in', 'named', 'index', '.'] | train | https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/models.py#L350-L369 |
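A hedged sketch of how the mixin method above is typically driven from a Django model; the model, its field and the index name are assumptions, and a configured Django app is required.

from django.db import models
from elasticsearch_django.models import SearchDocumentMixin

class Article(SearchDocumentMixin, models.Model):      # hypothetical model
    title = models.CharField(max_length=200)

    def as_search_document(self, *, index):
        return {"title": self.title}

# article.index_search_document(index="articles")  # no-op when the cached doc is unchanged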
2,821 | moble/quaternion | quaternion_time_series.py | squad | def squad(R_in, t_in, t_out):
"""Spherical "quadrangular" interpolation of rotors with a cubic spline
This is the best way to interpolate rotations. It uses the analog
of a cubic spline, except that the interpolant is confined to the
rotor manifold in a natural way. Alternative methods involving
interpolation of other coordinates on the rotation group or
normalization of interpolated values give bad results. The
results from this method are as natural as any, and are continuous
in first and second derivatives.
The input `R_in` rotors are assumed to be reasonably continuous
(no sign flips), and the input `t` arrays are assumed to be
sorted. No checking is done for either case, and you may get
silently bad results if these conditions are violated.
This function simplifies the calling, compared to `squad_evaluate`
(which takes a set of four quaternions forming the edges of the
"quadrangle", and the normalized time `tau`) and `squad_vectorized`
(which takes the same arguments, but in array form, and efficiently
loops over them).
Parameters
----------
R_in: array of quaternions
A time-series of rotors (unit quaternions) to be interpolated
t_in: array of float
The times corresponding to R_in
t_out: array of float
The times to which R_in should be interpolated
"""
if R_in.size == 0 or t_out.size == 0:
return np.array((), dtype=np.quaternion)
# This list contains an index for each `t_out` such that
# t_in[i-1] <= t_out < t_in[i]
# Note that `side='right'` is much faster in my tests
# i_in_for_out = t_in.searchsorted(t_out, side='left')
# np.clip(i_in_for_out, 0, len(t_in) - 1, out=i_in_for_out)
i_in_for_out = t_in.searchsorted(t_out, side='right')-1
# Now, for each index `i` in `i_in`, we need to compute the
# interpolation "coefficients" (`A_i`, `B_ip1`).
#
# I previously tested an explicit version of the loops below,
# comparing `stride_tricks.as_strided` with explicit
# implementation via `roll` (as seen here). I found that the
# `roll` was significantly more efficient for simple calculations,
# though the difference is probably totally washed out here. In
# any case, it might be useful to test again.
#
A = R_in * np.exp((- np.log((~R_in) * np.roll(R_in, -1))
+ np.log((~np.roll(R_in, 1)) * R_in) * ((np.roll(t_in, -1) - t_in) / (t_in - np.roll(t_in, 1)))
) * 0.25)
B = np.roll(R_in, -1) * np.exp((np.log((~np.roll(R_in, -1)) * np.roll(R_in, -2))
* ((np.roll(t_in, -1) - t_in) / (np.roll(t_in, -2) - np.roll(t_in, -1)))
- np.log((~R_in) * np.roll(R_in, -1))) * -0.25)
# Correct the first and last A time steps, and last two B time steps. We extend R_in with the following wrap-around
# values:
# R_in[0-1] = R_in[0]*(~R_in[1])*R_in[0]
# R_in[n+0] = R_in[-1] * (~R_in[-2]) * R_in[-1]
# R_in[n+1] = R_in[0] * (~R_in[-1]) * R_in[0]
# = R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1]
# = R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1]
# A[i] = R_in[i] * np.exp((- np.log((~R_in[i]) * R_in[i+1])
# + np.log((~R_in[i-1]) * R_in[i]) * ((t_in[i+1] - t_in[i]) / (t_in[i] - t_in[i-1]))
# ) * 0.25)
# A[0] = R_in[0] * np.exp((- np.log((~R_in[0]) * R_in[1]) + np.log((~R_in[0])*R_in[1]*(~R_in[0])) * R_in[0]) * 0.25)
# = R_in[0]
A[0] = R_in[0]
# A[-1] = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[n+0])
# + np.log((~R_in[-2]) * R_in[-1]) * ((t_in[n+0] - t_in[-1]) / (t_in[-1] - t_in[-2]))
# ) * 0.25)
# = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[n+0]) + np.log((~R_in[-2]) * R_in[-1])) * 0.25)
# = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
# + np.log((~R_in[-2]) * R_in[-1])) * 0.25)
# = R_in[-1] * np.exp((- np.log((~R_in[-2]) * R_in[-1]) + np.log((~R_in[-2]) * R_in[-1])) * 0.25)
# = R_in[-1]
A[-1] = R_in[-1]
# B[i] = R_in[i+1] * np.exp((np.log((~R_in[i+1]) * R_in[i+2]) * ((t_in[i+1] - t_in[i]) / (t_in[i+2] - t_in[i+1]))
# - np.log((~R_in[i]) * R_in[i+1])) * -0.25)
# B[-2] = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[0]) * ((t_in[-1] - t_in[-2]) / (t_in[0] - t_in[-1]))
# - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
# = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[0]) - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
# = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
# - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
# = R_in[-1] * np.exp((np.log((~R_in[-2]) * R_in[-1]) - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
# = R_in[-1]
B[-2] = R_in[-1]
# B[-1] = R_in[0]
# B[-1] = R_in[0] * np.exp((np.log((~R_in[0]) * R_in[1]) - np.log((~R_in[-1]) * R_in[0])) * -0.25)
# = R_in[-1] * (~R_in[-2]) * R_in[-1]
# * np.exp((np.log((~(R_in[-1] * (~R_in[-2]) * R_in[-1])) * R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
# - np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])) * -0.25)
# = R_in[-1] * (~R_in[-2]) * R_in[-1]
# * np.exp((np.log(((~R_in[-1]) * R_in[-2] * (~R_in[-1])) * R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
# - np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])) * -0.25)
# * np.exp((np.log((~R_in[-2]) * R_in[-1])
# - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
B[-1] = R_in[-1] * (~R_in[-2]) * R_in[-1]
# Use the coefficients at the corresponding t_out indices to
# compute the squad interpolant
# R_ip1 = np.array(np.roll(R_in, -1)[i_in_for_out])
# R_ip1[-1] = R_in[-1]*(~R_in[-2])*R_in[-1]
R_ip1 = np.roll(R_in, -1)
R_ip1[-1] = R_in[-1]*(~R_in[-2])*R_in[-1]
R_ip1 = np.array(R_ip1[i_in_for_out])
t_inp1 = np.roll(t_in, -1)
t_inp1[-1] = t_in[-1] + (t_in[-1] - t_in[-2])
tau = (t_out - t_in[i_in_for_out]) / ((t_inp1 - t_in)[i_in_for_out])
# tau = (t_out - t_in[i_in_for_out]) / ((np.roll(t_in, -1) - t_in)[i_in_for_out])
R_out = np.squad_vectorized(tau, R_in[i_in_for_out], A[i_in_for_out], B[i_in_for_out], R_ip1)
return R_out | python | def squad(R_in, t_in, t_out):
"""Spherical "quadrangular" interpolation of rotors with a cubic spline
This is the best way to interpolate rotations. It uses the analog
of a cubic spline, except that the interpolant is confined to the
rotor manifold in a natural way. Alternative methods involving
interpolation of other coordinates on the rotation group or
normalization of interpolated values give bad results. The
results from this method are as natural as any, and are continuous
in first and second derivatives.
The input `R_in` rotors are assumed to be reasonably continuous
(no sign flips), and the input `t` arrays are assumed to be
sorted. No checking is done for either case, and you may get
silently bad results if these conditions are violated.
This function simplifies the calling, compared to `squad_evaluate`
(which takes a set of four quaternions forming the edges of the
"quadrangle", and the normalized time `tau`) and `squad_vectorized`
(which takes the same arguments, but in array form, and efficiently
loops over them).
Parameters
----------
R_in: array of quaternions
A time-series of rotors (unit quaternions) to be interpolated
t_in: array of float
The times corresponding to R_in
t_out: array of float
The times to which R_in should be interpolated
"""
if R_in.size == 0 or t_out.size == 0:
return np.array((), dtype=np.quaternion)
# This list contains an index for each `t_out` such that
# t_in[i-1] <= t_out < t_in[i]
# Note that `side='right'` is much faster in my tests
# i_in_for_out = t_in.searchsorted(t_out, side='left')
# np.clip(i_in_for_out, 0, len(t_in) - 1, out=i_in_for_out)
i_in_for_out = t_in.searchsorted(t_out, side='right')-1
# Now, for each index `i` in `i_in`, we need to compute the
# interpolation "coefficients" (`A_i`, `B_ip1`).
#
# I previously tested an explicit version of the loops below,
# comparing `stride_tricks.as_strided` with explicit
# implementation via `roll` (as seen here). I found that the
# `roll` was significantly more efficient for simple calculations,
# though the difference is probably totally washed out here. In
# any case, it might be useful to test again.
#
A = R_in * np.exp((- np.log((~R_in) * np.roll(R_in, -1))
+ np.log((~np.roll(R_in, 1)) * R_in) * ((np.roll(t_in, -1) - t_in) / (t_in - np.roll(t_in, 1)))
) * 0.25)
B = np.roll(R_in, -1) * np.exp((np.log((~np.roll(R_in, -1)) * np.roll(R_in, -2))
* ((np.roll(t_in, -1) - t_in) / (np.roll(t_in, -2) - np.roll(t_in, -1)))
- np.log((~R_in) * np.roll(R_in, -1))) * -0.25)
# Correct the first and last A time steps, and last two B time steps. We extend R_in with the following wrap-around
# values:
# R_in[0-1] = R_in[0]*(~R_in[1])*R_in[0]
# R_in[n+0] = R_in[-1] * (~R_in[-2]) * R_in[-1]
# R_in[n+1] = R_in[0] * (~R_in[-1]) * R_in[0]
# = R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1]
# = R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1]
# A[i] = R_in[i] * np.exp((- np.log((~R_in[i]) * R_in[i+1])
# + np.log((~R_in[i-1]) * R_in[i]) * ((t_in[i+1] - t_in[i]) / (t_in[i] - t_in[i-1]))
# ) * 0.25)
# A[0] = R_in[0] * np.exp((- np.log((~R_in[0]) * R_in[1]) + np.log((~R_in[0])*R_in[1]*(~R_in[0])) * R_in[0]) * 0.25)
# = R_in[0]
A[0] = R_in[0]
# A[-1] = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[n+0])
# + np.log((~R_in[-2]) * R_in[-1]) * ((t_in[n+0] - t_in[-1]) / (t_in[-1] - t_in[-2]))
# ) * 0.25)
# = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[n+0]) + np.log((~R_in[-2]) * R_in[-1])) * 0.25)
# = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
# + np.log((~R_in[-2]) * R_in[-1])) * 0.25)
# = R_in[-1] * np.exp((- np.log((~R_in[-2]) * R_in[-1]) + np.log((~R_in[-2]) * R_in[-1])) * 0.25)
# = R_in[-1]
A[-1] = R_in[-1]
# B[i] = R_in[i+1] * np.exp((np.log((~R_in[i+1]) * R_in[i+2]) * ((t_in[i+1] - t_in[i]) / (t_in[i+2] - t_in[i+1]))
# - np.log((~R_in[i]) * R_in[i+1])) * -0.25)
# B[-2] = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[0]) * ((t_in[-1] - t_in[-2]) / (t_in[0] - t_in[-1]))
# - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
# = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[0]) - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
# = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
# - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
# = R_in[-1] * np.exp((np.log((~R_in[-2]) * R_in[-1]) - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
# = R_in[-1]
B[-2] = R_in[-1]
# B[-1] = R_in[0]
# B[-1] = R_in[0] * np.exp((np.log((~R_in[0]) * R_in[1]) - np.log((~R_in[-1]) * R_in[0])) * -0.25)
# = R_in[-1] * (~R_in[-2]) * R_in[-1]
# * np.exp((np.log((~(R_in[-1] * (~R_in[-2]) * R_in[-1])) * R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
# - np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])) * -0.25)
# = R_in[-1] * (~R_in[-2]) * R_in[-1]
# * np.exp((np.log(((~R_in[-1]) * R_in[-2] * (~R_in[-1])) * R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
# - np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])) * -0.25)
# * np.exp((np.log((~R_in[-2]) * R_in[-1])
# - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
B[-1] = R_in[-1] * (~R_in[-2]) * R_in[-1]
# Use the coefficients at the corresponding t_out indices to
# compute the squad interpolant
# R_ip1 = np.array(np.roll(R_in, -1)[i_in_for_out])
# R_ip1[-1] = R_in[-1]*(~R_in[-2])*R_in[-1]
R_ip1 = np.roll(R_in, -1)
R_ip1[-1] = R_in[-1]*(~R_in[-2])*R_in[-1]
R_ip1 = np.array(R_ip1[i_in_for_out])
t_inp1 = np.roll(t_in, -1)
t_inp1[-1] = t_in[-1] + (t_in[-1] - t_in[-2])
tau = (t_out - t_in[i_in_for_out]) / ((t_inp1 - t_in)[i_in_for_out])
# tau = (t_out - t_in[i_in_for_out]) / ((np.roll(t_in, -1) - t_in)[i_in_for_out])
R_out = np.squad_vectorized(tau, R_in[i_in_for_out], A[i_in_for_out], B[i_in_for_out], R_ip1)
return R_out | ['def', 'squad', '(', 'R_in', ',', 't_in', ',', 't_out', ')', ':', 'if', 'R_in', '.', 'size', '==', '0', 'or', 't_out', '.', 'size', '==', '0', ':', 'return', 'np', '.', 'array', '(', '(', ')', ',', 'dtype', '=', 'np', '.', 'quaternion', ')', '# This list contains an index for each `t_out` such that', '# t_in[i-1] <= t_out < t_in[i]', "# Note that `side='right'` is much faster in my tests", "# i_in_for_out = t_in.searchsorted(t_out, side='left')", '# np.clip(i_in_for_out, 0, len(t_in) - 1, out=i_in_for_out)', 'i_in_for_out', '=', 't_in', '.', 'searchsorted', '(', 't_out', ',', 'side', '=', "'right'", ')', '-', '1', '# Now, for each index `i` in `i_in`, we need to compute the', '# interpolation "coefficients" (`A_i`, `B_ip1`).', '#', '# I previously tested an explicit version of the loops below,', '# comparing `stride_tricks.as_strided` with explicit', '# implementation via `roll` (as seen here). I found that the', '# `roll` was significantly more efficient for simple calculations,', '# though the difference is probably totally washed out here. In', '# any case, it might be useful to test again.', '#', 'A', '=', 'R_in', '*', 'np', '.', 'exp', '(', '(', '-', 'np', '.', 'log', '(', '(', '~', 'R_in', ')', '*', 'np', '.', 'roll', '(', 'R_in', ',', '-', '1', ')', ')', '+', 'np', '.', 'log', '(', '(', '~', 'np', '.', 'roll', '(', 'R_in', ',', '1', ')', ')', '*', 'R_in', ')', '*', '(', '(', 'np', '.', 'roll', '(', 't_in', ',', '-', '1', ')', '-', 't_in', ')', '/', '(', 't_in', '-', 'np', '.', 'roll', '(', 't_in', ',', '1', ')', ')', ')', ')', '*', '0.25', ')', 'B', '=', 'np', '.', 'roll', '(', 'R_in', ',', '-', '1', ')', '*', 'np', '.', 'exp', '(', '(', 'np', '.', 'log', '(', '(', '~', 'np', '.', 'roll', '(', 'R_in', ',', '-', '1', ')', ')', '*', 'np', '.', 'roll', '(', 'R_in', ',', '-', '2', ')', ')', '*', '(', '(', 'np', '.', 'roll', '(', 't_in', ',', '-', '1', ')', '-', 't_in', ')', '/', '(', 'np', '.', 'roll', '(', 't_in', ',', '-', '2', ')', '-', 'np', '.', 'roll', '(', 't_in', ',', '-', '1', ')', ')', ')', '-', 'np', '.', 'log', '(', '(', '~', 'R_in', ')', '*', 'np', '.', 'roll', '(', 'R_in', ',', '-', '1', ')', ')', ')', '*', '-', '0.25', ')', '# Correct the first and last A time steps, and last two B time steps. 
We extend R_in with the following wrap-around', '# values:', '# R_in[0-1] = R_in[0]*(~R_in[1])*R_in[0]', '# R_in[n+0] = R_in[-1] * (~R_in[-2]) * R_in[-1]', '# R_in[n+1] = R_in[0] * (~R_in[-1]) * R_in[0]', '# = R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1]', '# = R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1]', '# A[i] = R_in[i] * np.exp((- np.log((~R_in[i]) * R_in[i+1])', '# + np.log((~R_in[i-1]) * R_in[i]) * ((t_in[i+1] - t_in[i]) / (t_in[i] - t_in[i-1]))', '# ) * 0.25)', '# A[0] = R_in[0] * np.exp((- np.log((~R_in[0]) * R_in[1]) + np.log((~R_in[0])*R_in[1]*(~R_in[0])) * R_in[0]) * 0.25)', '# = R_in[0]', 'A', '[', '0', ']', '=', 'R_in', '[', '0', ']', '# A[-1] = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[n+0])', '# + np.log((~R_in[-2]) * R_in[-1]) * ((t_in[n+0] - t_in[-1]) / (t_in[-1] - t_in[-2]))', '# ) * 0.25)', '# = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[n+0]) + np.log((~R_in[-2]) * R_in[-1])) * 0.25)', '# = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])', '# + np.log((~R_in[-2]) * R_in[-1])) * 0.25)', '# = R_in[-1] * np.exp((- np.log((~R_in[-2]) * R_in[-1]) + np.log((~R_in[-2]) * R_in[-1])) * 0.25)', '# = R_in[-1]', 'A', '[', '-', '1', ']', '=', 'R_in', '[', '-', '1', ']', '# B[i] = R_in[i+1] * np.exp((np.log((~R_in[i+1]) * R_in[i+2]) * ((t_in[i+1] - t_in[i]) / (t_in[i+2] - t_in[i+1]))', '# - np.log((~R_in[i]) * R_in[i+1])) * -0.25)', '# B[-2] = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[0]) * ((t_in[-1] - t_in[-2]) / (t_in[0] - t_in[-1]))', '# - np.log((~R_in[-2]) * R_in[-1])) * -0.25)', '# = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[0]) - np.log((~R_in[-2]) * R_in[-1])) * -0.25)', '# = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])', '# - np.log((~R_in[-2]) * R_in[-1])) * -0.25)', '# = R_in[-1] * np.exp((np.log((~R_in[-2]) * R_in[-1]) - np.log((~R_in[-2]) * R_in[-1])) * -0.25)', '# = R_in[-1]', 'B', '[', '-', '2', ']', '=', 'R_in', '[', '-', '1', ']', '# B[-1] = R_in[0]', '# B[-1] = R_in[0] * np.exp((np.log((~R_in[0]) * R_in[1]) - np.log((~R_in[-1]) * R_in[0])) * -0.25)', '# = R_in[-1] * (~R_in[-2]) * R_in[-1]', '# * np.exp((np.log((~(R_in[-1] * (~R_in[-2]) * R_in[-1])) * R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1])', '# - np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])) * -0.25)', '# = R_in[-1] * (~R_in[-2]) * R_in[-1]', '# * np.exp((np.log(((~R_in[-1]) * R_in[-2] * (~R_in[-1])) * R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1])', '# - np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])) * -0.25)', '# * np.exp((np.log((~R_in[-2]) * R_in[-1])', '# - np.log((~R_in[-2]) * R_in[-1])) * -0.25)', 'B', '[', '-', '1', ']', '=', 'R_in', '[', '-', '1', ']', '*', '(', '~', 'R_in', '[', '-', '2', ']', ')', '*', 'R_in', '[', '-', '1', ']', '# Use the coefficients at the corresponding t_out indices to', '# compute the squad interpolant', '# R_ip1 = np.array(np.roll(R_in, -1)[i_in_for_out])', '# R_ip1[-1] = R_in[-1]*(~R_in[-2])*R_in[-1]', 'R_ip1', '=', 'np', '.', 'roll', '(', 'R_in', ',', '-', '1', ')', 'R_ip1', '[', '-', '1', ']', '=', 'R_in', '[', '-', '1', ']', '*', '(', '~', 'R_in', '[', '-', '2', ']', ')', '*', 'R_in', '[', '-', '1', ']', 'R_ip1', '=', 'np', '.', 'array', '(', 'R_ip1', '[', 'i_in_for_out', ']', ')', 't_inp1', '=', 'np', '.', 'roll', '(', 't_in', ',', '-', '1', ')', 't_inp1', '[', '-', '1', ']', '=', 't_in', '[', '-', '1', ']', '+', '(', 't_in', '[', '-', '1', ']', '-', 't_in', '[', '-', '2', ']', ')', 'tau', '=', 
'(', 't_out', '-', 't_in', '[', 'i_in_for_out', ']', ')', '/', '(', '(', 't_inp1', '-', 't_in', ')', '[', 'i_in_for_out', ']', ')', '# tau = (t_out - t_in[i_in_for_out]) / ((np.roll(t_in, -1) - t_in)[i_in_for_out])', 'R_out', '=', 'np', '.', 'squad_vectorized', '(', 'tau', ',', 'R_in', '[', 'i_in_for_out', ']', ',', 'A', '[', 'i_in_for_out', ']', ',', 'B', '[', 'i_in_for_out', ']', ',', 'R_ip1', ')', 'return', 'R_out'] | Spherical "quadrangular" interpolation of rotors with a cubic spline
This is the best way to interpolate rotations. It uses the analog
of a cubic spline, except that the interpolant is confined to the
rotor manifold in a natural way. Alternative methods involving
interpolation of other coordinates on the rotation group or
normalization of interpolated values give bad results. The
results from this method are as natural as any, and are continuous
in first and second derivatives.
The input `R_in` rotors are assumed to be reasonably continuous
(no sign flips), and the input `t` arrays are assumed to be
sorted. No checking is done for either case, and you may get
silently bad results if these conditions are violated.
This function simplifies the calling, compared to `squad_evaluate`
(which takes a set of four quaternions forming the edges of the
"quadrangle", and the normalized time `tau`) and `squad_vectorized`
(which takes the same arguments, but in array form, and efficiently
loops over them).
Parameters
----------
R_in: array of quaternions
A time-series of rotors (unit quaternions) to be interpolated
t_in: array of float
The times corresponding to R_in
t_out: array of float
The times to which R_in should be interpolated | ['Spherical', 'quadrangular', 'interpolation', 'of', 'rotors', 'with', 'a', 'cubic', 'spline'] | train | https://github.com/moble/quaternion/blob/7a323e81b391d6892e2874073e495e0beb057e85/quaternion_time_series.py#L38-L154 |
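A hedged usage sketch for squad(); it assumes the function is re-exported as quaternion.squad by the numpy-quaternion package, and uses a simple rotation about z for the input rotors.

import numpy as np
import quaternion   # numpy-quaternion package

t_in = np.linspace(0.0, 10.0, 11)
# Smooth, sign-consistent unit rotors: rotation about z by a slowly growing angle.
R_in = np.array([np.quaternion(np.cos(t / 4), 0.0, 0.0, np.sin(t / 4)) for t in t_in],
                dtype=np.quaternion)
t_out = np.linspace(0.0, 10.0, 101)
R_out = quaternion.squad(R_in, t_in, t_out)   # exported name assumed; see module above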
2,822 | mfcloud/python-zvm-sdk | smtLayer/getVM.py | getConsole | def getConsole(rh):
"""
Get the virtual machine's console output.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter getVM.getConsole")
# Transfer the console to this virtual machine.
parms = ["-T", rh.userid]
results = invokeSMCLI(rh, "Image_Console_Get", parms)
if results['overallRC'] != 0:
if (results['overallRC'] == 8 and results['rc'] == 8 and
results['rs'] == 8):
# Give a more specific message. Userid is either
# not logged on or not spooling their console.
msg = msgs.msg['0409'][1] % (modId, rh.userid)
else:
msg = results['response']
rh.updateResults(results) # Use results from invokeSMCLI
rh.printLn("ES", msg)
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
# Check whether the reader is online
with open('/sys/bus/ccw/drivers/vmur/0.0.000c/online', 'r') as myfile:
out = myfile.read().replace('\n', '')
myfile.close()
# Nope, offline, error out and exit
if int(out) != 1:
msg = msgs.msg['0411'][1]
rh.printLn("ES", msg)
rh.updateResults(msgs.msg['0411'][0])
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
# We should set class to *, otherwise we will get errors like:
# vmur: Reader device class does not match spool file class
cmd = ["sudo", "/sbin/vmcp", "spool reader class *"]
strCmd = ' '.join(cmd)
rh.printSysLog("Invoking: " + strCmd)
try:
subprocess.check_output(
cmd,
close_fds=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# If we couldn't change the class, that's not fatal
# But we want to warn about possibly incomplete
# results
msg = msgs.msg['0407'][1] % (modId, strCmd, e.output)
rh.printLn("WS", msg)
except Exception as e:
# All other exceptions.
# If we couldn't change the class, that's not fatal
# But we want to warn about possibly incomplete
# results
rh.printLn("ES", msgs.msg['0422'][1] % (modId, strCmd,
type(e).__name__, str(e)))
        rh.printLn("ES", msgs.msg['0423'][1] % (modId, strCmd,
            type(e).__name__, str(e)))
# List the spool files in the reader
cmd = ["sudo", "/usr/sbin/vmur", "list"]
strCmd = ' '.join(cmd)
rh.printSysLog("Invoking: " + strCmd)
try:
files = subprocess.check_output(
cmd,
close_fds=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# Uh oh, vmur list command failed for some reason
msg = msgs.msg['0408'][1] % (modId, rh.userid,
strCmd, e.output)
rh.printLn("ES", msg)
rh.updateResults(msgs.msg['0408'][0])
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
except Exception as e:
# All other exceptions.
rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
type(e).__name__, str(e)))
rh.updateResults(msgs.msg['0421'][0])
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
# Now for each line that contains our user and is a
# class T console file, add the spool id to our list
spoolFiles = files.split('\n')
outstr = ""
for myfile in spoolFiles:
if (myfile != "" and
myfile.split()[0] == rh.userid and
myfile.split()[2] == "T" and
myfile.split()[3] == "CON"):
fileId = myfile.split()[1]
outstr += fileId + " "
# No files in our list
if outstr == "":
msg = msgs.msg['0410'][1] % (modId, rh.userid)
rh.printLn("ES", msg)
rh.updateResults(msgs.msg['0410'][0])
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
# Output the list
rh.printLn("N", "List of spool files containing "
"console logs from %s: %s" % (rh.userid, outstr))
rh.results['overallRC'] = 0
rh.printSysLog("Exit getVM.getConsole, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | python | def getConsole(rh):
"""
Get the virtual machine's console output.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter getVM.getConsole")
# Transfer the console to this virtual machine.
parms = ["-T", rh.userid]
results = invokeSMCLI(rh, "Image_Console_Get", parms)
if results['overallRC'] != 0:
if (results['overallRC'] == 8 and results['rc'] == 8 and
results['rs'] == 8):
# Give a more specific message. Userid is either
# not logged on or not spooling their console.
msg = msgs.msg['0409'][1] % (modId, rh.userid)
else:
msg = results['response']
rh.updateResults(results) # Use results from invokeSMCLI
rh.printLn("ES", msg)
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
# Check whether the reader is online
with open('/sys/bus/ccw/drivers/vmur/0.0.000c/online', 'r') as myfile:
out = myfile.read().replace('\n', '')
myfile.close()
# Nope, offline, error out and exit
if int(out) != 1:
msg = msgs.msg['0411'][1]
rh.printLn("ES", msg)
rh.updateResults(msgs.msg['0411'][0])
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
# We should set class to *, otherwise we will get errors like:
# vmur: Reader device class does not match spool file class
cmd = ["sudo", "/sbin/vmcp", "spool reader class *"]
strCmd = ' '.join(cmd)
rh.printSysLog("Invoking: " + strCmd)
try:
subprocess.check_output(
cmd,
close_fds=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# If we couldn't change the class, that's not fatal
# But we want to warn about possibly incomplete
# results
msg = msgs.msg['0407'][1] % (modId, strCmd, e.output)
rh.printLn("WS", msg)
except Exception as e:
# All other exceptions.
# If we couldn't change the class, that's not fatal
# But we want to warn about possibly incomplete
# results
rh.printLn("ES", msgs.msg['0422'][1] % (modId, strCmd,
type(e).__name__, str(e)))
        rh.printLn("ES", msgs.msg['0423'][1] % (modId, strCmd,
                   type(e).__name__, str(e)))
# List the spool files in the reader
cmd = ["sudo", "/usr/sbin/vmur", "list"]
strCmd = ' '.join(cmd)
rh.printSysLog("Invoking: " + strCmd)
try:
files = subprocess.check_output(
cmd,
close_fds=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# Uh oh, vmur list command failed for some reason
msg = msgs.msg['0408'][1] % (modId, rh.userid,
strCmd, e.output)
rh.printLn("ES", msg)
rh.updateResults(msgs.msg['0408'][0])
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
except Exception as e:
# All other exceptions.
rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
type(e).__name__, str(e)))
rh.updateResults(msgs.msg['0421'][0])
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
# Now for each line that contains our user and is a
# class T console file, add the spool id to our list
spoolFiles = files.split('\n')
outstr = ""
for myfile in spoolFiles:
if (myfile != "" and
myfile.split()[0] == rh.userid and
myfile.split()[2] == "T" and
myfile.split()[3] == "CON"):
fileId = myfile.split()[1]
outstr += fileId + " "
# No files in our list
if outstr == "":
msg = msgs.msg['0410'][1] % (modId, rh.userid)
rh.printLn("ES", msg)
rh.updateResults(msgs.msg['0410'][0])
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
# Output the list
rh.printLn("N", "List of spool files containing "
"console logs from %s: %s" % (rh.userid, outstr))
rh.results['overallRC'] = 0
rh.printSysLog("Exit getVM.getConsole, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | ['def', 'getConsole', '(', 'rh', ')', ':', 'rh', '.', 'printSysLog', '(', '"Enter getVM.getConsole"', ')', '# Transfer the console to this virtual machine.', 'parms', '=', '[', '"-T"', ',', 'rh', '.', 'userid', ']', 'results', '=', 'invokeSMCLI', '(', 'rh', ',', '"Image_Console_Get"', ',', 'parms', ')', 'if', 'results', '[', "'overallRC'", ']', '!=', '0', ':', 'if', '(', 'results', '[', "'overallRC'", ']', '==', '8', 'and', 'results', '[', "'rc'", ']', '==', '8', 'and', 'results', '[', "'rs'", ']', '==', '8', ')', ':', '# Give a more specific message. Userid is either', '# not logged on or not spooling their console.', 'msg', '=', 'msgs', '.', 'msg', '[', "'0409'", ']', '[', '1', ']', '%', '(', 'modId', ',', 'rh', '.', 'userid', ')', 'else', ':', 'msg', '=', 'results', '[', "'response'", ']', 'rh', '.', 'updateResults', '(', 'results', ')', '# Use results from invokeSMCLI', 'rh', '.', 'printLn', '(', '"ES"', ',', 'msg', ')', 'rh', '.', 'printSysLog', '(', '"Exit getVM.parseCmdLine, rc: "', '+', 'str', '(', 'rh', '.', 'results', '[', "'overallRC'", ']', ')', ')', 'return', 'rh', '.', 'results', '[', "'overallRC'", ']', '# Check whether the reader is online', 'with', 'open', '(', "'/sys/bus/ccw/drivers/vmur/0.0.000c/online'", ',', "'r'", ')', 'as', 'myfile', ':', 'out', '=', 'myfile', '.', 'read', '(', ')', '.', 'replace', '(', "'\\n'", ',', "''", ')', 'myfile', '.', 'close', '(', ')', '# Nope, offline, error out and exit', 'if', 'int', '(', 'out', ')', '!=', '1', ':', 'msg', '=', 'msgs', '.', 'msg', '[', "'0411'", ']', '[', '1', ']', 'rh', '.', 'printLn', '(', '"ES"', ',', 'msg', ')', 'rh', '.', 'updateResults', '(', 'msgs', '.', 'msg', '[', "'0411'", ']', '[', '0', ']', ')', 'rh', '.', 'printSysLog', '(', '"Exit getVM.parseCmdLine, rc: "', '+', 'str', '(', 'rh', '.', 'results', '[', "'overallRC'", ']', ')', ')', 'return', 'rh', '.', 'results', '[', "'overallRC'", ']', '# We should set class to *, otherwise we will get errors like:', '# vmur: Reader device class does not match spool file class', 'cmd', '=', '[', '"sudo"', ',', '"/sbin/vmcp"', ',', '"spool reader class *"', ']', 'strCmd', '=', "' '", '.', 'join', '(', 'cmd', ')', 'rh', '.', 'printSysLog', '(', '"Invoking: "', '+', 'strCmd', ')', 'try', ':', 'subprocess', '.', 'check_output', '(', 'cmd', ',', 'close_fds', '=', 'True', ',', 'stderr', '=', 'subprocess', '.', 'STDOUT', ')', 'except', 'subprocess', '.', 'CalledProcessError', 'as', 'e', ':', "# If we couldn't change the class, that's not fatal", '# But we want to warn about possibly incomplete', '# results', 'msg', '=', 'msgs', '.', 'msg', '[', "'0407'", ']', '[', '1', ']', '%', '(', 'modId', ',', 'strCmd', ',', 'e', '.', 'output', ')', 'rh', '.', 'printLn', '(', '"WS"', ',', 'msg', ')', 'except', 'Exception', 'as', 'e', ':', '# All other exceptions.', "# If we couldn't change the class, that's not fatal", '# But we want to warn about possibly incomplete', '# results', 'rh', '.', 'printLn', '(', '"ES"', ',', 'msgs', '.', 'msg', '[', "'0422'", ']', '[', '1', ']', '%', '(', 'modId', ',', 'strCmd', ',', 'type', '(', 'e', ')', '.', '__name__', ',', 'str', '(', 'e', ')', ')', ')', 'rh', '.', 'printLn', '(', '"ES"', ',', 'msgs', '.', 'msg', '[', "'0423'", ']', '[', '1', ']', '%', 'modId', ',', 'strCmd', ',', 'type', '(', 'e', ')', '.', '__name__', ',', 'str', '(', 'e', ')', ')', '# List the spool files in the reader', 'cmd', '=', '[', '"sudo"', ',', '"/usr/sbin/vmur"', ',', '"list"', ']', 'strCmd', '=', "' '", '.', 'join', '(', 'cmd', ')', 'rh', '.', 
'printSysLog', '(', '"Invoking: "', '+', 'strCmd', ')', 'try', ':', 'files', '=', 'subprocess', '.', 'check_output', '(', 'cmd', ',', 'close_fds', '=', 'True', ',', 'stderr', '=', 'subprocess', '.', 'STDOUT', ')', 'except', 'subprocess', '.', 'CalledProcessError', 'as', 'e', ':', '# Uh oh, vmur list command failed for some reason', 'msg', '=', 'msgs', '.', 'msg', '[', "'0408'", ']', '[', '1', ']', '%', '(', 'modId', ',', 'rh', '.', 'userid', ',', 'strCmd', ',', 'e', '.', 'output', ')', 'rh', '.', 'printLn', '(', '"ES"', ',', 'msg', ')', 'rh', '.', 'updateResults', '(', 'msgs', '.', 'msg', '[', "'0408'", ']', '[', '0', ']', ')', 'rh', '.', 'printSysLog', '(', '"Exit getVM.parseCmdLine, rc: "', '+', 'str', '(', 'rh', '.', 'results', '[', "'overallRC'", ']', ')', ')', 'return', 'rh', '.', 'results', '[', "'overallRC'", ']', 'except', 'Exception', 'as', 'e', ':', '# All other exceptions.', 'rh', '.', 'printLn', '(', '"ES"', ',', 'msgs', '.', 'msg', '[', "'0421'", ']', '[', '1', ']', '%', '(', 'modId', ',', 'strCmd', ',', 'type', '(', 'e', ')', '.', '__name__', ',', 'str', '(', 'e', ')', ')', ')', 'rh', '.', 'updateResults', '(', 'msgs', '.', 'msg', '[', "'0421'", ']', '[', '0', ']', ')', 'rh', '.', 'printSysLog', '(', '"Exit getVM.parseCmdLine, rc: "', '+', 'str', '(', 'rh', '.', 'results', '[', "'overallRC'", ']', ')', ')', 'return', 'rh', '.', 'results', '[', "'overallRC'", ']', '# Now for each line that contains our user and is a', '# class T console file, add the spool id to our list', 'spoolFiles', '=', 'files', '.', 'split', '(', "'\\n'", ')', 'outstr', '=', '""', 'for', 'myfile', 'in', 'spoolFiles', ':', 'if', '(', 'myfile', '!=', '""', 'and', 'myfile', '.', 'split', '(', ')', '[', '0', ']', '==', 'rh', '.', 'userid', 'and', 'myfile', '.', 'split', '(', ')', '[', '2', ']', '==', '"T"', 'and', 'myfile', '.', 'split', '(', ')', '[', '3', ']', '==', '"CON"', ')', ':', 'fileId', '=', 'myfile', '.', 'split', '(', ')', '[', '1', ']', 'outstr', '+=', 'fileId', '+', '" "', '# No files in our list', 'if', 'outstr', '==', '""', ':', 'msg', '=', 'msgs', '.', 'msg', '[', "'0410'", ']', '[', '1', ']', '%', '(', 'modId', ',', 'rh', '.', 'userid', ')', 'rh', '.', 'printLn', '(', '"ES"', ',', 'msg', ')', 'rh', '.', 'updateResults', '(', 'msgs', '.', 'msg', '[', "'0410'", ']', '[', '0', ']', ')', 'rh', '.', 'printSysLog', '(', '"Exit getVM.parseCmdLine, rc: "', '+', 'str', '(', 'rh', '.', 'results', '[', "'overallRC'", ']', ')', ')', 'return', 'rh', '.', 'results', '[', "'overallRC'", ']', '# Output the list', 'rh', '.', 'printLn', '(', '"N"', ',', '"List of spool files containing "', '"console logs from %s: %s"', '%', '(', 'rh', '.', 'userid', ',', 'outstr', ')', ')', 'rh', '.', 'results', '[', "'overallRC'", ']', '=', '0', 'rh', '.', 'printSysLog', '(', '"Exit getVM.getConsole, rc: "', '+', 'str', '(', 'rh', '.', 'results', '[', "'overallRC'", ']', ')', ')', 'return', 'rh', '.', 'results', '[', "'overallRC'", ']'] | Get the virtual machine's console output.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error | ['Get', 'the', 'virtual', 'machine', 's', 'console', 'output', '.'] | train | https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/getVM.py#L152-L283 |
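The function above leans on one recurring idiom: run an external command with `subprocess.check_output`, and treat `CalledProcessError` either as a warning (non-fatal steps such as re-spooling the reader class) or as a hard failure (listing the reader). A stand-alone sketch of that idiom, with made-up commands:

```python
import subprocess

def run_step(cmd, fatal=True):
    """Run a command; return its output, or None when a non-fatal step fails."""
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        if fatal:
            raise  # let the caller turn this into an error result
        print("warning: %s failed: %s" % (" ".join(cmd), exc.output))
        return None

run_step(["true"], fatal=False)   # preparation step: failure only warns
listing = run_step(["ls", "-l"])  # listing step: failure should propagate
```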
2,823 | gristlabs/asttokens | asttokens/line_numbers.py | LineNumbers.offset_to_line | def offset_to_line(self, offset):
"""
Converts 0-based character offset to pair (line, col) of 1-based line and 0-based column
numbers.
"""
offset = max(0, min(self._text_len, offset))
line_index = bisect.bisect_right(self._line_offsets, offset) - 1
return (line_index + 1, offset - self._line_offsets[line_index]) | python | def offset_to_line(self, offset):
"""
Converts 0-based character offset to pair (line, col) of 1-based line and 0-based column
numbers.
"""
offset = max(0, min(self._text_len, offset))
line_index = bisect.bisect_right(self._line_offsets, offset) - 1
return (line_index + 1, offset - self._line_offsets[line_index]) | ['def', 'offset_to_line', '(', 'self', ',', 'offset', ')', ':', 'offset', '=', 'max', '(', '0', ',', 'min', '(', 'self', '.', '_text_len', ',', 'offset', ')', ')', 'line_index', '=', 'bisect', '.', 'bisect_right', '(', 'self', '.', '_line_offsets', ',', 'offset', ')', '-', '1', 'return', '(', 'line_index', '+', '1', ',', 'offset', '-', 'self', '.', '_line_offsets', '[', 'line_index', ']', ')'] | Converts 0-based character offset to pair (line, col) of 1-based line and 0-based column
numbers. | ['Converts', '0', '-', 'based', 'character', 'offset', 'to', 'pair', '(', 'line', 'col', ')', 'of', '1', '-', 'based', 'line', 'and', '0', '-', 'based', 'column', 'numbers', '.'] | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/line_numbers.py#L62-L69 |
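The method above is a small bisect-based lookup: precompute the character offset at which every line starts, then binary-search that table. A self-contained sketch of the same idea (the sample text is made up):

```python
import bisect

text = "alpha\nbeta\ngamma\n"
# Character offset at which each line starts (line 1 starts at offset 0).
line_offsets = [0] + [i + 1 for i, ch in enumerate(text) if ch == "\n"]

def offset_to_line(offset):
    offset = max(0, min(len(text), offset))
    idx = bisect.bisect_right(line_offsets, offset) - 1
    return idx + 1, offset - line_offsets[idx]

print(offset_to_line(7))  # (2, 1): the "e" of "beta" is line 2, column 1
```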
2,824 | keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.set_display | def set_display(self, brightness=100, brightness_mode="auto"):
"""
allows to modify display state (change brightness)
:param int brightness: display brightness [0, 100] (default: 100)
:param str brightness_mode: the brightness mode of the display
[auto, manual] (default: auto)
"""
assert(brightness_mode in ("auto", "manual"))
assert(brightness in range(101))
log.debug("setting display information...")
cmd, url = DEVICE_URLS["set_display"]
json_data = {
"brightness_mode": brightness_mode,
"brightness": brightness
}
return self._exec(cmd, url, json_data=json_data) | python | def set_display(self, brightness=100, brightness_mode="auto"):
"""
allows to modify display state (change brightness)
:param int brightness: display brightness [0, 100] (default: 100)
:param str brightness_mode: the brightness mode of the display
[auto, manual] (default: auto)
"""
assert(brightness_mode in ("auto", "manual"))
assert(brightness in range(101))
log.debug("setting display information...")
cmd, url = DEVICE_URLS["set_display"]
json_data = {
"brightness_mode": brightness_mode,
"brightness": brightness
}
return self._exec(cmd, url, json_data=json_data) | ['def', 'set_display', '(', 'self', ',', 'brightness', '=', '100', ',', 'brightness_mode', '=', '"auto"', ')', ':', 'assert', '(', 'brightness_mode', 'in', '(', '"auto"', ',', '"manual"', ')', ')', 'assert', '(', 'brightness', 'in', 'range', '(', '101', ')', ')', 'log', '.', 'debug', '(', '"setting display information..."', ')', 'cmd', ',', 'url', '=', 'DEVICE_URLS', '[', '"set_display"', ']', 'json_data', '=', '{', '"brightness_mode"', ':', 'brightness_mode', ',', '"brightness"', ':', 'brightness', '}', 'return', 'self', '.', '_exec', '(', 'cmd', ',', 'url', ',', 'json_data', '=', 'json_data', ')'] | allows to modify display state (change brightness)
:param int brightness: display brightness [0, 100] (default: 100)
:param str brightness_mode: the brightness mode of the display
[auto, manual] (default: auto) | ['allows', 'to', 'modify', 'display', 'state', '(', 'change', 'brightness', ')'] | train | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L350-L369 |
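A possible call sequence for the method above through the lmnotify client; the client id/secret are placeholders and the surrounding setup follows the package's usual pattern, so treat this as a sketch rather than verified code:

```python
from lmnotify import LaMetricManager

lmn = LaMetricManager("<client-id>", "<client-secret>")  # placeholder credentials
devices = lmn.get_devices()
if devices:
    lmn.set_device(devices[0])
    # Fixed 40% brightness; brightness_mode="auto" would let the device decide.
    lmn.set_display(brightness=40, brightness_mode="manual")
```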
2,825 | pantsbuild/pants | src/python/pants/backend/jvm/tasks/jar_publish.py | JarPublish.create_doc_jar | def create_doc_jar(self, target, open_jar, version):
"""Returns a doc jar if either scala or java docs are available for the given target."""
javadoc = self._java_doc(target)
scaladoc = self._scala_doc(target)
if javadoc or scaladoc:
jar_path = self.artifact_path(open_jar, version, suffix='-javadoc')
with self.open_jar(jar_path, overwrite=True, compressed=True) as open_jar:
def add_docs(docs):
if docs:
for basedir, doc_files in docs.items():
for doc_file in doc_files:
open_jar.write(os.path.join(basedir, doc_file), doc_file)
add_docs(javadoc)
add_docs(scaladoc)
return jar_path
else:
return None | python | def create_doc_jar(self, target, open_jar, version):
"""Returns a doc jar if either scala or java docs are available for the given target."""
javadoc = self._java_doc(target)
scaladoc = self._scala_doc(target)
if javadoc or scaladoc:
jar_path = self.artifact_path(open_jar, version, suffix='-javadoc')
with self.open_jar(jar_path, overwrite=True, compressed=True) as open_jar:
def add_docs(docs):
if docs:
for basedir, doc_files in docs.items():
for doc_file in doc_files:
open_jar.write(os.path.join(basedir, doc_file), doc_file)
add_docs(javadoc)
add_docs(scaladoc)
return jar_path
else:
return None | ['def', 'create_doc_jar', '(', 'self', ',', 'target', ',', 'open_jar', ',', 'version', ')', ':', 'javadoc', '=', 'self', '.', '_java_doc', '(', 'target', ')', 'scaladoc', '=', 'self', '.', '_scala_doc', '(', 'target', ')', 'if', 'javadoc', 'or', 'scaladoc', ':', 'jar_path', '=', 'self', '.', 'artifact_path', '(', 'open_jar', ',', 'version', ',', 'suffix', '=', "'-javadoc'", ')', 'with', 'self', '.', 'open_jar', '(', 'jar_path', ',', 'overwrite', '=', 'True', ',', 'compressed', '=', 'True', ')', 'as', 'open_jar', ':', 'def', 'add_docs', '(', 'docs', ')', ':', 'if', 'docs', ':', 'for', 'basedir', ',', 'doc_files', 'in', 'docs', '.', 'items', '(', ')', ':', 'for', 'doc_file', 'in', 'doc_files', ':', 'open_jar', '.', 'write', '(', 'os', '.', 'path', '.', 'join', '(', 'basedir', ',', 'doc_file', ')', ',', 'doc_file', ')', 'add_docs', '(', 'javadoc', ')', 'add_docs', '(', 'scaladoc', ')', 'return', 'jar_path', 'else', ':', 'return', 'None'] | Returns a doc jar if either scala or java docs are available for the given target. | ['Returns', 'a', 'doc', 'jar', 'if', 'either', 'scala', 'or', 'java', 'docs', 'are', 'available', 'for', 'the', 'given', 'target', '.'] | train | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/jar_publish.py#L985-L1002 |
2,826 | b3j0f/utils | b3j0f/utils/reflect.py | base_elts | def base_elts(elt, cls=None, depth=None):
"""Get bases elements of the input elt.
- If elt is an instance, get class and all base classes.
- If elt is a method, get all base methods.
- If elt is a class, get all base classes.
- In other case, get an empty list.
:param elt: supposed inherited elt.
:param cls: cls from where find attributes equal to elt. If None,
it is found as much as possible. Required in python3 for function
classes.
:type cls: type or list
:param int depth: search depth. If None (default), depth is maximal.
:return: elt bases elements. if elt has not base elements, result is empty.
:rtype: list
"""
result = []
elt_name = getattr(elt, '__name__', None)
if elt_name is not None:
cls = [] if cls is None else ensureiterable(cls)
elt_is_class = False
# if cls is None and elt is routine, it is possible to find the cls
if not cls and isroutine(elt):
if hasattr(elt, '__self__'): # from the instance
instance = get_method_self(elt) # get instance
if instance is None and PY2: # get base im_class if PY2
cls = list(elt.im_class.__bases__)
else: # use instance class
cls = [instance.__class__]
# cls is elt if elt is a class
elif isclass(elt):
elt_is_class = True
cls = list(elt.__bases__)
if cls: # if cls is not empty, find all base classes
index_of_found_classes = 0 # get last visited class index
visited_classes = set(cls) # cache for visited classes
len_classes = len(cls)
if depth is None: # if depth is None, get maximal value
depth = -1 # set negative value
while depth != 0 and index_of_found_classes != len_classes:
len_classes = len(cls)
for index in range(index_of_found_classes, len_classes):
_cls = cls[index]
for base_cls in _cls.__bases__:
if base_cls in visited_classes:
continue
else:
visited_classes.add(base_cls)
cls.append(base_cls)
index_of_found_classes = len_classes
depth -= 1
if elt_is_class:
# if cls is elt, result is classes minus first class
result = cls
elif isroutine(elt):
# get an elt to compare with found element
if ismethod(elt):
elt_to_compare = get_method_function(elt)
else:
elt_to_compare = elt
for _cls in cls: # for all classes
# get possible base elt
b_elt = getattr(_cls, elt_name, None)
if b_elt is not None:
# compare funcs
if ismethod(b_elt):
bec = get_method_function(b_elt)
else:
bec = b_elt
# if matching, add to result
if bec is elt_to_compare:
result.append(b_elt)
return result | python | def base_elts(elt, cls=None, depth=None):
"""Get bases elements of the input elt.
- If elt is an instance, get class and all base classes.
- If elt is a method, get all base methods.
- If elt is a class, get all base classes.
- In other case, get an empty list.
:param elt: supposed inherited elt.
:param cls: cls from where find attributes equal to elt. If None,
it is found as much as possible. Required in python3 for function
classes.
:type cls: type or list
:param int depth: search depth. If None (default), depth is maximal.
:return: elt bases elements. if elt has not base elements, result is empty.
:rtype: list
"""
result = []
elt_name = getattr(elt, '__name__', None)
if elt_name is not None:
cls = [] if cls is None else ensureiterable(cls)
elt_is_class = False
# if cls is None and elt is routine, it is possible to find the cls
if not cls and isroutine(elt):
if hasattr(elt, '__self__'): # from the instance
instance = get_method_self(elt) # get instance
if instance is None and PY2: # get base im_class if PY2
cls = list(elt.im_class.__bases__)
else: # use instance class
cls = [instance.__class__]
# cls is elt if elt is a class
elif isclass(elt):
elt_is_class = True
cls = list(elt.__bases__)
if cls: # if cls is not empty, find all base classes
index_of_found_classes = 0 # get last visited class index
visited_classes = set(cls) # cache for visited classes
len_classes = len(cls)
if depth is None: # if depth is None, get maximal value
depth = -1 # set negative value
while depth != 0 and index_of_found_classes != len_classes:
len_classes = len(cls)
for index in range(index_of_found_classes, len_classes):
_cls = cls[index]
for base_cls in _cls.__bases__:
if base_cls in visited_classes:
continue
else:
visited_classes.add(base_cls)
cls.append(base_cls)
index_of_found_classes = len_classes
depth -= 1
if elt_is_class:
# if cls is elt, result is classes minus first class
result = cls
elif isroutine(elt):
# get an elt to compare with found element
if ismethod(elt):
elt_to_compare = get_method_function(elt)
else:
elt_to_compare = elt
for _cls in cls: # for all classes
# get possible base elt
b_elt = getattr(_cls, elt_name, None)
if b_elt is not None:
# compare funcs
if ismethod(b_elt):
bec = get_method_function(b_elt)
else:
bec = b_elt
# if matching, add to result
if bec is elt_to_compare:
result.append(b_elt)
return result | ['def', 'base_elts', '(', 'elt', ',', 'cls', '=', 'None', ',', 'depth', '=', 'None', ')', ':', 'result', '=', '[', ']', 'elt_name', '=', 'getattr', '(', 'elt', ',', "'__name__'", ',', 'None', ')', 'if', 'elt_name', 'is', 'not', 'None', ':', 'cls', '=', '[', ']', 'if', 'cls', 'is', 'None', 'else', 'ensureiterable', '(', 'cls', ')', 'elt_is_class', '=', 'False', '# if cls is None and elt is routine, it is possible to find the cls', 'if', 'not', 'cls', 'and', 'isroutine', '(', 'elt', ')', ':', 'if', 'hasattr', '(', 'elt', ',', "'__self__'", ')', ':', '# from the instance', 'instance', '=', 'get_method_self', '(', 'elt', ')', '# get instance', 'if', 'instance', 'is', 'None', 'and', 'PY2', ':', '# get base im_class if PY2', 'cls', '=', 'list', '(', 'elt', '.', 'im_class', '.', '__bases__', ')', 'else', ':', '# use instance class', 'cls', '=', '[', 'instance', '.', '__class__', ']', '# cls is elt if elt is a class', 'elif', 'isclass', '(', 'elt', ')', ':', 'elt_is_class', '=', 'True', 'cls', '=', 'list', '(', 'elt', '.', '__bases__', ')', 'if', 'cls', ':', '# if cls is not empty, find all base classes', 'index_of_found_classes', '=', '0', '# get last visited class index', 'visited_classes', '=', 'set', '(', 'cls', ')', '# cache for visited classes', 'len_classes', '=', 'len', '(', 'cls', ')', 'if', 'depth', 'is', 'None', ':', '# if depth is None, get maximal value', 'depth', '=', '-', '1', '# set negative value', 'while', 'depth', '!=', '0', 'and', 'index_of_found_classes', '!=', 'len_classes', ':', 'len_classes', '=', 'len', '(', 'cls', ')', 'for', 'index', 'in', 'range', '(', 'index_of_found_classes', ',', 'len_classes', ')', ':', '_cls', '=', 'cls', '[', 'index', ']', 'for', 'base_cls', 'in', '_cls', '.', '__bases__', ':', 'if', 'base_cls', 'in', 'visited_classes', ':', 'continue', 'else', ':', 'visited_classes', '.', 'add', '(', 'base_cls', ')', 'cls', '.', 'append', '(', 'base_cls', ')', 'index_of_found_classes', '=', 'len_classes', 'depth', '-=', '1', 'if', 'elt_is_class', ':', '# if cls is elt, result is classes minus first class', 'result', '=', 'cls', 'elif', 'isroutine', '(', 'elt', ')', ':', '# get an elt to compare with found element', 'if', 'ismethod', '(', 'elt', ')', ':', 'elt_to_compare', '=', 'get_method_function', '(', 'elt', ')', 'else', ':', 'elt_to_compare', '=', 'elt', 'for', '_cls', 'in', 'cls', ':', '# for all classes', '# get possible base elt', 'b_elt', '=', 'getattr', '(', '_cls', ',', 'elt_name', ',', 'None', ')', 'if', 'b_elt', 'is', 'not', 'None', ':', '# compare funcs', 'if', 'ismethod', '(', 'b_elt', ')', ':', 'bec', '=', 'get_method_function', '(', 'b_elt', ')', 'else', ':', 'bec', '=', 'b_elt', '# if matching, add to result', 'if', 'bec', 'is', 'elt_to_compare', ':', 'result', '.', 'append', '(', 'b_elt', ')', 'return', 'result'] | Get bases elements of the input elt.
- If elt is an instance, get class and all base classes.
- If elt is a method, get all base methods.
- If elt is a class, get all base classes.
- In other case, get an empty list.
:param elt: supposed inherited elt.
:param cls: cls from where find attributes equal to elt. If None,
it is found as much as possible. Required in python3 for function
classes.
:type cls: type or list
:param int depth: search depth. If None (default), depth is maximal.
:return: elt bases elements. if elt has not base elements, result is empty.
:rtype: list | ['Get', 'bases', 'elements', 'of', 'the', 'input', 'elt', '.'] | train | https://github.com/b3j0f/utils/blob/793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff/b3j0f/utils/reflect.py#L48-L145 |
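A small sketch of what the reflection helper above returns for a toy class hierarchy, assuming the `b3j0f.utils` package is installed (the import path is taken from the record's URL); the classes are made up:

```python
from b3j0f.utils.reflect import base_elts

class Base(object):
    def greet(self):
        return "hello"

class Child(Base):
    pass

# For a class, base_elts walks __bases__ transitively.
print(base_elts(Child))  # roughly: [<class 'Base'>, <class 'object'>]

# For an inherited (not overridden) bound method, the matching attribute is
# looked up again on each class in the hierarchy that exposes it.
print(base_elts(Child().greet))
```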
2,827 | moonso/vcftoolbox | vcftoolbox/cli.py | variants | def variants(ctx, snpeff):
"""Print the variants in a vcf"""
head = ctx.parent.head
vcf_handle = ctx.parent.handle
outfile = ctx.parent.outfile
silent = ctx.parent.silent
print_headers(head, outfile=outfile, silent=silent)
for line in vcf_handle:
print_variant(variant_line=line, outfile=outfile, silent=silent)
if snpeff:
variant_dict = get_variant_dict(
variant_line = line,
header_line = head.header
)
#Create a info dict:
info_dict = get_info_dict(
info_line = variant_dict['INFO']
)
snpeff_string = info_dict.get('ANN')
if snpeff_string:
#Get the snpeff annotations
snpeff_info = get_snpeff_info(
snpeff_string = snpeff_string,
snpeff_header = head.snpeff_columns
) | python | def variants(ctx, snpeff):
"""Print the variants in a vcf"""
head = ctx.parent.head
vcf_handle = ctx.parent.handle
outfile = ctx.parent.outfile
silent = ctx.parent.silent
print_headers(head, outfile=outfile, silent=silent)
for line in vcf_handle:
print_variant(variant_line=line, outfile=outfile, silent=silent)
if snpeff:
variant_dict = get_variant_dict(
variant_line = line,
header_line = head.header
)
#Create a info dict:
info_dict = get_info_dict(
info_line = variant_dict['INFO']
)
snpeff_string = info_dict.get('ANN')
if snpeff_string:
#Get the snpeff annotations
snpeff_info = get_snpeff_info(
snpeff_string = snpeff_string,
snpeff_header = head.snpeff_columns
) | ['def', 'variants', '(', 'ctx', ',', 'snpeff', ')', ':', 'head', '=', 'ctx', '.', 'parent', '.', 'head', 'vcf_handle', '=', 'ctx', '.', 'parent', '.', 'handle', 'outfile', '=', 'ctx', '.', 'parent', '.', 'outfile', 'silent', '=', 'ctx', '.', 'parent', '.', 'silent', 'print_headers', '(', 'head', ',', 'outfile', '=', 'outfile', ',', 'silent', '=', 'silent', ')', 'for', 'line', 'in', 'vcf_handle', ':', 'print_variant', '(', 'variant_line', '=', 'line', ',', 'outfile', '=', 'outfile', ',', 'silent', '=', 'silent', ')', 'if', 'snpeff', ':', 'variant_dict', '=', 'get_variant_dict', '(', 'variant_line', '=', 'line', ',', 'header_line', '=', 'head', '.', 'header', ')', '#Create a info dict:', 'info_dict', '=', 'get_info_dict', '(', 'info_line', '=', 'variant_dict', '[', "'INFO'", ']', ')', 'snpeff_string', '=', 'info_dict', '.', 'get', '(', "'ANN'", ')', 'if', 'snpeff_string', ':', '#Get the snpeff annotations', 'snpeff_info', '=', 'get_snpeff_info', '(', 'snpeff_string', '=', 'snpeff_string', ',', 'snpeff_header', '=', 'head', '.', 'snpeff_columns', ')'] | Print the variants in a vcf | ['Print', 'the', 'variants', 'in', 'a', 'vcf'] | train | https://github.com/moonso/vcftoolbox/blob/438fb1d85a83812c389774b94802eb5921c89e3a/vcftoolbox/cli.py#L96-L123 |
2,828 | Kane610/deconz | pydeconz/__init__.py | DeconzSession.start | def start(self) -> None:
"""Connect websocket to deCONZ."""
if self.config:
self.websocket = self.ws_client(
self.loop, self.session, self.host,
self.config.websocketport, self.async_session_handler)
self.websocket.start()
else:
_LOGGER.error('No deCONZ config available') | python | def start(self) -> None:
"""Connect websocket to deCONZ."""
if self.config:
self.websocket = self.ws_client(
self.loop, self.session, self.host,
self.config.websocketport, self.async_session_handler)
self.websocket.start()
else:
_LOGGER.error('No deCONZ config available') | ['def', 'start', '(', 'self', ')', '->', 'None', ':', 'if', 'self', '.', 'config', ':', 'self', '.', 'websocket', '=', 'self', '.', 'ws_client', '(', 'self', '.', 'loop', ',', 'self', '.', 'session', ',', 'self', '.', 'host', ',', 'self', '.', 'config', '.', 'websocketport', ',', 'self', '.', 'async_session_handler', ')', 'self', '.', 'websocket', '.', 'start', '(', ')', 'else', ':', '_LOGGER', '.', 'error', '(', "'No deCONZ config available'", ')'] | Connect websocket to deCONZ. | ['Connect', 'websocket', 'to', 'deCONZ', '.'] | train | https://github.com/Kane610/deconz/blob/8a9498dbbc8c168d4a081173ad6c3b1e17fffdf6/pydeconz/__init__.py#L38-L46 |
2,829 | ttinoco/OPTALG | optalg/lin_solver/_mumps/__init__.py | _MumpsBaseContext.set_silent | def set_silent(self):
"""Silence most messages."""
self.set_icntl(1, -1) # output stream for error msgs
        self.set_icntl(2, -1) # output stream for diagnostic msgs
self.set_icntl(3, -1) # output stream for global info
self.set_icntl(4, 0) | python | def set_silent(self):
"""Silence most messages."""
self.set_icntl(1, -1) # output stream for error msgs
        self.set_icntl(2, -1) # output stream for diagnostic msgs
self.set_icntl(3, -1) # output stream for global info
self.set_icntl(4, 0) | ['def', 'set_silent', '(', 'self', ')', ':', 'self', '.', 'set_icntl', '(', '1', ',', '-', '1', ')', '# output stream for error msgs', 'self', '.', 'set_icntl', '(', '2', ',', '-', '1', ')', '# otuput stream for diagnostic msgs', 'self', '.', 'set_icntl', '(', '3', ',', '-', '1', ')', '# output stream for global info', 'self', '.', 'set_icntl', '(', '4', ',', '0', ')'] | Silence most messages. | ['Silence', 'most', 'messages', '.'] | train | https://github.com/ttinoco/OPTALG/blob/d4f141292f281eea4faa71473258139e7f433001/optalg/lin_solver/_mumps/__init__.py#L187-L192 |
2,830 | Tinche/cattrs | src/cattr/converters.py | Converter._unstructure_mapping | def _unstructure_mapping(self, mapping):
"""Convert a mapping of attr classes to primitive equivalents."""
# We can reuse the mapping class, so dicts stay dicts and OrderedDicts
# stay OrderedDicts.
dispatch = self._unstructure_func.dispatch
return mapping.__class__(
(dispatch(k.__class__)(k), dispatch(v.__class__)(v))
for k, v in mapping.items()
) | python | def _unstructure_mapping(self, mapping):
"""Convert a mapping of attr classes to primitive equivalents."""
# We can reuse the mapping class, so dicts stay dicts and OrderedDicts
# stay OrderedDicts.
dispatch = self._unstructure_func.dispatch
return mapping.__class__(
(dispatch(k.__class__)(k), dispatch(v.__class__)(v))
for k, v in mapping.items()
) | ['def', '_unstructure_mapping', '(', 'self', ',', 'mapping', ')', ':', '# We can reuse the mapping class, so dicts stay dicts and OrderedDicts', '# stay OrderedDicts.', 'dispatch', '=', 'self', '.', '_unstructure_func', '.', 'dispatch', 'return', 'mapping', '.', '__class__', '(', '(', 'dispatch', '(', 'k', '.', '__class__', ')', '(', 'k', ')', ',', 'dispatch', '(', 'v', '.', '__class__', ')', '(', 'v', ')', ')', 'for', 'k', ',', 'v', 'in', 'mapping', '.', 'items', '(', ')', ')'] | Convert a mapping of attr classes to primitive equivalents. | ['Convert', 'a', 'mapping', 'of', 'attr', 'classes', 'to', 'primitive', 'equivalents', '.'] | train | https://github.com/Tinche/cattrs/blob/481bc9bdb69b2190d699b54f331c8c5c075506d5/src/cattr/converters.py#L227-L236 |
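The private method above is what runs when a mapping is unstructured through the public cattrs API; a sketch assuming the attrs and cattrs packages are installed:

```python
import attr
import cattr

@attr.s
class Point(object):
    x = attr.ib()
    y = attr.ib()

data = {"a": Point(1, 2), "b": Point(3, 4)}
# Keys and values are each dispatched on their runtime class, and the result
# reuses the mapping's own class (a plain dict stays a plain dict).
print(cattr.unstructure(data))
# {'a': {'x': 1, 'y': 2}, 'b': {'x': 3, 'y': 4}}
```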
2,831 | fhcrc/seqmagick | seqmagick/subcommands/info.py | summarize_sequence_file | def summarize_sequence_file(source_file, file_type=None):
"""
Summarizes a sequence file, returning a tuple containing the name,
whether the file is an alignment, minimum sequence length, maximum
sequence length, average length, number of sequences.
"""
is_alignment = True
avg_length = None
min_length = sys.maxsize
max_length = 0
sequence_count = 0
# Get an iterator and analyze the data.
with common.FileType('rt')(source_file) as fp:
if not file_type:
file_type = fileformat.from_handle(fp)
for record in SeqIO.parse(fp, file_type):
sequence_count += 1
sequence_length = len(record)
if max_length != 0:
# If even one sequence is not the same length as the others,
# we don't consider this an alignment.
if sequence_length != max_length:
is_alignment = False
# Lengths
if sequence_length > max_length:
max_length = sequence_length
if sequence_length < min_length:
min_length = sequence_length
# Average length
if sequence_count == 1:
avg_length = float(sequence_length)
else:
avg_length = avg_length + ((sequence_length - avg_length) /
sequence_count)
# Handle an empty file:
if avg_length is None:
min_length = max_length = avg_length = 0
if sequence_count <= 1:
is_alignment = False
return (source_file, str(is_alignment).upper(), min_length,
max_length, avg_length, sequence_count) | python | def summarize_sequence_file(source_file, file_type=None):
"""
Summarizes a sequence file, returning a tuple containing the name,
whether the file is an alignment, minimum sequence length, maximum
sequence length, average length, number of sequences.
"""
is_alignment = True
avg_length = None
min_length = sys.maxsize
max_length = 0
sequence_count = 0
# Get an iterator and analyze the data.
with common.FileType('rt')(source_file) as fp:
if not file_type:
file_type = fileformat.from_handle(fp)
for record in SeqIO.parse(fp, file_type):
sequence_count += 1
sequence_length = len(record)
if max_length != 0:
# If even one sequence is not the same length as the others,
# we don't consider this an alignment.
if sequence_length != max_length:
is_alignment = False
# Lengths
if sequence_length > max_length:
max_length = sequence_length
if sequence_length < min_length:
min_length = sequence_length
# Average length
if sequence_count == 1:
avg_length = float(sequence_length)
else:
avg_length = avg_length + ((sequence_length - avg_length) /
sequence_count)
# Handle an empty file:
if avg_length is None:
min_length = max_length = avg_length = 0
if sequence_count <= 1:
is_alignment = False
return (source_file, str(is_alignment).upper(), min_length,
max_length, avg_length, sequence_count) | ['def', 'summarize_sequence_file', '(', 'source_file', ',', 'file_type', '=', 'None', ')', ':', 'is_alignment', '=', 'True', 'avg_length', '=', 'None', 'min_length', '=', 'sys', '.', 'maxsize', 'max_length', '=', '0', 'sequence_count', '=', '0', '# Get an iterator and analyze the data.', 'with', 'common', '.', 'FileType', '(', "'rt'", ')', '(', 'source_file', ')', 'as', 'fp', ':', 'if', 'not', 'file_type', ':', 'file_type', '=', 'fileformat', '.', 'from_handle', '(', 'fp', ')', 'for', 'record', 'in', 'SeqIO', '.', 'parse', '(', 'fp', ',', 'file_type', ')', ':', 'sequence_count', '+=', '1', 'sequence_length', '=', 'len', '(', 'record', ')', 'if', 'max_length', '!=', '0', ':', '# If even one sequence is not the same length as the others,', "# we don't consider this an alignment.", 'if', 'sequence_length', '!=', 'max_length', ':', 'is_alignment', '=', 'False', '# Lengths', 'if', 'sequence_length', '>', 'max_length', ':', 'max_length', '=', 'sequence_length', 'if', 'sequence_length', '<', 'min_length', ':', 'min_length', '=', 'sequence_length', '# Average length', 'if', 'sequence_count', '==', '1', ':', 'avg_length', '=', 'float', '(', 'sequence_length', ')', 'else', ':', 'avg_length', '=', 'avg_length', '+', '(', '(', 'sequence_length', '-', 'avg_length', ')', '/', 'sequence_count', ')', '# Handle an empty file:', 'if', 'avg_length', 'is', 'None', ':', 'min_length', '=', 'max_length', '=', 'avg_length', '=', '0', 'if', 'sequence_count', '<=', '1', ':', 'is_alignment', '=', 'False', 'return', '(', 'source_file', ',', 'str', '(', 'is_alignment', ')', '.', 'upper', '(', ')', ',', 'min_length', ',', 'max_length', ',', 'avg_length', ',', 'sequence_count', ')'] | Summarizes a sequence file, returning a tuple containing the name,
whether the file is an alignment, minimum sequence length, maximum
sequence length, average length, number of sequences. | ['Summarizes', 'a', 'sequence', 'file', 'returning', 'a', 'tuple', 'containing', 'the', 'name', 'whether', 'the', 'file', 'is', 'an', 'alignment', 'minimum', 'sequence', 'length', 'maximum', 'sequence', 'length', 'average', 'length', 'number', 'of', 'sequences', '.'] | train | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/info.py#L98-L143 |
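The summary above keeps a running mean with the update `avg += (x - avg) / n` instead of storing all lengths; a stand-alone check of that identity with made-up lengths:

```python
lengths = [120, 95, 130, 100]

avg = None
for n, x in enumerate(lengths, start=1):
    if n == 1:
        avg = float(x)
    else:
        avg = avg + (x - avg) / n  # incremental mean of the first n values

print(avg, sum(lengths) / float(len(lengths)))  # both print 111.25
```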
2,832 | markokr/rarfile | rarfile.py | RarExtFile.close | def close(self):
"""Close open resources."""
super(RarExtFile, self).close()
if self._fd:
self._fd.close()
self._fd = None | python | def close(self):
"""Close open resources."""
super(RarExtFile, self).close()
if self._fd:
self._fd.close()
self._fd = None | ['def', 'close', '(', 'self', ')', ':', 'super', '(', 'RarExtFile', ',', 'self', ')', '.', 'close', '(', ')', 'if', 'self', '.', '_fd', ':', 'self', '.', '_fd', '.', 'close', '(', ')', 'self', '.', '_fd', '=', 'None'] | Close open resources. | ['Close', 'open', 'resources', '.'] | train | https://github.com/markokr/rarfile/blob/2704344e8d7a1658c96c8ed8f449d7ba01bedea3/rarfile.py#L2046-L2053 |
2,833 | saltstack/salt | salt/modules/solr.py | _get_none_or_value | def _get_none_or_value(value):
'''
PRIVATE METHOD
Checks to see if the value of a primitive or built-in container such as
a list, dict, set, tuple etc is empty or none. None type is returned if the
value is empty/None/False. Number data types that are 0 will return None.
value : obj
The primitive or built-in container to evaluate.
Return: None or value
'''
if value is None:
return None
elif not value:
return value
# if it's a string, and it's not empty check for none
elif isinstance(value, six.string_types):
if value.lower() == 'none':
return None
return value
# return None
else:
return None | python | def _get_none_or_value(value):
'''
PRIVATE METHOD
Checks to see if the value of a primitive or built-in container such as
a list, dict, set, tuple etc is empty or none. None type is returned if the
value is empty/None/False. Number data types that are 0 will return None.
value : obj
The primitive or built-in container to evaluate.
Return: None or value
'''
if value is None:
return None
elif not value:
return value
# if it's a string, and it's not empty check for none
elif isinstance(value, six.string_types):
if value.lower() == 'none':
return None
return value
# return None
else:
return None | ['def', '_get_none_or_value', '(', 'value', ')', ':', 'if', 'value', 'is', 'None', ':', 'return', 'None', 'elif', 'not', 'value', ':', 'return', 'value', "# if it's a string, and it's not empty check for none", 'elif', 'isinstance', '(', 'value', ',', 'six', '.', 'string_types', ')', ':', 'if', 'value', '.', 'lower', '(', ')', '==', "'none'", ':', 'return', 'None', 'return', 'value', '# return None', 'else', ':', 'return', 'None'] | PRIVATE METHOD
Checks to see if the value of a primitive or built-in container such as
a list, dict, set, tuple etc is empty or none. None type is returned if the
value is empty/None/False. Number data types that are 0 will return None.
value : obj
The primitive or built-in container to evaluate.
Return: None or value | ['PRIVATE', 'METHOD', 'Checks', 'to', 'see', 'if', 'the', 'value', 'of', 'a', 'primitive', 'or', 'built', '-', 'in', 'container', 'such', 'as', 'a', 'list', 'dict', 'set', 'tuple', 'etc', 'is', 'empty', 'or', 'none', '.', 'None', 'type', 'is', 'returned', 'if', 'the', 'value', 'is', 'empty', '/', 'None', '/', 'False', '.', 'Number', 'data', 'types', 'that', 'are', '0', 'will', 'return', 'None', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solr.py#L99-L122 |
2,834 | Robpol86/sphinxcontrib-versioning | sphinxcontrib/versioning/sphinx_.py | build | def build(source, target, versions, current_name, is_root):
"""Build Sphinx docs for one version. Includes Versions class instance with names/urls in the HTML context.
:raise HandledError: If sphinx-build fails. Will be logged before raising.
:param str source: Source directory to pass to sphinx-build.
:param str target: Destination directory to write documentation to (passed to sphinx-build).
:param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
:param str current_name: The ref name of the current version being built.
:param bool is_root: Is this build in the web root?
"""
log = logging.getLogger(__name__)
argv = ('sphinx-build', source, target)
config = Config.from_context()
log.debug('Running sphinx-build for %s with args: %s', current_name, str(argv))
child = multiprocessing.Process(target=_build, args=(argv, config, versions, current_name, is_root))
child.start()
child.join() # Block.
if child.exitcode != 0:
log.error('sphinx-build failed for branch/tag: %s', current_name)
raise HandledError | python | def build(source, target, versions, current_name, is_root):
"""Build Sphinx docs for one version. Includes Versions class instance with names/urls in the HTML context.
:raise HandledError: If sphinx-build fails. Will be logged before raising.
:param str source: Source directory to pass to sphinx-build.
:param str target: Destination directory to write documentation to (passed to sphinx-build).
:param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
:param str current_name: The ref name of the current version being built.
:param bool is_root: Is this build in the web root?
"""
log = logging.getLogger(__name__)
argv = ('sphinx-build', source, target)
config = Config.from_context()
log.debug('Running sphinx-build for %s with args: %s', current_name, str(argv))
child = multiprocessing.Process(target=_build, args=(argv, config, versions, current_name, is_root))
child.start()
child.join() # Block.
if child.exitcode != 0:
log.error('sphinx-build failed for branch/tag: %s', current_name)
raise HandledError | ['def', 'build', '(', 'source', ',', 'target', ',', 'versions', ',', 'current_name', ',', 'is_root', ')', ':', 'log', '=', 'logging', '.', 'getLogger', '(', '__name__', ')', 'argv', '=', '(', "'sphinx-build'", ',', 'source', ',', 'target', ')', 'config', '=', 'Config', '.', 'from_context', '(', ')', 'log', '.', 'debug', '(', "'Running sphinx-build for %s with args: %s'", ',', 'current_name', ',', 'str', '(', 'argv', ')', ')', 'child', '=', 'multiprocessing', '.', 'Process', '(', 'target', '=', '_build', ',', 'args', '=', '(', 'argv', ',', 'config', ',', 'versions', ',', 'current_name', ',', 'is_root', ')', ')', 'child', '.', 'start', '(', ')', 'child', '.', 'join', '(', ')', '# Block.', 'if', 'child', '.', 'exitcode', '!=', '0', ':', 'log', '.', 'error', '(', "'sphinx-build failed for branch/tag: %s'", ',', 'current_name', ')', 'raise', 'HandledError'] | Build Sphinx docs for one version. Includes Versions class instance with names/urls in the HTML context.
:raise HandledError: If sphinx-build fails. Will be logged before raising.
:param str source: Source directory to pass to sphinx-build.
:param str target: Destination directory to write documentation to (passed to sphinx-build).
:param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
:param str current_name: The ref name of the current version being built.
:param bool is_root: Is this build in the web root? | ['Build', 'Sphinx', 'docs', 'for', 'one', 'version', '.', 'Includes', 'Versions', 'class', 'instance', 'with', 'names', '/', 'urls', 'in', 'the', 'HTML', 'context', '.'] | train | https://github.com/Robpol86/sphinxcontrib-versioning/blob/920edec0ac764081b583a2ecf4e6952762b9dbf2/sphinxcontrib/versioning/sphinx_.py#L222-L243 |
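The builder above isolates each sphinx-build run in a child process and inspects its exit code; the same shape, reduced to a stand-alone sketch with a made-up worker in place of sphinx-build:

```python
import multiprocessing
import sys

def worker(argv):
    print("pretending to build:", argv)
    sys.exit(0)  # a non-zero code here would mark the build as failed

if __name__ == "__main__":
    child = multiprocessing.Process(target=worker,
                                    args=(["sphinx-build", "src", "out"],))
    child.start()
    child.join()  # block until the child finishes
    if child.exitcode != 0:
        raise RuntimeError("build failed")
```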
2,835 | brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py | brocade_vswitch.get_vnetwork_portgroups_input_vcenter | def get_vnetwork_portgroups_input_vcenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
input = ET.SubElement(get_vnetwork_portgroups, "input")
vcenter = ET.SubElement(input, "vcenter")
vcenter.text = kwargs.pop('vcenter')
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def get_vnetwork_portgroups_input_vcenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
input = ET.SubElement(get_vnetwork_portgroups, "input")
vcenter = ET.SubElement(input, "vcenter")
vcenter.text = kwargs.pop('vcenter')
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'get_vnetwork_portgroups_input_vcenter', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_vnetwork_portgroups', '=', 'ET', '.', 'Element', '(', '"get_vnetwork_portgroups"', ')', 'config', '=', 'get_vnetwork_portgroups', 'input', '=', 'ET', '.', 'SubElement', '(', 'get_vnetwork_portgroups', ',', '"input"', ')', 'vcenter', '=', 'ET', '.', 'SubElement', '(', 'input', ',', '"vcenter"', ')', 'vcenter', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'vcenter'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L807-L818 |
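A stand-alone sketch of the ElementTree construction used above (tag names copied from the record; the vcenter value is made up):

```python
import xml.etree.ElementTree as ET

get_pg = ET.Element("get_vnetwork_portgroups")
inp = ET.SubElement(get_pg, "input")
vcenter = ET.SubElement(inp, "vcenter")
vcenter.text = "vcenter-1"

print(ET.tostring(get_pg).decode())
# <get_vnetwork_portgroups><input><vcenter>vcenter-1</vcenter></input></get_vnetwork_portgroups>
```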
2,836 | ga4gh/ga4gh-server | ga4gh/server/datamodel/sequence_annotations.py | Gff3DbFeatureSet.populateFromRow | def populateFromRow(self, featureSetRecord):
"""
Populates the instance variables of this FeatureSet from the specified
DB row.
"""
self._dbFilePath = featureSetRecord.dataurl
self.setAttributesJson(featureSetRecord.attributes)
self._db = Gff3DbBackend(self._dbFilePath) | python | def populateFromRow(self, featureSetRecord):
"""
Populates the instance variables of this FeatureSet from the specified
DB row.
"""
self._dbFilePath = featureSetRecord.dataurl
self.setAttributesJson(featureSetRecord.attributes)
self._db = Gff3DbBackend(self._dbFilePath) | ['def', 'populateFromRow', '(', 'self', ',', 'featureSetRecord', ')', ':', 'self', '.', '_dbFilePath', '=', 'featureSetRecord', '.', 'dataurl', 'self', '.', 'setAttributesJson', '(', 'featureSetRecord', '.', 'attributes', ')', 'self', '.', '_db', '=', 'Gff3DbBackend', '(', 'self', '.', '_dbFilePath', ')'] | Populates the instance variables of this FeatureSet from the specified
DB row. | ['Populates', 'the', 'instance', 'variables', 'of', 'this', 'FeatureSet', 'from', 'the', 'specified', 'DB', 'row', '.'] | train | https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/sequence_annotations.py#L342-L349 |
2,837 | Nike-Inc/cerberus-python-client | cerberus/client.py | CerberusClient._get_all_file_versions | def _get_all_file_versions(self, secure_data_path, limit=None):
"""
Convenience function that returns a generator yielding the contents of all versions of
a file and its version info
secure_data_path -- full path to the file in the safety deposit box
limit -- Default(100), limits how many records to be returned from the api at once.
"""
for secret in self._get_all_file_version_ids(secure_data_path, limit):
yield {'secret': self.get_file_data(secure_data_path, version=secret['id']),
'version': secret} | python | def _get_all_file_versions(self, secure_data_path, limit=None):
"""
Convenience function that returns a generator yielding the contents of all versions of
a file and its version info
secure_data_path -- full path to the file in the safety deposit box
limit -- Default(100), limits how many records to be returned from the api at once.
"""
for secret in self._get_all_file_version_ids(secure_data_path, limit):
yield {'secret': self.get_file_data(secure_data_path, version=secret['id']),
'version': secret} | ['def', '_get_all_file_versions', '(', 'self', ',', 'secure_data_path', ',', 'limit', '=', 'None', ')', ':', 'for', 'secret', 'in', 'self', '.', '_get_all_file_version_ids', '(', 'secure_data_path', ',', 'limit', ')', ':', 'yield', '{', "'secret'", ':', 'self', '.', 'get_file_data', '(', 'secure_data_path', ',', 'version', '=', 'secret', '[', "'id'", ']', ')', ',', "'version'", ':', 'secret', '}'] | Convenience function that returns a generator yielding the contents of all versions of
a file and its version info
secure_data_path -- full path to the file in the safety deposit box
limit -- Default(100), limits how many records to be returned from the api at once. | ['Convenience', 'function', 'that', 'returns', 'a', 'generator', 'yielding', 'the', 'contents', 'of', 'all', 'versions', 'of', 'a', 'file', 'and', 'its', 'version', 'info'] | train | https://github.com/Nike-Inc/cerberus-python-client/blob/ef38356822e722fcb6a6ed4a1b38a5b493e753ae/cerberus/client.py#L433-L443 |
2,838 | nimbusproject/dashi | dashi/__init__.py | Dashi.ensure | def ensure(self, connection, func, *args, **kwargs):
"""Perform an operation until success
Repeats in the face of connection errors, pursuant to retry policy.
"""
channel = None
while 1:
try:
if channel is None:
channel = connection.channel()
return func(channel, *args, **kwargs), channel
except (connection.connection_errors, IOError):
self._call_errback()
channel = self.connect(connection) | python | def ensure(self, connection, func, *args, **kwargs):
"""Perform an operation until success
Repeats in the face of connection errors, pursuant to retry policy.
"""
channel = None
while 1:
try:
if channel is None:
channel = connection.channel()
return func(channel, *args, **kwargs), channel
except (connection.connection_errors, IOError):
self._call_errback()
channel = self.connect(connection) | ['def', 'ensure', '(', 'self', ',', 'connection', ',', 'func', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'channel', '=', 'None', 'while', '1', ':', 'try', ':', 'if', 'channel', 'is', 'None', ':', 'channel', '=', 'connection', '.', 'channel', '(', ')', 'return', 'func', '(', 'channel', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ',', 'channel', 'except', '(', 'connection', '.', 'connection_errors', ',', 'IOError', ')', ':', 'self', '.', '_call_errback', '(', ')', 'channel', '=', 'self', '.', 'connect', '(', 'connection', ')'] | Perform an operation until success
Repeats in the face of connection errors, pursuant to retry policy. | ['Perform', 'an', 'operation', 'until', 'success'] | train | https://github.com/nimbusproject/dashi/blob/368b3963ec8abd60aebe0f81915429b45cbf4b5a/dashi/__init__.py#L379-L393 |
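The method above retries indefinitely on connection errors, reopening the channel each time; a bounded, stand-alone sketch of the same retry-until-success shape with a made-up flaky operation:

```python
import random

def flaky():
    if random.random() < 0.5:
        raise IOError("transient failure")
    return "ok"

def ensure(func, attempts=10):
    for _ in range(attempts):
        try:
            return func()
        except IOError:
            continue  # the real code re-establishes the channel here
    raise RuntimeError("gave up after %d attempts" % attempts)

print(ensure(flaky))
```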
2,839 | pymc-devs/pymc | pymc/database/hdf5ea.py | Database._initialize | def _initialize(self, funs_to_tally, length):
"""Create a group named ``chain#`` to store all data for this chain."""
chain = self.nchains
self._chains[chain] = self._h5file.create_group(
'/', 'chain%d' % chain, 'chain #%d' % chain)
for name, fun in six.iteritems(funs_to_tally):
arr = np.asarray(fun())
assert arr.dtype != np.dtype('object')
array = self._h5file.createEArray(
self._chains[chain], name,
tables.Atom.from_dtype(arr.dtype), (0,) + arr.shape,
filters=self.filter)
self._arrays[chain, name] = array
self._traces[name] = Trace(name, getfunc=fun, db=self)
self._traces[name]._initialize(self.chains, length)
self.trace_names.append(list(funs_to_tally.keys())) | python | def _initialize(self, funs_to_tally, length):
"""Create a group named ``chain#`` to store all data for this chain."""
chain = self.nchains
self._chains[chain] = self._h5file.create_group(
'/', 'chain%d' % chain, 'chain #%d' % chain)
for name, fun in six.iteritems(funs_to_tally):
arr = np.asarray(fun())
assert arr.dtype != np.dtype('object')
array = self._h5file.createEArray(
self._chains[chain], name,
tables.Atom.from_dtype(arr.dtype), (0,) + arr.shape,
filters=self.filter)
self._arrays[chain, name] = array
self._traces[name] = Trace(name, getfunc=fun, db=self)
self._traces[name]._initialize(self.chains, length)
self.trace_names.append(list(funs_to_tally.keys())) | ['def', '_initialize', '(', 'self', ',', 'funs_to_tally', ',', 'length', ')', ':', 'chain', '=', 'self', '.', 'nchains', 'self', '.', '_chains', '[', 'chain', ']', '=', 'self', '.', '_h5file', '.', 'create_group', '(', "'/'", ',', "'chain%d'", '%', 'chain', ',', "'chain #%d'", '%', 'chain', ')', 'for', 'name', ',', 'fun', 'in', 'six', '.', 'iteritems', '(', 'funs_to_tally', ')', ':', 'arr', '=', 'np', '.', 'asarray', '(', 'fun', '(', ')', ')', 'assert', 'arr', '.', 'dtype', '!=', 'np', '.', 'dtype', '(', "'object'", ')', 'array', '=', 'self', '.', '_h5file', '.', 'createEArray', '(', 'self', '.', '_chains', '[', 'chain', ']', ',', 'name', ',', 'tables', '.', 'Atom', '.', 'from_dtype', '(', 'arr', '.', 'dtype', ')', ',', '(', '0', ',', ')', '+', 'arr', '.', 'shape', ',', 'filters', '=', 'self', '.', 'filter', ')', 'self', '.', '_arrays', '[', 'chain', ',', 'name', ']', '=', 'array', 'self', '.', '_traces', '[', 'name', ']', '=', 'Trace', '(', 'name', ',', 'getfunc', '=', 'fun', ',', 'db', '=', 'self', ')', 'self', '.', '_traces', '[', 'name', ']', '.', '_initialize', '(', 'self', '.', 'chains', ',', 'length', ')', 'self', '.', 'trace_names', '.', 'append', '(', 'list', '(', 'funs_to_tally', '.', 'keys', '(', ')', ')', ')'] | Create a group named ``chain#`` to store all data for this chain. | ['Create', 'a', 'group', 'named', 'chain#', 'to', 'store', 'all', 'data', 'for', 'this', 'chain', '.'] | train | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/hdf5ea.py#L237-L259 |
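The tally setup above sizes a PyTables EArray from a sample value's dtype and shape so the first axis can grow as samples arrive; a stand-alone sketch using the modern snake_case PyTables API (file name and sample data are made up):

```python
import numpy as np
import tables

sample = np.zeros((3,), dtype=np.float64)

h5file = tables.open_file("trace.h5", mode="w")
group = h5file.create_group("/", "chain0", "chain #0")
arr = h5file.create_earray(group, "x",
                           tables.Atom.from_dtype(sample.dtype),
                           (0,) + sample.shape)  # first axis grows as we tally
arr.append(sample[np.newaxis, ...])  # tally one sample
h5file.close()
```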
2,840 | rosenbrockc/fortpy | fortpy/elements.py | Function.returns | def returns(self):
"""Gets a string showing the return type and modifiers for the
function in a nice display format."""
kind = "({}) ".format(self.kind) if self.kind is not None else ""
mods = ", ".join(self.modifiers) + " "
dtype = self.dtype if self.dtype is not None else ""
return "{}{}{}".format(dtype, kind, mods) | python | def returns(self):
"""Gets a string showing the return type and modifiers for the
function in a nice display format."""
kind = "({}) ".format(self.kind) if self.kind is not None else ""
mods = ", ".join(self.modifiers) + " "
dtype = self.dtype if self.dtype is not None else ""
return "{}{}{}".format(dtype, kind, mods) | ['def', 'returns', '(', 'self', ')', ':', 'kind', '=', '"({}) "', '.', 'format', '(', 'self', '.', 'kind', ')', 'if', 'self', '.', 'kind', 'is', 'not', 'None', 'else', '""', 'mods', '=', '", "', '.', 'join', '(', 'self', '.', 'modifiers', ')', '+', '" "', 'dtype', '=', 'self', '.', 'dtype', 'if', 'self', '.', 'dtype', 'is', 'not', 'None', 'else', '""', 'return', '"{}{}{}"', '.', 'format', '(', 'dtype', ',', 'kind', ',', 'mods', ')'] | Gets a string showing the return type and modifiers for the
function in a nice display format. | ['Gets', 'a', 'string', 'showing', 'the', 'return', 'type', 'and', 'modifiers', 'for', 'the', 'function', 'in', 'a', 'nice', 'display', 'format', '.'] | train | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/elements.py#L993-L999 |
2,841 | nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffKeypadLed.off | def off(self, group):
"""Turn the LED off for a group."""
asyncio.ensure_future(self._send_led_on_off_request(group, 0),
loop=self._loop) | python | def off(self, group):
"""Turn the LED off for a group."""
asyncio.ensure_future(self._send_led_on_off_request(group, 0),
loop=self._loop) | ['def', 'off', '(', 'self', ',', 'group', ')', ':', 'asyncio', '.', 'ensure_future', '(', 'self', '.', '_send_led_on_off_request', '(', 'group', ',', '0', ')', ',', 'loop', '=', 'self', '.', '_loop', ')'] | Turn the LED off for a group. | ['Turn', 'the', 'LED', 'off', 'for', 'a', 'group', '.'] | train | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L857-L860 |
2,842 | moonso/vcftoolbox | vcftoolbox/cli.py | sort | def sort(ctx):
"""Sort the variants of a vcf file"""
head = ctx.parent.head
vcf_handle = ctx.parent.handle
outfile = ctx.parent.outfile
silent = ctx.parent.silent
print_headers(head, outfile=outfile, silent=silent)
for line in sort_variants(vcf_handle):
print_variant(variant_line=line, outfile=outfile, silent=silent) | python | def sort(ctx):
"""Sort the variants of a vcf file"""
head = ctx.parent.head
vcf_handle = ctx.parent.handle
outfile = ctx.parent.outfile
silent = ctx.parent.silent
print_headers(head, outfile=outfile, silent=silent)
for line in sort_variants(vcf_handle):
print_variant(variant_line=line, outfile=outfile, silent=silent) | ['def', 'sort', '(', 'ctx', ')', ':', 'head', '=', 'ctx', '.', 'parent', '.', 'head', 'vcf_handle', '=', 'ctx', '.', 'parent', '.', 'handle', 'outfile', '=', 'ctx', '.', 'parent', '.', 'outfile', 'silent', '=', 'ctx', '.', 'parent', '.', 'silent', 'print_headers', '(', 'head', ',', 'outfile', '=', 'outfile', ',', 'silent', '=', 'silent', ')', 'for', 'line', 'in', 'sort_variants', '(', 'vcf_handle', ')', ':', 'print_variant', '(', 'variant_line', '=', 'line', ',', 'outfile', '=', 'outfile', ',', 'silent', '=', 'silent', ')'] | Sort the variants of a vcf file | ['Sort', 'the', 'variants', 'of', 'a', 'vcf', 'file'] | train | https://github.com/moonso/vcftoolbox/blob/438fb1d85a83812c389774b94802eb5921c89e3a/vcftoolbox/cli.py#L127-L137 |
2,843 | edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/export.py | PostData.get_POST_data | def get_POST_data(self):
"""
Returns:
dict: POST data, which can be sent to webform using \
:py:mod:`urllib` or similar library
"""
self._postprocess()
# some fields need to be remapped (depends on type of media)
self._apply_mapping(
self.mapping.get(self._POST["P0502010__b"], self.mapping["else"])
)
self._check_required_fields()
return self._POST | python | def get_POST_data(self):
"""
Returns:
dict: POST data, which can be sent to webform using \
:py:mod:`urllib` or similar library
"""
self._postprocess()
# some fields need to be remapped (depends on type of media)
self._apply_mapping(
self.mapping.get(self._POST["P0502010__b"], self.mapping["else"])
)
self._check_required_fields()
return self._POST | ['def', 'get_POST_data', '(', 'self', ')', ':', 'self', '.', '_postprocess', '(', ')', '# some fields need to be remapped (depends on type of media)', 'self', '.', '_apply_mapping', '(', 'self', '.', 'mapping', '.', 'get', '(', 'self', '.', '_POST', '[', '"P0502010__b"', ']', ',', 'self', '.', 'mapping', '[', '"else"', ']', ')', ')', 'self', '.', '_check_required_fields', '(', ')', 'return', 'self', '.', '_POST'] | Returns:
dict: POST data, which can be sent to webform using \
:py:mod:`urllib` or similar library | ['Returns', ':', 'dict', ':', 'POST', 'data', 'which', 'can', 'be', 'sent', 'to', 'webform', 'using', '\\', ':', 'py', ':', 'mod', ':', 'urllib', 'or', 'similar', 'library'] | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/export.py#L303-L318 |
2,844 | wonambi-python/wonambi | wonambi/ioeeg/edf.py | Edf._read_record | def _read_record(self, f, blk, chans):
"""Read raw data from a single EDF channel.
Parameters
----------
i_chan : int
index of the channel to read
begsam : int
index of the first sample
endsam : int
index of the last sample
Returns
-------
numpy.ndarray
A vector with the data as written on file, in 16-bit precision
"""
dat_in_rec = empty((len(chans), self.max_smp))
i_ch_in_dat = 0
for i_ch in chans:
offset, n_smp_per_chan = self._offset(blk, i_ch)
f.seek(offset)
x = fromfile(f, count=n_smp_per_chan, dtype=EDF_FORMAT)
ratio = int(self.max_smp / n_smp_per_chan)
dat_in_rec[i_ch_in_dat, :] = repeat(x, ratio)
i_ch_in_dat += 1
return dat_in_rec | python | def _read_record(self, f, blk, chans):
"""Read raw data from a single EDF channel.
Parameters
----------
i_chan : int
index of the channel to read
begsam : int
index of the first sample
endsam : int
index of the last sample
Returns
-------
numpy.ndarray
A vector with the data as written on file, in 16-bit precision
"""
dat_in_rec = empty((len(chans), self.max_smp))
i_ch_in_dat = 0
for i_ch in chans:
offset, n_smp_per_chan = self._offset(blk, i_ch)
f.seek(offset)
x = fromfile(f, count=n_smp_per_chan, dtype=EDF_FORMAT)
ratio = int(self.max_smp / n_smp_per_chan)
dat_in_rec[i_ch_in_dat, :] = repeat(x, ratio)
i_ch_in_dat += 1
return dat_in_rec | ['def', '_read_record', '(', 'self', ',', 'f', ',', 'blk', ',', 'chans', ')', ':', 'dat_in_rec', '=', 'empty', '(', '(', 'len', '(', 'chans', ')', ',', 'self', '.', 'max_smp', ')', ')', 'i_ch_in_dat', '=', '0', 'for', 'i_ch', 'in', 'chans', ':', 'offset', ',', 'n_smp_per_chan', '=', 'self', '.', '_offset', '(', 'blk', ',', 'i_ch', ')', 'f', '.', 'seek', '(', 'offset', ')', 'x', '=', 'fromfile', '(', 'f', ',', 'count', '=', 'n_smp_per_chan', ',', 'dtype', '=', 'EDF_FORMAT', ')', 'ratio', '=', 'int', '(', 'self', '.', 'max_smp', '/', 'n_smp_per_chan', ')', 'dat_in_rec', '[', 'i_ch_in_dat', ',', ':', ']', '=', 'repeat', '(', 'x', ',', 'ratio', ')', 'i_ch_in_dat', '+=', '1', 'return', 'dat_in_rec'] | Read raw data from a single EDF channel.
Parameters
----------
i_chan : int
index of the channel to read
begsam : int
index of the first sample
endsam : int
index of the last sample
Returns
-------
numpy.ndarray
A vector with the data as written on file, in 16-bit precision | ['Read', 'raw', 'data', 'from', 'a', 'single', 'EDF', 'channel', '.'] | train | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/edf.py#L214-L244 |
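A minimal self-contained sketch of the upsampling step used in the record above, where each lower-rate channel is repeated up to the record's maximum number of samples with numpy's repeat; the channel data here is made up rather than read from an EDF file:

import numpy as np

max_smp = 8
chan_a = np.arange(8, dtype=float)   # channel already at the maximum rate
chan_b = np.array([0.0, 1.0])        # lower-rate channel: 2 samples per record

dat_in_rec = np.empty((2, max_smp))
for i, x in enumerate((chan_a, chan_b)):
    ratio = max_smp // len(x)                # integer upsampling factor
    dat_in_rec[i, :] = np.repeat(x, ratio)   # hold each sample ratio times

print(dat_in_rec[1])   # [0. 0. 0. 0. 1. 1. 1. 1.]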
2,845 | juju/python-libjuju | juju/machine.py | Machine.agent_version | def agent_version(self):
"""Get the version of the Juju machine agent.
May return None if the agent is not yet available.
"""
version = self.safe_data['agent-status']['version']
if version:
return client.Number.from_json(version)
else:
return None | python | def agent_version(self):
"""Get the version of the Juju machine agent.
May return None if the agent is not yet available.
"""
version = self.safe_data['agent-status']['version']
if version:
return client.Number.from_json(version)
else:
return None | ['def', 'agent_version', '(', 'self', ')', ':', 'version', '=', 'self', '.', 'safe_data', '[', "'agent-status'", ']', '[', "'version'", ']', 'if', 'version', ':', 'return', 'client', '.', 'Number', '.', 'from_json', '(', 'version', ')', 'else', ':', 'return', 'None'] | Get the version of the Juju machine agent.
May return None if the agent is not yet available. | ['Get', 'the', 'version', 'of', 'the', 'Juju', 'machine', 'agent', '.'] | train | https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/machine.py#L231-L240 |
2,846 | AtteqCom/zsl | src/zsl/application/containers/container.py | IoCContainer.modules | def modules(cls):
"""Collect all the public class attributes.
All class attributes should be DI modules; this method collects them
and returns as a list.
:return: list of DI modules
:rtype: list[Union[Module, Callable]]
"""
members = inspect.getmembers(cls, lambda a: not (inspect.isroutine(a) and a.__name__ == 'modules'))
modules = [module for name, module in members if not name.startswith('_')]
return modules | python | def modules(cls):
"""Collect all the public class attributes.
All class attributes should be DI modules; this method collects them
and returns as a list.
:return: list of DI modules
:rtype: list[Union[Module, Callable]]
"""
members = inspect.getmembers(cls, lambda a: not (inspect.isroutine(a) and a.__name__ == 'modules'))
modules = [module for name, module in members if not name.startswith('_')]
return modules | ['def', 'modules', '(', 'cls', ')', ':', 'members', '=', 'inspect', '.', 'getmembers', '(', 'cls', ',', 'lambda', 'a', ':', 'not', '(', 'inspect', '.', 'isroutine', '(', 'a', ')', 'and', 'a', '.', '__name__', '==', "'modules'", ')', ')', 'modules', '=', '[', 'module', 'for', 'name', ',', 'module', 'in', 'members', 'if', 'not', 'name', '.', 'startswith', '(', "'_'", ')', ']', 'return', 'modules'] | Collect all the public class attributes.
All class attributes should be DI modules; this method collects them
and returns as a list.
:return: list of DI modules
:rtype: list[Union[Module, Callable]] | ['Collect', 'all', 'the', 'public', 'class', 'attributes', '.'] | train | https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/application/containers/container.py#L24-L35 |
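The same collect-the-public-class-attributes idea can be exercised on its own with inspect.getmembers; the container and attribute names below are invented for illustration and are not part of zsl:

import inspect

class Container:
    database = 'database-module'
    logging = 'logging-module'
    _secret = 'skipped'

    @classmethod
    def modules(cls):
        # keep non-routine attributes, then drop private ones
        members = inspect.getmembers(cls, lambda a: not inspect.isroutine(a))
        return [value for name, value in members if not name.startswith('_')]

print(Container.modules())   # ['database-module', 'logging-module']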
2,847 | ambitioninc/django-query-builder | querybuilder/query.py | Query.explain | def explain(self, sql=None, sql_args=None):
"""
Runs EXPLAIN on this query
:type sql: str or None
:param sql: The sql to run EXPLAIN on. If None is specified, the query will
use ``self.get_sql()``
:type sql_args: dict or None
:param sql_args: A dictionary of the arguments to be escaped in the query. If None and
sql is None, the query will use ``self.get_args()``
:rtype: list of str
:return: list of each line of output from the EXPLAIN statement
"""
cursor = self.get_cursor()
if sql is None:
sql = self.get_sql()
sql_args = self.get_args()
elif sql_args is None:
sql_args = {}
cursor.execute('EXPLAIN {0}'.format(sql), sql_args)
rows = self._fetch_all_as_dict(cursor)
return rows | python | def explain(self, sql=None, sql_args=None):
"""
Runs EXPLAIN on this query
:type sql: str or None
:param sql: The sql to run EXPLAIN on. If None is specified, the query will
use ``self.get_sql()``
:type sql_args: dict or None
:param sql_args: A dictionary of the arguments to be escaped in the query. If None and
sql is None, the query will use ``self.get_args()``
:rtype: list of str
:return: list of each line of output from the EXPLAIN statement
"""
cursor = self.get_cursor()
if sql is None:
sql = self.get_sql()
sql_args = self.get_args()
elif sql_args is None:
sql_args = {}
cursor.execute('EXPLAIN {0}'.format(sql), sql_args)
rows = self._fetch_all_as_dict(cursor)
return rows | ['def', 'explain', '(', 'self', ',', 'sql', '=', 'None', ',', 'sql_args', '=', 'None', ')', ':', 'cursor', '=', 'self', '.', 'get_cursor', '(', ')', 'if', 'sql', 'is', 'None', ':', 'sql', '=', 'self', '.', 'get_sql', '(', ')', 'sql_args', '=', 'self', '.', 'get_args', '(', ')', 'elif', 'sql_args', 'is', 'None', ':', 'sql_args', '=', '{', '}', 'cursor', '.', 'execute', '(', "'EXPLAIN {0}'", '.', 'format', '(', 'sql', ')', ',', 'sql_args', ')', 'rows', '=', 'self', '.', '_fetch_all_as_dict', '(', 'cursor', ')', 'return', 'rows'] | Runs EXPLAIN on this query
:type sql: str or None
:param sql: The sql to run EXPLAIN on. If None is specified, the query will
use ``self.get_sql()``
:type sql_args: dict or None
:param sql_args: A dictionary of the arguments to be escaped in the query. If None and
sql is None, the query will use ``self.get_args()``
:rtype: list of str
:return: list of each line of output from the EXPLAIN statement | ['Runs', 'EXPLAIN', 'on', 'this', 'query'] | train | https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/query.py#L1557-L1581 |
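The EXPLAIN wrapper above boils down to prefixing the generated SQL and fetching the planner's rows. A runnable sketch of that pattern with the standard library's sqlite3; note that sqlite expects EXPLAIN QUERY PLAN rather than the PostgreSQL-style EXPLAIN the method targets, and the table is invented:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE metrics (id INTEGER PRIMARY KEY, value REAL)')

def explain(conn, sql, sql_args=()):
    cursor = conn.execute('EXPLAIN QUERY PLAN ' + sql, sql_args)
    return cursor.fetchall()

for row in explain(conn, 'SELECT * FROM metrics WHERE id = ?', (1,)):
    print(row)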
2,848 | dropbox/stone | stone/backends/obj_c_types.py | ObjCTypesBackend._generate_serializer_signatures | def _generate_serializer_signatures(self, obj_name):
"""Emits the signatures of the serializer object's serializing functions."""
serial_signature = fmt_signature(
func='serialize',
args=fmt_func_args_declaration([(
'instance', '{} *'.format(obj_name))]),
return_type='nullable NSDictionary<NSString *, id> *',
class_func=True)
deserial_signature = fmt_signature(
func='deserialize',
args=fmt_func_args_declaration([('dict',
'NSDictionary<NSString *, id> *')]),
return_type='{} *'.format(obj_name),
class_func=True)
self.emit(comment_prefix)
self.emit_wrapped_text(
'Serializes `{}` instances.'.format(obj_name),
prefix=comment_prefix)
self.emit(comment_prefix)
self.emit_wrapped_text(
'@param instance An instance of the `{}` API object.'.format(
obj_name),
prefix=comment_prefix)
self.emit(comment_prefix)
description_str = ('@return A json-compatible dictionary '
'representation of the `{}` API object.')
self.emit_wrapped_text(
description_str.format(obj_name), prefix=comment_prefix)
self.emit(comment_prefix)
self.emit('{};'.format(serial_signature))
self.emit()
self.emit(comment_prefix)
self.emit_wrapped_text(
'Deserializes `{}` instances.'.format(obj_name),
prefix=comment_prefix)
self.emit(comment_prefix)
description_str = ('@param dict A json-compatible dictionary '
'representation of the `{}` API object.')
self.emit_wrapped_text(
description_str.format(obj_name), prefix=comment_prefix)
self.emit(comment_prefix)
self.emit_wrapped_text(
'@return An instantiation of the `{}` object.'.format(obj_name),
prefix=comment_prefix)
self.emit(comment_prefix)
self.emit('{};'.format(deserial_signature))
self.emit() | python | def _generate_serializer_signatures(self, obj_name):
"""Emits the signatures of the serializer object's serializing functions."""
serial_signature = fmt_signature(
func='serialize',
args=fmt_func_args_declaration([(
'instance', '{} *'.format(obj_name))]),
return_type='nullable NSDictionary<NSString *, id> *',
class_func=True)
deserial_signature = fmt_signature(
func='deserialize',
args=fmt_func_args_declaration([('dict',
'NSDictionary<NSString *, id> *')]),
return_type='{} *'.format(obj_name),
class_func=True)
self.emit(comment_prefix)
self.emit_wrapped_text(
'Serializes `{}` instances.'.format(obj_name),
prefix=comment_prefix)
self.emit(comment_prefix)
self.emit_wrapped_text(
'@param instance An instance of the `{}` API object.'.format(
obj_name),
prefix=comment_prefix)
self.emit(comment_prefix)
description_str = ('@return A json-compatible dictionary '
'representation of the `{}` API object.')
self.emit_wrapped_text(
description_str.format(obj_name), prefix=comment_prefix)
self.emit(comment_prefix)
self.emit('{};'.format(serial_signature))
self.emit()
self.emit(comment_prefix)
self.emit_wrapped_text(
'Deserializes `{}` instances.'.format(obj_name),
prefix=comment_prefix)
self.emit(comment_prefix)
description_str = ('@param dict A json-compatible dictionary '
'representation of the `{}` API object.')
self.emit_wrapped_text(
description_str.format(obj_name), prefix=comment_prefix)
self.emit(comment_prefix)
self.emit_wrapped_text(
'@return An instantiation of the `{}` object.'.format(obj_name),
prefix=comment_prefix)
self.emit(comment_prefix)
self.emit('{};'.format(deserial_signature))
self.emit() | ['def', '_generate_serializer_signatures', '(', 'self', ',', 'obj_name', ')', ':', 'serial_signature', '=', 'fmt_signature', '(', 'func', '=', "'serialize'", ',', 'args', '=', 'fmt_func_args_declaration', '(', '[', '(', "'instance'", ',', "'{} *'", '.', 'format', '(', 'obj_name', ')', ')', ']', ')', ',', 'return_type', '=', "'nullable NSDictionary<NSString *, id> *'", ',', 'class_func', '=', 'True', ')', 'deserial_signature', '=', 'fmt_signature', '(', 'func', '=', "'deserialize'", ',', 'args', '=', 'fmt_func_args_declaration', '(', '[', '(', "'dict'", ',', "'NSDictionary<NSString *, id> *'", ')', ']', ')', ',', 'return_type', '=', "'{} *'", '.', 'format', '(', 'obj_name', ')', ',', 'class_func', '=', 'True', ')', 'self', '.', 'emit', '(', 'comment_prefix', ')', 'self', '.', 'emit_wrapped_text', '(', "'Serializes `{}` instances.'", '.', 'format', '(', 'obj_name', ')', ',', 'prefix', '=', 'comment_prefix', ')', 'self', '.', 'emit', '(', 'comment_prefix', ')', 'self', '.', 'emit_wrapped_text', '(', "'@param instance An instance of the `{}` API object.'", '.', 'format', '(', 'obj_name', ')', ',', 'prefix', '=', 'comment_prefix', ')', 'self', '.', 'emit', '(', 'comment_prefix', ')', 'description_str', '=', '(', "'@return A json-compatible dictionary '", "'representation of the `{}` API object.'", ')', 'self', '.', 'emit_wrapped_text', '(', 'description_str', '.', 'format', '(', 'obj_name', ')', ',', 'prefix', '=', 'comment_prefix', ')', 'self', '.', 'emit', '(', 'comment_prefix', ')', 'self', '.', 'emit', '(', "'{};'", '.', 'format', '(', 'serial_signature', ')', ')', 'self', '.', 'emit', '(', ')', 'self', '.', 'emit', '(', 'comment_prefix', ')', 'self', '.', 'emit_wrapped_text', '(', "'Deserializes `{}` instances.'", '.', 'format', '(', 'obj_name', ')', ',', 'prefix', '=', 'comment_prefix', ')', 'self', '.', 'emit', '(', 'comment_prefix', ')', 'description_str', '=', '(', "'@param dict A json-compatible dictionary '", "'representation of the `{}` API object.'", ')', 'self', '.', 'emit_wrapped_text', '(', 'description_str', '.', 'format', '(', 'obj_name', ')', ',', 'prefix', '=', 'comment_prefix', ')', 'self', '.', 'emit', '(', 'comment_prefix', ')', 'self', '.', 'emit_wrapped_text', '(', "'@return An instantiation of the `{}` object.'", '.', 'format', '(', 'obj_name', ')', ',', 'prefix', '=', 'comment_prefix', ')', 'self', '.', 'emit', '(', 'comment_prefix', ')', 'self', '.', 'emit', '(', "'{};'", '.', 'format', '(', 'deserial_signature', ')', ')', 'self', '.', 'emit', '(', ')'] | Emits the signatures of the serializer object's serializing functions. | ['Emits', 'the', 'signatures', 'of', 'the', 'serializer', 'object', 's', 'serializing', 'functions', '.'] | train | https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/obj_c_types.py#L687-L733 |
2,849 | spotify/luigi | luigi/contrib/kubernetes.py | KubernetesJobTask.__get_job_status | def __get_job_status(self):
"""Return the Kubernetes job status"""
# Figure out status and return it
job = self.__get_job()
if "succeeded" in job.obj["status"] and job.obj["status"]["succeeded"] > 0:
job.scale(replicas=0)
if self.print_pod_logs_on_exit:
self.__print_pod_logs()
if self.delete_on_success:
self.__delete_job_cascade(job)
return "SUCCEEDED"
if "failed" in job.obj["status"]:
failed_cnt = job.obj["status"]["failed"]
self.__logger.debug("Kubernetes job " + self.uu_name
+ " status.failed: " + str(failed_cnt))
if self.print_pod_logs_on_exit:
self.__print_pod_logs()
if failed_cnt > self.max_retrials:
job.scale(replicas=0) # avoid more retrials
return "FAILED"
return "RUNNING" | python | def __get_job_status(self):
"""Return the Kubernetes job status"""
# Figure out status and return it
job = self.__get_job()
if "succeeded" in job.obj["status"] and job.obj["status"]["succeeded"] > 0:
job.scale(replicas=0)
if self.print_pod_logs_on_exit:
self.__print_pod_logs()
if self.delete_on_success:
self.__delete_job_cascade(job)
return "SUCCEEDED"
if "failed" in job.obj["status"]:
failed_cnt = job.obj["status"]["failed"]
self.__logger.debug("Kubernetes job " + self.uu_name
+ " status.failed: " + str(failed_cnt))
if self.print_pod_logs_on_exit:
self.__print_pod_logs()
if failed_cnt > self.max_retrials:
job.scale(replicas=0) # avoid more retrials
return "FAILED"
return "RUNNING" | ['def', '__get_job_status', '(', 'self', ')', ':', '# Figure out status and return it', 'job', '=', 'self', '.', '__get_job', '(', ')', 'if', '"succeeded"', 'in', 'job', '.', 'obj', '[', '"status"', ']', 'and', 'job', '.', 'obj', '[', '"status"', ']', '[', '"succeeded"', ']', '>', '0', ':', 'job', '.', 'scale', '(', 'replicas', '=', '0', ')', 'if', 'self', '.', 'print_pod_logs_on_exit', ':', 'self', '.', '__print_pod_logs', '(', ')', 'if', 'self', '.', 'delete_on_success', ':', 'self', '.', '__delete_job_cascade', '(', 'job', ')', 'return', '"SUCCEEDED"', 'if', '"failed"', 'in', 'job', '.', 'obj', '[', '"status"', ']', ':', 'failed_cnt', '=', 'job', '.', 'obj', '[', '"status"', ']', '[', '"failed"', ']', 'self', '.', '__logger', '.', 'debug', '(', '"Kubernetes job "', '+', 'self', '.', 'uu_name', '+', '" status.failed: "', '+', 'str', '(', 'failed_cnt', ')', ')', 'if', 'self', '.', 'print_pod_logs_on_exit', ':', 'self', '.', '__print_pod_logs', '(', ')', 'if', 'failed_cnt', '>', 'self', '.', 'max_retrials', ':', 'job', '.', 'scale', '(', 'replicas', '=', '0', ')', '# avoid more retrials', 'return', '"FAILED"', 'return', '"RUNNING"'] | Return the Kubernetes job status | ['Return', 'the', 'Kubernetes', 'job', 'status'] | train | https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/kubernetes.py#L298-L320 |
2,850 | SheffieldML/GPy | GPy/util/warping_functions.py | TanhFunction.fgrad_y | def fgrad_y(self, y, return_precalc=False):
"""
gradient of f w.r.t to y ([N x 1])
:returns: Nx1 vector of derivatives, unless return_precalc is true,
then it also returns the precomputed stuff
"""
d = self.d
mpsi = self.psi
# vectorized version
S = (mpsi[:,1] * (y[:,:,None] + mpsi[:,2])).T
R = np.tanh(S)
D = 1 - (R ** 2)
GRAD = (d + (mpsi[:,0:1][:,:,None] * mpsi[:,1:2][:,:,None] * D).sum(axis=0)).T
if return_precalc:
return GRAD, S, R, D
return GRAD | python | def fgrad_y(self, y, return_precalc=False):
"""
gradient of f w.r.t to y ([N x 1])
:returns: Nx1 vector of derivatives, unless return_precalc is true,
then it also returns the precomputed stuff
"""
d = self.d
mpsi = self.psi
# vectorized version
S = (mpsi[:,1] * (y[:,:,None] + mpsi[:,2])).T
R = np.tanh(S)
D = 1 - (R ** 2)
GRAD = (d + (mpsi[:,0:1][:,:,None] * mpsi[:,1:2][:,:,None] * D).sum(axis=0)).T
if return_precalc:
return GRAD, S, R, D
return GRAD | ['def', 'fgrad_y', '(', 'self', ',', 'y', ',', 'return_precalc', '=', 'False', ')', ':', 'd', '=', 'self', '.', 'd', 'mpsi', '=', 'self', '.', 'psi', '# vectorized version', 'S', '=', '(', 'mpsi', '[', ':', ',', '1', ']', '*', '(', 'y', '[', ':', ',', ':', ',', 'None', ']', '+', 'mpsi', '[', ':', ',', '2', ']', ')', ')', '.', 'T', 'R', '=', 'np', '.', 'tanh', '(', 'S', ')', 'D', '=', '1', '-', '(', 'R', '**', '2', ')', 'GRAD', '=', '(', 'd', '+', '(', 'mpsi', '[', ':', ',', '0', ':', '1', ']', '[', ':', ',', ':', ',', 'None', ']', '*', 'mpsi', '[', ':', ',', '1', ':', '2', ']', '[', ':', ',', ':', ',', 'None', ']', '*', 'D', ')', '.', 'sum', '(', 'axis', '=', '0', ')', ')', '.', 'T', 'if', 'return_precalc', ':', 'return', 'GRAD', ',', 'S', ',', 'R', ',', 'D', 'return', 'GRAD'] | gradient of f w.r.t to y ([N x 1])
:returns: Nx1 vector of derivatives, unless return_precalc is true,
then it also returns the precomputed stuff | ['gradient', 'of', 'f', 'w', '.', 'r', '.', 't', 'to', 'y', '(', '[', 'N', 'x', '1', ']', ')'] | train | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/warping_functions.py#L108-L128 |
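The returned gradient matches a warping function of the form f(y) = d*y + sum_i a_i * tanh(b_i * (y + c_i)), whose derivative is d + sum_i a_i * b_i * (1 - tanh(b_i * (y + c_i))**2); this form is inferred from the expression above, not from GPy's documentation. A small numpy check of that identity against central finite differences, with arbitrary parameter values and without the broadcasting of the original vectorised code:

import numpy as np

d = 0.5
psi = np.array([[1.0, 2.0, 0.1],    # rows of (a, b, c)
                [0.3, 0.7, -0.4]])

def f(y):
    return d * y + sum(a * np.tanh(b * (y + c)) for a, b, c in psi)

def fgrad_y(y):
    return d + sum(a * b * (1 - np.tanh(b * (y + c)) ** 2) for a, b, c in psi)

y = np.linspace(-2.0, 2.0, 5)
eps = 1e-6
numeric = (f(y + eps) - f(y - eps)) / (2 * eps)
print(np.allclose(fgrad_y(y), numeric))   # True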
2,851 | fhs/pyhdf | pyhdf/VS.py | VS.find | def find(self, vName):
"""Get the reference number of a vdata given its name.
The vdata can then be opened (attached) by passing this
reference number to the attach() method.
Args::
vName Name of the vdata for which the reference number
is needed. vdatas names are not guaranteed to be
unique. When more than one vdata bear the same name,
find() will return the refnum of the first one found.
Returns::
vdata reference number. 0 is returned if the vdata does not exist.
C library equivalent : VSfind
"""
refNum = _C.VSfind(self._hdf_inst._id, vName)
_checkErr("find", refNum, "cannot find vdata %s" % vName)
return refNum | python | def find(self, vName):
"""Get the reference number of a vdata given its name.
The vdata can then be opened (attached) by passing this
reference number to the attach() method.
Args::
vName Name of the vdata for which the reference number
is needed. vdatas names are not guaranteed to be
unique. When more than one vdata bear the same name,
find() will return the refnum of the first one found.
Returns::
vdata reference number. 0 is returned if the vdata does not exist.
C library equivalent : VSfind
"""
refNum = _C.VSfind(self._hdf_inst._id, vName)
_checkErr("find", refNum, "cannot find vdata %s" % vName)
return refNum | ['def', 'find', '(', 'self', ',', 'vName', ')', ':', 'refNum', '=', '_C', '.', 'VSfind', '(', 'self', '.', '_hdf_inst', '.', '_id', ',', 'vName', ')', '_checkErr', '(', '"find"', ',', 'refNum', ',', '"cannot find vdata %s"', '%', 'vName', ')', 'return', 'refNum'] | Get the reference number of a vdata given its name.
The vdata can then be opened (attached) by passing this
reference number to the attach() method.
Args::
vName Name of the vdata for which the reference number
is needed. vdatas names are not guaranteed to be
unique. When more than one vdata bear the same name,
find() will return the refnum of the first one found.
Returns::
vdata reference number. 0 is returned if the vdata does not exist.
C library equivalent : VSfind | ['Get', 'the', 'reference', 'number', 'of', 'a', 'vdata', 'given', 'its', 'name', '.', 'The', 'vdata', 'can', 'then', 'be', 'opened', '(', 'attached', ')', 'by', 'passing', 'this', 'reference', 'number', 'to', 'the', 'attach', '()', 'method', '.'] | train | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L961-L982 |
2,852 | flo-compbio/genometools | genometools/expression/matrix.py | ExpMatrix.sample_correlations | def sample_correlations(self):
"""Returns an `ExpMatrix` containing all pairwise sample correlations.
Returns
-------
`ExpMatrix`
The sample correlation matrix.
"""
C = np.corrcoef(self.X.T)
corr_matrix = ExpMatrix(genes=self.samples, samples=self.samples, X=C)
return corr_matrix | python | def sample_correlations(self):
"""Returns an `ExpMatrix` containing all pairwise sample correlations.
Returns
-------
`ExpMatrix`
The sample correlation matrix.
"""
C = np.corrcoef(self.X.T)
corr_matrix = ExpMatrix(genes=self.samples, samples=self.samples, X=C)
return corr_matrix | ['def', 'sample_correlations', '(', 'self', ')', ':', 'C', '=', 'np', '.', 'corrcoef', '(', 'self', '.', 'X', '.', 'T', ')', 'corr_matrix', '=', 'ExpMatrix', '(', 'genes', '=', 'self', '.', 'samples', ',', 'samples', '=', 'self', '.', 'samples', ',', 'X', '=', 'C', ')', 'return', 'corr_matrix'] | Returns an `ExpMatrix` containing all pairwise sample correlations.
Returns
-------
`ExpMatrix`
The sample correlation matrix. | ['Returns', 'an', 'ExpMatrix', 'containing', 'all', 'pairwise', 'sample', 'correlations', '.'] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/expression/matrix.py#L411-L422 |
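Because the expression matrix is genes x samples, transposing before np.corrcoef is what turns it into a samples x samples correlation matrix. A self-contained check of that shape and of the unit diagonal, using random stand-in data instead of a real ExpMatrix:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 4))          # 100 genes x 4 samples

C = np.corrcoef(X.T)                   # pairwise sample correlations
print(C.shape)                         # (4, 4)
print(np.allclose(np.diag(C), 1.0))    # True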
2,853 | apache/spark | python/pyspark/sql/types.py | UserDefinedType._cachedSqlType | def _cachedSqlType(cls):
"""
Cache the sqlType() into class, because it's heavy used in `toInternal`.
"""
if not hasattr(cls, "_cached_sql_type"):
cls._cached_sql_type = cls.sqlType()
return cls._cached_sql_type | python | def _cachedSqlType(cls):
"""
Cache the sqlType() into class, because it's heavy used in `toInternal`.
"""
if not hasattr(cls, "_cached_sql_type"):
cls._cached_sql_type = cls.sqlType()
return cls._cached_sql_type | ['def', '_cachedSqlType', '(', 'cls', ')', ':', 'if', 'not', 'hasattr', '(', 'cls', ',', '"_cached_sql_type"', ')', ':', 'cls', '.', '_cached_sql_type', '=', 'cls', '.', 'sqlType', '(', ')', 'return', 'cls', '.', '_cached_sql_type'] | Cache the sqlType() into class, because it's heavy used in `toInternal`. | ['Cache', 'the', 'sqlType', '()', 'into', 'class', 'because', 'it', 's', 'heavy', 'used', 'in', 'toInternal', '.'] | train | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L675-L681 |
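The caching trick in this record is independent of Spark: compute the schema once, stash it on the class, and serve the attribute afterwards. A minimal stand-alone sketch of the same pattern, with a toy sqlType standing in for the real schema construction:

class Point:
    @classmethod
    def sqlType(cls):
        print('building schema (expensive)')
        return {'x': 'double', 'y': 'double'}

    @classmethod
    def _cachedSqlType(cls):
        if not hasattr(cls, '_cached_sql_type'):
            cls._cached_sql_type = cls.sqlType()
        return cls._cached_sql_type

Point._cachedSqlType()   # prints once
Point._cachedSqlType()   # second call is served from the cached attribute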
2,854 | galaxyproject/pulsar | pulsar/web/wsgi.py | app_factory | def app_factory(global_conf, **local_conf):
"""
Returns the Pulsar WSGI application.
"""
configuration_file = global_conf.get("__file__", None)
webapp = init_webapp(ini_path=configuration_file, local_conf=local_conf)
return webapp | python | def app_factory(global_conf, **local_conf):
"""
Returns the Pulsar WSGI application.
"""
configuration_file = global_conf.get("__file__", None)
webapp = init_webapp(ini_path=configuration_file, local_conf=local_conf)
return webapp | ['def', 'app_factory', '(', 'global_conf', ',', '*', '*', 'local_conf', ')', ':', 'configuration_file', '=', 'global_conf', '.', 'get', '(', '"__file__"', ',', 'None', ')', 'webapp', '=', 'init_webapp', '(', 'ini_path', '=', 'configuration_file', ',', 'local_conf', '=', 'local_conf', ')', 'return', 'webapp'] | Returns the Pulsar WSGI application. | ['Returns', 'the', 'Pulsar', 'WSGI', 'application', '.'] | train | https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/web/wsgi.py#L14-L20 |
2,855 | kpdyer/libfte | fte/encrypter.py | Encrypter.getCiphertextLen | def getCiphertextLen(self, ciphertext):
"""Given a ``ciphertext`` with a valid header, returns the length of the ciphertext inclusive of ciphertext expansion.
"""
plaintext_length = self.getPlaintextLen(ciphertext)
ciphertext_length = plaintext_length + Encrypter._CTXT_EXPANSION
return ciphertext_length | python | def getCiphertextLen(self, ciphertext):
"""Given a ``ciphertext`` with a valid header, returns the length of the ciphertext inclusive of ciphertext expansion.
"""
plaintext_length = self.getPlaintextLen(ciphertext)
ciphertext_length = plaintext_length + Encrypter._CTXT_EXPANSION
return ciphertext_length | ['def', 'getCiphertextLen', '(', 'self', ',', 'ciphertext', ')', ':', 'plaintext_length', '=', 'self', '.', 'getPlaintextLen', '(', 'ciphertext', ')', 'ciphertext_length', '=', 'plaintext_length', '+', 'Encrypter', '.', '_CTXT_EXPANSION', 'return', 'ciphertext_length'] | Given a ``ciphertext`` with a valid header, returns the length of the ciphertext inclusive of ciphertext expansion. | ['Given', 'a', 'ciphertext', 'with', 'a', 'valid', 'header', 'returns', 'the', 'length', 'of', 'the', 'ciphertext', 'inclusive', 'of', 'ciphertext', 'expansion', '.'] | train | https://github.com/kpdyer/libfte/blob/74ed6ad197b6e72d1b9709c4dbc04041e05eb9b7/fte/encrypter.py#L173-L179 |
2,856 | caseyjlaw/rtpipe | rtpipe/nbpipeline.py | state.save | def save(self, obj, label, format='text'):
""" Save or update obj as pkl file with name label
format can be 'text' or 'pickle'.
"""
# initialize hidden state directory
objloc = '{0}/{1}'.format(self.statedir, label)
with open(objloc, 'w') as fp:
if format == 'pickle':
pickle.dump(obj, fp)
elif format == 'text':
fp.write(str(obj))
else:
print('Format {0} not recognized. Please choose either pickle or text.'.format(format))
print('Saving {0} to label {1}'.format(obj, label)) | python | def save(self, obj, label, format='text'):
""" Save or update obj as pkl file with name label
format can be 'text' or 'pickle'.
"""
# initialize hidden state directory
objloc = '{0}/{1}'.format(self.statedir, label)
with open(objloc, 'w') as fp:
if format == 'pickle':
pickle.dump(obj, fp)
elif format == 'text':
fp.write(str(obj))
else:
print('Format {0} not recognized. Please choose either pickle or text.'.format(format))
print('Saving {0} to label {1}'.format(obj, label)) | ['def', 'save', '(', 'self', ',', 'obj', ',', 'label', ',', 'format', '=', "'text'", ')', ':', '# initialize hidden state directory', 'objloc', '=', "'{0}/{1}'", '.', 'format', '(', 'self', '.', 'statedir', ',', 'label', ')', 'with', 'open', '(', 'objloc', ',', "'w'", ')', 'as', 'fp', ':', 'if', 'format', '==', "'pickle'", ':', 'pickle', '.', 'dump', '(', 'obj', ',', 'fp', ')', 'elif', 'format', '==', "'text'", ':', 'fp', '.', 'write', '(', 'str', '(', 'obj', ')', ')', 'else', ':', 'print', '(', "'Format {0} not recognized. Please choose either pickle or text.'", '.', 'format', '(', 'format', ')', ')', 'print', '(', "'Saving {0} to label {1}'", '.', 'format', '(', 'obj', ',', 'label', ')', ')'] | Save or update obj as pkl file with name label
format can be 'text' or 'pickle'. | ['Save', 'or', 'update', 'obj', 'as', 'pkl', 'file', 'with', 'name', 'label'] | train | https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/nbpipeline.py#L24-L42 |
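A runnable sketch of the same save-by-format logic using only the standard library; the state directory and label are invented, the class attributes become plain arguments, and pickle gets the binary file mode that Python 3 requires (unlike the text mode used above):

import os
import pickle

def save(obj, label, statedir='.state', format='text'):
    if format not in ('text', 'pickle'):
        raise ValueError('Format {0} not recognized.'.format(format))
    os.makedirs(statedir, exist_ok=True)
    objloc = os.path.join(statedir, label)
    if format == 'pickle':
        with open(objloc, 'wb') as fp:
            pickle.dump(obj, fp)
    else:
        with open(objloc, 'w') as fp:
            fp.write(str(obj))

save({'nsegments': 4}, 'scan_state')                        # text file
save({'nsegments': 4}, 'scan_state.pkl', format='pickle')   # pickle file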
2,857 | mk-fg/feedjack | feedjack/models.py | Feed.update_handler | def update_handler(feeds):
'''Update all cross-referencing filters results for feeds and others, related to them.
Intended to be called from non-Feed update hooks (like new Post saving).'''
# Check if this call is a result of actions initiated from
# one of the hooks in a higher frame (resulting in recursion).
if Feed._filters_update_handler_lock: return
return Feed._filters_update_handler(Feed, feeds, force=True) | python | def update_handler(feeds):
'''Update all cross-referencing filters results for feeds and others, related to them.
Intended to be called from non-Feed update hooks (like new Post saving).'''
# Check if this call is a result of actions initiated from
# one of the hooks in a higher frame (resulting in recursion).
if Feed._filters_update_handler_lock: return
return Feed._filters_update_handler(Feed, feeds, force=True) | ['def', 'update_handler', '(', 'feeds', ')', ':', '# Check if this call is a result of actions initiated from', '# one of the hooks in a higher frame (resulting in recursion).', 'if', 'Feed', '.', '_filters_update_handler_lock', ':', 'return', 'return', 'Feed', '.', '_filters_update_handler', '(', 'Feed', ',', 'feeds', ',', 'force', '=', 'True', ')'] | Update all cross-referencing filters results for feeds and others, related to them.
Intended to be called from non-Feed update hooks (like new Post saving). | ['Update', 'all', 'cross', '-', 'referencing', 'filters', 'results', 'for', 'feeds', 'and', 'others', 'related', 'to', 'them', '.', 'Intended', 'to', 'be', 'called', 'from', 'non', '-', 'Feed', 'update', 'hooks', '(', 'like', 'new', 'Post', 'saving', ')', '.'] | train | https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/models.py#L616-L622 |
2,858 | mitsei/dlkit | dlkit/json_/authentication/sessions.py | AgentLookupSession.get_agents_by_genus_type | def get_agents_by_genus_type(self, agent_genus_type):
"""Gets an ``AgentList`` corresponding to the given agent genus ``Type`` which does not include agents of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known agents or
an error results. Otherwise, the returned list may contain only
those agents that are accessible through this session.
arg: agent_genus_type (osid.type.Type): an agent genus type
return: (osid.authentication.AgentList) - the returned ``Agent``
list
raise: NullArgument - ``agent_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('authentication',
collection='Agent',
runtime=self._runtime)
result = collection.find(
dict({'genusTypeId': str(agent_genus_type)},
**self._view_filter())).sort('_id', DESCENDING)
return objects.AgentList(result, runtime=self._runtime, proxy=self._proxy) | python | def get_agents_by_genus_type(self, agent_genus_type):
"""Gets an ``AgentList`` corresponding to the given agent genus ``Type`` which does not include agents of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known agents or
an error results. Otherwise, the returned list may contain only
those agents that are accessible through this session.
arg: agent_genus_type (osid.type.Type): an agent genus type
return: (osid.authentication.AgentList) - the returned ``Agent``
list
raise: NullArgument - ``agent_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('authentication',
collection='Agent',
runtime=self._runtime)
result = collection.find(
dict({'genusTypeId': str(agent_genus_type)},
**self._view_filter())).sort('_id', DESCENDING)
return objects.AgentList(result, runtime=self._runtime, proxy=self._proxy) | ['def', 'get_agents_by_genus_type', '(', 'self', ',', 'agent_genus_type', ')', ':', '# Implemented from template for', '# osid.resource.ResourceLookupSession.get_resources_by_genus_type', '# NOTE: This implementation currently ignores plenary view', 'collection', '=', 'JSONClientValidated', '(', "'authentication'", ',', 'collection', '=', "'Agent'", ',', 'runtime', '=', 'self', '.', '_runtime', ')', 'result', '=', 'collection', '.', 'find', '(', 'dict', '(', '{', "'genusTypeId'", ':', 'str', '(', 'agent_genus_type', ')', '}', ',', '*', '*', 'self', '.', '_view_filter', '(', ')', ')', ')', '.', 'sort', '(', "'_id'", ',', 'DESCENDING', ')', 'return', 'objects', '.', 'AgentList', '(', 'result', ',', 'runtime', '=', 'self', '.', '_runtime', ',', 'proxy', '=', 'self', '.', '_proxy', ')'] | Gets an ``AgentList`` corresponding to the given agent genus ``Type`` which does not include agents of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known agents or
an error results. Otherwise, the returned list may contain only
those agents that are accessible through this session.
arg: agent_genus_type (osid.type.Type): an agent genus type
return: (osid.authentication.AgentList) - the returned ``Agent``
list
raise: NullArgument - ``agent_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | ['Gets', 'an', 'AgentList', 'corresponding', 'to', 'the', 'given', 'agent', 'genus', 'Type', 'which', 'does', 'not', 'include', 'agents', 'of', 'genus', 'types', 'derived', 'from', 'the', 'specified', 'Type', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authentication/sessions.py#L255-L280 |
2,859 | jleinonen/pytmatrix | pytmatrix/tmatrix.py | Scatterer._set_scatter_signature | def _set_scatter_signature(self):
"""Mark the amplitude and scattering matrices as up to date.
"""
self._scatter_signature = (self.thet0, self.thet, self.phi0, self.phi,
self.alpha, self.beta, self.orient) | python | def _set_scatter_signature(self):
"""Mark the amplitude and scattering matrices as up to date.
"""
self._scatter_signature = (self.thet0, self.thet, self.phi0, self.phi,
self.alpha, self.beta, self.orient) | ['def', '_set_scatter_signature', '(', 'self', ')', ':', 'self', '.', '_scatter_signature', '=', '(', 'self', '.', 'thet0', ',', 'self', '.', 'thet', ',', 'self', '.', 'phi0', ',', 'self', '.', 'phi', ',', 'self', '.', 'alpha', ',', 'self', '.', 'beta', ',', 'self', '.', 'orient', ')'] | Mark the amplitude and scattering matrices as up to date. | ['Mark', 'the', 'amplitude', 'and', 'scattering', 'matrices', 'as', 'up', 'to', 'date', '.'] | train | https://github.com/jleinonen/pytmatrix/blob/8803507fe5332786feab105fa74acf63e7121718/pytmatrix/tmatrix.py#L231-L235 |
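Storing a tuple of the geometry parameters is a lightweight way to notice later whether any of them changed and whether cached matrices are still valid. A minimal sketch of that signature pattern outside of pytmatrix (the attribute names follow the record, the computation is a dummy):

class Scatterer:
    def __init__(self):
        self.thet0, self.thet, self.phi0, self.phi = 90.0, 90.0, 0.0, 180.0
        self._scatter_signature = None

    def _signature(self):
        return (self.thet0, self.thet, self.phi0, self.phi)

    def get_matrix(self):
        if self._scatter_signature != self._signature():
            print('recomputing')
            self._matrix = [[1.0, 0.0], [0.0, 1.0]]   # stand-in for the expensive solve
            self._scatter_signature = self._signature()
        return self._matrix

s = Scatterer()
s.get_matrix()    # recomputing
s.get_matrix()    # signature unchanged, cached value returned
s.phi = 90.0
s.get_matrix()    # recomputing again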
2,860 | jasedit/pymmd | pymmd/mmd.py | has_metadata | def has_metadata(source, ext):
"""Returns a flag indicating if a given block of MultiMarkdown text contains metadata."""
_MMD_LIB.has_metadata.argtypes = [ctypes.c_char_p, ctypes.c_int]
_MMD_LIB.has_metadata.restype = ctypes.c_bool
return _MMD_LIB.has_metadata(source.encode('utf-8'), ext) | python | def has_metadata(source, ext):
"""Returns a flag indicating if a given block of MultiMarkdown text contains metadata."""
_MMD_LIB.has_metadata.argtypes = [ctypes.c_char_p, ctypes.c_int]
_MMD_LIB.has_metadata.restype = ctypes.c_bool
return _MMD_LIB.has_metadata(source.encode('utf-8'), ext) | ['def', 'has_metadata', '(', 'source', ',', 'ext', ')', ':', '_MMD_LIB', '.', 'has_metadata', '.', 'argtypes', '=', '[', 'ctypes', '.', 'c_char_p', ',', 'ctypes', '.', 'c_int', ']', '_MMD_LIB', '.', 'has_metadata', '.', 'restype', '=', 'ctypes', '.', 'c_bool', 'return', '_MMD_LIB', '.', 'has_metadata', '(', 'source', '.', 'encode', '(', "'utf-8'", ')', ',', 'ext', ')'] | Returns a flag indicating if a given block of MultiMarkdown text contains metadata. | ['Returns', 'a', 'flag', 'indicating', 'if', 'a', 'given', 'block', 'of', 'MultiMarkdown', 'text', 'contains', 'metadata', '.'] | train | https://github.com/jasedit/pymmd/blob/37b5a717241b837ca15b8a4d4cc3c06b4456bfbd/pymmd/mmd.py#L103-L107 |
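Declaring argtypes and restype before calling into the shared library, as above, is ordinary ctypes usage. A self-contained example against the C runtime's strlen; the library lookup assumes a platform where find_library('c') succeeds (typical on Linux and macOS):

import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library('c'))
libc.strlen.argtypes = [ctypes.c_char_p]
libc.strlen.restype = ctypes.c_size_t

print(libc.strlen('MultiMarkdown'.encode('utf-8')))   # 13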
2,861 | kodexlab/reliure | reliure/pipeline.py | Optionable.get_options | def get_options(self, hidden=False):
"""
:param hidden: whether to return hidden options
:type hidden: bool
:returns: dictionary of all options (with option's information)
:rtype: dict
"""
return dict((opt['name'], opt) for opt in self.get_ordered_options(hidden=hidden)) | python | def get_options(self, hidden=False):
"""
:param hidden: whether to return hidden options
:type hidden: bool
:returns: dictionary of all options (with option's information)
:rtype: dict
"""
return dict((opt['name'], opt) for opt in self.get_ordered_options(hidden=hidden)) | ['def', 'get_options', '(', 'self', ',', 'hidden', '=', 'False', ')', ':', 'return', 'dict', '(', '(', 'opt', '[', "'name'", ']', ',', 'opt', ')', 'for', 'opt', 'in', 'self', '.', 'get_ordered_options', '(', 'hidden', '=', 'hidden', ')', ')'] | :param hidden: whether to return hidden options
:type hidden: bool
:returns: dictionary of all options (with option's information)
:rtype: dict | [':', 'param', 'hidden', ':', 'whether', 'to', 'return', 'hidden', 'options', ':', 'type', 'hidden', ':', 'bool', ':', 'returns', ':', 'dictionary', 'of', 'all', 'options', '(', 'with', 'option', 's', 'information', ')', ':', 'rtype', ':', 'dict'] | train | https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/pipeline.py#L307-L314 |
2,862 | MillionIntegrals/vel | vel/rl/models/stochastic_policy_model.py | StochasticPolicyModelFactory.instantiate | def instantiate(self, **extra_args):
""" Instantiate the model """
input_block = self.input_block.instantiate()
backbone = self.backbone.instantiate(**extra_args)
return StochasticPolicyModel(input_block, backbone, extra_args['action_space']) | python | def instantiate(self, **extra_args):
""" Instantiate the model """
input_block = self.input_block.instantiate()
backbone = self.backbone.instantiate(**extra_args)
return StochasticPolicyModel(input_block, backbone, extra_args['action_space']) | ['def', 'instantiate', '(', 'self', ',', '*', '*', 'extra_args', ')', ':', 'input_block', '=', 'self', '.', 'input_block', '.', 'instantiate', '(', ')', 'backbone', '=', 'self', '.', 'backbone', '.', 'instantiate', '(', '*', '*', 'extra_args', ')', 'return', 'StochasticPolicyModel', '(', 'input_block', ',', 'backbone', ',', 'extra_args', '[', "'action_space'", ']', ')'] | Instantiate the model | ['Instantiate', 'the', 'model'] | train | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/models/stochastic_policy_model.py#L110-L115 |
2,863 | apache/spark | python/pyspark/ml/image.py | _ImageSchema.readImages | def readImages(self, path, recursive=False, numPartitions=-1,
dropImageFailures=False, sampleRatio=1.0, seed=0):
"""
Reads the directory of images from the local or remote source.
.. note:: If multiple jobs are run in parallel with different sampleRatio or recursive flag,
there may be a race condition where one job overwrites the hadoop configs of another.
.. note:: If sample ratio is less than 1, sampling uses a PathFilter that is efficient but
potentially non-deterministic.
.. note:: Deprecated in 2.4.0. Use `spark.read.format("image").load(path)` instead and
this `readImages` will be removed in 3.0.0.
:param str path: Path to the image directory.
:param bool recursive: Recursive search flag.
:param int numPartitions: Number of DataFrame partitions.
:param bool dropImageFailures: Drop the files that are not valid images.
:param float sampleRatio: Fraction of the images loaded.
:param int seed: Random number seed.
:return: a :class:`DataFrame` with a single column of "images",
see ImageSchema for details.
>>> df = ImageSchema.readImages('data/mllib/images/origin/kittens', recursive=True)
>>> df.count()
5
.. versionadded:: 2.3.0
"""
warnings.warn("`ImageSchema.readImage` is deprecated. " +
"Use `spark.read.format(\"image\").load(path)` instead.", DeprecationWarning)
spark = SparkSession.builder.getOrCreate()
image_schema = spark._jvm.org.apache.spark.ml.image.ImageSchema
jsession = spark._jsparkSession
jresult = image_schema.readImages(path, jsession, recursive, numPartitions,
dropImageFailures, float(sampleRatio), seed)
return DataFrame(jresult, spark._wrapped) | python | def readImages(self, path, recursive=False, numPartitions=-1,
dropImageFailures=False, sampleRatio=1.0, seed=0):
"""
Reads the directory of images from the local or remote source.
.. note:: If multiple jobs are run in parallel with different sampleRatio or recursive flag,
there may be a race condition where one job overwrites the hadoop configs of another.
.. note:: If sample ratio is less than 1, sampling uses a PathFilter that is efficient but
potentially non-deterministic.
.. note:: Deprecated in 2.4.0. Use `spark.read.format("image").load(path)` instead and
this `readImages` will be removed in 3.0.0.
:param str path: Path to the image directory.
:param bool recursive: Recursive search flag.
:param int numPartitions: Number of DataFrame partitions.
:param bool dropImageFailures: Drop the files that are not valid images.
:param float sampleRatio: Fraction of the images loaded.
:param int seed: Random number seed.
:return: a :class:`DataFrame` with a single column of "images",
see ImageSchema for details.
>>> df = ImageSchema.readImages('data/mllib/images/origin/kittens', recursive=True)
>>> df.count()
5
.. versionadded:: 2.3.0
"""
warnings.warn("`ImageSchema.readImage` is deprecated. " +
"Use `spark.read.format(\"image\").load(path)` instead.", DeprecationWarning)
spark = SparkSession.builder.getOrCreate()
image_schema = spark._jvm.org.apache.spark.ml.image.ImageSchema
jsession = spark._jsparkSession
jresult = image_schema.readImages(path, jsession, recursive, numPartitions,
dropImageFailures, float(sampleRatio), seed)
return DataFrame(jresult, spark._wrapped) | ['def', 'readImages', '(', 'self', ',', 'path', ',', 'recursive', '=', 'False', ',', 'numPartitions', '=', '-', '1', ',', 'dropImageFailures', '=', 'False', ',', 'sampleRatio', '=', '1.0', ',', 'seed', '=', '0', ')', ':', 'warnings', '.', 'warn', '(', '"`ImageSchema.readImage` is deprecated. "', '+', '"Use `spark.read.format(\\"image\\").load(path)` instead."', ',', 'DeprecationWarning', ')', 'spark', '=', 'SparkSession', '.', 'builder', '.', 'getOrCreate', '(', ')', 'image_schema', '=', 'spark', '.', '_jvm', '.', 'org', '.', 'apache', '.', 'spark', '.', 'ml', '.', 'image', '.', 'ImageSchema', 'jsession', '=', 'spark', '.', '_jsparkSession', 'jresult', '=', 'image_schema', '.', 'readImages', '(', 'path', ',', 'jsession', ',', 'recursive', ',', 'numPartitions', ',', 'dropImageFailures', ',', 'float', '(', 'sampleRatio', ')', ',', 'seed', ')', 'return', 'DataFrame', '(', 'jresult', ',', 'spark', '.', '_wrapped', ')'] | Reads the directory of images from the local or remote source.
.. note:: If multiple jobs are run in parallel with different sampleRatio or recursive flag,
there may be a race condition where one job overwrites the hadoop configs of another.
.. note:: If sample ratio is less than 1, sampling uses a PathFilter that is efficient but
potentially non-deterministic.
.. note:: Deprecated in 2.4.0. Use `spark.read.format("image").load(path)` instead and
this `readImages` will be removed in 3.0.0.
:param str path: Path to the image directory.
:param bool recursive: Recursive search flag.
:param int numPartitions: Number of DataFrame partitions.
:param bool dropImageFailures: Drop the files that are not valid images.
:param float sampleRatio: Fraction of the images loaded.
:param int seed: Random number seed.
:return: a :class:`DataFrame` with a single column of "images",
see ImageSchema for details.
>>> df = ImageSchema.readImages('data/mllib/images/origin/kittens', recursive=True)
>>> df.count()
5
.. versionadded:: 2.3.0 | ['Reads', 'the', 'directory', 'of', 'images', 'from', 'the', 'local', 'or', 'remote', 'source', '.'] | train | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L206-L242 |
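The deprecation note above already names the replacement path. A hedged sketch of that recommended usage; it needs a live Spark session (2.4 or later) and the sample image directory shipped with Spark, and the nested field names assume Spark's image data source schema:

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.read.format('image').load('data/mllib/images/origin/kittens')
df.select('image.origin', 'image.width', 'image.height').show(truncate=False)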
2,864 | glomex/gcdt | gcdt/ramuda_core.py | invoke | def invoke(awsclient, function_name, payload, invocation_type=None,
alias_name=ALIAS_NAME, version=None, outfile=None):
"""Send a ping request to a lambda function.
:param awsclient:
:param function_name:
:param payload:
:param invocation_type:
:param alias_name:
:param version:
:param outfile: write response to file
:return: ping response payload
"""
log.debug('invoking lambda function: %s', function_name)
client_lambda = awsclient.get_client('lambda')
if invocation_type is None:
invocation_type = 'RequestResponse'
if payload.startswith('file://'):
log.debug('reading payload from file: %s' % payload)
with open(payload[7:], 'r') as pfile:
payload = pfile.read()
if version:
response = client_lambda.invoke(
FunctionName=function_name,
InvocationType=invocation_type,
Payload=payload,
Qualifier=version
)
else:
response = client_lambda.invoke(
FunctionName=function_name,
InvocationType=invocation_type,
Payload=payload,
Qualifier=alias_name
)
results = response['Payload'].read() # payload is a 'StreamingBody'
log.debug('invoke completed')
# write to file
if outfile:
with open(outfile, 'w') as ofile:
ofile.write(str(results))
ofile.flush()
return
else:
return results | python | def invoke(awsclient, function_name, payload, invocation_type=None,
alias_name=ALIAS_NAME, version=None, outfile=None):
"""Send a ping request to a lambda function.
:param awsclient:
:param function_name:
:param payload:
:param invocation_type:
:param alias_name:
:param version:
:param outfile: write response to file
:return: ping response payload
"""
log.debug('invoking lambda function: %s', function_name)
client_lambda = awsclient.get_client('lambda')
if invocation_type is None:
invocation_type = 'RequestResponse'
if payload.startswith('file://'):
log.debug('reading payload from file: %s' % payload)
with open(payload[7:], 'r') as pfile:
payload = pfile.read()
if version:
response = client_lambda.invoke(
FunctionName=function_name,
InvocationType=invocation_type,
Payload=payload,
Qualifier=version
)
else:
response = client_lambda.invoke(
FunctionName=function_name,
InvocationType=invocation_type,
Payload=payload,
Qualifier=alias_name
)
results = response['Payload'].read() # payload is a 'StreamingBody'
log.debug('invoke completed')
# write to file
if outfile:
with open(outfile, 'w') as ofile:
ofile.write(str(results))
ofile.flush()
return
else:
return results | ['def', 'invoke', '(', 'awsclient', ',', 'function_name', ',', 'payload', ',', 'invocation_type', '=', 'None', ',', 'alias_name', '=', 'ALIAS_NAME', ',', 'version', '=', 'None', ',', 'outfile', '=', 'None', ')', ':', 'log', '.', 'debug', '(', "'invoking lambda function: %s'", ',', 'function_name', ')', 'client_lambda', '=', 'awsclient', '.', 'get_client', '(', "'lambda'", ')', 'if', 'invocation_type', 'is', 'None', ':', 'invocation_type', '=', "'RequestResponse'", 'if', 'payload', '.', 'startswith', '(', "'file://'", ')', ':', 'log', '.', 'debug', '(', "'reading payload from file: %s'", '%', 'payload', ')', 'with', 'open', '(', 'payload', '[', '7', ':', ']', ',', "'r'", ')', 'as', 'pfile', ':', 'payload', '=', 'pfile', '.', 'read', '(', ')', 'if', 'version', ':', 'response', '=', 'client_lambda', '.', 'invoke', '(', 'FunctionName', '=', 'function_name', ',', 'InvocationType', '=', 'invocation_type', ',', 'Payload', '=', 'payload', ',', 'Qualifier', '=', 'version', ')', 'else', ':', 'response', '=', 'client_lambda', '.', 'invoke', '(', 'FunctionName', '=', 'function_name', ',', 'InvocationType', '=', 'invocation_type', ',', 'Payload', '=', 'payload', ',', 'Qualifier', '=', 'alias_name', ')', 'results', '=', 'response', '[', "'Payload'", ']', '.', 'read', '(', ')', "# payload is a 'StreamingBody'", 'log', '.', 'debug', '(', "'invoke completed'", ')', '# write to file', 'if', 'outfile', ':', 'with', 'open', '(', 'outfile', ',', "'w'", ')', 'as', 'ofile', ':', 'ofile', '.', 'write', '(', 'str', '(', 'results', ')', ')', 'ofile', '.', 'flush', '(', ')', 'return', 'else', ':', 'return', 'results'] | Send a ping request to a lambda function.
:param awsclient:
:param function_name:
:param payload:
:param invocation_type:
:param alias_name:
:param version:
:param outfile: write response to file
:return: ping response payload | ['Send', 'a', 'ping', 'request', 'to', 'a', 'lambda', 'function', '.'] | train | https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/ramuda_core.py#L654-L700 |
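The wrapper above ultimately issues a plain Lambda invoke; a hedged boto3 sketch of the same flow, keeping the file:// payload convention. The function name, alias and payload path are placeholders, and configured AWS credentials are assumed:

import boto3

def invoke(function_name, payload, alias_name='ACTIVE'):
    if payload.startswith('file://'):
        with open(payload[7:], 'r') as pfile:
            payload = pfile.read()
    client_lambda = boto3.client('lambda')
    response = client_lambda.invoke(
        FunctionName=function_name,
        InvocationType='RequestResponse',
        Payload=payload,
        Qualifier=alias_name,
    )
    return response['Payload'].read()   # Payload is a StreamingBody

# print(invoke('my-function', 'file://ping.json'))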
2,865 | jic-dtool/dtool-config | dtool_config/cli.py | readme_template | def readme_template(readme_template_file):
"""Display / set / update the readme template file."""
if not readme_template_file:
click.secho(dtool_config.utils.get_readme_template_fpath(
CONFIG_PATH,
))
else:
click.secho(dtool_config.utils.set_readme_template_fpath(
CONFIG_PATH,
readme_template_file
)) | python | def readme_template(readme_template_file):
"""Display / set / update the readme template file."""
if not readme_template_file:
click.secho(dtool_config.utils.get_readme_template_fpath(
CONFIG_PATH,
))
else:
click.secho(dtool_config.utils.set_readme_template_fpath(
CONFIG_PATH,
readme_template_file
)) | ['def', 'readme_template', '(', 'readme_template_file', ')', ':', 'if', 'not', 'readme_template_file', ':', 'click', '.', 'secho', '(', 'dtool_config', '.', 'utils', '.', 'get_readme_template_fpath', '(', 'CONFIG_PATH', ',', ')', ')', 'else', ':', 'click', '.', 'secho', '(', 'dtool_config', '.', 'utils', '.', 'set_readme_template_fpath', '(', 'CONFIG_PATH', ',', 'readme_template_file', ')', ')'] | Display / set / update the readme template file. | ['Display', '/', 'set', '/', 'update', 'the', 'readme', 'template', 'file', '.'] | train | https://github.com/jic-dtool/dtool-config/blob/21afa99a6794909e1d0180a45895492b4b726a51/dtool_config/cli.py#L53-L63 |
2,866 | brutasse/graphite-api | graphite_api/functions.py | _getFirstPathExpression | def _getFirstPathExpression(name):
"""Returns the first metric path in an expression."""
tokens = grammar.parseString(name)
pathExpression = None
while pathExpression is None:
if tokens.pathExpression:
pathExpression = tokens.pathExpression
elif tokens.expression:
tokens = tokens.expression
elif tokens.call:
tokens = tokens.call.args[0]
else:
break
return pathExpression | python | def _getFirstPathExpression(name):
"""Returns the first metric path in an expression."""
tokens = grammar.parseString(name)
pathExpression = None
while pathExpression is None:
if tokens.pathExpression:
pathExpression = tokens.pathExpression
elif tokens.expression:
tokens = tokens.expression
elif tokens.call:
tokens = tokens.call.args[0]
else:
break
return pathExpression | ['def', '_getFirstPathExpression', '(', 'name', ')', ':', 'tokens', '=', 'grammar', '.', 'parseString', '(', 'name', ')', 'pathExpression', '=', 'None', 'while', 'pathExpression', 'is', 'None', ':', 'if', 'tokens', '.', 'pathExpression', ':', 'pathExpression', '=', 'tokens', '.', 'pathExpression', 'elif', 'tokens', '.', 'expression', ':', 'tokens', '=', 'tokens', '.', 'expression', 'elif', 'tokens', '.', 'call', ':', 'tokens', '=', 'tokens', '.', 'call', '.', 'args', '[', '0', ']', 'else', ':', 'break', 'return', 'pathExpression'] | Returns the first metric path in an expression. | ['Returns', 'the', 'first', 'metric', 'path', 'in', 'an', 'expression', '.'] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1921-L1934 |
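A small illustration of what the helper above extracts, assuming it is called with typical Graphite render expressions; the sample targets are made up and the expected results are shown as comments.

# the helper walks the parsed grammar until it reaches the first plain metric path
print(_getFirstPathExpression("collectd.host-1.cpu-0.idle"))
# expected: collectd.host-1.cpu-0.idle
print(_getFirstPathExpression("alias(sumSeries(collectd.*.cpu-0.idle), 'idle')"))
# expected: the innermost metric path, collectd.*.cpu-0.idle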
2,867 | fracpete/python-weka-wrapper3 | python/weka/core/dataset.py | Instances.template_instances | def template_instances(cls, dataset, capacity=0):
"""
Uses the Instances as template to create an empty dataset.
:param dataset: the original dataset
:type dataset: Instances
:param capacity: how many data rows to reserve initially (see compactify)
:type capacity: int
:return: the empty dataset
:rtype: Instances
"""
return Instances(
javabridge.make_instance(
"weka/core/Instances", "(Lweka/core/Instances;I)V", dataset.jobject, capacity)) | python | def template_instances(cls, dataset, capacity=0):
"""
Uses the Instances as template to create an empty dataset.
:param dataset: the original dataset
:type dataset: Instances
:param capacity: how many data rows to reserve initially (see compactify)
:type capacity: int
:return: the empty dataset
:rtype: Instances
"""
return Instances(
javabridge.make_instance(
"weka/core/Instances", "(Lweka/core/Instances;I)V", dataset.jobject, capacity)) | ['def', 'template_instances', '(', 'cls', ',', 'dataset', ',', 'capacity', '=', '0', ')', ':', 'return', 'Instances', '(', 'javabridge', '.', 'make_instance', '(', '"weka/core/Instances"', ',', '"(Lweka/core/Instances;I)V"', ',', 'dataset', '.', 'jobject', ',', 'capacity', ')', ')'] | Uses the Instances as template to create an empty dataset.
:param dataset: the original dataset
:type dataset: Instances
:param capacity: how many data rows to reserve initially (see compactify)
:type capacity: int
:return: the empty dataset
:rtype: Instances | ['Uses', 'the', 'Instances', 'as', 'template', 'to', 'create', 'an', 'empty', 'dataset', '.'] | train | https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/dataset.py#L435-L448 |
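A usage sketch for the class method above; it assumes a running JVM and an ARFF file on disk (the path is a placeholder).

import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.core.dataset import Instances

jvm.start()
loader = Loader(classname="weka.core.converters.ArffLoader")
original = loader.load_file("/data/iris.arff")            # placeholder path
empty = Instances.template_instances(original, capacity=100)
print(empty.num_instances)                                 # 0 rows, same header as original
jvm.stop()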
2,868 | saltstack/salt | salt/modules/boto_apigateway.py | describe_api_deployments | def describe_api_deployments(restApiId, region=None, key=None, keyid=None, profile=None):
'''
Gets information about the defined API Deployments. Return list of api deployments.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.describe_api_deployments restApiId
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
deployments = []
_deployments = conn.get_deployments(restApiId=restApiId)
while True:
if _deployments:
deployments = deployments + _deployments['items']
if 'position' not in _deployments:
break
_deployments = conn.get_deployments(restApiId=restApiId, position=_deployments['position'])
return {'deployments': [_convert_datetime_str(deployment) for deployment in deployments]}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} | python | def describe_api_deployments(restApiId, region=None, key=None, keyid=None, profile=None):
'''
Gets information about the defined API Deployments. Return list of api deployments.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.describe_api_deployments restApiId
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
deployments = []
_deployments = conn.get_deployments(restApiId=restApiId)
while True:
if _deployments:
deployments = deployments + _deployments['items']
if 'position' not in _deployments:
break
_deployments = conn.get_deployments(restApiId=restApiId, position=_deployments['position'])
return {'deployments': [_convert_datetime_str(deployment) for deployment in deployments]}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} | ['def', 'describe_api_deployments', '(', 'restApiId', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'try', ':', 'conn', '=', '_get_conn', '(', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'deployments', '=', '[', ']', '_deployments', '=', 'conn', '.', 'get_deployments', '(', 'restApiId', '=', 'restApiId', ')', 'while', 'True', ':', 'if', '_deployments', ':', 'deployments', '=', 'deployments', '+', '_deployments', '[', "'items'", ']', 'if', "'position'", 'not', 'in', '_deployments', ':', 'break', '_deployments', '=', 'conn', '.', 'get_deployments', '(', 'restApiId', '=', 'restApiId', ',', 'position', '=', '_deployments', '[', "'position'", ']', ')', 'return', '{', "'deployments'", ':', '[', '_convert_datetime_str', '(', 'deployment', ')', 'for', 'deployment', 'in', 'deployments', ']', '}', 'except', 'ClientError', 'as', 'e', ':', 'return', '{', "'error'", ':', '__utils__', '[', "'boto3.get_error'", ']', '(', 'e', ')', '}'] | Gets information about the defined API Deployments. Return list of api deployments.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.describe_api_deployments restApiId | ['Gets', 'information', 'about', 'the', 'defined', 'API', 'Deployments', '.', 'Return', 'list', 'of', 'api', 'deployments', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_apigateway.py#L656-L681 |
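Beyond the CLI call in the docstring, the same function can be cross-called from another Salt execution module via the __salt__ dunder; the REST API id, region and profile below are placeholders.

import logging
log = logging.getLogger(__name__)

# cross-call from another Salt execution module; ids/region/profile are placeholders
result = __salt__['boto_apigateway.describe_api_deployments'](
    restApiId='ab12cd34ef', region='us-east-1', profile='myprofile')
if 'error' in result:
    log.error('describe_api_deployments failed: %s', result['error'])
else:
    for deployment in result['deployments']:
        print(deployment['id'], deployment.get('createdDate'))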
2,869 | 317070/python-twitch-stream | twitchstream/chat.py | TwitchChatStream._push_from_buffer | def _push_from_buffer(self):
"""
Push a message on the stack to the IRC stream.
This is necessary to avoid Twitch overflow control.
"""
if len(self.buffer) > 0:
if time.time() - self.last_sent_time > 5:
try:
message = self.buffer.pop(0)
self.s.send(message.encode('utf-8'))
if self.verbose:
print(message)
finally:
self.last_sent_time = time.time() | python | def _push_from_buffer(self):
"""
Push a message on the stack to the IRC stream.
This is necessary to avoid Twitch overflow control.
"""
if len(self.buffer) > 0:
if time.time() - self.last_sent_time > 5:
try:
message = self.buffer.pop(0)
self.s.send(message.encode('utf-8'))
if self.verbose:
print(message)
finally:
self.last_sent_time = time.time() | ['def', '_push_from_buffer', '(', 'self', ')', ':', 'if', 'len', '(', 'self', '.', 'buffer', ')', '>', '0', ':', 'if', 'time', '.', 'time', '(', ')', '-', 'self', '.', 'last_sent_time', '>', '5', ':', 'try', ':', 'message', '=', 'self', '.', 'buffer', '.', 'pop', '(', '0', ')', 'self', '.', 's', '.', 'send', '(', 'message', '.', 'encode', '(', "'utf-8'", ')', ')', 'if', 'self', '.', 'verbose', ':', 'print', '(', 'message', ')', 'finally', ':', 'self', '.', 'last_sent_time', '=', 'time', '.', 'time', '(', ')'] | Push a message on the stack to the IRC stream.
This is necessary to avoid Twitch overflow control. | ['Push', 'a', 'message', 'on', 'the', 'stack', 'to', 'the', 'IRC', 'stream', '.', 'This', 'is', 'necessary', 'to', 'avoid', 'Twitch', 'overflow', 'control', '.'] | train | https://github.com/317070/python-twitch-stream/blob/83b4c2a27ee368fc3316b59ab1d25fcf0b0bcda6/twitchstream/chat.py#L159-L172 |
2,870 | vxgmichel/aiostream | examples/extra.py | power | async def power(source, exponent):
"""Raise the elements of an asynchronous sequence to the given power."""
async with streamcontext(source) as streamer:
async for item in streamer:
yield item ** exponent | python | async def power(source, exponent):
"""Raise the elements of an asynchronous sequence to the given power."""
async with streamcontext(source) as streamer:
async for item in streamer:
yield item ** exponent | ['async', 'def', 'power', '(', 'source', ',', 'exponent', ')', ':', 'async', 'with', 'streamcontext', '(', 'source', ')', 'as', 'streamer', ':', 'async', 'for', 'item', 'in', 'streamer', ':', 'yield', 'item', '**', 'exponent'] | Raise the elements of an asynchronous sequence to the given power. | ['Raise', 'the', 'elements', 'of', 'an', 'asynchronous', 'sequence', 'to', 'the', 'given', 'power', '.'] | train | https://github.com/vxgmichel/aiostream/blob/43bdf04ab19108a3f1b5a472062e1392a26cbcf8/examples/extra.py#L16-L20 |
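A short driver for the async generator above. The numbers() source is a stand-in for any asynchronous sequence, assuming streamcontext accepts a plain async generator as it does for other async iterables.

import asyncio

async def numbers():                 # stand-in source sequence
    for i in range(1, 5):
        yield i

async def main():
    async for value in power(numbers(), 2):
        print(value)                 # 1, 4, 9, 16

asyncio.run(main())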
2,871 | radjkarl/imgProcessor | imgProcessor/camera/PerspectiveCorrection.py | PerspectiveCorrection._calcQuadSize | def _calcQuadSize(corners, aspectRatio):
'''
return the size of a rectangle in perspective distortion in [px]
DEBUG: PUT THAT BACK IN??::
if aspectRatio is not given is will be determined
'''
if aspectRatio > 1: # x is bigger -> reduce y
x_length = PerspectiveCorrection._quadXLength(corners)
y = x_length / aspectRatio
return x_length, y
else: # y is bigger -> reduce x
y_length = PerspectiveCorrection._quadYLength(corners)
x = y_length * aspectRatio
return x, y_length | python | def _calcQuadSize(corners, aspectRatio):
'''
return the size of a rectangle in perspective distortion in [px]
DEBUG: PUT THAT BACK IN??::
if aspectRatio is not given is will be determined
'''
if aspectRatio > 1: # x is bigger -> reduce y
x_length = PerspectiveCorrection._quadXLength(corners)
y = x_length / aspectRatio
return x_length, y
else: # y is bigger -> reduce x
y_length = PerspectiveCorrection._quadYLength(corners)
x = y_length * aspectRatio
return x, y_length | ['def', '_calcQuadSize', '(', 'corners', ',', 'aspectRatio', ')', ':', 'if', 'aspectRatio', '>', '1', ':', '# x is bigger -> reduce y\r', 'x_length', '=', 'PerspectiveCorrection', '.', '_quadXLength', '(', 'corners', ')', 'y', '=', 'x_length', '/', 'aspectRatio', 'return', 'x_length', ',', 'y', 'else', ':', '# y is bigger -> reduce x\r', 'y_length', '=', 'PerspectiveCorrection', '.', '_quadYLength', '(', 'corners', ')', 'x', '=', 'y_length', '*', 'aspectRatio', 'return', 'x', ',', 'y_length'] | return the size of a rectangle in perspective distortion in [px]
DEBUG: PUT THAT BACK IN??::
if aspectRatio is not given is will be determined | ['return', 'the', 'size', 'of', 'a', 'rectangle', 'in', 'perspective', 'distortion', 'in', '[', 'px', ']', 'DEBUG', ':', 'PUT', 'THAT', 'BACK', 'IN??', '::', 'if', 'aspectRatio', 'is', 'not', 'given', 'is', 'will', 'be', 'determined'] | train | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L810-L823 |
2,872 | mobolic/facebook-sdk | facebook/__init__.py | GraphAPI.get_object | def get_object(self, id, **args):
"""Fetches the given object from the graph."""
return self.request("{0}/{1}".format(self.version, id), args) | python | def get_object(self, id, **args):
"""Fetches the given object from the graph."""
return self.request("{0}/{1}".format(self.version, id), args) | ['def', 'get_object', '(', 'self', ',', 'id', ',', '*', '*', 'args', ')', ':', 'return', 'self', '.', 'request', '(', '"{0}/{1}"', '.', 'format', '(', 'self', '.', 'version', ',', 'id', ')', ',', 'args', ')'] | Fetches the given object from the graph. | ['Fetches', 'the', 'given', 'object', 'from', 'the', 'graph', '.'] | train | https://github.com/mobolic/facebook-sdk/blob/65ff582e77f7ed68b6e9643a7490e5dee2a1031b/facebook/__init__.py#L133-L135 |
2,873 | klmitch/tendril | tendril/manager.py | TendrilManager.get_local_addr | def get_local_addr(self, timeout=None):
"""
Retrieve the current local address.
:param timeout: If not given or given as ``None``, waits until
the local address is available. Otherwise,
waits for as long as specified. If the local
address is not set by the time the timeout
expires, returns ``None``.
"""
# If we're not running, just return None
if not self.running:
return None
# OK, we're running; wait on the _local_addr_event
if not self._local_addr_event.wait(timeout):
# Still not set after timeout
return None
# We have a local address!
return self._local_addr | python | def get_local_addr(self, timeout=None):
"""
Retrieve the current local address.
:param timeout: If not given or given as ``None``, waits until
the local address is available. Otherwise,
waits for as long as specified. If the local
address is not set by the time the timeout
expires, returns ``None``.
"""
# If we're not running, just return None
if not self.running:
return None
# OK, we're running; wait on the _local_addr_event
if not self._local_addr_event.wait(timeout):
# Still not set after timeout
return None
# We have a local address!
return self._local_addr | ['def', 'get_local_addr', '(', 'self', ',', 'timeout', '=', 'None', ')', ':', "# If we're not running, just return None", 'if', 'not', 'self', '.', 'running', ':', 'return', 'None', "# OK, we're running; wait on the _local_addr_event", 'if', 'not', 'self', '.', '_local_addr_event', '.', 'wait', '(', 'timeout', ')', ':', '# Still not set after timeout', 'return', 'None', '# We have a local address!', 'return', 'self', '.', '_local_addr'] | Retrieve the current local address.
:param timeout: If not given or given as ``None``, waits until
the local address is available. Otherwise,
waits for as long as specified. If the local
address is not set by the time the timeout
expires, returns ``None``. | ['Retrieve', 'the', 'current', 'local', 'address', '.'] | train | https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/manager.py#L291-L312 |
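A usage sketch, assuming manager is a started TendrilManager instance; the timeout guards against the listening socket not having been bound yet.

# 'manager' is assumed to be a running TendrilManager (e.g. a TCP manager)
addr = manager.get_local_addr(timeout=5.0)
if addr is None:
    print("manager not running or local address not bound within 5s")
else:
    print("listening on", addr)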
2,874 | secdev/scapy | scapy/contrib/http2.py | AbstractUVarIntField.addfield | def addfield(self, pkt, s, val):
# type: (Optional[packet.Packet], Union[str, Tuple[str, int, int]], int) -> str # noqa: E501
""" An AbstractUVarIntField prefix always consumes the remaining bits
of a BitField;if no current BitField is in use (no tuple in
entry) then the prefix length is 8 bits and the whole byte is to
be consumed
@param packet.Packet|None pkt: the packet containing this field. Probably unused. # noqa: E501
@param str|(str, int, long) s: the string to append this field to. A tuple indicates that some bits were already # noqa: E501
generated by another bitfield-compatible field. This MUST be the case if "size" is not 8. The int is the # noqa: E501
number of bits already generated in the first byte of the str. The long is the value that was generated by the # noqa: E501
previous bitfield-compatible fields.
@param int val: the positive or null value to be added.
@return str: s concatenated with the machine representation of this field. # noqa: E501
@raise AssertionError
"""
assert(val >= 0)
if isinstance(s, bytes):
assert self.size == 8, 'EINVAL: s: tuple expected when prefix_len is not a full byte' # noqa: E501
return s + self.i2m(pkt, val)
# s is a tuple
# assert(s[1] >= 0)
# assert(s[2] >= 0)
# assert (8 - s[1]) == self.size, 'EINVAL: s: not enough bits remaining in current byte to read the prefix' # noqa: E501
if val >= self._max_value:
return s[0] + chb((s[2] << self.size) + self._max_value) + self.i2m(pkt, val)[1:] # noqa: E501
# This AbstractUVarIntField is only one byte long; setting the prefix value # noqa: E501
# and appending the resulting byte to the string
return s[0] + chb((s[2] << self.size) + orb(self.i2m(pkt, val))) | python | def addfield(self, pkt, s, val):
# type: (Optional[packet.Packet], Union[str, Tuple[str, int, int]], int) -> str # noqa: E501
""" An AbstractUVarIntField prefix always consumes the remaining bits
of a BitField;if no current BitField is in use (no tuple in
entry) then the prefix length is 8 bits and the whole byte is to
be consumed
@param packet.Packet|None pkt: the packet containing this field. Probably unused. # noqa: E501
@param str|(str, int, long) s: the string to append this field to. A tuple indicates that some bits were already # noqa: E501
generated by another bitfield-compatible field. This MUST be the case if "size" is not 8. The int is the # noqa: E501
number of bits already generated in the first byte of the str. The long is the value that was generated by the # noqa: E501
previous bitfield-compatible fields.
@param int val: the positive or null value to be added.
@return str: s concatenated with the machine representation of this field. # noqa: E501
@raise AssertionError
"""
assert(val >= 0)
if isinstance(s, bytes):
assert self.size == 8, 'EINVAL: s: tuple expected when prefix_len is not a full byte' # noqa: E501
return s + self.i2m(pkt, val)
# s is a tuple
# assert(s[1] >= 0)
# assert(s[2] >= 0)
# assert (8 - s[1]) == self.size, 'EINVAL: s: not enough bits remaining in current byte to read the prefix' # noqa: E501
if val >= self._max_value:
return s[0] + chb((s[2] << self.size) + self._max_value) + self.i2m(pkt, val)[1:] # noqa: E501
# This AbstractUVarIntField is only one byte long; setting the prefix value # noqa: E501
# and appending the resulting byte to the string
return s[0] + chb((s[2] << self.size) + orb(self.i2m(pkt, val))) | ['def', 'addfield', '(', 'self', ',', 'pkt', ',', 's', ',', 'val', ')', ':', '# type: (Optional[packet.Packet], Union[str, Tuple[str, int, int]], int) -> str # noqa: E501', 'assert', '(', 'val', '>=', '0', ')', 'if', 'isinstance', '(', 's', ',', 'bytes', ')', ':', 'assert', 'self', '.', 'size', '==', '8', ',', "'EINVAL: s: tuple expected when prefix_len is not a full byte'", '# noqa: E501', 'return', 's', '+', 'self', '.', 'i2m', '(', 'pkt', ',', 'val', ')', '# s is a tuple', '# assert(s[1] >= 0)', '# assert(s[2] >= 0)', "# assert (8 - s[1]) == self.size, 'EINVAL: s: not enough bits remaining in current byte to read the prefix' # noqa: E501", 'if', 'val', '>=', 'self', '.', '_max_value', ':', 'return', 's', '[', '0', ']', '+', 'chb', '(', '(', 's', '[', '2', ']', '<<', 'self', '.', 'size', ')', '+', 'self', '.', '_max_value', ')', '+', 'self', '.', 'i2m', '(', 'pkt', ',', 'val', ')', '[', '1', ':', ']', '# noqa: E501', '# This AbstractUVarIntField is only one byte long; setting the prefix value # noqa: E501', '# and appending the resulting byte to the string', 'return', 's', '[', '0', ']', '+', 'chb', '(', '(', 's', '[', '2', ']', '<<', 'self', '.', 'size', ')', '+', 'orb', '(', 'self', '.', 'i2m', '(', 'pkt', ',', 'val', ')', ')', ')'] | An AbstractUVarIntField prefix always consumes the remaining bits
of a BitField;if no current BitField is in use (no tuple in
entry) then the prefix length is 8 bits and the whole byte is to
be consumed
@param packet.Packet|None pkt: the packet containing this field. Probably unused. # noqa: E501
@param str|(str, int, long) s: the string to append this field to. A tuple indicates that some bits were already # noqa: E501
generated by another bitfield-compatible field. This MUST be the case if "size" is not 8. The int is the # noqa: E501
number of bits already generated in the first byte of the str. The long is the value that was generated by the # noqa: E501
previous bitfield-compatible fields.
@param int val: the positive or null value to be added.
@return str: s concatenated with the machine representation of this field. # noqa: E501
@raise AssertionError | ['An', 'AbstractUVarIntField', 'prefix', 'always', 'consumes', 'the', 'remaining', 'bits', 'of', 'a', 'BitField', ';', 'if', 'no', 'current', 'BitField', 'is', 'in', 'use', '(', 'no', 'tuple', 'in', 'entry', ')', 'then', 'the', 'prefix', 'length', 'is', '8', 'bits', 'and', 'the', 'whole', 'byte', 'is', 'to', 'be', 'consumed'] | train | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/http2.py#L368-L397 |
2,875 | txomon/abot | abot/cli.py | AsyncCommandMixin.invoke | def invoke(self, ctx):
"""Given a context, this invokes the attached callback (if it exists)
in the right way.
"""
if self.callback is not None:
loop = asyncio.get_event_loop()
return loop.run_until_complete(self.async_invoke(ctx)) | python | def invoke(self, ctx):
"""Given a context, this invokes the attached callback (if it exists)
in the right way.
"""
if self.callback is not None:
loop = asyncio.get_event_loop()
return loop.run_until_complete(self.async_invoke(ctx)) | ['def', 'invoke', '(', 'self', ',', 'ctx', ')', ':', 'if', 'self', '.', 'callback', 'is', 'not', 'None', ':', 'loop', '=', 'asyncio', '.', 'get_event_loop', '(', ')', 'return', 'loop', '.', 'run_until_complete', '(', 'self', '.', 'async_invoke', '(', 'ctx', ')', ')'] | Given a context, this invokes the attached callback (if it exists)
in the right way. | ['Given', 'a', 'context', 'this', 'invokes', 'the', 'attached', 'callback', '(', 'if', 'it', 'exists', ')', 'in', 'the', 'right', 'way', '.'] | train | https://github.com/txomon/abot/blob/3ac23c6d14965d4608ed13c284ae1a886b462252/abot/cli.py#L43-L49 |
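The mixin above lets click command callbacks be coroutines. A sketch of how such a command might be declared, assuming abot.cli exposes a Command class built on this mixin (that class name is an assumption, not confirmed by the excerpt).

import asyncio
import click
from abot.cli import Command   # assumed: a click.Command subclass using AsyncCommandMixin

@click.command(cls=Command)
async def greet():
    await asyncio.sleep(0.1)   # any awaitable work
    click.echo("hello from an async click command")

if __name__ == "__main__":
    greet()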
2,876 | bsolomon1124/pyfinance | pyfinance/options.py | BSM.value | def value(self):
"""Compute option value according to BSM model."""
return self._sign[1] * self.S0 * norm.cdf(
self._sign[1] * self.d1, 0.0, 1.0
) - self._sign[1] * self.K * np.exp(-self.r * self.T) * norm.cdf(
self._sign[1] * self.d2, 0.0, 1.0
) | python | def value(self):
"""Compute option value according to BSM model."""
return self._sign[1] * self.S0 * norm.cdf(
self._sign[1] * self.d1, 0.0, 1.0
) - self._sign[1] * self.K * np.exp(-self.r * self.T) * norm.cdf(
self._sign[1] * self.d2, 0.0, 1.0
) | ['def', 'value', '(', 'self', ')', ':', 'return', 'self', '.', '_sign', '[', '1', ']', '*', 'self', '.', 'S0', '*', 'norm', '.', 'cdf', '(', 'self', '.', '_sign', '[', '1', ']', '*', 'self', '.', 'd1', ',', '0.0', ',', '1.0', ')', '-', 'self', '.', '_sign', '[', '1', ']', '*', 'self', '.', 'K', '*', 'np', '.', 'exp', '(', '-', 'self', '.', 'r', '*', 'self', '.', 'T', ')', '*', 'norm', '.', 'cdf', '(', 'self', '.', '_sign', '[', '1', ']', '*', 'self', '.', 'd2', ',', '0.0', ',', '1.0', ')'] | Compute option value according to BSM model. | ['Compute', 'option', 'value', 'according', 'to', 'BSM', 'model', '.'] | train | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/options.py#L172-L178 |
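The property above is the standard Black-Scholes-Merton formula; a standalone numeric check of the call-option case (not using the library class) under illustrative parameters.

from math import exp, log, sqrt
from scipy.stats import norm

S0, K, r, sigma, T = 100.0, 100.0, 0.05, 0.20, 1.0   # illustrative inputs
d1 = (log(S0 / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * sqrt(T))
d2 = d1 - sigma * sqrt(T)
call = S0 * norm.cdf(d1) - K * exp(-r * T) * norm.cdf(d2)
print(round(call, 2))   # about 10.45 for these inputs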
2,877 | fermiPy/fermipy | fermipy/skymap.py | Map.sum_over_energy | def sum_over_energy(self):
""" Reduce a 3D counts cube to a 2D counts map
"""
# Note that the array is using the opposite convention from WCS
# so we sum over axis 0 in the array, but drop axis 2 in the WCS object
return Map(np.sum(self.counts, axis=0), self.wcs.dropaxis(2)) | python | def sum_over_energy(self):
""" Reduce a 3D counts cube to a 2D counts map
"""
# Note that the array is using the opposite convention from WCS
# so we sum over axis 0 in the array, but drop axis 2 in the WCS object
return Map(np.sum(self.counts, axis=0), self.wcs.dropaxis(2)) | ['def', 'sum_over_energy', '(', 'self', ')', ':', '# Note that the array is using the opposite convention from WCS', '# so we sum over axis 0 in the array, but drop axis 2 in the WCS object', 'return', 'Map', '(', 'np', '.', 'sum', '(', 'self', '.', 'counts', ',', 'axis', '=', '0', ')', ',', 'self', '.', 'wcs', '.', 'dropaxis', '(', '2', ')', ')'] | Reduce a 3D counts cube to a 2D counts map | ['Reduce', 'a', '3D', 'counts', 'cube', 'to', 'a', '2D', 'counts', 'map'] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/skymap.py#L252-L257 |
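The axis bookkeeping in the method above (sum over array axis 0, drop WCS axis 2) reflects the reversed axis order between numpy arrays and FITS/WCS; a tiny standalone illustration of the numpy half of that convention.

import numpy as np

cube = np.random.poisson(1.0, size=(8, 40, 40))   # (energy, y, x) in numpy order
counts_map = cube.sum(axis=0)                     # collapse the energy axis
print(counts_map.shape)                           # (40, 40)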
2,878 | ask/carrot | carrot/messaging.py | Publisher.declare | def declare(self):
"""Declare the exchange.
Creates the exchange on the broker.
"""
self.backend.exchange_declare(exchange=self.exchange,
type=self.exchange_type,
durable=self.durable,
auto_delete=self.auto_delete) | python | def declare(self):
"""Declare the exchange.
Creates the exchange on the broker.
"""
self.backend.exchange_declare(exchange=self.exchange,
type=self.exchange_type,
durable=self.durable,
auto_delete=self.auto_delete) | ['def', 'declare', '(', 'self', ')', ':', 'self', '.', 'backend', '.', 'exchange_declare', '(', 'exchange', '=', 'self', '.', 'exchange', ',', 'type', '=', 'self', '.', 'exchange_type', ',', 'durable', '=', 'self', '.', 'durable', ',', 'auto_delete', '=', 'self', '.', 'auto_delete', ')'] | Declare the exchange.
Creates the exchange on the broker. | ['Declare', 'the', 'exchange', '.'] | train | https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/messaging.py#L663-L672 |
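A usage sketch for the (now historical) carrot API; broker credentials and exchange names are placeholders, and the exchange options mirror the Publisher attributes used in the method above.

from carrot.connection import BrokerConnection
from carrot.messaging import Publisher

conn = BrokerConnection(hostname="localhost", port=5672,
                        userid="guest", password="guest",
                        virtual_host="/")            # placeholder credentials
publisher = Publisher(connection=conn, exchange="feed",
                      exchange_type="direct", routing_key="importer")
publisher.declare()                                  # creates the exchange on the broker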
2,879 | kiliankoe/dvbpy | dvb/dvb.py | pins | def pins(swlat, swlng, nelat, nelng, pintypes='stop', *, raw=False):
"""
DVB Map Pins
(GET https://www.dvb.de/apps/map/pins)
:param swlat: South-West Bounding Box Latitude
:param swlng: South-West Bounding Box Longitude
:param nelat: North-East Bounding Box Latitude
:param nelng: North-East Bounding Box Longitude
:param pintypes: Types to search for, defaults to 'stop'
:param raw: Return raw response
:return:
"""
try:
swlat, swlng = wgs_to_gk4(swlat, swlng)
nelat, nelng = wgs_to_gk4(nelat, nelng)
r = requests.get(
url='https://www.dvb.de/apps/map/pins',
params={
'showlines': 'true',
'swlat': swlat,
'swlng': swlng,
'nelat': nelat,
'nelng': nelng,
'pintypes': pintypes,
},
)
if r.status_code == 200:
response = json.loads(r.content.decode('utf-8'))
else:
raise requests.HTTPError('HTTP Status: {}'.format(r.status_code))
except requests.RequestException as e:
print('Failed to access DVB map pins app. Request Exception', e)
response = None
if response is None:
return None
return response if raw else [pins_return_results(line, pintypes) for line in response] | python | def pins(swlat, swlng, nelat, nelng, pintypes='stop', *, raw=False):
"""
DVB Map Pins
(GET https://www.dvb.de/apps/map/pins)
:param swlat: South-West Bounding Box Latitude
:param swlng: South-West Bounding Box Longitude
:param nelat: North-East Bounding Box Latitude
:param nelng: North-East Bounding Box Longitude
:param pintypes: Types to search for, defaults to 'stop'
:param raw: Return raw response
:return:
"""
try:
swlat, swlng = wgs_to_gk4(swlat, swlng)
nelat, nelng = wgs_to_gk4(nelat, nelng)
r = requests.get(
url='https://www.dvb.de/apps/map/pins',
params={
'showlines': 'true',
'swlat': swlat,
'swlng': swlng,
'nelat': nelat,
'nelng': nelng,
'pintypes': pintypes,
},
)
if r.status_code == 200:
response = json.loads(r.content.decode('utf-8'))
else:
raise requests.HTTPError('HTTP Status: {}'.format(r.status_code))
except requests.RequestException as e:
print('Failed to access DVB map pins app. Request Exception', e)
response = None
if response is None:
return None
return response if raw else [pins_return_results(line, pintypes) for line in response] | ['def', 'pins', '(', 'swlat', ',', 'swlng', ',', 'nelat', ',', 'nelng', ',', 'pintypes', '=', "'stop'", ',', '*', ',', 'raw', '=', 'False', ')', ':', 'try', ':', 'swlat', ',', 'swlng', '=', 'wgs_to_gk4', '(', 'swlat', ',', 'swlng', ')', 'nelat', ',', 'nelng', '=', 'wgs_to_gk4', '(', 'nelat', ',', 'nelng', ')', 'r', '=', 'requests', '.', 'get', '(', 'url', '=', "'https://www.dvb.de/apps/map/pins'", ',', 'params', '=', '{', "'showlines'", ':', "'true'", ',', "'swlat'", ':', 'swlat', ',', "'swlng'", ':', 'swlng', ',', "'nelat'", ':', 'nelat', ',', "'nelng'", ':', 'nelng', ',', "'pintypes'", ':', 'pintypes', ',', '}', ',', ')', 'if', 'r', '.', 'status_code', '==', '200', ':', 'response', '=', 'json', '.', 'loads', '(', 'r', '.', 'content', '.', 'decode', '(', "'utf-8'", ')', ')', 'else', ':', 'raise', 'requests', '.', 'HTTPError', '(', "'HTTP Status: {}'", '.', 'format', '(', 'r', '.', 'status_code', ')', ')', 'except', 'requests', '.', 'RequestException', 'as', 'e', ':', 'print', '(', "'Failed to access DVB map pins app. Request Exception'", ',', 'e', ')', 'response', '=', 'None', 'if', 'response', 'is', 'None', ':', 'return', 'None', 'return', 'response', 'if', 'raw', 'else', '[', 'pins_return_results', '(', 'line', ',', 'pintypes', ')', 'for', 'line', 'in', 'response', ']'] | DVB Map Pins
(GET https://www.dvb.de/apps/map/pins)
:param swlat: South-West Bounding Box Latitude
:param swlng: South-West Bounding Box Longitude
:param nelat: North-East Bounding Box Latitude
:param nelng: North-East Bounding Box Longitude
:param pintypes: Types to search for, defaults to 'stop'
:param raw: Return raw response
:return: | ['DVB', 'Map', 'Pins', '(', 'GET', 'https', ':', '//', 'www', '.', 'dvb', '.', 'de', '/', 'apps', '/', 'map', '/', 'pins', ')'] | train | https://github.com/kiliankoe/dvbpy/blob/d499706ae56386d680f78975d3512d56f848e9dc/dvb/dvb.py#L304-L342 |
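A usage sketch with a small WGS84 bounding box (coordinates roughly around central Dresden, purely illustrative), assuming pins is re-exported at the package level.

import dvb

# south-west and north-east corners of the search box (WGS84 lat/lng)
stops = dvb.pins(51.025, 13.722, 51.035, 13.745, pintypes='stop')
if stops is not None:
    for stop in stops[:5]:
        print(stop)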
2,880 | rtfd/readthedocs-sphinx-ext | readthedocs_ext/readthedocs.py | HtmlBuilderMixin._copy_searchtools | def _copy_searchtools(self, renderer=None):
"""Copy and patch searchtools
This uses the included Sphinx version's searchtools, but patches it to
remove automatic initialization. This is a fork of
``sphinx.util.fileutil.copy_asset``
"""
log.info(bold('copying searchtools... '), nonl=True)
if sphinx.version_info < (1, 8):
search_js_file = 'searchtools.js_t'
else:
search_js_file = 'searchtools.js'
path_src = os.path.join(
package_dir, 'themes', 'basic', 'static', search_js_file
)
if os.path.exists(path_src):
path_dest = os.path.join(self.outdir, '_static', 'searchtools.js')
if renderer is None:
# Sphinx 1.4 used the renderer from the existing builder, but
# the pattern for Sphinx 1.5 is to pass in a renderer separate
# from the builder. This supports both patterns for future
# compatibility
if sphinx.version_info < (1, 5):
renderer = self.templates
else:
from sphinx.util.template import SphinxRenderer
renderer = SphinxRenderer()
with codecs.open(path_src, 'r', encoding='utf-8') as h_src:
with codecs.open(path_dest, 'w', encoding='utf-8') as h_dest:
data = h_src.read()
data = self.REPLACEMENT_PATTERN.sub(self.REPLACEMENT_TEXT, data)
h_dest.write(renderer.render_string(
data,
self.get_static_readthedocs_context()
))
else:
log.warning('Missing {}'.format(search_js_file))
log.info('done') | python | def _copy_searchtools(self, renderer=None):
"""Copy and patch searchtools
This uses the included Sphinx version's searchtools, but patches it to
remove automatic initialization. This is a fork of
``sphinx.util.fileutil.copy_asset``
"""
log.info(bold('copying searchtools... '), nonl=True)
if sphinx.version_info < (1, 8):
search_js_file = 'searchtools.js_t'
else:
search_js_file = 'searchtools.js'
path_src = os.path.join(
package_dir, 'themes', 'basic', 'static', search_js_file
)
if os.path.exists(path_src):
path_dest = os.path.join(self.outdir, '_static', 'searchtools.js')
if renderer is None:
# Sphinx 1.4 used the renderer from the existing builder, but
# the pattern for Sphinx 1.5 is to pass in a renderer separate
# from the builder. This supports both patterns for future
# compatibility
if sphinx.version_info < (1, 5):
renderer = self.templates
else:
from sphinx.util.template import SphinxRenderer
renderer = SphinxRenderer()
with codecs.open(path_src, 'r', encoding='utf-8') as h_src:
with codecs.open(path_dest, 'w', encoding='utf-8') as h_dest:
data = h_src.read()
data = self.REPLACEMENT_PATTERN.sub(self.REPLACEMENT_TEXT, data)
h_dest.write(renderer.render_string(
data,
self.get_static_readthedocs_context()
))
else:
log.warning('Missing {}'.format(search_js_file))
log.info('done') | ['def', '_copy_searchtools', '(', 'self', ',', 'renderer', '=', 'None', ')', ':', 'log', '.', 'info', '(', 'bold', '(', "'copying searchtools... '", ')', ',', 'nonl', '=', 'True', ')', 'if', 'sphinx', '.', 'version_info', '<', '(', '1', ',', '8', ')', ':', 'search_js_file', '=', "'searchtools.js_t'", 'else', ':', 'search_js_file', '=', "'searchtools.js'", 'path_src', '=', 'os', '.', 'path', '.', 'join', '(', 'package_dir', ',', "'themes'", ',', "'basic'", ',', "'static'", ',', 'search_js_file', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'path_src', ')', ':', 'path_dest', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'outdir', ',', "'_static'", ',', "'searchtools.js'", ')', 'if', 'renderer', 'is', 'None', ':', '# Sphinx 1.4 used the renderer from the existing builder, but', '# the pattern for Sphinx 1.5 is to pass in a renderer separate', '# from the builder. This supports both patterns for future', '# compatibility', 'if', 'sphinx', '.', 'version_info', '<', '(', '1', ',', '5', ')', ':', 'renderer', '=', 'self', '.', 'templates', 'else', ':', 'from', 'sphinx', '.', 'util', '.', 'template', 'import', 'SphinxRenderer', 'renderer', '=', 'SphinxRenderer', '(', ')', 'with', 'codecs', '.', 'open', '(', 'path_src', ',', "'r'", ',', 'encoding', '=', "'utf-8'", ')', 'as', 'h_src', ':', 'with', 'codecs', '.', 'open', '(', 'path_dest', ',', "'w'", ',', 'encoding', '=', "'utf-8'", ')', 'as', 'h_dest', ':', 'data', '=', 'h_src', '.', 'read', '(', ')', 'data', '=', 'self', '.', 'REPLACEMENT_PATTERN', '.', 'sub', '(', 'self', '.', 'REPLACEMENT_TEXT', ',', 'data', ')', 'h_dest', '.', 'write', '(', 'renderer', '.', 'render_string', '(', 'data', ',', 'self', '.', 'get_static_readthedocs_context', '(', ')', ')', ')', 'else', ':', 'log', '.', 'warning', '(', "'Missing {}'", '.', 'format', '(', 'search_js_file', ')', ')', 'log', '.', 'info', '(', "'done'", ')'] | Copy and patch searchtools
This uses the included Sphinx version's searchtools, but patches it to
remove automatic initialization. This is a fork of
``sphinx.util.fileutil.copy_asset`` | ['Copy', 'and', 'patch', 'searchtools'] | train | https://github.com/rtfd/readthedocs-sphinx-ext/blob/f1a01c51c675d36ac365162ea06814544c2aa410/readthedocs_ext/readthedocs.py#L209-L247 |
2,881 | odlgroup/odl | odl/space/npy_tensors.py | _lincomb_impl | def _lincomb_impl(a, x1, b, x2, out):
"""Optimized implementation of ``out[:] = a * x1 + b * x2``."""
# Lazy import to improve `import odl` time
import scipy.linalg
size = native(x1.size)
if size < THRESHOLD_SMALL:
# Faster for small arrays
out.data[:] = a * x1.data + b * x2.data
return
elif (size < THRESHOLD_MEDIUM or
not _blas_is_applicable(x1.data, x2.data, out.data)):
def fallback_axpy(x1, x2, n, a):
"""Fallback axpy implementation avoiding copy."""
if a != 0:
x2 /= a
x2 += x1
x2 *= a
return x2
def fallback_scal(a, x, n):
"""Fallback scal implementation."""
x *= a
return x
def fallback_copy(x1, x2, n):
"""Fallback copy implementation."""
x2[...] = x1[...]
return x2
axpy, scal, copy = (fallback_axpy, fallback_scal, fallback_copy)
x1_arr = x1.data
x2_arr = x2.data
out_arr = out.data
else:
# Need flat data for BLAS, otherwise in-place does not work.
# Raveling must happen in fixed order for non-contiguous out,
# otherwise 'A' is applied to arrays, which makes the outcome
# dependent on their respective contiguousness.
if out.data.flags.f_contiguous:
ravel_order = 'F'
else:
ravel_order = 'C'
x1_arr = x1.data.ravel(order=ravel_order)
x2_arr = x2.data.ravel(order=ravel_order)
out_arr = out.data.ravel(order=ravel_order)
axpy, scal, copy = scipy.linalg.blas.get_blas_funcs(
['axpy', 'scal', 'copy'], arrays=(x1_arr, x2_arr, out_arr))
if x1 is x2 and b != 0:
# x1 is aligned with x2 -> out = (a+b)*x1
_lincomb_impl(a + b, x1, 0, x1, out)
elif out is x1 and out is x2:
# All the vectors are aligned -> out = (a+b)*out
if (a + b) != 0:
scal(a + b, out_arr, size)
else:
out_arr[:] = 0
elif out is x1:
# out is aligned with x1 -> out = a*out + b*x2
if a != 1:
scal(a, out_arr, size)
if b != 0:
axpy(x2_arr, out_arr, size, b)
elif out is x2:
# out is aligned with x2 -> out = a*x1 + b*out
if b != 1:
scal(b, out_arr, size)
if a != 0:
axpy(x1_arr, out_arr, size, a)
else:
# We have exhausted all alignment options, so x1 is not x2 is not out
# We now optimize for various values of a and b
if b == 0:
if a == 0: # Zero assignment -> out = 0
out_arr[:] = 0
else: # Scaled copy -> out = a*x1
copy(x1_arr, out_arr, size)
if a != 1:
scal(a, out_arr, size)
else: # b != 0
if a == 0: # Scaled copy -> out = b*x2
copy(x2_arr, out_arr, size)
if b != 1:
scal(b, out_arr, size)
elif a == 1: # No scaling in x1 -> out = x1 + b*x2
copy(x1_arr, out_arr, size)
axpy(x2_arr, out_arr, size, b)
else: # Generic case -> out = a*x1 + b*x2
copy(x2_arr, out_arr, size)
if b != 1:
scal(b, out_arr, size)
axpy(x1_arr, out_arr, size, a) | python | def _lincomb_impl(a, x1, b, x2, out):
"""Optimized implementation of ``out[:] = a * x1 + b * x2``."""
# Lazy import to improve `import odl` time
import scipy.linalg
size = native(x1.size)
if size < THRESHOLD_SMALL:
# Faster for small arrays
out.data[:] = a * x1.data + b * x2.data
return
elif (size < THRESHOLD_MEDIUM or
not _blas_is_applicable(x1.data, x2.data, out.data)):
def fallback_axpy(x1, x2, n, a):
"""Fallback axpy implementation avoiding copy."""
if a != 0:
x2 /= a
x2 += x1
x2 *= a
return x2
def fallback_scal(a, x, n):
"""Fallback scal implementation."""
x *= a
return x
def fallback_copy(x1, x2, n):
"""Fallback copy implementation."""
x2[...] = x1[...]
return x2
axpy, scal, copy = (fallback_axpy, fallback_scal, fallback_copy)
x1_arr = x1.data
x2_arr = x2.data
out_arr = out.data
else:
# Need flat data for BLAS, otherwise in-place does not work.
# Raveling must happen in fixed order for non-contiguous out,
# otherwise 'A' is applied to arrays, which makes the outcome
# dependent on their respective contiguousness.
if out.data.flags.f_contiguous:
ravel_order = 'F'
else:
ravel_order = 'C'
x1_arr = x1.data.ravel(order=ravel_order)
x2_arr = x2.data.ravel(order=ravel_order)
out_arr = out.data.ravel(order=ravel_order)
axpy, scal, copy = scipy.linalg.blas.get_blas_funcs(
['axpy', 'scal', 'copy'], arrays=(x1_arr, x2_arr, out_arr))
if x1 is x2 and b != 0:
# x1 is aligned with x2 -> out = (a+b)*x1
_lincomb_impl(a + b, x1, 0, x1, out)
elif out is x1 and out is x2:
# All the vectors are aligned -> out = (a+b)*out
if (a + b) != 0:
scal(a + b, out_arr, size)
else:
out_arr[:] = 0
elif out is x1:
# out is aligned with x1 -> out = a*out + b*x2
if a != 1:
scal(a, out_arr, size)
if b != 0:
axpy(x2_arr, out_arr, size, b)
elif out is x2:
# out is aligned with x2 -> out = a*x1 + b*out
if b != 1:
scal(b, out_arr, size)
if a != 0:
axpy(x1_arr, out_arr, size, a)
else:
# We have exhausted all alignment options, so x1 is not x2 is not out
# We now optimize for various values of a and b
if b == 0:
if a == 0: # Zero assignment -> out = 0
out_arr[:] = 0
else: # Scaled copy -> out = a*x1
copy(x1_arr, out_arr, size)
if a != 1:
scal(a, out_arr, size)
else: # b != 0
if a == 0: # Scaled copy -> out = b*x2
copy(x2_arr, out_arr, size)
if b != 1:
scal(b, out_arr, size)
elif a == 1: # No scaling in x1 -> out = x1 + b*x2
copy(x1_arr, out_arr, size)
axpy(x2_arr, out_arr, size, b)
else: # Generic case -> out = a*x1 + b*x2
copy(x2_arr, out_arr, size)
if b != 1:
scal(b, out_arr, size)
axpy(x1_arr, out_arr, size, a) | ['def', '_lincomb_impl', '(', 'a', ',', 'x1', ',', 'b', ',', 'x2', ',', 'out', ')', ':', '# Lazy import to improve `import odl` time', 'import', 'scipy', '.', 'linalg', 'size', '=', 'native', '(', 'x1', '.', 'size', ')', 'if', 'size', '<', 'THRESHOLD_SMALL', ':', '# Faster for small arrays', 'out', '.', 'data', '[', ':', ']', '=', 'a', '*', 'x1', '.', 'data', '+', 'b', '*', 'x2', '.', 'data', 'return', 'elif', '(', 'size', '<', 'THRESHOLD_MEDIUM', 'or', 'not', '_blas_is_applicable', '(', 'x1', '.', 'data', ',', 'x2', '.', 'data', ',', 'out', '.', 'data', ')', ')', ':', 'def', 'fallback_axpy', '(', 'x1', ',', 'x2', ',', 'n', ',', 'a', ')', ':', '"""Fallback axpy implementation avoiding copy."""', 'if', 'a', '!=', '0', ':', 'x2', '/=', 'a', 'x2', '+=', 'x1', 'x2', '*=', 'a', 'return', 'x2', 'def', 'fallback_scal', '(', 'a', ',', 'x', ',', 'n', ')', ':', '"""Fallback scal implementation."""', 'x', '*=', 'a', 'return', 'x', 'def', 'fallback_copy', '(', 'x1', ',', 'x2', ',', 'n', ')', ':', '"""Fallback copy implementation."""', 'x2', '[', '...', ']', '=', 'x1', '[', '...', ']', 'return', 'x2', 'axpy', ',', 'scal', ',', 'copy', '=', '(', 'fallback_axpy', ',', 'fallback_scal', ',', 'fallback_copy', ')', 'x1_arr', '=', 'x1', '.', 'data', 'x2_arr', '=', 'x2', '.', 'data', 'out_arr', '=', 'out', '.', 'data', 'else', ':', '# Need flat data for BLAS, otherwise in-place does not work.', '# Raveling must happen in fixed order for non-contiguous out,', "# otherwise 'A' is applied to arrays, which makes the outcome", '# dependent on their respective contiguousness.', 'if', 'out', '.', 'data', '.', 'flags', '.', 'f_contiguous', ':', 'ravel_order', '=', "'F'", 'else', ':', 'ravel_order', '=', "'C'", 'x1_arr', '=', 'x1', '.', 'data', '.', 'ravel', '(', 'order', '=', 'ravel_order', ')', 'x2_arr', '=', 'x2', '.', 'data', '.', 'ravel', '(', 'order', '=', 'ravel_order', ')', 'out_arr', '=', 'out', '.', 'data', '.', 'ravel', '(', 'order', '=', 'ravel_order', ')', 'axpy', ',', 'scal', ',', 'copy', '=', 'scipy', '.', 'linalg', '.', 'blas', '.', 'get_blas_funcs', '(', '[', "'axpy'", ',', "'scal'", ',', "'copy'", ']', ',', 'arrays', '=', '(', 'x1_arr', ',', 'x2_arr', ',', 'out_arr', ')', ')', 'if', 'x1', 'is', 'x2', 'and', 'b', '!=', '0', ':', '# x1 is aligned with x2 -> out = (a+b)*x1', '_lincomb_impl', '(', 'a', '+', 'b', ',', 'x1', ',', '0', ',', 'x1', ',', 'out', ')', 'elif', 'out', 'is', 'x1', 'and', 'out', 'is', 'x2', ':', '# All the vectors are aligned -> out = (a+b)*out', 'if', '(', 'a', '+', 'b', ')', '!=', '0', ':', 'scal', '(', 'a', '+', 'b', ',', 'out_arr', ',', 'size', ')', 'else', ':', 'out_arr', '[', ':', ']', '=', '0', 'elif', 'out', 'is', 'x1', ':', '# out is aligned with x1 -> out = a*out + b*x2', 'if', 'a', '!=', '1', ':', 'scal', '(', 'a', ',', 'out_arr', ',', 'size', ')', 'if', 'b', '!=', '0', ':', 'axpy', '(', 'x2_arr', ',', 'out_arr', ',', 'size', ',', 'b', ')', 'elif', 'out', 'is', 'x2', ':', '# out is aligned with x2 -> out = a*x1 + b*out', 'if', 'b', '!=', '1', ':', 'scal', '(', 'b', ',', 'out_arr', ',', 'size', ')', 'if', 'a', '!=', '0', ':', 'axpy', '(', 'x1_arr', ',', 'out_arr', ',', 'size', ',', 'a', ')', 'else', ':', '# We have exhausted all alignment options, so x1 is not x2 is not out', '# We now optimize for various values of a and b', 'if', 'b', '==', '0', ':', 'if', 'a', '==', '0', ':', '# Zero assignment -> out = 0', 'out_arr', '[', ':', ']', '=', '0', 'else', ':', '# Scaled copy -> out = a*x1', 'copy', '(', 'x1_arr', ',', 'out_arr', ',', 'size', 
')', 'if', 'a', '!=', '1', ':', 'scal', '(', 'a', ',', 'out_arr', ',', 'size', ')', 'else', ':', '# b != 0', 'if', 'a', '==', '0', ':', '# Scaled copy -> out = b*x2', 'copy', '(', 'x2_arr', ',', 'out_arr', ',', 'size', ')', 'if', 'b', '!=', '1', ':', 'scal', '(', 'b', ',', 'out_arr', ',', 'size', ')', 'elif', 'a', '==', '1', ':', '# No scaling in x1 -> out = x1 + b*x2', 'copy', '(', 'x1_arr', ',', 'out_arr', ',', 'size', ')', 'axpy', '(', 'x2_arr', ',', 'out_arr', ',', 'size', ',', 'b', ')', 'else', ':', '# Generic case -> out = a*x1 + b*x2', 'copy', '(', 'x2_arr', ',', 'out_arr', ',', 'size', ')', 'if', 'b', '!=', '1', ':', 'scal', '(', 'b', ',', 'out_arr', ',', 'size', ')', 'axpy', '(', 'x1_arr', ',', 'out_arr', ',', 'size', ',', 'a', ')'] | Optimized implementation of ``out[:] = a * x1 + b * x2``. | ['Optimized', 'implementation', 'of', 'out', '[', ':', ']', '=', 'a', '*', 'x1', '+', 'b', '*', 'x2', '.'] | train | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/npy_tensors.py#L1802-L1901 |
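The helper above backs ODL's public lincomb entry point; a minimal sketch of the user-facing call it ultimately serves, using a small rn space.

import odl

space = odl.rn(5)
x = space.element([1, 2, 3, 4, 5])
y = space.one()
out = space.element()
space.lincomb(2.0, x, -1.0, y, out=out)   # out <- 2*x - y
print(out)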
2,882 | berkerpeksag/astor | astor/rtrip.py | convert | def convert(srctree, dsttree=dsttree, readonly=False, dumpall=False,
ignore_exceptions=False, fullcomp=False):
"""Walk the srctree, and convert/copy all python files
into the dsttree
"""
if fullcomp:
allow_ast_comparison()
parse_file = code_to_ast.parse_file
find_py_files = code_to_ast.find_py_files
srctree = os.path.normpath(srctree)
if not readonly:
dsttree = os.path.normpath(dsttree)
logging.info('')
logging.info('Trashing ' + dsttree)
shutil.rmtree(dsttree, True)
unknown_src_nodes = set()
unknown_dst_nodes = set()
badfiles = set()
broken = []
oldpath = None
allfiles = find_py_files(srctree, None if readonly else dsttree)
for srcpath, fname in allfiles:
# Create destination directory
if not readonly and srcpath != oldpath:
oldpath = srcpath
if srcpath >= srctree:
dstpath = srcpath.replace(srctree, dsttree, 1)
if not dstpath.startswith(dsttree):
raise ValueError("%s not a subdirectory of %s" %
(dstpath, dsttree))
else:
assert srctree.startswith(srcpath)
dstpath = dsttree
os.makedirs(dstpath)
srcfname = os.path.join(srcpath, fname)
logging.info('Converting %s' % srcfname)
try:
srcast = parse_file(srcfname)
except SyntaxError:
badfiles.add(srcfname)
continue
try:
dsttxt = to_source(srcast)
except:
if not ignore_exceptions:
raise
dsttxt = ''
if not readonly:
dstfname = os.path.join(dstpath, fname)
try:
with open(dstfname, 'wb') as f:
f.write(out_prep(dsttxt))
except UnicodeEncodeError:
badfiles.add(dstfname)
# As a sanity check, make sure that ASTs themselves
# round-trip OK
try:
dstast = ast.parse(dsttxt) if readonly else parse_file(dstfname)
except SyntaxError:
dstast = []
if fullcomp:
unknown_src_nodes.update(strip_tree(srcast))
unknown_dst_nodes.update(strip_tree(dstast))
bad = srcast != dstast
else:
bad = not fast_compare(srcast, dstast)
if dumpall or bad:
srcdump = dump_tree(srcast)
dstdump = dump_tree(dstast)
logging.warning(' calculating dump -- %s' %
('bad' if bad else 'OK'))
if bad:
broken.append(srcfname)
if dumpall or bad:
if not readonly:
try:
with open(dstfname[:-3] + '.srcdmp', 'wb') as f:
f.write(out_prep(srcdump))
except UnicodeEncodeError:
badfiles.add(dstfname[:-3] + '.srcdmp')
try:
with open(dstfname[:-3] + '.dstdmp', 'wb') as f:
f.write(out_prep(dstdump))
except UnicodeEncodeError:
badfiles.add(dstfname[:-3] + '.dstdmp')
elif dumpall:
sys.stdout.write('\n\nAST:\n\n ')
sys.stdout.write(srcdump.replace('\n', '\n '))
sys.stdout.write('\n\nDecompile:\n\n ')
sys.stdout.write(dsttxt.replace('\n', '\n '))
sys.stdout.write('\n\nNew AST:\n\n ')
sys.stdout.write('(same as old)' if dstdump == srcdump
else dstdump.replace('\n', '\n '))
sys.stdout.write('\n')
if badfiles:
logging.warning('\nFiles not processed due to syntax errors:')
for fname in sorted(badfiles):
logging.warning(' %s' % fname)
if broken:
logging.warning('\nFiles failed to round-trip to AST:')
for srcfname in broken:
logging.warning(' %s' % srcfname)
ok_to_strip = 'col_offset _precedence _use_parens lineno _p_op _pp'
ok_to_strip = set(ok_to_strip.split())
bad_nodes = (unknown_dst_nodes | unknown_src_nodes) - ok_to_strip
if bad_nodes:
logging.error('\nERROR -- UNKNOWN NODES STRIPPED: %s' % bad_nodes)
logging.info('\n')
return broken | python | def convert(srctree, dsttree=dsttree, readonly=False, dumpall=False,
ignore_exceptions=False, fullcomp=False):
"""Walk the srctree, and convert/copy all python files
into the dsttree
"""
if fullcomp:
allow_ast_comparison()
parse_file = code_to_ast.parse_file
find_py_files = code_to_ast.find_py_files
srctree = os.path.normpath(srctree)
if not readonly:
dsttree = os.path.normpath(dsttree)
logging.info('')
logging.info('Trashing ' + dsttree)
shutil.rmtree(dsttree, True)
unknown_src_nodes = set()
unknown_dst_nodes = set()
badfiles = set()
broken = []
oldpath = None
allfiles = find_py_files(srctree, None if readonly else dsttree)
for srcpath, fname in allfiles:
# Create destination directory
if not readonly and srcpath != oldpath:
oldpath = srcpath
if srcpath >= srctree:
dstpath = srcpath.replace(srctree, dsttree, 1)
if not dstpath.startswith(dsttree):
raise ValueError("%s not a subdirectory of %s" %
(dstpath, dsttree))
else:
assert srctree.startswith(srcpath)
dstpath = dsttree
os.makedirs(dstpath)
srcfname = os.path.join(srcpath, fname)
logging.info('Converting %s' % srcfname)
try:
srcast = parse_file(srcfname)
except SyntaxError:
badfiles.add(srcfname)
continue
try:
dsttxt = to_source(srcast)
except:
if not ignore_exceptions:
raise
dsttxt = ''
if not readonly:
dstfname = os.path.join(dstpath, fname)
try:
with open(dstfname, 'wb') as f:
f.write(out_prep(dsttxt))
except UnicodeEncodeError:
badfiles.add(dstfname)
# As a sanity check, make sure that ASTs themselves
# round-trip OK
try:
dstast = ast.parse(dsttxt) if readonly else parse_file(dstfname)
except SyntaxError:
dstast = []
if fullcomp:
unknown_src_nodes.update(strip_tree(srcast))
unknown_dst_nodes.update(strip_tree(dstast))
bad = srcast != dstast
else:
bad = not fast_compare(srcast, dstast)
if dumpall or bad:
srcdump = dump_tree(srcast)
dstdump = dump_tree(dstast)
logging.warning(' calculating dump -- %s' %
('bad' if bad else 'OK'))
if bad:
broken.append(srcfname)
if dumpall or bad:
if not readonly:
try:
with open(dstfname[:-3] + '.srcdmp', 'wb') as f:
f.write(out_prep(srcdump))
except UnicodeEncodeError:
badfiles.add(dstfname[:-3] + '.srcdmp')
try:
with open(dstfname[:-3] + '.dstdmp', 'wb') as f:
f.write(out_prep(dstdump))
except UnicodeEncodeError:
badfiles.add(dstfname[:-3] + '.dstdmp')
elif dumpall:
sys.stdout.write('\n\nAST:\n\n ')
sys.stdout.write(srcdump.replace('\n', '\n '))
sys.stdout.write('\n\nDecompile:\n\n ')
sys.stdout.write(dsttxt.replace('\n', '\n '))
sys.stdout.write('\n\nNew AST:\n\n ')
sys.stdout.write('(same as old)' if dstdump == srcdump
else dstdump.replace('\n', '\n '))
sys.stdout.write('\n')
if badfiles:
logging.warning('\nFiles not processed due to syntax errors:')
for fname in sorted(badfiles):
logging.warning(' %s' % fname)
if broken:
logging.warning('\nFiles failed to round-trip to AST:')
for srcfname in broken:
logging.warning(' %s' % srcfname)
ok_to_strip = 'col_offset _precedence _use_parens lineno _p_op _pp'
ok_to_strip = set(ok_to_strip.split())
bad_nodes = (unknown_dst_nodes | unknown_src_nodes) - ok_to_strip
if bad_nodes:
logging.error('\nERROR -- UNKNOWN NODES STRIPPED: %s' % bad_nodes)
logging.info('\n')
return broken | ['def', 'convert', '(', 'srctree', ',', 'dsttree', '=', 'dsttree', ',', 'readonly', '=', 'False', ',', 'dumpall', '=', 'False', ',', 'ignore_exceptions', '=', 'False', ',', 'fullcomp', '=', 'False', ')', ':', 'if', 'fullcomp', ':', 'allow_ast_comparison', '(', ')', 'parse_file', '=', 'code_to_ast', '.', 'parse_file', 'find_py_files', '=', 'code_to_ast', '.', 'find_py_files', 'srctree', '=', 'os', '.', 'path', '.', 'normpath', '(', 'srctree', ')', 'if', 'not', 'readonly', ':', 'dsttree', '=', 'os', '.', 'path', '.', 'normpath', '(', 'dsttree', ')', 'logging', '.', 'info', '(', "''", ')', 'logging', '.', 'info', '(', "'Trashing '", '+', 'dsttree', ')', 'shutil', '.', 'rmtree', '(', 'dsttree', ',', 'True', ')', 'unknown_src_nodes', '=', 'set', '(', ')', 'unknown_dst_nodes', '=', 'set', '(', ')', 'badfiles', '=', 'set', '(', ')', 'broken', '=', '[', ']', 'oldpath', '=', 'None', 'allfiles', '=', 'find_py_files', '(', 'srctree', ',', 'None', 'if', 'readonly', 'else', 'dsttree', ')', 'for', 'srcpath', ',', 'fname', 'in', 'allfiles', ':', '# Create destination directory', 'if', 'not', 'readonly', 'and', 'srcpath', '!=', 'oldpath', ':', 'oldpath', '=', 'srcpath', 'if', 'srcpath', '>=', 'srctree', ':', 'dstpath', '=', 'srcpath', '.', 'replace', '(', 'srctree', ',', 'dsttree', ',', '1', ')', 'if', 'not', 'dstpath', '.', 'startswith', '(', 'dsttree', ')', ':', 'raise', 'ValueError', '(', '"%s not a subdirectory of %s"', '%', '(', 'dstpath', ',', 'dsttree', ')', ')', 'else', ':', 'assert', 'srctree', '.', 'startswith', '(', 'srcpath', ')', 'dstpath', '=', 'dsttree', 'os', '.', 'makedirs', '(', 'dstpath', ')', 'srcfname', '=', 'os', '.', 'path', '.', 'join', '(', 'srcpath', ',', 'fname', ')', 'logging', '.', 'info', '(', "'Converting %s'", '%', 'srcfname', ')', 'try', ':', 'srcast', '=', 'parse_file', '(', 'srcfname', ')', 'except', 'SyntaxError', ':', 'badfiles', '.', 'add', '(', 'srcfname', ')', 'continue', 'try', ':', 'dsttxt', '=', 'to_source', '(', 'srcast', ')', 'except', ':', 'if', 'not', 'ignore_exceptions', ':', 'raise', 'dsttxt', '=', "''", 'if', 'not', 'readonly', ':', 'dstfname', '=', 'os', '.', 'path', '.', 'join', '(', 'dstpath', ',', 'fname', ')', 'try', ':', 'with', 'open', '(', 'dstfname', ',', "'wb'", ')', 'as', 'f', ':', 'f', '.', 'write', '(', 'out_prep', '(', 'dsttxt', ')', ')', 'except', 'UnicodeEncodeError', ':', 'badfiles', '.', 'add', '(', 'dstfname', ')', '# As a sanity check, make sure that ASTs themselves', '# round-trip OK', 'try', ':', 'dstast', '=', 'ast', '.', 'parse', '(', 'dsttxt', ')', 'if', 'readonly', 'else', 'parse_file', '(', 'dstfname', ')', 'except', 'SyntaxError', ':', 'dstast', '=', '[', ']', 'if', 'fullcomp', ':', 'unknown_src_nodes', '.', 'update', '(', 'strip_tree', '(', 'srcast', ')', ')', 'unknown_dst_nodes', '.', 'update', '(', 'strip_tree', '(', 'dstast', ')', ')', 'bad', '=', 'srcast', '!=', 'dstast', 'else', ':', 'bad', '=', 'not', 'fast_compare', '(', 'srcast', ',', 'dstast', ')', 'if', 'dumpall', 'or', 'bad', ':', 'srcdump', '=', 'dump_tree', '(', 'srcast', ')', 'dstdump', '=', 'dump_tree', '(', 'dstast', ')', 'logging', '.', 'warning', '(', "' calculating dump -- %s'", '%', '(', "'bad'", 'if', 'bad', 'else', "'OK'", ')', ')', 'if', 'bad', ':', 'broken', '.', 'append', '(', 'srcfname', ')', 'if', 'dumpall', 'or', 'bad', ':', 'if', 'not', 'readonly', ':', 'try', ':', 'with', 'open', '(', 'dstfname', '[', ':', '-', '3', ']', '+', "'.srcdmp'", ',', "'wb'", ')', 'as', 'f', ':', 'f', '.', 'write', '(', 'out_prep', '(', 'srcdump', ')', ')', 
'except', 'UnicodeEncodeError', ':', 'badfiles', '.', 'add', '(', 'dstfname', '[', ':', '-', '3', ']', '+', "'.srcdmp'", ')', 'try', ':', 'with', 'open', '(', 'dstfname', '[', ':', '-', '3', ']', '+', "'.dstdmp'", ',', "'wb'", ')', 'as', 'f', ':', 'f', '.', 'write', '(', 'out_prep', '(', 'dstdump', ')', ')', 'except', 'UnicodeEncodeError', ':', 'badfiles', '.', 'add', '(', 'dstfname', '[', ':', '-', '3', ']', '+', "'.dstdmp'", ')', 'elif', 'dumpall', ':', 'sys', '.', 'stdout', '.', 'write', '(', "'\\n\\nAST:\\n\\n '", ')', 'sys', '.', 'stdout', '.', 'write', '(', 'srcdump', '.', 'replace', '(', "'\\n'", ',', "'\\n '", ')', ')', 'sys', '.', 'stdout', '.', 'write', '(', "'\\n\\nDecompile:\\n\\n '", ')', 'sys', '.', 'stdout', '.', 'write', '(', 'dsttxt', '.', 'replace', '(', "'\\n'", ',', "'\\n '", ')', ')', 'sys', '.', 'stdout', '.', 'write', '(', "'\\n\\nNew AST:\\n\\n '", ')', 'sys', '.', 'stdout', '.', 'write', '(', "'(same as old)'", 'if', 'dstdump', '==', 'srcdump', 'else', 'dstdump', '.', 'replace', '(', "'\\n'", ',', "'\\n '", ')', ')', 'sys', '.', 'stdout', '.', 'write', '(', "'\\n'", ')', 'if', 'badfiles', ':', 'logging', '.', 'warning', '(', "'\\nFiles not processed due to syntax errors:'", ')', 'for', 'fname', 'in', 'sorted', '(', 'badfiles', ')', ':', 'logging', '.', 'warning', '(', "' %s'", '%', 'fname', ')', 'if', 'broken', ':', 'logging', '.', 'warning', '(', "'\\nFiles failed to round-trip to AST:'", ')', 'for', 'srcfname', 'in', 'broken', ':', 'logging', '.', 'warning', '(', "' %s'", '%', 'srcfname', ')', 'ok_to_strip', '=', "'col_offset _precedence _use_parens lineno _p_op _pp'", 'ok_to_strip', '=', 'set', '(', 'ok_to_strip', '.', 'split', '(', ')', ')', 'bad_nodes', '=', '(', 'unknown_dst_nodes', '|', 'unknown_src_nodes', ')', '-', 'ok_to_strip', 'if', 'bad_nodes', ':', 'logging', '.', 'error', '(', "'\\nERROR -- UNKNOWN NODES STRIPPED: %s'", '%', 'bad_nodes', ')', 'logging', '.', 'info', '(', "'\\n'", ')', 'return', 'broken'] | Walk the srctree, and convert/copy all python files
into the dsttree | ['Walk', 'the', 'srctree', 'and', 'convert', '/', 'copy', 'all', 'python', 'files', 'into', 'the', 'dsttree'] | train | https://github.com/berkerpeksag/astor/blob/d9e893eb49d9eb2e30779680f90cd632c30e0ba1/astor/rtrip.py#L32-L153 |
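A driver sketch for the round-trip checker above; paths are placeholders, and logging is enabled so the per-file progress messages are visible.

import logging

logging.basicConfig(level=logging.INFO)

# round-trip every .py file under ./src into /tmp/rtrip and report failures
broken = convert('src', '/tmp/rtrip', readonly=False, dumpall=False)
if broken:
    print('files that failed to round-trip:', broken)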
2,883 | saltstack/salt | salt/modules/virt.py | stop | def stop(name, **kwargs):
'''
Hard power down the virtual machine, this is equivalent to pulling the power.
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.stop <domain>
'''
conn = __get_conn(**kwargs)
ret = _get_domain(conn, name).destroy() == 0
conn.close()
return ret | python | def stop(name, **kwargs):
'''
Hard power down the virtual machine, this is equivalent to pulling the power.
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.stop <domain>
'''
conn = __get_conn(**kwargs)
ret = _get_domain(conn, name).destroy() == 0
conn.close()
return ret | ['def', 'stop', '(', 'name', ',', '*', '*', 'kwargs', ')', ':', 'conn', '=', '__get_conn', '(', '*', '*', 'kwargs', ')', 'ret', '=', '_get_domain', '(', 'conn', ',', 'name', ')', '.', 'destroy', '(', ')', '==', '0', 'conn', '.', 'close', '(', ')', 'return', 'ret'] | Hard power down the virtual machine, this is equivalent to pulling the power.
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.stop <domain> | ['Hard', 'power', 'down', 'the', 'virtual', 'machine', 'this', 'is', 'equivalent', 'to', 'pulling', 'the', 'power', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L2825-L2849 |
2,884 | humilis/humilis-lambdautils | lambdautils/state.py | _state_table_name | def _state_table_name(environment=None, layer=None, stage=None):
"""The name of the state table associated to a humilis deployment."""
if environment is None:
# For backwards compatiblity
environment = os.environ.get("HUMILIS_ENVIRONMENT")
if layer is None:
layer = os.environ.get("HUMILIS_LAYER")
if stage is None:
stage = os.environ.get("HUMILIS_STAGE")
if environment:
if stage:
return "{environment}-{layer}-{stage}-state".format(
**locals())
else:
return "{environment}-{layer}-state".format(**locals()) | python | def _state_table_name(environment=None, layer=None, stage=None):
"""The name of the state table associated to a humilis deployment."""
if environment is None:
# For backwards compatiblity
environment = os.environ.get("HUMILIS_ENVIRONMENT")
if layer is None:
layer = os.environ.get("HUMILIS_LAYER")
if stage is None:
stage = os.environ.get("HUMILIS_STAGE")
if environment:
if stage:
return "{environment}-{layer}-{stage}-state".format(
**locals())
else:
return "{environment}-{layer}-state".format(**locals()) | ['def', '_state_table_name', '(', 'environment', '=', 'None', ',', 'layer', '=', 'None', ',', 'stage', '=', 'None', ')', ':', 'if', 'environment', 'is', 'None', ':', '# For backwards compatiblity', 'environment', '=', 'os', '.', 'environ', '.', 'get', '(', '"HUMILIS_ENVIRONMENT"', ')', 'if', 'layer', 'is', 'None', ':', 'layer', '=', 'os', '.', 'environ', '.', 'get', '(', '"HUMILIS_LAYER"', ')', 'if', 'stage', 'is', 'None', ':', 'stage', '=', 'os', '.', 'environ', '.', 'get', '(', '"HUMILIS_STAGE"', ')', 'if', 'environment', ':', 'if', 'stage', ':', 'return', '"{environment}-{layer}-{stage}-state"', '.', 'format', '(', '*', '*', 'locals', '(', ')', ')', 'else', ':', 'return', '"{environment}-{layer}-state"', '.', 'format', '(', '*', '*', 'locals', '(', ')', ')'] | The name of the state table associated to a humilis deployment. | ['The', 'name', 'of', 'the', 'state', 'table', 'associated', 'to', 'a', 'humilis', 'deployment', '.'] | train | https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L47-L63 |
2,885 | hydraplatform/hydra-base | hydra_base/lib/sharing.py | share_network | def share_network(network_id, usernames, read_only, share,**kwargs):
"""
Share a network with a list of users, identified by their usernames.
The read_only flag ('Y' or 'N') must be set
to 'Y' to allow write access or sharing.
The share flat ('Y' or 'N') must be set to 'Y' to allow the
project to be shared with other users
"""
user_id = kwargs.get('user_id')
net_i = _get_network(network_id)
net_i.check_share_permission(user_id)
if read_only == 'Y':
write = 'N'
share = 'N'
else:
write = 'Y'
if net_i.created_by != int(user_id) and share == 'Y':
raise HydraError("Cannot share the 'sharing' ability as user %s is not"
" the owner of network %s"%
(user_id, network_id))
for username in usernames:
user_i = _get_user(username)
#Set the owner ship on the network itself
net_i.set_owner(user_i.id, write=write, share=share)
for o in net_i.project.owners:
if o.user_id == user_i.id:
break
else:
#Give the user read access to the containing project
net_i.project.set_owner(user_i.id, write='N', share='N')
db.DBSession.flush() | python | def share_network(network_id, usernames, read_only, share,**kwargs):
"""
Share a network with a list of users, identified by their usernames.
The read_only flag ('Y' or 'N') must be set
to 'Y' to allow write access or sharing.
The share flat ('Y' or 'N') must be set to 'Y' to allow the
project to be shared with other users
"""
user_id = kwargs.get('user_id')
net_i = _get_network(network_id)
net_i.check_share_permission(user_id)
if read_only == 'Y':
write = 'N'
share = 'N'
else:
write = 'Y'
if net_i.created_by != int(user_id) and share == 'Y':
raise HydraError("Cannot share the 'sharing' ability as user %s is not"
" the owner of network %s"%
(user_id, network_id))
for username in usernames:
user_i = _get_user(username)
#Set the owner ship on the network itself
net_i.set_owner(user_i.id, write=write, share=share)
for o in net_i.project.owners:
if o.user_id == user_i.id:
break
else:
#Give the user read access to the containing project
net_i.project.set_owner(user_i.id, write='N', share='N')
db.DBSession.flush() | ['def', 'share_network', '(', 'network_id', ',', 'usernames', ',', 'read_only', ',', 'share', ',', '*', '*', 'kwargs', ')', ':', 'user_id', '=', 'kwargs', '.', 'get', '(', "'user_id'", ')', 'net_i', '=', '_get_network', '(', 'network_id', ')', 'net_i', '.', 'check_share_permission', '(', 'user_id', ')', 'if', 'read_only', '==', "'Y'", ':', 'write', '=', "'N'", 'share', '=', "'N'", 'else', ':', 'write', '=', "'Y'", 'if', 'net_i', '.', 'created_by', '!=', 'int', '(', 'user_id', ')', 'and', 'share', '==', "'Y'", ':', 'raise', 'HydraError', '(', '"Cannot share the \'sharing\' ability as user %s is not"', '" the owner of network %s"', '%', '(', 'user_id', ',', 'network_id', ')', ')', 'for', 'username', 'in', 'usernames', ':', 'user_i', '=', '_get_user', '(', 'username', ')', '#Set the owner ship on the network itself', 'net_i', '.', 'set_owner', '(', 'user_i', '.', 'id', ',', 'write', '=', 'write', ',', 'share', '=', 'share', ')', 'for', 'o', 'in', 'net_i', '.', 'project', '.', 'owners', ':', 'if', 'o', '.', 'user_id', '==', 'user_i', '.', 'id', ':', 'break', 'else', ':', '#Give the user read access to the containing project', 'net_i', '.', 'project', '.', 'set_owner', '(', 'user_i', '.', 'id', ',', 'write', '=', "'N'", ',', 'share', '=', "'N'", ')', 'db', '.', 'DBSession', '.', 'flush', '(', ')'] | Share a network with a list of users, identified by their usernames.
The read_only flag ('Y' or 'N') must be set
to 'Y' to allow write access or sharing.
The share flat ('Y' or 'N') must be set to 'Y' to allow the
project to be shared with other users | ['Share', 'a', 'network', 'with', 'a', 'list', 'of', 'users', 'identified', 'by', 'their', 'usernames', '.'] | train | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/sharing.py#L57-L93 |
2,886 | helixyte/everest | everest/views/base.py | WarnAndResubmitUserMessageChecker.create_307_response | def create_307_response(self):
"""
Creates a 307 "Temporary Redirect" response including a HTTP Warning
header with code 299 that contains the user message received during
processing the request.
"""
request = get_current_request()
msg_mb = UserMessageMember(self.message)
coll = request.root['_messages']
coll.add(msg_mb)
# Figure out the new location URL.
qs = self.__get_new_query_string(request.query_string,
self.message.slug)
resubmit_url = "%s?%s" % (request.path_url, qs)
headers = [('Warning', '299 %s' % self.message.text),
# ('Content-Type', cnt_type),
]
http_exc = HttpWarningResubmit(location=resubmit_url,
detail=self.message.text,
headers=headers)
return request.get_response(http_exc) | python | def create_307_response(self):
"""
Creates a 307 "Temporary Redirect" response including a HTTP Warning
header with code 299 that contains the user message received during
processing the request.
"""
request = get_current_request()
msg_mb = UserMessageMember(self.message)
coll = request.root['_messages']
coll.add(msg_mb)
# Figure out the new location URL.
qs = self.__get_new_query_string(request.query_string,
self.message.slug)
resubmit_url = "%s?%s" % (request.path_url, qs)
headers = [('Warning', '299 %s' % self.message.text),
# ('Content-Type', cnt_type),
]
http_exc = HttpWarningResubmit(location=resubmit_url,
detail=self.message.text,
headers=headers)
return request.get_response(http_exc) | ['def', 'create_307_response', '(', 'self', ')', ':', 'request', '=', 'get_current_request', '(', ')', 'msg_mb', '=', 'UserMessageMember', '(', 'self', '.', 'message', ')', 'coll', '=', 'request', '.', 'root', '[', "'_messages'", ']', 'coll', '.', 'add', '(', 'msg_mb', ')', '# Figure out the new location URL.', 'qs', '=', 'self', '.', '__get_new_query_string', '(', 'request', '.', 'query_string', ',', 'self', '.', 'message', '.', 'slug', ')', 'resubmit_url', '=', '"%s?%s"', '%', '(', 'request', '.', 'path_url', ',', 'qs', ')', 'headers', '=', '[', '(', "'Warning'", ',', "'299 %s'", '%', 'self', '.', 'message', '.', 'text', ')', ',', "# ('Content-Type', cnt_type),", ']', 'http_exc', '=', 'HttpWarningResubmit', '(', 'location', '=', 'resubmit_url', ',', 'detail', '=', 'self', '.', 'message', '.', 'text', ',', 'headers', '=', 'headers', ')', 'return', 'request', '.', 'get_response', '(', 'http_exc', ')'] | Creates a 307 "Temporary Redirect" response including a HTTP Warning
header with code 299 that contains the user message received during
processing the request. | ['Creates', 'a', '307', 'Temporary', 'Redirect', 'response', 'including', 'a', 'HTTP', 'Warning', 'header', 'with', 'code', '299', 'that', 'contains', 'the', 'user', 'message', 'received', 'during', 'processing', 'the', 'request', '.'] | train | https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/views/base.py#L540-L560 |
2,887 | openstack/networking-cisco | networking_cisco/apps/saf/agent/vdp/dfa_vdp_mgr.py | VdpMgr.vdp_vlan_change_cb | def vdp_vlan_change_cb(self, port_uuid, lvid, vdp_vlan, fail_reason):
"""Callback function for updating the VDP VLAN in DB. """
LOG.info("Vlan change CB lvid %(lvid)s VDP %(vdp)s",
{'lvid': lvid, 'vdp': vdp_vlan})
self.update_vm_result(port_uuid, constants.RESULT_SUCCESS,
lvid=lvid, vdp_vlan=vdp_vlan,
fail_reason=fail_reason) | python | def vdp_vlan_change_cb(self, port_uuid, lvid, vdp_vlan, fail_reason):
"""Callback function for updating the VDP VLAN in DB. """
LOG.info("Vlan change CB lvid %(lvid)s VDP %(vdp)s",
{'lvid': lvid, 'vdp': vdp_vlan})
self.update_vm_result(port_uuid, constants.RESULT_SUCCESS,
lvid=lvid, vdp_vlan=vdp_vlan,
fail_reason=fail_reason) | ['def', 'vdp_vlan_change_cb', '(', 'self', ',', 'port_uuid', ',', 'lvid', ',', 'vdp_vlan', ',', 'fail_reason', ')', ':', 'LOG', '.', 'info', '(', '"Vlan change CB lvid %(lvid)s VDP %(vdp)s"', ',', '{', "'lvid'", ':', 'lvid', ',', "'vdp'", ':', 'vdp_vlan', '}', ')', 'self', '.', 'update_vm_result', '(', 'port_uuid', ',', 'constants', '.', 'RESULT_SUCCESS', ',', 'lvid', '=', 'lvid', ',', 'vdp_vlan', '=', 'vdp_vlan', ',', 'fail_reason', '=', 'fail_reason', ')'] | Callback function for updating the VDP VLAN in DB. | ['Callback', 'function', 'for', 'updating', 'the', 'VDP', 'VLAN', 'in', 'DB', '.'] | train | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/vdp/dfa_vdp_mgr.py#L200-L206 |
2,888 | aboSamoor/polyglot | polyglot/mapping/base.py | VocabularyBase.sanitize_words | def sanitize_words(self, words):
"""Guarantees that all textual symbols are unicode.
Note:
We do not convert numbers, only strings to unicode.
We assume that the strings are encoded in utf-8.
"""
_words = []
for w in words:
if isinstance(w, string_types) and not isinstance(w, unicode):
_words.append(unicode(w, encoding="utf-8"))
else:
_words.append(w)
return _words | python | def sanitize_words(self, words):
"""Guarantees that all textual symbols are unicode.
Note:
We do not convert numbers, only strings to unicode.
We assume that the strings are encoded in utf-8.
"""
_words = []
for w in words:
if isinstance(w, string_types) and not isinstance(w, unicode):
_words.append(unicode(w, encoding="utf-8"))
else:
_words.append(w)
return _words | ['def', 'sanitize_words', '(', 'self', ',', 'words', ')', ':', '_words', '=', '[', ']', 'for', 'w', 'in', 'words', ':', 'if', 'isinstance', '(', 'w', ',', 'string_types', ')', 'and', 'not', 'isinstance', '(', 'w', ',', 'unicode', ')', ':', '_words', '.', 'append', '(', 'unicode', '(', 'w', ',', 'encoding', '=', '"utf-8"', ')', ')', 'else', ':', '_words', '.', 'append', '(', 'w', ')', 'return', '_words'] | Guarantees that all textual symbols are unicode.
Note:
We do not convert numbers, only strings to unicode.
We assume that the strings are encoded in utf-8. | ['Guarantees', 'that', 'all', 'textual', 'symbols', 'are', 'unicode', '.'] | train | https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/mapping/base.py#L52-L65 |
2,889 | saltstack/salt | salt/utils/win_dacl.py | set_inheritance | def set_inheritance(obj_name, enabled, obj_type='file', clear=False):
'''
Enable or disable an objects inheritance.
Args:
obj_name (str):
The name of the object
enabled (bool):
True to enable inheritance, False to disable
obj_type (Optional[str]):
The type of object. Only three objects allow inheritance. Valid
objects are:
- file (default): This is a file or directory
- registry
- registry32 (for WOW64)
clear (Optional[bool]):
True to clear existing ACEs, False to keep existing ACEs.
Default is False
Returns:
bool: True if successful, otherwise an Error
Usage:
.. code-block:: python
salt.utils.win_dacl.set_inheritance('C:\\Temp', False)
'''
if obj_type not in ['file', 'registry', 'registry32']:
raise SaltInvocationError(
'obj_type called with incorrect parameter: {0}'.format(obj_name))
if clear:
obj_dacl = dacl(obj_type=obj_type)
else:
obj_dacl = dacl(obj_name, obj_type)
return obj_dacl.save(obj_name, not enabled) | python | def set_inheritance(obj_name, enabled, obj_type='file', clear=False):
'''
Enable or disable an objects inheritance.
Args:
obj_name (str):
The name of the object
enabled (bool):
True to enable inheritance, False to disable
obj_type (Optional[str]):
The type of object. Only three objects allow inheritance. Valid
objects are:
- file (default): This is a file or directory
- registry
- registry32 (for WOW64)
clear (Optional[bool]):
True to clear existing ACEs, False to keep existing ACEs.
Default is False
Returns:
bool: True if successful, otherwise an Error
Usage:
.. code-block:: python
salt.utils.win_dacl.set_inheritance('C:\\Temp', False)
'''
if obj_type not in ['file', 'registry', 'registry32']:
raise SaltInvocationError(
'obj_type called with incorrect parameter: {0}'.format(obj_name))
if clear:
obj_dacl = dacl(obj_type=obj_type)
else:
obj_dacl = dacl(obj_name, obj_type)
return obj_dacl.save(obj_name, not enabled) | ['def', 'set_inheritance', '(', 'obj_name', ',', 'enabled', ',', 'obj_type', '=', "'file'", ',', 'clear', '=', 'False', ')', ':', 'if', 'obj_type', 'not', 'in', '[', "'file'", ',', "'registry'", ',', "'registry32'", ']', ':', 'raise', 'SaltInvocationError', '(', "'obj_type called with incorrect parameter: {0}'", '.', 'format', '(', 'obj_name', ')', ')', 'if', 'clear', ':', 'obj_dacl', '=', 'dacl', '(', 'obj_type', '=', 'obj_type', ')', 'else', ':', 'obj_dacl', '=', 'dacl', '(', 'obj_name', ',', 'obj_type', ')', 'return', 'obj_dacl', '.', 'save', '(', 'obj_name', ',', 'not', 'enabled', ')'] | Enable or disable an objects inheritance.
Args:
obj_name (str):
The name of the object
enabled (bool):
True to enable inheritance, False to disable
obj_type (Optional[str]):
The type of object. Only three objects allow inheritance. Valid
objects are:
- file (default): This is a file or directory
- registry
- registry32 (for WOW64)
clear (Optional[bool]):
True to clear existing ACEs, False to keep existing ACEs.
Default is False
Returns:
bool: True if successful, otherwise an Error
Usage:
.. code-block:: python
salt.utils.win_dacl.set_inheritance('C:\\Temp', False) | ['Enable', 'or', 'disable', 'an', 'objects', 'inheritance', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_dacl.py#L1776-L1818 |
2,890 | gsi-upm/soil | soil/environment.py | Environment.save_state | def save_state(self):
'''
:DEPRECATED:
Periodically save the state of the environment and the agents.
'''
self._save_state()
while self.peek() != simpy.core.Infinity:
delay = max(self.peek() - self.now, self.interval)
utils.logger.debug('Step: {}'.format(self.now))
ev = self.event()
ev._ok = True
# Schedule the event with minimum priority so
# that it executes before all agents
self.schedule(ev, -999, delay)
yield ev
self._save_state() | python | def save_state(self):
'''
:DEPRECATED:
Periodically save the state of the environment and the agents.
'''
self._save_state()
while self.peek() != simpy.core.Infinity:
delay = max(self.peek() - self.now, self.interval)
utils.logger.debug('Step: {}'.format(self.now))
ev = self.event()
ev._ok = True
# Schedule the event with minimum priority so
# that it executes before all agents
self.schedule(ev, -999, delay)
yield ev
self._save_state() | ['def', 'save_state', '(', 'self', ')', ':', 'self', '.', '_save_state', '(', ')', 'while', 'self', '.', 'peek', '(', ')', '!=', 'simpy', '.', 'core', '.', 'Infinity', ':', 'delay', '=', 'max', '(', 'self', '.', 'peek', '(', ')', '-', 'self', '.', 'now', ',', 'self', '.', 'interval', ')', 'utils', '.', 'logger', '.', 'debug', '(', "'Step: {}'", '.', 'format', '(', 'self', '.', 'now', ')', ')', 'ev', '=', 'self', '.', 'event', '(', ')', 'ev', '.', '_ok', '=', 'True', '# Schedule the event with minimum priority so', '# that it executes before all agents', 'self', '.', 'schedule', '(', 'ev', ',', '-', '999', ',', 'delay', ')', 'yield', 'ev', 'self', '.', '_save_state', '(', ')'] | :DEPRECATED:
Periodically save the state of the environment and the agents. | [':', 'DEPRECATED', ':', 'Periodically', 'save', 'the', 'state', 'of', 'the', 'environment', 'and', 'the', 'agents', '.'] | train | https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/environment.py#L175-L190 |
2,891 | rohankapoorcom/zm-py | zoneminder/zm.py | ZoneMinder.get_monitors | def get_monitors(self) -> List[Monitor]:
"""Get a list of Monitors from the ZoneMinder API."""
raw_monitors = self._zm_request('get', ZoneMinder.MONITOR_URL)
if not raw_monitors:
_LOGGER.warning("Could not fetch monitors from ZoneMinder")
return []
monitors = []
for raw_result in raw_monitors['monitors']:
_LOGGER.debug("Initializing camera %s",
raw_result['Monitor']['Id'])
monitors.append(Monitor(self, raw_result))
return monitors | python | def get_monitors(self) -> List[Monitor]:
"""Get a list of Monitors from the ZoneMinder API."""
raw_monitors = self._zm_request('get', ZoneMinder.MONITOR_URL)
if not raw_monitors:
_LOGGER.warning("Could not fetch monitors from ZoneMinder")
return []
monitors = []
for raw_result in raw_monitors['monitors']:
_LOGGER.debug("Initializing camera %s",
raw_result['Monitor']['Id'])
monitors.append(Monitor(self, raw_result))
return monitors | ['def', 'get_monitors', '(', 'self', ')', '->', 'List', '[', 'Monitor', ']', ':', 'raw_monitors', '=', 'self', '.', '_zm_request', '(', "'get'", ',', 'ZoneMinder', '.', 'MONITOR_URL', ')', 'if', 'not', 'raw_monitors', ':', '_LOGGER', '.', 'warning', '(', '"Could not fetch monitors from ZoneMinder"', ')', 'return', '[', ']', 'monitors', '=', '[', ']', 'for', 'raw_result', 'in', 'raw_monitors', '[', "'monitors'", ']', ':', '_LOGGER', '.', 'debug', '(', '"Initializing camera %s"', ',', 'raw_result', '[', "'Monitor'", ']', '[', "'Id'", ']', ')', 'monitors', '.', 'append', '(', 'Monitor', '(', 'self', ',', 'raw_result', ')', ')', 'return', 'monitors'] | Get a list of Monitors from the ZoneMinder API. | ['Get', 'a', 'list', 'of', 'Monitors', 'from', 'the', 'ZoneMinder', 'API', '.'] | train | https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L102-L115 |
2,892 | twilio/twilio-python | twilio/rest/authy/v1/form.py | FormContext.fetch | def fetch(self):
"""
Fetch a FormInstance
:returns: Fetched FormInstance
:rtype: twilio.rest.authy.v1.form.FormInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return FormInstance(self._version, payload, form_type=self._solution['form_type'], ) | python | def fetch(self):
"""
Fetch a FormInstance
:returns: Fetched FormInstance
:rtype: twilio.rest.authy.v1.form.FormInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return FormInstance(self._version, payload, form_type=self._solution['form_type'], ) | ['def', 'fetch', '(', 'self', ')', ':', 'params', '=', 'values', '.', 'of', '(', '{', '}', ')', 'payload', '=', 'self', '.', '_version', '.', 'fetch', '(', "'GET'", ',', 'self', '.', '_uri', ',', 'params', '=', 'params', ',', ')', 'return', 'FormInstance', '(', 'self', '.', '_version', ',', 'payload', ',', 'form_type', '=', 'self', '.', '_solution', '[', "'form_type'", ']', ',', ')'] | Fetch a FormInstance
:returns: Fetched FormInstance
:rtype: twilio.rest.authy.v1.form.FormInstance | ['Fetch', 'a', 'FormInstance'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/authy/v1/form.py#L129-L144 |
2,893 | googlefonts/glyphsLib | Lib/glyphsLib/builder/custom_params.py | GlyphsObjectProxy.set_custom_value | def set_custom_value(self, key, value):
"""Set one custom parameter with the given value.
We assume that the list of custom parameters does not already contain
the given parameter so we only append.
"""
self._owner.customParameters.append(
self._glyphs_module.GSCustomParameter(name=key, value=value)
) | python | def set_custom_value(self, key, value):
"""Set one custom parameter with the given value.
We assume that the list of custom parameters does not already contain
the given parameter so we only append.
"""
self._owner.customParameters.append(
self._glyphs_module.GSCustomParameter(name=key, value=value)
) | ['def', 'set_custom_value', '(', 'self', ',', 'key', ',', 'value', ')', ':', 'self', '.', '_owner', '.', 'customParameters', '.', 'append', '(', 'self', '.', '_glyphs_module', '.', 'GSCustomParameter', '(', 'name', '=', 'key', ',', 'value', '=', 'value', ')', ')'] | Set one custom parameter with the given value.
We assume that the list of custom parameters does not already contain
the given parameter so we only append. | ['Set', 'one', 'custom', 'parameter', 'with', 'the', 'given', 'value', '.', 'We', 'assume', 'that', 'the', 'list', 'of', 'custom', 'parameters', 'does', 'not', 'already', 'contain', 'the', 'given', 'parameter', 'so', 'we', 'only', 'append', '.'] | train | https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/custom_params.py#L118-L125 |
2,894 | onyxfish/clan | clan/report.py | ReportCommand.html | def html(self, report, f):
"""
Write report data to an HTML file.
"""
env = Environment(loader=PackageLoader('clan', 'templates'))
template = env.get_template('report.html')
context = {
'report': report,
'GLOBAL_ARGUMENTS': GLOBAL_ARGUMENTS,
'field_definitions': self.field_definitions,
'format_comma': format_comma,
'format_duration': format_duration,
'format_percent': format_percent
}
f.write(template.render(**context).encode('utf-8')) | python | def html(self, report, f):
"""
Write report data to an HTML file.
"""
env = Environment(loader=PackageLoader('clan', 'templates'))
template = env.get_template('report.html')
context = {
'report': report,
'GLOBAL_ARGUMENTS': GLOBAL_ARGUMENTS,
'field_definitions': self.field_definitions,
'format_comma': format_comma,
'format_duration': format_duration,
'format_percent': format_percent
}
f.write(template.render(**context).encode('utf-8')) | ['def', 'html', '(', 'self', ',', 'report', ',', 'f', ')', ':', 'env', '=', 'Environment', '(', 'loader', '=', 'PackageLoader', '(', "'clan'", ',', "'templates'", ')', ')', 'template', '=', 'env', '.', 'get_template', '(', "'report.html'", ')', 'context', '=', '{', "'report'", ':', 'report', ',', "'GLOBAL_ARGUMENTS'", ':', 'GLOBAL_ARGUMENTS', ',', "'field_definitions'", ':', 'self', '.', 'field_definitions', ',', "'format_comma'", ':', 'format_comma', ',', "'format_duration'", ':', 'format_duration', ',', "'format_percent'", ':', 'format_percent', '}', 'f', '.', 'write', '(', 'template', '.', 'render', '(', '*', '*', 'context', ')', '.', 'encode', '(', "'utf-8'", ')', ')'] | Write report data to an HTML file. | ['Write', 'report', 'data', 'to', 'an', 'HTML', 'file', '.'] | train | https://github.com/onyxfish/clan/blob/415ddd027ea81013f2d62d75aec6da70703df49c/clan/report.py#L294-L311 |
2,895 | pantsbuild/pants | src/python/pants/build_graph/build_graph.py | BuildGraph.get_all_derivatives | def get_all_derivatives(self, address):
"""Get all targets derived directly or indirectly from the specified target.
Note that the specified target itself is not returned.
:API: public
"""
ret = []
direct = self.get_direct_derivatives(address)
ret.extend(direct)
for t in direct:
ret.extend(self.get_all_derivatives(t.address))
return ret | python | def get_all_derivatives(self, address):
"""Get all targets derived directly or indirectly from the specified target.
Note that the specified target itself is not returned.
:API: public
"""
ret = []
direct = self.get_direct_derivatives(address)
ret.extend(direct)
for t in direct:
ret.extend(self.get_all_derivatives(t.address))
return ret | ['def', 'get_all_derivatives', '(', 'self', ',', 'address', ')', ':', 'ret', '=', '[', ']', 'direct', '=', 'self', '.', 'get_direct_derivatives', '(', 'address', ')', 'ret', '.', 'extend', '(', 'direct', ')', 'for', 't', 'in', 'direct', ':', 'ret', '.', 'extend', '(', 'self', '.', 'get_all_derivatives', '(', 't', '.', 'address', ')', ')', 'return', 'ret'] | Get all targets derived directly or indirectly from the specified target.
Note that the specified target itself is not returned.
:API: public | ['Get', 'all', 'targets', 'derived', 'directly', 'or', 'indirectly', 'from', 'the', 'specified', 'target', '.'] | train | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/build_graph.py#L217-L229 |
2,896 | Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_1/release/release_client.py | ReleaseClient.update_release | def update_release(self, release, project, release_id):
"""UpdateRelease.
[Preview API] Update a complete release object.
:param :class:`<Release> <azure.devops.v5_1.release.models.Release>` release: Release object for update.
:param str project: Project ID or project name
:param int release_id: Id of the release to update.
:rtype: :class:`<Release> <azure.devops.v5_1.release.models.Release>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
content = self._serialize.body(release, 'Release')
response = self._send(http_method='PUT',
location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
version='5.1-preview.8',
route_values=route_values,
content=content)
return self._deserialize('Release', response) | python | def update_release(self, release, project, release_id):
"""UpdateRelease.
[Preview API] Update a complete release object.
:param :class:`<Release> <azure.devops.v5_1.release.models.Release>` release: Release object for update.
:param str project: Project ID or project name
:param int release_id: Id of the release to update.
:rtype: :class:`<Release> <azure.devops.v5_1.release.models.Release>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
content = self._serialize.body(release, 'Release')
response = self._send(http_method='PUT',
location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
version='5.1-preview.8',
route_values=route_values,
content=content)
return self._deserialize('Release', response) | ['def', 'update_release', '(', 'self', ',', 'release', ',', 'project', ',', 'release_id', ')', ':', 'route_values', '=', '{', '}', 'if', 'project', 'is', 'not', 'None', ':', 'route_values', '[', "'project'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'project'", ',', 'project', ',', "'str'", ')', 'if', 'release_id', 'is', 'not', 'None', ':', 'route_values', '[', "'releaseId'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'release_id'", ',', 'release_id', ',', "'int'", ')', 'content', '=', 'self', '.', '_serialize', '.', 'body', '(', 'release', ',', "'Release'", ')', 'response', '=', 'self', '.', '_send', '(', 'http_method', '=', "'PUT'", ',', 'location_id', '=', "'a166fde7-27ad-408e-ba75-703c2cc9d500'", ',', 'version', '=', "'5.1-preview.8'", ',', 'route_values', '=', 'route_values', ',', 'content', '=', 'content', ')', 'return', 'self', '.', '_deserialize', '(', "'Release'", ',', 'response', ')'] | UpdateRelease.
[Preview API] Update a complete release object.
:param :class:`<Release> <azure.devops.v5_1.release.models.Release>` release: Release object for update.
:param str project: Project ID or project name
:param int release_id: Id of the release to update.
:rtype: :class:`<Release> <azure.devops.v5_1.release.models.Release>` | ['UpdateRelease', '.', '[', 'Preview', 'API', ']', 'Update', 'a', 'complete', 'release', 'object', '.', ':', 'param', ':', 'class', ':', '<Release', '>', '<azure', '.', 'devops', '.', 'v5_1', '.', 'release', '.', 'models', '.', 'Release', '>', 'release', ':', 'Release', 'object', 'for', 'update', '.', ':', 'param', 'str', 'project', ':', 'Project', 'ID', 'or', 'project', 'name', ':', 'param', 'int', 'release_id', ':', 'Id', 'of', 'the', 'release', 'to', 'update', '.', ':', 'rtype', ':', ':', 'class', ':', '<Release', '>', '<azure', '.', 'devops', '.', 'v5_1', '.', 'release', '.', 'models', '.', 'Release', '>'] | train | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/release/release_client.py#L727-L746 |
2,897 | pypa/pipenv | pipenv/vendor/distlib/_backport/tarfile.py | TarFile.gettarinfo | def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes,
# Absolute paths are turned to relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
# The inode is added only if its valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo | python | def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes,
# Absolute paths are turned to relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
# The inode is added only if its valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo | ['def', 'gettarinfo', '(', 'self', ',', 'name', '=', 'None', ',', 'arcname', '=', 'None', ',', 'fileobj', '=', 'None', ')', ':', 'self', '.', '_check', '(', '"aw"', ')', '# When fileobj is given, replace name by', "# fileobj's real name.", 'if', 'fileobj', 'is', 'not', 'None', ':', 'name', '=', 'fileobj', '.', 'name', '# Building the name of the member in the archive.', '# Backward slashes are converted to forward slashes,', '# Absolute paths are turned to relative paths.', 'if', 'arcname', 'is', 'None', ':', 'arcname', '=', 'name', 'drv', ',', 'arcname', '=', 'os', '.', 'path', '.', 'splitdrive', '(', 'arcname', ')', 'arcname', '=', 'arcname', '.', 'replace', '(', 'os', '.', 'sep', ',', '"/"', ')', 'arcname', '=', 'arcname', '.', 'lstrip', '(', '"/"', ')', '# Now, fill the TarInfo object with', '# information specific for the file.', 'tarinfo', '=', 'self', '.', 'tarinfo', '(', ')', 'tarinfo', '.', 'tarfile', '=', 'self', '# Use os.stat or os.lstat, depending on platform', '# and if symlinks shall be resolved.', 'if', 'fileobj', 'is', 'None', ':', 'if', 'hasattr', '(', 'os', ',', '"lstat"', ')', 'and', 'not', 'self', '.', 'dereference', ':', 'statres', '=', 'os', '.', 'lstat', '(', 'name', ')', 'else', ':', 'statres', '=', 'os', '.', 'stat', '(', 'name', ')', 'else', ':', 'statres', '=', 'os', '.', 'fstat', '(', 'fileobj', '.', 'fileno', '(', ')', ')', 'linkname', '=', '""', 'stmd', '=', 'statres', '.', 'st_mode', 'if', 'stat', '.', 'S_ISREG', '(', 'stmd', ')', ':', 'inode', '=', '(', 'statres', '.', 'st_ino', ',', 'statres', '.', 'st_dev', ')', 'if', 'not', 'self', '.', 'dereference', 'and', 'statres', '.', 'st_nlink', '>', '1', 'and', 'inode', 'in', 'self', '.', 'inodes', 'and', 'arcname', '!=', 'self', '.', 'inodes', '[', 'inode', ']', ':', '# Is it a hardlink to an already', '# archived file?', 'type', '=', 'LNKTYPE', 'linkname', '=', 'self', '.', 'inodes', '[', 'inode', ']', 'else', ':', '# The inode is added only if its valid.', '# For win32 it is always 0.', 'type', '=', 'REGTYPE', 'if', 'inode', '[', '0', ']', ':', 'self', '.', 'inodes', '[', 'inode', ']', '=', 'arcname', 'elif', 'stat', '.', 'S_ISDIR', '(', 'stmd', ')', ':', 'type', '=', 'DIRTYPE', 'elif', 'stat', '.', 'S_ISFIFO', '(', 'stmd', ')', ':', 'type', '=', 'FIFOTYPE', 'elif', 'stat', '.', 'S_ISLNK', '(', 'stmd', ')', ':', 'type', '=', 'SYMTYPE', 'linkname', '=', 'os', '.', 'readlink', '(', 'name', ')', 'elif', 'stat', '.', 'S_ISCHR', '(', 'stmd', ')', ':', 'type', '=', 'CHRTYPE', 'elif', 'stat', '.', 'S_ISBLK', '(', 'stmd', ')', ':', 'type', '=', 'BLKTYPE', 'else', ':', 'return', 'None', '# Fill the TarInfo object with all', '# information we can get.', 'tarinfo', '.', 'name', '=', 'arcname', 'tarinfo', '.', 'mode', '=', 'stmd', 'tarinfo', '.', 'uid', '=', 'statres', '.', 'st_uid', 'tarinfo', '.', 'gid', '=', 'statres', '.', 'st_gid', 'if', 'type', '==', 'REGTYPE', ':', 'tarinfo', '.', 'size', '=', 'statres', '.', 'st_size', 'else', ':', 'tarinfo', '.', 'size', '=', '0', 'tarinfo', '.', 'mtime', '=', 'statres', '.', 'st_mtime', 'tarinfo', '.', 'type', '=', 'type', 'tarinfo', '.', 'linkname', '=', 'linkname', 'if', 'pwd', ':', 'try', ':', 'tarinfo', '.', 'uname', '=', 'pwd', '.', 'getpwuid', '(', 'tarinfo', '.', 'uid', ')', '[', '0', ']', 'except', 'KeyError', ':', 'pass', 'if', 'grp', ':', 'try', ':', 'tarinfo', '.', 'gname', '=', 'grp', '.', 'getgrgid', '(', 'tarinfo', '.', 'gid', ')', '[', '0', ']', 'except', 'KeyError', ':', 'pass', 'if', 'type', 'in', '(', 'CHRTYPE', ',', 'BLKTYPE', ')', ':', 'if', 
'hasattr', '(', 'os', ',', '"major"', ')', 'and', 'hasattr', '(', 'os', ',', '"minor"', ')', ':', 'tarinfo', '.', 'devmajor', '=', 'os', '.', 'major', '(', 'statres', '.', 'st_rdev', ')', 'tarinfo', '.', 'devminor', '=', 'os', '.', 'minor', '(', 'statres', '.', 'st_rdev', ')', 'return', 'tarinfo'] | Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive. | ['Create', 'a', 'TarInfo', 'object', 'for', 'either', 'the', 'file', 'name', 'or', 'the', 'file', 'object', 'fileobj', '(', 'using', 'os', '.', 'fstat', 'on', 'its', 'file', 'descriptor', ')', '.', 'You', 'can', 'modify', 'some', 'of', 'the', 'TarInfo', 's', 'attributes', 'before', 'you', 'add', 'it', 'using', 'addfile', '()', '.', 'If', 'given', 'arcname', 'specifies', 'an', 'alternative', 'name', 'for', 'the', 'file', 'in', 'the', 'archive', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1911-L2007 |
2,898 | nugget/python-insteonplm | insteonplm/devices/__init__.py | Device.receive_message | def receive_message(self, msg):
"""Receive a messages sent to this device."""
_LOGGER.debug('Starting Device.receive_message')
if hasattr(msg, 'isack') and msg.isack:
_LOGGER.debug('Got Message ACK')
if self._sent_msg_wait_for_directACK.get('callback') is not None:
_LOGGER.debug('Look for direct ACK')
asyncio.ensure_future(self._wait_for_direct_ACK(),
loop=self._plm.loop)
else:
_LOGGER.debug('DA queue: %s',
self._sent_msg_wait_for_directACK)
_LOGGER.debug('Message ACK with no callback')
if (hasattr(msg, 'flags') and
hasattr(msg.flags, 'isDirectACK') and
msg.flags.isDirectACK):
_LOGGER.debug('Got Direct ACK message')
if self._send_msg_lock.locked():
self._directACK_received_queue.put_nowait(msg)
else:
_LOGGER.debug('But Direct ACK not expected')
if not self._is_duplicate(msg):
callbacks = self._message_callbacks.get_callbacks_from_message(msg)
for callback in callbacks:
_LOGGER.debug('Scheduling msg callback: %s', callback)
self._plm.loop.call_soon(callback, msg)
else:
_LOGGER.debug('msg is duplicate')
_LOGGER.debug(msg)
self._last_communication_received = datetime.datetime.now()
_LOGGER.debug('Ending Device.receive_message') | python | def receive_message(self, msg):
"""Receive a messages sent to this device."""
_LOGGER.debug('Starting Device.receive_message')
if hasattr(msg, 'isack') and msg.isack:
_LOGGER.debug('Got Message ACK')
if self._sent_msg_wait_for_directACK.get('callback') is not None:
_LOGGER.debug('Look for direct ACK')
asyncio.ensure_future(self._wait_for_direct_ACK(),
loop=self._plm.loop)
else:
_LOGGER.debug('DA queue: %s',
self._sent_msg_wait_for_directACK)
_LOGGER.debug('Message ACK with no callback')
if (hasattr(msg, 'flags') and
hasattr(msg.flags, 'isDirectACK') and
msg.flags.isDirectACK):
_LOGGER.debug('Got Direct ACK message')
if self._send_msg_lock.locked():
self._directACK_received_queue.put_nowait(msg)
else:
_LOGGER.debug('But Direct ACK not expected')
if not self._is_duplicate(msg):
callbacks = self._message_callbacks.get_callbacks_from_message(msg)
for callback in callbacks:
_LOGGER.debug('Scheduling msg callback: %s', callback)
self._plm.loop.call_soon(callback, msg)
else:
_LOGGER.debug('msg is duplicate')
_LOGGER.debug(msg)
self._last_communication_received = datetime.datetime.now()
_LOGGER.debug('Ending Device.receive_message') | ['def', 'receive_message', '(', 'self', ',', 'msg', ')', ':', '_LOGGER', '.', 'debug', '(', "'Starting Device.receive_message'", ')', 'if', 'hasattr', '(', 'msg', ',', "'isack'", ')', 'and', 'msg', '.', 'isack', ':', '_LOGGER', '.', 'debug', '(', "'Got Message ACK'", ')', 'if', 'self', '.', '_sent_msg_wait_for_directACK', '.', 'get', '(', "'callback'", ')', 'is', 'not', 'None', ':', '_LOGGER', '.', 'debug', '(', "'Look for direct ACK'", ')', 'asyncio', '.', 'ensure_future', '(', 'self', '.', '_wait_for_direct_ACK', '(', ')', ',', 'loop', '=', 'self', '.', '_plm', '.', 'loop', ')', 'else', ':', '_LOGGER', '.', 'debug', '(', "'DA queue: %s'", ',', 'self', '.', '_sent_msg_wait_for_directACK', ')', '_LOGGER', '.', 'debug', '(', "'Message ACK with no callback'", ')', 'if', '(', 'hasattr', '(', 'msg', ',', "'flags'", ')', 'and', 'hasattr', '(', 'msg', '.', 'flags', ',', "'isDirectACK'", ')', 'and', 'msg', '.', 'flags', '.', 'isDirectACK', ')', ':', '_LOGGER', '.', 'debug', '(', "'Got Direct ACK message'", ')', 'if', 'self', '.', '_send_msg_lock', '.', 'locked', '(', ')', ':', 'self', '.', '_directACK_received_queue', '.', 'put_nowait', '(', 'msg', ')', 'else', ':', '_LOGGER', '.', 'debug', '(', "'But Direct ACK not expected'", ')', 'if', 'not', 'self', '.', '_is_duplicate', '(', 'msg', ')', ':', 'callbacks', '=', 'self', '.', '_message_callbacks', '.', 'get_callbacks_from_message', '(', 'msg', ')', 'for', 'callback', 'in', 'callbacks', ':', '_LOGGER', '.', 'debug', '(', "'Scheduling msg callback: %s'", ',', 'callback', ')', 'self', '.', '_plm', '.', 'loop', '.', 'call_soon', '(', 'callback', ',', 'msg', ')', 'else', ':', '_LOGGER', '.', 'debug', '(', "'msg is duplicate'", ')', '_LOGGER', '.', 'debug', '(', 'msg', ')', 'self', '.', '_last_communication_received', '=', 'datetime', '.', 'datetime', '.', 'now', '(', ')', '_LOGGER', '.', 'debug', '(', "'Ending Device.receive_message'", ')'] | Receive a messages sent to this device. | ['Receive', 'a', 'messages', 'sent', 'to', 'this', 'device', '.'] | train | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/devices/__init__.py#L666-L696 |
2,899 | nion-software/nionswift-io | nionswift_plugin/TIFF_IO/tifffile.py | olympusini_metadata | def olympusini_metadata(inistr):
"""Return OlympusSIS metadata from INI string.
No documentation is available.
"""
def keyindex(key):
# split key into name and index
index = 0
i = len(key.rstrip('0123456789'))
if i < len(key):
index = int(key[i:]) - 1
key = key[:i]
return key, index
result = {}
bands = []
zpos = None
tpos = None
for line in inistr.splitlines():
line = line.strip()
if line == '' or line[0] == ';':
continue
if line[0] == '[' and line[-1] == ']':
section_name = line[1:-1]
result[section_name] = section = {}
if section_name == 'Dimension':
result['axes'] = axes = []
result['shape'] = shape = []
elif section_name == 'ASD':
result[section_name] = []
elif section_name == 'Z':
if 'Dimension' in result:
result[section_name]['ZPos'] = zpos = []
elif section_name == 'Time':
if 'Dimension' in result:
result[section_name]['TimePos'] = tpos = []
elif section_name == 'Band':
nbands = result['Dimension']['Band']
bands = [{'LUT': []} for i in range(nbands)]
result[section_name] = bands
iband = 0
else:
key, value = line.split('=')
if value.strip() == '':
value = None
elif ',' in value:
value = tuple(astype(v) for v in value.split(','))
else:
value = astype(value)
if section_name == 'Dimension':
section[key] = value
axes.append(key)
shape.append(value)
elif section_name == 'ASD':
if key == 'Count':
result['ASD'] = [{}] * value
else:
key, index = keyindex(key)
result['ASD'][index][key] = value
elif section_name == 'Band':
if key[:3] == 'LUT':
lut = bands[iband]['LUT']
value = struct.pack('<I', value)
lut.append(
[ord(value[0:1]), ord(value[1:2]), ord(value[2:3])])
else:
key, iband = keyindex(key)
bands[iband][key] = value
elif key[:4] == 'ZPos' and zpos is not None:
zpos.append(value)
elif key[:7] == 'TimePos' and tpos is not None:
tpos.append(value)
else:
section[key] = value
if 'axes' in result:
sisaxes = {'Band': 'C'}
axes = []
shape = []
for i, x in zip(result['shape'], result['axes']):
if i > 1:
axes.append(sisaxes.get(x, x[0].upper()))
shape.append(i)
result['axes'] = ''.join(axes)
result['shape'] = tuple(shape)
try:
result['Z']['ZPos'] = numpy.array(
result['Z']['ZPos'][:result['Dimension']['Z']], 'float64')
except Exception:
pass
try:
result['Time']['TimePos'] = numpy.array(
result['Time']['TimePos'][:result['Dimension']['Time']], 'int32')
except Exception:
pass
for band in bands:
band['LUT'] = numpy.array(band['LUT'], 'uint8')
return result | python | def olympusini_metadata(inistr):
"""Return OlympusSIS metadata from INI string.
No documentation is available.
"""
def keyindex(key):
# split key into name and index
index = 0
i = len(key.rstrip('0123456789'))
if i < len(key):
index = int(key[i:]) - 1
key = key[:i]
return key, index
result = {}
bands = []
zpos = None
tpos = None
for line in inistr.splitlines():
line = line.strip()
if line == '' or line[0] == ';':
continue
if line[0] == '[' and line[-1] == ']':
section_name = line[1:-1]
result[section_name] = section = {}
if section_name == 'Dimension':
result['axes'] = axes = []
result['shape'] = shape = []
elif section_name == 'ASD':
result[section_name] = []
elif section_name == 'Z':
if 'Dimension' in result:
result[section_name]['ZPos'] = zpos = []
elif section_name == 'Time':
if 'Dimension' in result:
result[section_name]['TimePos'] = tpos = []
elif section_name == 'Band':
nbands = result['Dimension']['Band']
bands = [{'LUT': []} for i in range(nbands)]
result[section_name] = bands
iband = 0
else:
key, value = line.split('=')
if value.strip() == '':
value = None
elif ',' in value:
value = tuple(astype(v) for v in value.split(','))
else:
value = astype(value)
if section_name == 'Dimension':
section[key] = value
axes.append(key)
shape.append(value)
elif section_name == 'ASD':
if key == 'Count':
result['ASD'] = [{}] * value
else:
key, index = keyindex(key)
result['ASD'][index][key] = value
elif section_name == 'Band':
if key[:3] == 'LUT':
lut = bands[iband]['LUT']
value = struct.pack('<I', value)
lut.append(
[ord(value[0:1]), ord(value[1:2]), ord(value[2:3])])
else:
key, iband = keyindex(key)
bands[iband][key] = value
elif key[:4] == 'ZPos' and zpos is not None:
zpos.append(value)
elif key[:7] == 'TimePos' and tpos is not None:
tpos.append(value)
else:
section[key] = value
if 'axes' in result:
sisaxes = {'Band': 'C'}
axes = []
shape = []
for i, x in zip(result['shape'], result['axes']):
if i > 1:
axes.append(sisaxes.get(x, x[0].upper()))
shape.append(i)
result['axes'] = ''.join(axes)
result['shape'] = tuple(shape)
try:
result['Z']['ZPos'] = numpy.array(
result['Z']['ZPos'][:result['Dimension']['Z']], 'float64')
except Exception:
pass
try:
result['Time']['TimePos'] = numpy.array(
result['Time']['TimePos'][:result['Dimension']['Time']], 'int32')
except Exception:
pass
for band in bands:
band['LUT'] = numpy.array(band['LUT'], 'uint8')
return result | ['def', 'olympusini_metadata', '(', 'inistr', ')', ':', 'def', 'keyindex', '(', 'key', ')', ':', '# split key into name and index', 'index', '=', '0', 'i', '=', 'len', '(', 'key', '.', 'rstrip', '(', "'0123456789'", ')', ')', 'if', 'i', '<', 'len', '(', 'key', ')', ':', 'index', '=', 'int', '(', 'key', '[', 'i', ':', ']', ')', '-', '1', 'key', '=', 'key', '[', ':', 'i', ']', 'return', 'key', ',', 'index', 'result', '=', '{', '}', 'bands', '=', '[', ']', 'zpos', '=', 'None', 'tpos', '=', 'None', 'for', 'line', 'in', 'inistr', '.', 'splitlines', '(', ')', ':', 'line', '=', 'line', '.', 'strip', '(', ')', 'if', 'line', '==', "''", 'or', 'line', '[', '0', ']', '==', "';'", ':', 'continue', 'if', 'line', '[', '0', ']', '==', "'['", 'and', 'line', '[', '-', '1', ']', '==', "']'", ':', 'section_name', '=', 'line', '[', '1', ':', '-', '1', ']', 'result', '[', 'section_name', ']', '=', 'section', '=', '{', '}', 'if', 'section_name', '==', "'Dimension'", ':', 'result', '[', "'axes'", ']', '=', 'axes', '=', '[', ']', 'result', '[', "'shape'", ']', '=', 'shape', '=', '[', ']', 'elif', 'section_name', '==', "'ASD'", ':', 'result', '[', 'section_name', ']', '=', '[', ']', 'elif', 'section_name', '==', "'Z'", ':', 'if', "'Dimension'", 'in', 'result', ':', 'result', '[', 'section_name', ']', '[', "'ZPos'", ']', '=', 'zpos', '=', '[', ']', 'elif', 'section_name', '==', "'Time'", ':', 'if', "'Dimension'", 'in', 'result', ':', 'result', '[', 'section_name', ']', '[', "'TimePos'", ']', '=', 'tpos', '=', '[', ']', 'elif', 'section_name', '==', "'Band'", ':', 'nbands', '=', 'result', '[', "'Dimension'", ']', '[', "'Band'", ']', 'bands', '=', '[', '{', "'LUT'", ':', '[', ']', '}', 'for', 'i', 'in', 'range', '(', 'nbands', ')', ']', 'result', '[', 'section_name', ']', '=', 'bands', 'iband', '=', '0', 'else', ':', 'key', ',', 'value', '=', 'line', '.', 'split', '(', "'='", ')', 'if', 'value', '.', 'strip', '(', ')', '==', "''", ':', 'value', '=', 'None', 'elif', "','", 'in', 'value', ':', 'value', '=', 'tuple', '(', 'astype', '(', 'v', ')', 'for', 'v', 'in', 'value', '.', 'split', '(', "','", ')', ')', 'else', ':', 'value', '=', 'astype', '(', 'value', ')', 'if', 'section_name', '==', "'Dimension'", ':', 'section', '[', 'key', ']', '=', 'value', 'axes', '.', 'append', '(', 'key', ')', 'shape', '.', 'append', '(', 'value', ')', 'elif', 'section_name', '==', "'ASD'", ':', 'if', 'key', '==', "'Count'", ':', 'result', '[', "'ASD'", ']', '=', '[', '{', '}', ']', '*', 'value', 'else', ':', 'key', ',', 'index', '=', 'keyindex', '(', 'key', ')', 'result', '[', "'ASD'", ']', '[', 'index', ']', '[', 'key', ']', '=', 'value', 'elif', 'section_name', '==', "'Band'", ':', 'if', 'key', '[', ':', '3', ']', '==', "'LUT'", ':', 'lut', '=', 'bands', '[', 'iband', ']', '[', "'LUT'", ']', 'value', '=', 'struct', '.', 'pack', '(', "'<I'", ',', 'value', ')', 'lut', '.', 'append', '(', '[', 'ord', '(', 'value', '[', '0', ':', '1', ']', ')', ',', 'ord', '(', 'value', '[', '1', ':', '2', ']', ')', ',', 'ord', '(', 'value', '[', '2', ':', '3', ']', ')', ']', ')', 'else', ':', 'key', ',', 'iband', '=', 'keyindex', '(', 'key', ')', 'bands', '[', 'iband', ']', '[', 'key', ']', '=', 'value', 'elif', 'key', '[', ':', '4', ']', '==', "'ZPos'", 'and', 'zpos', 'is', 'not', 'None', ':', 'zpos', '.', 'append', '(', 'value', ')', 'elif', 'key', '[', ':', '7', ']', '==', "'TimePos'", 'and', 'tpos', 'is', 'not', 'None', ':', 'tpos', '.', 'append', '(', 'value', ')', 'else', ':', 'section', '[', 'key', ']', '=', 'value', 'if', "'axes'", 
'in', 'result', ':', 'sisaxes', '=', '{', "'Band'", ':', "'C'", '}', 'axes', '=', '[', ']', 'shape', '=', '[', ']', 'for', 'i', ',', 'x', 'in', 'zip', '(', 'result', '[', "'shape'", ']', ',', 'result', '[', "'axes'", ']', ')', ':', 'if', 'i', '>', '1', ':', 'axes', '.', 'append', '(', 'sisaxes', '.', 'get', '(', 'x', ',', 'x', '[', '0', ']', '.', 'upper', '(', ')', ')', ')', 'shape', '.', 'append', '(', 'i', ')', 'result', '[', "'axes'", ']', '=', "''", '.', 'join', '(', 'axes', ')', 'result', '[', "'shape'", ']', '=', 'tuple', '(', 'shape', ')', 'try', ':', 'result', '[', "'Z'", ']', '[', "'ZPos'", ']', '=', 'numpy', '.', 'array', '(', 'result', '[', "'Z'", ']', '[', "'ZPos'", ']', '[', ':', 'result', '[', "'Dimension'", ']', '[', "'Z'", ']', ']', ',', "'float64'", ')', 'except', 'Exception', ':', 'pass', 'try', ':', 'result', '[', "'Time'", ']', '[', "'TimePos'", ']', '=', 'numpy', '.', 'array', '(', 'result', '[', "'Time'", ']', '[', "'TimePos'", ']', '[', ':', 'result', '[', "'Dimension'", ']', '[', "'Time'", ']', ']', ',', "'int32'", ')', 'except', 'Exception', ':', 'pass', 'for', 'band', 'in', 'bands', ':', 'band', '[', "'LUT'", ']', '=', 'numpy', '.', 'array', '(', 'band', '[', "'LUT'", ']', ',', "'uint8'", ')', 'return', 'result'] | Return OlympusSIS metadata from INI string.
No documentation is available. | ['Return', 'OlympusSIS', 'metadata', 'from', 'INI', 'string', '.'] | train | https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9234-L9334 |