code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k)
---|---|---|
def page_attr(title=None, **kwargs):
"""
Page Attr allows you to add page meta data in the request `g` context
:params **kwargs:
meta keys we're expecting:
title (str)
description (str)
url (str) (Will pick it up by itself if not set)
image (str)
site_name (str) (but can pick it up from config file)
object_type (str)
keywords (list)
locale (str)
card (str)
**Boolean By default these keys are True
use_opengraph
use_twitter
use_googleplus
python
"""
default = dict(
title="",
description="",
url="",
image="",
site_name="",
object_type="article",
locale="",
keywords=[],
use_opengraph=True,
use_googleplus=True,
use_twitter=True,
properties={}
)
meta = getattr(g, "__META__", default)
if title:
kwargs["title"] = title
meta.update(**kwargs)
setattr(g, "__META__", meta) | Page Attr allows you to add page meta data in the request `g` context
:params **kwargs:
meta keys we're expecting:
title (str)
description (str)
url (str) (Will pick it up by itself if not set)
image (str)
site_name (str) (but can pick it up from config file)
object_type (str)
keywords (list)
locale (str)
card (str)
**Boolean By default these keys are True
use_opengraph
use_twitter
use_googleplus
python | Below is the instruction that describes the task:
### Input:
Page Attr allows you to add page meta data in the request `g` context
:params **kwargs:
meta keys we're expecting:
title (str)
description (str)
url (str) (Will pick it up by itself if not set)
image (str)
site_name (str) (but can pick it up from config file)
object_type (str)
keywords (list)
locale (str)
card (str)
**Boolean By default these keys are True
use_opengraph
use_twitter
use_googleplus
python
### Response:
def page_attr(title=None, **kwargs):
"""
Page Attr allows you to add page meta data in the request `g` context
:params **kwargs:
meta keys we're expecting:
title (str)
description (str)
url (str) (Will pick it up by itself if not set)
image (str)
site_name (str) (but can pick it up from config file)
object_type (str)
keywords (list)
locale (str)
card (str)
**Boolean By default these keys are True
use_opengraph
use_twitter
use_googleplus
python
"""
default = dict(
title="",
description="",
url="",
image="",
site_name="",
object_type="article",
locale="",
keywords=[],
use_opengraph=True,
use_googleplus=True,
use_twitter=True,
properties={}
)
meta = getattr(g, "__META__", default)
if title:
kwargs["title"] = title
meta.update(**kwargs)
setattr(g, "__META__", meta) |
def vcs_virtual_ipv6_address_ipv6address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcs = ET.SubElement(config, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs")
virtual = ET.SubElement(vcs, "virtual")
ipv6 = ET.SubElement(virtual, "ipv6")
address = ET.SubElement(ipv6, "address")
ipv6address = ET.SubElement(address, "ipv6address")
ipv6address.text = kwargs.pop('ipv6address')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def vcs_virtual_ipv6_address_ipv6address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcs = ET.SubElement(config, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs")
virtual = ET.SubElement(vcs, "virtual")
ipv6 = ET.SubElement(virtual, "ipv6")
address = ET.SubElement(ipv6, "address")
ipv6address = ET.SubElement(address, "ipv6address")
ipv6address.text = kwargs.pop('ipv6address')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _removepkg(self, package):
"""removepkg Slackware command
"""
try:
subprocess.call("removepkg {0} {1}".format(self.flag, package),
shell=True)
if os.path.isfile(self.dep_path + package):
os.remove(self.dep_path + package) # remove log
except subprocess.CalledProcessError as er:
print(er)
raise SystemExit() | removepkg Slackware command | Below is the instruction that describes the task:
### Input:
removepkg Slackware command
### Response:
def _removepkg(self, package):
"""removepkg Slackware command
"""
try:
subprocess.call("removepkg {0} {1}".format(self.flag, package),
shell=True)
if os.path.isfile(self.dep_path + package):
os.remove(self.dep_path + package) # remove log
except subprocess.CalledProcessError as er:
print(er)
raise SystemExit() |
def a_message_callback(ctx):
"""Message the captured pattern."""
message = ctx.ctrl.after.strip().splitlines()[-1]
ctx.device.chain.connection.emit_message(message, log_level=logging.INFO)
return True | Message the captured pattern. | Below is the instruction that describes the task:
### Input:
Message the captured pattern.
### Response:
def a_message_callback(ctx):
"""Message the captured pattern."""
message = ctx.ctrl.after.strip().splitlines()[-1]
ctx.device.chain.connection.emit_message(message, log_level=logging.INFO)
return True |
def construct_publish_comands(additional_steps=None, nightly=False):
'''Get the shell commands we'll use to actually build and publish a package to PyPI.'''
publish_commands = (
['rm -rf dist']
+ (additional_steps if additional_steps else [])
+ [
'python setup.py sdist bdist_wheel{nightly}'.format(
nightly=' --nightly' if nightly else ''
),
'twine upload dist/*',
]
)
return publish_commands | Get the shell commands we'll use to actually build and publish a package to PyPI. | Below is the instruction that describes the task:
### Input:
Get the shell commands we'll use to actually build and publish a package to PyPI.
### Response:
def construct_publish_comands(additional_steps=None, nightly=False):
'''Get the shell commands we'll use to actually build and publish a package to PyPI.'''
publish_commands = (
['rm -rf dist']
+ (additional_steps if additional_steps else [])
+ [
'python setup.py sdist bdist_wheel{nightly}'.format(
nightly=' --nightly' if nightly else ''
),
'twine upload dist/*',
]
)
return publish_commands |
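As a quick illustration of the return value of `construct_publish_comands` (the extra install step below is a made-up example, not something the original module prescribes):
```python
cmds = construct_publish_comands(
    additional_steps=['python -m pip install --upgrade twine'])
# cmds == ['rm -rf dist',
#          'python -m pip install --upgrade twine',
#          'python setup.py sdist bdist_wheel',
#          'twine upload dist/*']
```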
def ParseArguments(self):
"""Parses the command line arguments.
Returns:
bool: True if the arguments were successfully parsed.
"""
loggers.ConfigureLogging()
argument_parser = argparse.ArgumentParser(
description=self.DESCRIPTION, epilog=self.EPILOG, add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
self.AddBasicOptions(argument_parser)
extraction_group = argument_parser.add_argument_group(
'extraction arguments')
argument_helper_names = ['extraction']
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
extraction_group, names=argument_helper_names)
extraction_group.add_argument(
'--storage_file', '--storage-file', metavar='PATH', type=str,
default=None, help=(
'The path of the storage file. If not specified, one will be made '
'in the form <timestamp>-<source>.plaso'))
self.AddStorageMediaImageOptions(extraction_group)
self.AddCredentialOptions(extraction_group)
info_group = argument_parser.add_argument_group('informational arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
info_group, names=['status_view'])
input_group = argument_parser.add_argument_group('input arguments')
input_group.add_argument(
'--source', dest='source', action='store',
type=str, help='The source to process')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
input_group, names=['data_location'])
output_group = argument_parser.add_argument_group('output arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
output_group, names=['language'])
self.AddTimeZoneOption(output_group)
output_format_group = argument_parser.add_argument_group(
'output format arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
output_format_group, names=['output_modules'])
processing_group = argument_parser.add_argument_group(
'processing arguments')
self.AddPerformanceOptions(processing_group)
self.AddProcessingOptions(processing_group)
try:
options = argument_parser.parse_args()
except UnicodeEncodeError:
# If we get here we are attempting to print help in a non-Unicode
# terminal.
self._output_writer.Write('\n')
self._output_writer.Write(argument_parser.format_help())
return False
try:
self.ParseOptions(options)
except errors.BadConfigOption as exception:
self._output_writer.Write('ERROR: {0!s}\n'.format(exception))
self._output_writer.Write('\n')
self._output_writer.Write(argument_parser.format_usage())
return False
loggers.ConfigureLogging(
debug_output=self._debug_mode, filename=self._log_file,
quiet_mode=self._quiet_mode)
return True | Parses the command line arguments.
Returns:
bool: True if the arguments were successfully parsed. | Below is the instruction that describes the task:
### Input:
Parses the command line arguments.
Returns:
bool: True if the arguments were successfully parsed.
### Response:
def ParseArguments(self):
"""Parses the command line arguments.
Returns:
bool: True if the arguments were successfully parsed.
"""
loggers.ConfigureLogging()
argument_parser = argparse.ArgumentParser(
description=self.DESCRIPTION, epilog=self.EPILOG, add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
self.AddBasicOptions(argument_parser)
extraction_group = argument_parser.add_argument_group(
'extraction arguments')
argument_helper_names = ['extraction']
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
extraction_group, names=argument_helper_names)
extraction_group.add_argument(
'--storage_file', '--storage-file', metavar='PATH', type=str,
default=None, help=(
'The path of the storage file. If not specified, one will be made '
'in the form <timestamp>-<source>.plaso'))
self.AddStorageMediaImageOptions(extraction_group)
self.AddCredentialOptions(extraction_group)
info_group = argument_parser.add_argument_group('informational arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
info_group, names=['status_view'])
input_group = argument_parser.add_argument_group('input arguments')
input_group.add_argument(
'--source', dest='source', action='store',
type=str, help='The source to process')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
input_group, names=['data_location'])
output_group = argument_parser.add_argument_group('output arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
output_group, names=['language'])
self.AddTimeZoneOption(output_group)
output_format_group = argument_parser.add_argument_group(
'output format arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
output_format_group, names=['output_modules'])
processing_group = argument_parser.add_argument_group(
'processing arguments')
self.AddPerformanceOptions(processing_group)
self.AddProcessingOptions(processing_group)
try:
options = argument_parser.parse_args()
except UnicodeEncodeError:
# If we get here we are attempting to print help in a non-Unicode
# terminal.
self._output_writer.Write('\n')
self._output_writer.Write(argument_parser.format_help())
return False
try:
self.ParseOptions(options)
except errors.BadConfigOption as exception:
self._output_writer.Write('ERROR: {0!s}\n'.format(exception))
self._output_writer.Write('\n')
self._output_writer.Write(argument_parser.format_usage())
return False
loggers.ConfigureLogging(
debug_output=self._debug_mode, filename=self._log_file,
quiet_mode=self._quiet_mode)
return True |
def blockshaped(arr, nrows, ncols):
"""
Return a new array of shape (n, nrows, ncols) where
n * nrows * ncols = arr.size
If arr is a 2D array, the returned array looks like n subblocks with
each subblock preserving the "physical" layout of arr.
"""
h, w = arr.shape
return (arr.reshape(h // nrows, nrows, -1, ncols)
.swapaxes(1, 2)
.reshape(-1, nrows, ncols)) | Return a new array of shape (n, nrows, ncols) where
n * nrows * ncols = arr.size
If arr is a 2D array, the returned array looks like n subblocks with
each subblock preserving the "physical" layout of arr. | Below is the instruction that describes the task:
### Input:
Return a new array of shape (n, nrows, ncols) where
n * nrows * ncols = arr.size
If arr is a 2D array, the returned array looks like n subblocks with
each subblock preserving the "physical" layout of arr.
### Response:
def blockshaped(arr, nrows, ncols):
"""
Return a new array of shape (n, nrows, ncols) where
n * nrows * ncols = arr.size
If arr is a 2D array, the returned array looks like n subblocks with
each subblock preserving the "physical" layout of arr.
"""
h, w = arr.shape
return (arr.reshape(h // nrows, nrows, -1, ncols)
.swapaxes(1, 2)
.reshape(-1, nrows, ncols)) |
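A small usage sketch of `blockshaped` (assumes `numpy` is imported as `np`, as the function itself requires):
```python
import numpy as np

arr = np.arange(16).reshape(4, 4)   # 4x4 array holding 0..15
blocks = blockshaped(arr, 2, 2)     # shape (4, 2, 2): four 2x2 sub-blocks
print(blocks[0])                    # [[0 1]
                                    #  [4 5]]  -> the top-left block of arr
```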
def get_resolved_res_configs(self, rid, config=None):
"""
Return a list of resolved resource IDs with their corresponding configuration.
It has a similar return type as :meth:`get_res_configs` but also handles complex entries
and references.
Also instead of returning :class:`ARSCResTableEntry` in the tuple, the actual values are resolved.
This is the preferred way of resolving resource IDs to their resources.
:param int rid: the numerical ID of the resource
:param ARSCTableResConfig config: the desired configuration or None to retrieve all
:return: A list of tuples of (ARSCResTableConfig, str)
"""
resolver = ARSCParser.ResourceResolver(self, config)
return resolver.resolve(rid) | Return a list of resolved resource IDs with their corresponding configuration.
It has a similar return type as :meth:`get_res_configs` but also handles complex entries
and references.
Also instead of returning :class:`ARSCResTableEntry` in the tuple, the actual values are resolved.
This is the preferred way of resolving resource IDs to their resources.
:param int rid: the numerical ID of the resource
:param ARSCTableResConfig config: the desired configuration or None to retrieve all
:return: A list of tuples of (ARSCResTableConfig, str) | Below is the instruction that describes the task:
### Input:
Return a list of resolved resource IDs with their corresponding configuration.
It has a similar return type as :meth:`get_res_configs` but also handles complex entries
and references.
Also instead of returning :class:`ARSCResTableEntry` in the tuple, the actual values are resolved.
This is the preferred way of resolving resource IDs to their resources.
:param int rid: the numerical ID of the resource
:param ARSCTableResConfig config: the desired configuration or None to retrieve all
:return: A list of tuples of (ARSCResTableConfig, str)
### Response:
def get_resolved_res_configs(self, rid, config=None):
"""
Return a list of resolved resource IDs with their corresponding configuration.
It has a similar return type as :meth:`get_res_configs` but also handles complex entries
and references.
Also instead of returning :class:`ARSCResTableEntry` in the tuple, the actual values are resolved.
This is the preferred way of resolving resource IDs to their resources.
:param int rid: the numerical ID of the resource
:param ARSCTableResConfig config: the desired configuration or None to retrieve all
:return: A list of tuples of (ARSCResTableConfig, str)
"""
resolver = ARSCParser.ResourceResolver(self, config)
return resolver.resolve(rid) |
def delete_all(self):
'''Deletes all feature collections.
This does not destroy the ES index, but instead only
deletes all FCs with the configured document type
(defaults to ``fc``).
'''
try:
self.conn.indices.delete_mapping(
index=self.index, doc_type=self.type)
except TransportError:
logger.warn('type %r in index %r already deleted',
self.type, self.index, exc_info=True) | Deletes all feature collections.
This does not destroy the ES index, but instead only
deletes all FCs with the configured document type
(defaults to ``fc``). | Below is the instruction that describes the task:
### Input:
Deletes all feature collections.
This does not destroy the ES index, but instead only
deletes all FCs with the configured document type
(defaults to ``fc``).
### Response:
def delete_all(self):
'''Deletes all feature collections.
This does not destroy the ES index, but instead only
deletes all FCs with the configured document type
(defaults to ``fc``).
'''
try:
self.conn.indices.delete_mapping(
index=self.index, doc_type=self.type)
except TransportError:
logger.warn('type %r in index %r already deleted',
self.type, self.index, exc_info=True) |
def _transform(comps, trans):
"""
Transform the given string with transform type trans
"""
logging.debug("== In _transform(%s, %s) ==", comps, trans)
components = list(comps)
action, parameter = _get_action(trans)
if action == _Action.ADD_MARK and \
components[2] == "" and \
mark.strip(components[1]).lower() in ['oe', 'oa'] and trans == "o^":
action, parameter = _Action.ADD_CHAR, trans[0]
if action == _Action.ADD_ACCENT:
logging.debug("add_accent(%s, %s)", components, parameter)
components = accent.add_accent(components, parameter)
elif action == _Action.ADD_MARK and mark.is_valid_mark(components, trans):
logging.debug("add_mark(%s, %s)", components, parameter)
components = mark.add_mark(components, parameter)
# Handle uơ in "huơ", "thuở", "quở"
# If the current word has no last consonant and the first consonant
# is one of "h", "th" and the vowel is "ươ" then change the vowel into
# "uơ", keeping case and accent. If an alphabet character is then added
# into the word then change back to "ươ".
#
# NOTE: In the dictionary, these are the only words having this strange
# vowel so we don't need to worry about other cases.
if accent.remove_accent_string(components[1]).lower() == "ươ" and \
not components[2] and components[0].lower() in ["", "h", "th", "kh"]:
# Backup accents
ac = accent.get_accent_string(components[1])
components[1] = ("u", "U")[components[1][0].isupper()] + components[1][1]
components = accent.add_accent(components, ac)
elif action == _Action.ADD_CHAR:
if trans[0] == "<":
if not components[2]:
# Only allow ư, ơ or ươ sitting alone in the middle part
# and ['g', 'i', '']. If we want to type giowf = 'giờ', separate()
# will create ['g', 'i', '']. Therefore we have to allow
# components[1] == 'i'.
if (components[0].lower(), components[1].lower()) == ('g', 'i'):
components[0] += components[1]
components[1] = ''
if not components[1] or \
(components[1].lower(), trans[1].lower()) == ('ư', 'ơ'):
components[1] += trans[1]
else:
components = utils.append_comps(components, parameter)
if parameter.isalpha() and \
accent.remove_accent_string(components[1]).lower().startswith("uơ"):
ac = accent.get_accent_string(components[1])
components[1] = ('ư', 'Ư')[components[1][0].isupper()] + \
('ơ', 'Ơ')[components[1][1].isupper()] + components[1][2:]
components = accent.add_accent(components, ac)
elif action == _Action.UNDO:
components = _reverse(components, trans[1:])
if action == _Action.ADD_MARK or (action == _Action.ADD_CHAR and parameter.isalpha()):
# If there is any accent, remove and reapply it
# because it is likely to be misplaced in previous transformations
ac = accent.get_accent_string(components[1])
if ac != accent.Accent.NONE:
components = accent.add_accent(components, Accent.NONE)
components = accent.add_accent(components, ac)
logging.debug("After transform: %s", components)
return components | Transform the given string with transform type trans | Below is the instruction that describes the task:
### Input:
Transform the given string with transform type trans
### Response:
def _transform(comps, trans):
"""
Transform the given string with transform type trans
"""
logging.debug("== In _transform(%s, %s) ==", comps, trans)
components = list(comps)
action, parameter = _get_action(trans)
if action == _Action.ADD_MARK and \
components[2] == "" and \
mark.strip(components[1]).lower() in ['oe', 'oa'] and trans == "o^":
action, parameter = _Action.ADD_CHAR, trans[0]
if action == _Action.ADD_ACCENT:
logging.debug("add_accent(%s, %s)", components, parameter)
components = accent.add_accent(components, parameter)
elif action == _Action.ADD_MARK and mark.is_valid_mark(components, trans):
logging.debug("add_mark(%s, %s)", components, parameter)
components = mark.add_mark(components, parameter)
# Handle uơ in "huơ", "thuở", "quở"
# If the current word has no last consonant and the first consonant
# is one of "h", "th" and the vowel is "ươ" then change the vowel into
# "uơ", keeping case and accent. If an alphabet character is then added
# into the word then change back to "ươ".
#
# NOTE: In the dictionary, these are the only words having this strange
# vowel so we don't need to worry about other cases.
if accent.remove_accent_string(components[1]).lower() == "ươ" and \
not components[2] and components[0].lower() in ["", "h", "th", "kh"]:
# Backup accents
ac = accent.get_accent_string(components[1])
components[1] = ("u", "U")[components[1][0].isupper()] + components[1][1]
components = accent.add_accent(components, ac)
elif action == _Action.ADD_CHAR:
if trans[0] == "<":
if not components[2]:
# Only allow ư, ơ or ươ sitting alone in the middle part
# and ['g', 'i', '']. If we want to type giowf = 'giờ', separate()
# will create ['g', 'i', '']. Therefore we have to allow
# components[1] == 'i'.
if (components[0].lower(), components[1].lower()) == ('g', 'i'):
components[0] += components[1]
components[1] = ''
if not components[1] or \
(components[1].lower(), trans[1].lower()) == ('ư', 'ơ'):
components[1] += trans[1]
else:
components = utils.append_comps(components, parameter)
if parameter.isalpha() and \
accent.remove_accent_string(components[1]).lower().startswith("uơ"):
ac = accent.get_accent_string(components[1])
components[1] = ('ư', 'Ư')[components[1][0].isupper()] + \
('ơ', 'Ơ')[components[1][1].isupper()] + components[1][2:]
components = accent.add_accent(components, ac)
elif action == _Action.UNDO:
components = _reverse(components, trans[1:])
if action == _Action.ADD_MARK or (action == _Action.ADD_CHAR and parameter.isalpha()):
# If there is any accent, remove and reapply it
# because it is likely to be misplaced in previous transformations
ac = accent.get_accent_string(components[1])
if ac != accent.Accent.NONE:
components = accent.add_accent(components, Accent.NONE)
components = accent.add_accent(components, ac)
logging.debug("After transform: %s", components)
return components |
def _revs_equal(rev1, rev2, rev_type):
'''
Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then
the comparison will be done using str.startswith() to allow short SHA1s to
compare successfully.
NOTE: This means that rev2 must be the short rev.
'''
if (rev1 is None and rev2 is not None) \
or (rev2 is None and rev1 is not None):
return False
elif rev1 is rev2 is None:
return True
elif rev_type == 'sha1':
return rev1.startswith(rev2)
else:
return rev1 == rev2 | Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then
the comparison will be done using str.startswith() to allow short SHA1s to
compare successfully.
NOTE: This means that rev2 must be the short rev. | Below is the instruction that describes the task:
### Input:
Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then
the comparison will be done using str.startswith() to allow short SHA1s to
compare successfully.
NOTE: This means that rev2 must be the short rev.
### Response:
def _revs_equal(rev1, rev2, rev_type):
'''
Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then
the comparison will be done using str.startswith() to allow short SHA1s to
compare successfully.
NOTE: This means that rev2 must be the short rev.
'''
if (rev1 is None and rev2 is not None) \
or (rev2 is None and rev1 is not None):
return False
elif rev1 is rev2 is None:
return True
elif rev_type == 'sha1':
return rev1.startswith(rev2)
else:
return rev1 == rev2 |
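Illustrative calls for `_revs_equal` (the SHA1 values below are made up):
```python
_revs_equal("3f786850e387550fdab836ed7e6dc881de23001b", "3f78685", "sha1")  # True
_revs_equal("v1.2.0", "v1.2.0", "tag")   # True: non-sha1 rev types compare exactly
_revs_equal(None, "3f78685", "sha1")     # False: only one side is None
```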
def var_replace(self, text):
"""Replaces all instances of @VAR with their values in the specified text.
"""
result = text
for var in self._vardict:
result = result.replace("@{}".format(var), self._vardict[var])
return result | Replaces all instances of @VAR with their values in the specified text. | Below is the instruction that describes the task:
### Input:
Replaces all instances of @VAR with their values in the specified text.
### Response:
def var_replace(self, text):
"""Replaces all instances of @VAR with their values in the specified text.
"""
result = text
for var in self._vardict:
result = result.replace("@{}".format(var), self._vardict[var])
return result |
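A hedged sketch of the substitution that `var_replace` performs, using a plain dict in place of `self._vardict` (the variable names and values are hypothetical):
```python
vardict = {"NAME": "demo", "VER": "1.0"}
text = "@NAME-@VER"
for var in vardict:
    text = text.replace("@{}".format(var), vardict[var])
print(text)  # -> demo-1.0
```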
def copy_to_tmp(source):
"""
Copies ``source`` to a temporary directory, and returns the copied
location.
If source is a file, the copied location is also a file.
"""
tmp_dir = tempfile.mkdtemp()
# Use pathlib because os.path.basename is different depending on whether
# the path ends in a /
p = pathlib.Path(source)
dirname = p.name or 'temp'
new_dir = os.path.join(tmp_dir, dirname)
if os.path.isdir(source):
shutil.copytree(source, new_dir)
else:
shutil.copy2(source, new_dir)
return new_dir | Copies ``source`` to a temporary directory, and returns the copied
location.
If source is a file, the copied location is also a file. | Below is the instruction that describes the task:
### Input:
Copies ``source`` to a temporary directory, and returns the copied
location.
If source is a file, the copied location is also a file.
### Response:
def copy_to_tmp(source):
"""
Copies ``source`` to a temporary directory, and returns the copied
location.
If source is a file, the copied location is also a file.
"""
tmp_dir = tempfile.mkdtemp()
# Use pathlib because os.path.basename is different depending on whether
# the path ends in a /
p = pathlib.Path(source)
dirname = p.name or 'temp'
new_dir = os.path.join(tmp_dir, dirname)
if os.path.isdir(source):
shutil.copytree(source, new_dir)
else:
shutil.copy2(source, new_dir)
return new_dir |
def delete(name, *effects, **kwargs):
"""
Annotate a delete action to the model being defined.
Should be delete(name, *effects, label=None, desc=None)
but it is not supported by python < 3.
@param name: item name unique for the model being defined.
@type name: str or unicode
@param effects:
@type effects: str or unicode
@param label: the action label or None.
@type label: str or unicode or None
@param desc: the action description or None if not documented.
@type desc: str or unicode or None
"""
label = kwargs.pop("label", None)
desc = kwargs.pop("desc", None)
if kwargs:
raise TypeError("delete() got an unexpected keyword '%s'"
% kwargs.keys()[0])
_annotate("delete", name, effects=effects, label=label, desc=desc) | Annotate a delete action to the model being defined.
Should be delete(name, *effects, label=None, desc=None)
but it is not supported by python < 3.
@param name: item name unique for the model being defined.
@type name: str or unicode
@param effects:
@type effects: str or unicode
@param label: the action label or None.
@type label: str or unicode or None
@param desc: the action description or None if not documented.
@type desc: str or unicode or None | Below is the instruction that describes the task:
### Input:
Annotate a delete action to the model being defined.
Should be delete(name, *effects, label=None, desc=None)
but it is not supported by python < 3.
@param name: item name unique for the model being defined.
@type name: str or unicode
@param effects:
@type effects: str or unicode
@param label: the action label or None.
@type label: str or unicode or None
@param desc: the action description or None if not documented.
@type desc: str or unicode or None
### Response:
def delete(name, *effects, **kwargs):
"""
Annotate a delete action to the model being defined.
Should be delete(name, *effects, label=None, desc=None)
but it is not supported by python < 3.
@param name: item name unique for the model being defined.
@type name: str or unicode
@param effects:
@type effects: str or unicode
@param label: the action label or None.
@type label: str or unicode or None
@param desc: the action description or None if not documented.
@type desc: str or unicode or None
"""
label = kwargs.pop("label", None)
desc = kwargs.pop("desc", None)
if kwargs:
raise TypeError("delete() got an unexpected keyword '%s'"
% kwargs.keys()[0])
_annotate("delete", name, effects=effects, label=label, desc=desc) |
def _extend_nodelist(extends_node, context, instance_types):
"""
Returns a list of results found in the parent template(s)
:type extends_node: ExtendsNode
"""
results = []
# Find all blocks in the complete inheritance chain
blocks = extends_node.blocks.copy() # dict with all blocks in the current template
_extend_blocks(extends_node, blocks, context)
# Dive into all blocks of the page one by one
all_block_names = list(blocks.keys())
for block in list(blocks.values()):
results += _scan_nodes(block.nodelist, context, instance_types, block, ignore_blocks=all_block_names)
# Scan topmost template for nodes that exist outside of blocks
parent_template = _find_topmost_template(extends_node, context)
if not parent_template:
return []
else:
results += _scan_nodes(parent_template.nodelist, context, instance_types, ignore_blocks=all_block_names)
return results | Returns a list of results found in the parent template(s)
:type extends_node: ExtendsNode | Below is the instruction that describes the task:
### Input:
Returns a list of results found in the parent template(s)
:type extends_node: ExtendsNode
### Response:
def _extend_nodelist(extends_node, context, instance_types):
"""
Returns a list of results found in the parent template(s)
:type extends_node: ExtendsNode
"""
results = []
# Find all blocks in the complete inheritance chain
blocks = extends_node.blocks.copy() # dict with all blocks in the current template
_extend_blocks(extends_node, blocks, context)
# Dive into all blocks of the page one by one
all_block_names = list(blocks.keys())
for block in list(blocks.values()):
results += _scan_nodes(block.nodelist, context, instance_types, block, ignore_blocks=all_block_names)
# Scan topmost template for nodes that exist outside of blocks
parent_template = _find_topmost_template(extends_node, context)
if not parent_template:
return []
else:
results += _scan_nodes(parent_template.nodelist, context, instance_types, ignore_blocks=all_block_names)
return results |
def validate_col(self, itemsize=None):
""" validate this column: return the compared against itemsize """
# validate this column for string truncation (or reset to the max size)
if _ensure_decoded(self.kind) == 'string':
c = self.col
if c is not None:
if itemsize is None:
itemsize = self.itemsize
if c.itemsize < itemsize:
raise ValueError(
"Trying to store a string with len [{itemsize}] in "
"[{cname}] column but\nthis column has a limit of "
"[{c_itemsize}]!\nConsider using min_itemsize to "
"preset the sizes on these columns".format(
itemsize=itemsize, cname=self.cname,
c_itemsize=c.itemsize))
return c.itemsize
return None | validate this column: return the compared against itemsize | Below is the instruction that describes the task:
### Input:
validate this column: return the compared against itemsize
### Response:
def validate_col(self, itemsize=None):
""" validate this column: return the compared against itemsize """
# validate this column for string truncation (or reset to the max size)
if _ensure_decoded(self.kind) == 'string':
c = self.col
if c is not None:
if itemsize is None:
itemsize = self.itemsize
if c.itemsize < itemsize:
raise ValueError(
"Trying to store a string with len [{itemsize}] in "
"[{cname}] column but\nthis column has a limit of "
"[{c_itemsize}]!\nConsider using min_itemsize to "
"preset the sizes on these columns".format(
itemsize=itemsize, cname=self.cname,
c_itemsize=c.itemsize))
return c.itemsize
return None |
def rename(self, newpath):
"Move folder to a new name, possibly a whole new path"
# POST https://www.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/Ny%20mappe?mvDir=/**USERNAME**/Jotta/Sync/testFolder
#url = '%s?mvDir=/%s%s' % (self.path, self.jfs.username, newpath)
params = {'mvDir':'/%s%s' % (self.jfs.username, newpath)}
r = self.jfs.post(self.path,
extra_headers={'Content-Type':'application/octet-stream'},
params=params)
return r | Move folder to a new name, possibly a whole new path | Below is the instruction that describes the task:
### Input:
Move folder to a new name, possibly a whole new path
### Response:
def rename(self, newpath):
"Move folder to a new name, possibly a whole new path"
# POST https://www.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/Ny%20mappe?mvDir=/**USERNAME**/Jotta/Sync/testFolder
#url = '%s?mvDir=/%s%s' % (self.path, self.jfs.username, newpath)
params = {'mvDir':'/%s%s' % (self.jfs.username, newpath)}
r = self.jfs.post(self.path,
extra_headers={'Content-Type':'application/octet-stream'},
params=params)
return r |
def _all_dirs(base_path):
"""
Return all dirs in base_path, relative to base_path
"""
for root, dirs, files in os.walk(base_path, followlinks=True):
for dir in dirs:
yield os.path.relpath(os.path.join(root, dir), base_path) | Return all dirs in base_path, relative to base_path | Below is the instruction that describes the task:
### Input:
Return all dirs in base_path, relative to base_path
### Response:
def _all_dirs(base_path):
"""
Return all dirs in base_path, relative to base_path
"""
for root, dirs, files in os.walk(base_path, followlinks=True):
for dir in dirs:
yield os.path.relpath(os.path.join(root, dir), base_path) |
def process(in_path, out_file, n_jobs, framesync):
"""Computes the features for the selected dataset or file."""
if os.path.isfile(in_path):
# Single file mode
# Get (if they exist) or compute features
file_struct = msaf.io.FileStruct(in_path)
file_struct.features_file = out_file
compute_all_features(file_struct, framesync)
else:
# Collection mode
file_structs = msaf.io.get_dataset_files(in_path)
# Call in parallel
return Parallel(n_jobs=n_jobs)(delayed(compute_all_features)(
file_struct, framesync) for file_struct in file_structs) | Computes the features for the selected dataset or file. | Below is the instruction that describes the task:
### Input:
Computes the features for the selected dataset or file.
### Response:
def process(in_path, out_file, n_jobs, framesync):
"""Computes the features for the selected dataset or file."""
if os.path.isfile(in_path):
# Single file mode
# Get (if they exist) or compute features
file_struct = msaf.io.FileStruct(in_path)
file_struct.features_file = out_file
compute_all_features(file_struct, framesync)
else:
# Collection mode
file_structs = msaf.io.get_dataset_files(in_path)
# Call in parallel
return Parallel(n_jobs=n_jobs)(delayed(compute_all_features)(
file_struct, framesync) for file_struct in file_structs) |
def put(self, segment):
"""Adds a segment to the download pool and write queue."""
if self.closed:
return
if segment is not None:
future = self.executor.submit(self.fetch, segment,
retries=self.retries)
else:
future = None
self.queue(self.futures, (segment, future)) | Adds a segment to the download pool and write queue. | Below is the instruction that describes the task:
### Input:
Adds a segment to the download pool and write queue.
### Response:
def put(self, segment):
"""Adds a segment to the download pool and write queue."""
if self.closed:
return
if segment is not None:
future = self.executor.submit(self.fetch, segment,
retries=self.retries)
else:
future = None
self.queue(self.futures, (segment, future)) |
def makenode(clss, symbol, *nexts):
""" Stores the symbol in an AST instance,
and sets left and right to the given ones
"""
result = clss(symbol)
for i in nexts:
if i is None:
continue
if not isinstance(i, clss):
raise NotAnAstError(i)
result.appendChild(i)
return result | Stores the symbol in an AST instance,
and sets left and right to the given ones | Below is the instruction that describes the task:
### Input:
Stores the symbol in an AST instance,
and sets left and right to the given ones
### Response:
def makenode(clss, symbol, *nexts):
""" Stores the symbol in an AST instance,
and sets left and right to the given ones
"""
result = clss(symbol)
for i in nexts:
if i is None:
continue
if not isinstance(i, clss):
raise NotAnAstError(i)
result.appendChild(i)
return result |
def all_dags(self, nodes=None):
"""
Computes all possible directed acyclic graphs with a given set of nodes,
sparse ones first. `2**(n*(n-1))` graphs need to be searched, given `n` nodes,
so this is likely not feasible for n>6. This is a generator.
Parameters
----------
nodes: list of nodes for the DAGs (optional)
A list of the node names that the generated DAGs should have.
If not provided, nodes are taken from data.
Returns
-------
dags: Generator object for nx.DiGraphs
Generator that yields all acyclic nx.DiGraphs, ordered by number of edges. Empty DAG first.
Examples
--------
>>> import pandas as pd
>>> from pgmpy.estimators import ExhaustiveSearch
>>> s = ExhaustiveSearch(pd.DataFrame(data={'Temperature': [23, 19],
'Weather': ['sunny', 'cloudy'],
'Humidity': [65, 75]}))
>>> list(s.all_dags())
[<networkx.classes.digraph.DiGraph object at 0x7f6955216438>,
<networkx.classes.digraph.DiGraph object at 0x7f6955216518>,
....
>>> [dag.edges() for dag in s.all_dags()]
[[], [('Humidity', 'Temperature')], [('Humidity', 'Weather')],
[('Temperature', 'Weather')], [('Temperature', 'Humidity')],
....
[('Weather', 'Humidity'), ('Weather', 'Temperature'), ('Temperature', 'Humidity')]]
"""
if nodes is None:
nodes = sorted(self.state_names.keys())
if len(nodes) > 6:
warn("Generating all DAGs of n nodes likely not feasible for n>6!")
warn("Attempting to search through {0} graphs".format(2**(len(nodes)*(len(nodes)-1))))
edges = list(combinations(nodes, 2)) # n*(n-1) possible directed edges
edges.extend([(y, x) for x, y in edges])
all_graphs = powerset(edges) # 2^(n*(n-1)) graphs
for graph_edges in all_graphs:
graph = nx.DiGraph()
graph.add_nodes_from(nodes)
graph.add_edges_from(graph_edges)
if nx.is_directed_acyclic_graph(graph):
yield graph | Computes all possible directed acyclic graphs with a given set of nodes,
sparse ones first. `2**(n*(n-1))` graphs need to be searched, given `n` nodes,
so this is likely not feasible for n>6. This is a generator.
Parameters
----------
nodes: list of nodes for the DAGs (optional)
A list of the node names that the generated DAGs should have.
If not provided, nodes are taken from data.
Returns
-------
dags: Generator object for nx.DiGraphs
Generator that yields all acyclic nx.DiGraphs, ordered by number of edges. Empty DAG first.
Examples
--------
>>> import pandas as pd
>>> from pgmpy.estimators import ExhaustiveSearch
>>> s = ExhaustiveSearch(pd.DataFrame(data={'Temperature': [23, 19],
'Weather': ['sunny', 'cloudy'],
'Humidity': [65, 75]}))
>>> list(s.all_dags())
[<networkx.classes.digraph.DiGraph object at 0x7f6955216438>,
<networkx.classes.digraph.DiGraph object at 0x7f6955216518>,
....
>>> [dag.edges() for dag in s.all_dags()]
[[], [('Humidity', 'Temperature')], [('Humidity', 'Weather')],
[('Temperature', 'Weather')], [('Temperature', 'Humidity')],
....
[('Weather', 'Humidity'), ('Weather', 'Temperature'), ('Temperature', 'Humidity')]] | Below is the instruction that describes the task:
### Input:
Computes all possible directed acyclic graphs with a given set of nodes,
sparse ones first. `2**(n*(n-1))` graphs need to be searched, given `n` nodes,
so this is likely not feasible for n>6. This is a generator.
Parameters
----------
nodes: list of nodes for the DAGs (optional)
A list of the node names that the generated DAGs should have.
If not provided, nodes are taken from data.
Returns
-------
dags: Generator object for nx.DiGraphs
Generator that yields all acyclic nx.DiGraphs, ordered by number of edges. Empty DAG first.
Examples
--------
>>> import pandas as pd
>>> from pgmpy.estimators import ExhaustiveSearch
>>> s = ExhaustiveSearch(pd.DataFrame(data={'Temperature': [23, 19],
'Weather': ['sunny', 'cloudy'],
'Humidity': [65, 75]}))
>>> list(s.all_dags())
[<networkx.classes.digraph.DiGraph object at 0x7f6955216438>,
<networkx.classes.digraph.DiGraph object at 0x7f6955216518>,
....
>>> [dag.edges() for dag in s.all_dags()]
[[], [('Humidity', 'Temperature')], [('Humidity', 'Weather')],
[('Temperature', 'Weather')], [('Temperature', 'Humidity')],
....
[('Weather', 'Humidity'), ('Weather', 'Temperature'), ('Temperature', 'Humidity')]]
### Response:
def all_dags(self, nodes=None):
"""
Computes all possible directed acyclic graphs with a given set of nodes,
sparse ones first. `2**(n*(n-1))` graphs need to be searched, given `n` nodes,
so this is likely not feasible for n>6. This is a generator.
Parameters
----------
nodes: list of nodes for the DAGs (optional)
A list of the node names that the generated DAGs should have.
If not provided, nodes are taken from data.
Returns
-------
dags: Generator object for nx.DiGraphs
Generator that yields all acyclic nx.DiGraphs, ordered by number of edges. Empty DAG first.
Examples
--------
>>> import pandas as pd
>>> from pgmpy.estimators import ExhaustiveSearch
>>> s = ExhaustiveSearch(pd.DataFrame(data={'Temperature': [23, 19],
'Weather': ['sunny', 'cloudy'],
'Humidity': [65, 75]}))
>>> list(s.all_dags())
[<networkx.classes.digraph.DiGraph object at 0x7f6955216438>,
<networkx.classes.digraph.DiGraph object at 0x7f6955216518>,
....
>>> [dag.edges() for dag in s.all_dags()]
[[], [('Humidity', 'Temperature')], [('Humidity', 'Weather')],
[('Temperature', 'Weather')], [('Temperature', 'Humidity')],
....
[('Weather', 'Humidity'), ('Weather', 'Temperature'), ('Temperature', 'Humidity')]]
"""
if nodes is None:
nodes = sorted(self.state_names.keys())
if len(nodes) > 6:
warn("Generating all DAGs of n nodes likely not feasible for n>6!")
warn("Attempting to search through {0} graphs".format(2**(len(nodes)*(len(nodes)-1))))
edges = list(combinations(nodes, 2)) # n*(n-1) possible directed edges
edges.extend([(y, x) for x, y in edges])
all_graphs = powerset(edges) # 2^(n*(n-1)) graphs
for graph_edges in all_graphs:
graph = nx.DiGraph()
graph.add_nodes_from(nodes)
graph.add_edges_from(graph_edges)
if nx.is_directed_acyclic_graph(graph):
yield graph |
def build_graph(self):
"""Build a whole graph for the model."""
self.global_step = tf.Variable(0, trainable=False)
self._build_model()
if self.mode == "train":
self._build_train_op()
else:
# Additional initialization for the test network.
self.variables = ray.experimental.tf_utils.TensorFlowVariables(
self.cost)
self.summaries = tf.summary.merge_all() | Build a whole graph for the model. | Below is the instruction that describes the task:
### Input:
Build a whole graph for the model.
### Response:
def build_graph(self):
"""Build a whole graph for the model."""
self.global_step = tf.Variable(0, trainable=False)
self._build_model()
if self.mode == "train":
self._build_train_op()
else:
# Additional initialization for the test network.
self.variables = ray.experimental.tf_utils.TensorFlowVariables(
self.cost)
self.summaries = tf.summary.merge_all() |
def append_position(path, position, separator=''):
"""
Concatenate a path and a position,
between the filename and the extension.
"""
filename, extension = os.path.splitext(path)
return ''.join([filename, separator, str(position), extension]) | Concatenate a path and a position,
between the filename and the extension. | Below is the the instruction that describes the task:
### Input:
Concatenate a path and a position,
between the filename and the extension.
### Response:
def append_position(path, position, separator=''):
"""
Concatenate a path and a position,
between the filename and the extension.
"""
filename, extension = os.path.splitext(path)
return ''.join([filename, separator, str(position), extension]) |
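Quick examples of `append_position` (the paths are hypothetical):
```python
append_position("report.csv", 3, separator="_")  # -> 'report_3.csv'
append_position("/tmp/img.png", 2)               # -> '/tmp/img2.png'
```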
def update(self, *args, **kwargs):
"""
Equivalent to the python dict update method.
Update the dictionary with the key/value pairs from other, overwriting
existing keys.
Args:
other (dict): The source of key value pairs to add to headers
Keyword Args:
All keyword arguments are stored in header directly
Returns:
None
"""
for next_dict in chain(args, (kwargs, )):
for k, v in next_dict.items():
self[k] = v | Equivalent to the python dict update method.
Update the dictionary with the key/value pairs from other, overwriting
existing keys.
Args:
other (dict): The source of key value pairs to add to headers
Keyword Args:
All keyword arguments are stored in header directly
Returns:
None | Below is the instruction that describes the task:
### Input:
Equivalent to the python dict update method.
Update the dictionary with the key/value pairs from other, overwriting
existing keys.
Args:
other (dict): The source of key value pairs to add to headers
Keyword Args:
All keyword arguments are stored in header directly
Returns:
None
### Response:
def update(self, *args, **kwargs):
"""
Equivalent to the python dict update method.
Update the dictionary with the key/value pairs from other, overwriting
existing keys.
Args:
other (dict): The source of key value pairs to add to headers
Keyword Args:
All keyword arguments are stored in header directly
Returns:
None
"""
for next_dict in chain(args, (kwargs, )):
for k, v in next_dict.items():
self[k] = v |
def probe_image(self, labels, instance, column_name=None, num_scaled_images=50,
top_percent=10):
""" Get pixel importance of the image.
It performs pixel sensitivity analysis by showing only the most important pixels to a
certain label in the image. It uses integrated gradients to measure the
importance of each pixel.
Args:
labels: labels to compute gradients from.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
img_column_name: the name of the image column to probe. If there is only one image
column it can be None.
num_scaled_images: Number of scaled images to get grads from. For example, if 10,
the image will be scaled by 0.1, 0.2, ..., 0,9, 1.0 and it will produce
10 images for grads computation.
top_percent: The percentile of pixels to show only. for example, if 10,
only top 10% impactful pixels will be shown and rest of the pixels will be black.
Returns:
A tuple. First is the resized original image (299x299x3). Second is a list of
the visualization with same size that highlights the most important pixels, one
per each label.
"""
if len(self._image_columns) > 1 and not column_name:
raise ValueError('There are multiple image columns in the input of the model. ' +
'Please specify "column_name".')
elif column_name and column_name not in self._image_columns:
raise ValueError('Specified column_name "%s" not found in the model input.' %
column_name)
image_column_name = column_name if column_name else self._image_columns[0]
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
image_path = instance[image_column_name]
with file_io.FileIO(image_path, 'rb') as fi:
im = Image.open(fi)
resized_image = im.resize((299, 299))
# Produce a list of scaled images, create instances (csv lines) from these images.
step = 1. / num_scaled_images
scales = np.arange(0.0, 1.0, step) + step
csv_lines = []
for s in scales:
pixels = (np.asarray(resized_image) * s).astype('uint8')
scaled_image = Image.fromarray(pixels)
buf = io.BytesIO()
scaled_image.save(buf, "JPEG")
encoded_image = base64.urlsafe_b64encode(buf.getvalue()).decode('ascii')
instance_copy = dict(instance)
instance_copy[image_column_name] = encoded_image
buf = six.StringIO()
writer = csv.DictWriter(buf, fieldnames=self._headers, lineterminator='')
writer.writerow(instance_copy)
csv_lines.append(buf.getvalue())
integrated_gradients_images = []
for label in labels:
# Send to tf model to get gradients.
grads = self._image_gradients(csv_lines, label, image_column_name)
integrated_grads = resized_image * np.average(grads, axis=0)
# Gray scale the grads by removing color dimension.
# abs() is for getting the most impactful pixels regardless positive or negative.
grayed = np.average(abs(integrated_grads), axis=2)
grayed = np.transpose([grayed, grayed, grayed], axes=[1, 2, 0])
# Only show the most impactful pixels.
p = np.percentile(grayed, 100 - top_percent)
viz_window = np.where(grayed > p, 1, 0)
vis = resized_image * viz_window
im_vis = Image.fromarray(np.uint8(vis))
integrated_gradients_images.append(im_vis)
return resized_image, integrated_gradients_images | Get pixel importance of the image.
It performs pixel sensitivity analysis by showing only the most important pixels to a
certain label in the image. It uses integrated gradients to measure the
importance of each pixel.
Args:
labels: labels to compute gradients from.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
img_column_name: the name of the image column to probe. If there is only one image
column it can be None.
num_scaled_images: Number of scaled images to get grads from. For example, if 10,
the image will be scaled by 0.1, 0.2, ..., 0.9, 1.0 and it will produce
10 images for grads computation.
top_percent: The percentile of pixels to show only. for example, if 10,
only top 10% impactful pixels will be shown and rest of the pixels will be black.
Returns:
A tuple. First is the resized original image (299x299x3). Second is a list of
the visualization with same size that highlights the most important pixels, one
per each label. | Below is the instruction that describes the task:
### Input:
Get pixel importance of the image.
It performs pixel sensitivity analysis by showing only the most important pixels to a
certain label in the image. It uses integrated gradients to measure the
importance of each pixel.
Args:
labels: labels to compute gradients from.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
img_column_name: the name of the image column to probe. If there is only one image
column it can be None.
num_scaled_images: Number of scaled images to get grads from. For example, if 10,
the image will be scaled by 0.1, 0.2, ..., 0.9, 1.0 and it will produce
10 images for grads computation.
top_percent: The percentile of pixels to show only. for example, if 10,
only top 10% impactful pixels will be shown and rest of the pixels will be black.
Returns:
A tuple. First is the resized original image (299x299x3). Second is a list of
the visualization with same size that highlights the most important pixels, one
per each label.
### Response:
def probe_image(self, labels, instance, column_name=None, num_scaled_images=50,
top_percent=10):
""" Get pixel importance of the image.
It performs pixel sensitivity analysis by showing only the most important pixels to a
certain label in the image. It uses integrated gradients to measure the
importance of each pixel.
Args:
labels: labels to compute gradients from.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
img_column_name: the name of the image column to probe. If there is only one image
column it can be None.
num_scaled_images: Number of scaled images to get grads from. For example, if 10,
the image will be scaled by 0.1, 0.2, ..., 0.9, 1.0 and it will produce
10 images for grads computation.
top_percent: The percentile of pixels to show only. for example, if 10,
only top 10% impactful pixels will be shown and rest of the pixels will be black.
Returns:
A tuple. First is the resized original image (299x299x3). Second is a list of
the visualization with same size that highlights the most important pixels, one
per each label.
"""
if len(self._image_columns) > 1 and not column_name:
raise ValueError('There are multiple image columns in the input of the model. ' +
'Please specify "column_name".')
elif column_name and column_name not in self._image_columns:
raise ValueError('Specified column_name "%s" not found in the model input.' %
column_name)
image_column_name = column_name if column_name else self._image_columns[0]
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
image_path = instance[image_column_name]
with file_io.FileIO(image_path, 'rb') as fi:
im = Image.open(fi)
resized_image = im.resize((299, 299))
# Produce a list of scaled images, create instances (csv lines) from these images.
step = 1. / num_scaled_images
scales = np.arange(0.0, 1.0, step) + step
csv_lines = []
for s in scales:
pixels = (np.asarray(resized_image) * s).astype('uint8')
scaled_image = Image.fromarray(pixels)
buf = io.BytesIO()
scaled_image.save(buf, "JPEG")
encoded_image = base64.urlsafe_b64encode(buf.getvalue()).decode('ascii')
instance_copy = dict(instance)
instance_copy[image_column_name] = encoded_image
buf = six.StringIO()
writer = csv.DictWriter(buf, fieldnames=self._headers, lineterminator='')
writer.writerow(instance_copy)
csv_lines.append(buf.getvalue())
integrated_gradients_images = []
for label in labels:
# Send to tf model to get gradients.
grads = self._image_gradients(csv_lines, label, image_column_name)
integrated_grads = resized_image * np.average(grads, axis=0)
# Gray scale the grads by removing color dimension.
# abs() is for getting the most impactful pixels regardless positive or negative.
grayed = np.average(abs(integrated_grads), axis=2)
grayed = np.transpose([grayed, grayed, grayed], axes=[1, 2, 0])
# Only show the most impactful pixels.
p = np.percentile(grayed, 100 - top_percent)
viz_window = np.where(grayed > p, 1, 0)
vis = resized_image * viz_window
im_vis = Image.fromarray(np.uint8(vis))
integrated_gradients_images.append(im_vis)
return resized_image, integrated_gradients_images |
def get_special_elections(self, obj):
"""States holding a special election on election day."""
return reverse(
'electionnight_api_special-election-list',
request=self.context['request'],
kwargs={'date': obj.date}
) | States holding a special election on election day. | Below is the instruction that describes the task:
### Input:
States holding a special election on election day.
### Response:
def get_special_elections(self, obj):
"""States holding a special election on election day."""
return reverse(
'electionnight_api_special-election-list',
request=self.context['request'],
kwargs={'date': obj.date}
) |
def ToJson(self, index):
"""
Convert object members to a dictionary that can be parsed as JSON.
Args:
index (int): The index of the output in a transaction
Returns:
dict:
"""
return {
'n': index,
'asset': self.AssetId.To0xString(),
'value': self.Value.ToNeoJsonString(),
'address': self.Address
} | Convert object members to a dictionary that can be parsed as JSON.
Args:
index (int): The index of the output in a transaction
Returns:
dict: | Below is the instruction that describes the task:
### Input:
Convert object members to a dictionary that can be parsed as JSON.
Args:
index (int): The index of the output in a transaction
Returns:
dict:
### Response:
def ToJson(self, index):
"""
Convert object members to a dictionary that can be parsed as JSON.
Args:
index (int): The index of the output in a transaction
Returns:
dict:
"""
return {
'n': index,
'asset': self.AssetId.To0xString(),
'value': self.Value.ToNeoJsonString(),
'address': self.Address
} |
def ssl_wrap_socket(
socket: socket.socket,
ssl_options: Union[Dict[str, Any], ssl.SSLContext],
server_hostname: str = None,
**kwargs: Any
) -> ssl.SSLSocket:
"""Returns an ``ssl.SSLSocket`` wrapping the given socket.
``ssl_options`` may be either an `ssl.SSLContext` object or a
dictionary (as accepted by `ssl_options_to_context`). Additional
keyword arguments are passed to ``wrap_socket`` (either the
`~ssl.SSLContext` method or the `ssl` module function as
appropriate).
"""
context = ssl_options_to_context(ssl_options)
if ssl.HAS_SNI:
# In python 3.4, wrap_socket only accepts the server_hostname
# argument if HAS_SNI is true.
# TODO: add a unittest (python added server-side SNI support in 3.4)
# In the meantime it can be manually tested with
# python3 -m tornado.httpclient https://sni.velox.ch
return context.wrap_socket(socket, server_hostname=server_hostname, **kwargs)
else:
return context.wrap_socket(socket, **kwargs) | Returns an ``ssl.SSLSocket`` wrapping the given socket.
``ssl_options`` may be either an `ssl.SSLContext` object or a
dictionary (as accepted by `ssl_options_to_context`). Additional
keyword arguments are passed to ``wrap_socket`` (either the
`~ssl.SSLContext` method or the `ssl` module function as
appropriate). | Below is the instruction that describes the task:
### Input:
Returns an ``ssl.SSLSocket`` wrapping the given socket.
``ssl_options`` may be either an `ssl.SSLContext` object or a
dictionary (as accepted by `ssl_options_to_context`). Additional
keyword arguments are passed to ``wrap_socket`` (either the
`~ssl.SSLContext` method or the `ssl` module function as
appropriate).
### Response:
def ssl_wrap_socket(
socket: socket.socket,
ssl_options: Union[Dict[str, Any], ssl.SSLContext],
server_hostname: str = None,
**kwargs: Any
) -> ssl.SSLSocket:
"""Returns an ``ssl.SSLSocket`` wrapping the given socket.
``ssl_options`` may be either an `ssl.SSLContext` object or a
dictionary (as accepted by `ssl_options_to_context`). Additional
keyword arguments are passed to ``wrap_socket`` (either the
`~ssl.SSLContext` method or the `ssl` module function as
appropriate).
"""
context = ssl_options_to_context(ssl_options)
if ssl.HAS_SNI:
# In python 3.4, wrap_socket only accepts the server_hostname
# argument if HAS_SNI is true.
# TODO: add a unittest (python added server-side SNI support in 3.4)
# In the meantime it can be manually tested with
# python3 -m tornado.httpclient https://sni.velox.ch
return context.wrap_socket(socket, server_hostname=server_hostname, **kwargs)
else:
return context.wrap_socket(socket, **kwargs) |
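A minimal client-side usage sketch for ssl_wrap_socket above, assuming only what the docstring guarantees (an ssl.SSLContext is accepted directly); the host name is a placeholder.
import socket
import ssl
ctx = ssl.create_default_context()  # standard client context; passed through unchanged
raw_sock = socket.create_connection(("example.com", 443))  # placeholder host
tls_sock = ssl_wrap_socket(raw_sock, ctx, server_hostname="example.com")
tls_sock.close()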
def validate_repo_url(url):
"""
Validates and formats `url` to be valid URL pointing to a repo on bitbucket.org or github.com
:param url: str, URL
:return: str, valid URL if valid repo, empty string otherwise
"""
try:
if "github.com" in url:
return re.findall(r"https?://w?w?w?.?github.com/[\w\-]+/[\w.-]+", url)[0]
elif "bitbucket.org" in url:
return re.findall(r"https?://bitbucket.org/[\w.-]+/[\w.-]+", url)[0] + "/src/"
elif "launchpad.net" in url:
return re.findall(r"https?://launchpad.net/[\w.-]+", url)[0]
elif "sourceforge.net" in url:
mo = re.match(r"https?://sourceforge.net/projects/"
r"([\w.-]+)/", url, re.I)
template = "https://sourceforge.net/p/{}/code/HEAD/tree/trunk/src/"
return template.format(mo.groups()[0])
except (IndexError, AttributeError):
pass
return "" | Validates and formats `url` to be valid URL pointing to a repo on bitbucket.org or github.com
:param url: str, URL
:return: str, valid URL if valid repo, empty string otherwise | Below is the instruction that describes the task:
### Input:
Validates and formats `url` to be valid URL pointing to a repo on bitbucket.org or github.com
:param url: str, URL
:return: str, valid URL if valid repo, empty string otherwise
### Response:
def validate_repo_url(url):
"""
Validates and formats `url` to be valid URL pointing to a repo on bitbucket.org or github.com
:param url: str, URL
:return: str, valid URL if valid repo, empty string otherwise
"""
try:
if "github.com" in url:
return re.findall(r"https?://w?w?w?.?github.com/[\w\-]+/[\w.-]+", url)[0]
elif "bitbucket.org" in url:
return re.findall(r"https?://bitbucket.org/[\w.-]+/[\w.-]+", url)[0] + "/src/"
elif "launchpad.net" in url:
return re.findall(r"https?://launchpad.net/[\w.-]+", url)[0]
elif "sourceforge.net" in url:
mo = re.match(r"https?://sourceforge.net/projects/"
r"([\w.-]+)/", url, re.I)
template = "https://sourceforge.net/p/{}/code/HEAD/tree/trunk/src/"
return template.format(mo.groups()[0])
except (IndexError, AttributeError):
pass
return "" |
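A short usage sketch for validate_repo_url; the return values below follow directly from the regexes above, and the URLs are placeholders.
validate_repo_url("https://github.com/someuser/someproject")
# -> 'https://github.com/someuser/someproject'
validate_repo_url("https://bitbucket.org/someuser/someproject")
# -> 'https://bitbucket.org/someuser/someproject/src/'
validate_repo_url("https://example.com/not-a-repo")
# -> ''  (no supported host matched)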
def serve_static(request, path, insecure=False, **kwargs):
"""Collect and serve static files.
This view serves up static files, much like Django's
:py:func:`~django.views.static.serve` view, with the addition that it
collects static files first (if enabled). This allows images, fonts, and
other assets to be served up without first loading a page using the
``{% javascript %}`` or ``{% stylesheet %}`` template tags.
You can use this view by adding the following to any :file:`urls.py`::
urlpatterns += static('static/', view='pipeline.views.serve_static')
"""
# Follow the same logic Django uses for determining access to the
# static-serving view.
if not django_settings.DEBUG and not insecure:
raise ImproperlyConfigured("The staticfiles view can only be used in "
"debug mode or if the --insecure "
"option of 'runserver' is used")
if not settings.PIPELINE_ENABLED and settings.PIPELINE_COLLECTOR_ENABLED:
# Collect only the requested file, in order to serve the result as
# fast as possible. This won't interfere with the template tags in any
# way, as those will still cause Django to collect all media.
default_collector.collect(request, files=[path])
return serve(request, path, document_root=django_settings.STATIC_ROOT,
**kwargs) | Collect and serve static files.
This view serves up static files, much like Django's
:py:func:`~django.views.static.serve` view, with the addition that it
collects static files first (if enabled). This allows images, fonts, and
other assets to be served up without first loading a page using the
``{% javascript %}`` or ``{% stylesheet %}`` template tags.
You can use this view by adding the following to any :file:`urls.py`::
urlpatterns += static('static/', view='pipeline.views.serve_static') | Below is the instruction that describes the task:
### Input:
Collect and serve static files.
This view serves up static files, much like Django's
:py:func:`~django.views.static.serve` view, with the addition that it
collects static files first (if enabled). This allows images, fonts, and
other assets to be served up without first loading a page using the
``{% javascript %}`` or ``{% stylesheet %}`` template tags.
You can use this view by adding the following to any :file:`urls.py`::
urlpatterns += static('static/', view='pipeline.views.serve_static')
### Response:
def serve_static(request, path, insecure=False, **kwargs):
"""Collect and serve static files.
This view serves up static files, much like Django's
:py:func:`~django.views.static.serve` view, with the addition that it
collects static files first (if enabled). This allows images, fonts, and
other assets to be served up without first loading a page using the
``{% javascript %}`` or ``{% stylesheet %}`` template tags.
You can use this view by adding the following to any :file:`urls.py`::
urlpatterns += static('static/', view='pipeline.views.serve_static')
"""
# Follow the same logic Django uses for determining access to the
# static-serving view.
if not django_settings.DEBUG and not insecure:
raise ImproperlyConfigured("The staticfiles view can only be used in "
"debug mode or if the --insecure "
"option of 'runserver' is used")
if not settings.PIPELINE_ENABLED and settings.PIPELINE_COLLECTOR_ENABLED:
# Collect only the requested file, in order to serve the result as
# fast as possible. This won't interfere with the template tags in any
# way, as those will still cause Django to collect all media.
default_collector.collect(request, files=[path])
return serve(request, path, document_root=django_settings.STATIC_ROOT,
**kwargs) |
def key_leaf(self, data, schema, tree):
"""
The deepest validation we can make in any given circumstance for a key.
Does not recurse, it will just receive both values and the tree,
passing them on to the :fun:`enforce` function.
"""
key, value = data
schema_key, schema_value = schema
enforce(key, schema_key, tree, 'key') | The deepest validation we can make in any given circumstance for a key.
Does not recurse, it will just receive both values and the tree,
passing them on to the :fun:`enforce` function. | Below is the instruction that describes the task:
### Input:
The deepest validation we can make in any given circumstance for a key.
Does not recurse, it will just receive both values and the tree,
passing them on to the :fun:`enforce` function.
### Response:
def key_leaf(self, data, schema, tree):
"""
The deepest validation we can make in any given circumstance for a key.
Does not recurse, it will just receive both values and the tree,
passing them on to the :fun:`enforce` function.
"""
key, value = data
schema_key, schema_value = schema
enforce(key, schema_key, tree, 'key') |
def fs_r(self, percent=0.9, N=None):
"""Get the row factor scores (dimensionality-reduced representation),
choosing how many factors to retain, directly or based on the explained
variance.
'percent': The minimum variance that the retained factors are required
to explain (default: 90% = 0.9)
'N': The number of factors to retain. Overrides 'percent'.
If the rank is less than N, N is ignored.
"""
if not 0 <= percent <= 1:
raise ValueError("Percent should be a real number between 0 and 1.")
if N:
if not isinstance(N, (int, int64)) or N <= 0:
raise ValueError("N should be a positive integer.")
N = min(N, self.rank)
self.k = 1 + flatnonzero(cumsum(self.L) >= sum(self.L)*percent)[0]
# S = zeros((self._numitems, self.k))
# the sign of the square root can be either way; singular value vs. eigenvalue
# fill_diagonal(S, -sqrt(self.E) if self.cor else self.s)
num2ret = N if N else self.k
s = -sqrt(self.L) if self.cor else self.s
S = diagsvd(s[:num2ret], self._numitems, num2ret)
self.F = self.D_r.dot(self.P).dot(S)
return self.F | Get the row factor scores (dimensionality-reduced representation),
choosing how many factors to retain, directly or based on the explained
variance.
'percent': The minimum variance that the retained factors are required
to explain (default: 90% = 0.9)
'N': The number of factors to retain. Overrides 'percent'.
If the rank is less than N, N is ignored. | Below is the instruction that describes the task:
### Input:
Get the row factor scores (dimensionality-reduced representation),
choosing how many factors to retain, directly or based on the explained
variance.
'percent': The minimum variance that the retained factors are required
to explain (default: 90% = 0.9)
'N': The number of factors to retain. Overrides 'percent'.
If the rank is less than N, N is ignored.
### Response:
def fs_r(self, percent=0.9, N=None):
"""Get the row factor scores (dimensionality-reduced representation),
choosing how many factors to retain, directly or based on the explained
variance.
'percent': The minimum variance that the retained factors are required
to explain (default: 90% = 0.9)
'N': The number of factors to retain. Overrides 'percent'.
If the rank is less than N, N is ignored.
"""
if not 0 <= percent <= 1:
raise ValueError("Percent should be a real number between 0 and 1.")
if N:
if not isinstance(N, (int, int64)) or N <= 0:
raise ValueError("N should be a positive integer.")
N = min(N, self.rank)
self.k = 1 + flatnonzero(cumsum(self.L) >= sum(self.L)*percent)[0]
# S = zeros((self._numitems, self.k))
# the sign of the square root can be either way; singular value vs. eigenvalue
# fill_diagonal(S, -sqrt(self.E) if self.cor else self.s)
num2ret = N if N else self.k
s = -sqrt(self.L) if self.cor else self.s
S = diagsvd(s[:num2ret], self._numitems, num2ret)
self.F = self.D_r.dot(self.P).dot(S)
return self.F |
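A hedged usage sketch for fs_r; the surrounding correspondence-analysis class is not shown in this record, so the fitted instance name ca below is hypothetical.
scores_90pct = ca.fs_r(percent=0.9)  # keep enough factors to explain 90% of the variance
scores_2d = ca.fs_r(N=2)             # or keep exactly two factors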
def _assign_enterprise_role_to_users(self, _get_batch_method, options, is_feature_role=False):
"""
Assigns enterprise role to users.
"""
role_name = options['role']
batch_limit = options['batch_limit']
batch_sleep = options['batch_sleep']
batch_offset = options['batch_offset']
current_batch_index = batch_offset
users_batch = _get_batch_method(
batch_offset,
batch_offset + batch_limit
)
role_class = SystemWideEnterpriseRole
role_assignment_class = SystemWideEnterpriseUserRoleAssignment
if is_feature_role:
role_class = EnterpriseFeatureRole
role_assignment_class = EnterpriseFeatureUserRoleAssignment
enterprise_role = role_class.objects.get(name=role_name)
while users_batch.count() > 0:
for index, user in enumerate(users_batch):
LOGGER.info(
'Processing user with index %s and id %s',
current_batch_index + index, user.id
)
role_assignment_class.objects.get_or_create(
user=user,
role=enterprise_role
)
sleep(batch_sleep)
current_batch_index += len(users_batch)
users_batch = _get_batch_method(
current_batch_index,
current_batch_index + batch_limit
) | Assigns enterprise role to users. | Below is the instruction that describes the task:
### Input:
Assigns enterprise role to users.
### Response:
def _assign_enterprise_role_to_users(self, _get_batch_method, options, is_feature_role=False):
"""
Assigns enterprise role to users.
"""
role_name = options['role']
batch_limit = options['batch_limit']
batch_sleep = options['batch_sleep']
batch_offset = options['batch_offset']
current_batch_index = batch_offset
users_batch = _get_batch_method(
batch_offset,
batch_offset + batch_limit
)
role_class = SystemWideEnterpriseRole
role_assignment_class = SystemWideEnterpriseUserRoleAssignment
if is_feature_role:
role_class = EnterpriseFeatureRole
role_assignment_class = EnterpriseFeatureUserRoleAssignment
enterprise_role = role_class.objects.get(name=role_name)
while users_batch.count() > 0:
for index, user in enumerate(users_batch):
LOGGER.info(
'Processing user with index %s and id %s',
current_batch_index + index, user.id
)
role_assignment_class.objects.get_or_create(
user=user,
role=enterprise_role
)
sleep(batch_sleep)
current_batch_index += len(users_batch)
users_batch = _get_batch_method(
current_batch_index,
current_batch_index + batch_limit
) |
def callbacks(cls, eventType=None):
"""
Returns a list of callback methods that can be invoked whenever an event is processed.
:return: {subclass of <Event>: <list>, ..}
"""
key = '_{0}__callbacks'.format(cls.__name__)
try:
callbacks = getattr(cls, key)
except AttributeError:
callbacks = {}
setattr(cls, key, callbacks)
return callbacks.get(eventType, []) if eventType is not None else callbacks | Returns a list of callback methods that can be invoked whenever an event is processed.
:return: {subclass of <Event>: <list>, ..} | Below is the instruction that describes the task:
### Input:
Returns a list of callback methods that can be invoked whenever an event is processed.
:return: {subclass of <Event>: <list>, ..}
### Response:
def callbacks(cls, eventType=None):
"""
Returns a list of callback methods that can be invoked whenever an event is processed.
:return: {subclass of <Event>: <list>, ..}
"""
key = '_{0}__callbacks'.format(cls.__name__)
try:
callbacks = getattr(cls, key)
except AttributeError:
callbacks = {}
setattr(cls, key, callbacks)
return callbacks.get(eventType, []) if eventType is not None else callbacks |
def find_country(session, code):
"""Find a country.
Find a country by its ISO-3166 `code` (i.e ES for Spain,
US for United States of America) using the given `session`.
When the country does not exist the function will return
`None`.
:param session: database session
:param code: ISO-3166 code of the country to find
:return: a country object; `None` when the country
does not exist
"""
country = session.query(Country).\
filter(Country.code == code).first()
return country | Find a country.
Find a country by its ISO-3166 `code` (i.e ES for Spain,
US for United States of America) using the given `session`.
When the country does not exist the function will return
`None`.
:param session: database session
:param code: ISO-3166 code of the country to find
:return: a country object; `None` when the country
does not exist | Below is the instruction that describes the task:
### Input:
Find a country.
Find a country by its ISO-3166 `code` (i.e ES for Spain,
US for United States of America) using the given `session`.
When the country does not exist the function will return
`None`.
:param session: database session
:param code: ISO-3166 code of the country to find
:return: a country object; `None` when the country
does not exist
### Response:
def find_country(session, code):
"""Find a country.
Find a country by its ISO-3166 `code` (i.e ES for Spain,
US for United States of America) using the given `session`.
When the country does not exist the function will return
`None`.
:param session: database session
:param code: ISO-3166 code of the country to find
:return: a country object; `None` when the country
does not exist
"""
country = session.query(Country).\
filter(Country.code == code).first()
return country |
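A small usage sketch for find_country, assuming an open SQLAlchemy session and the Country model referenced above (neither is defined in this record).
spain = find_country(session, 'ES')  # session: an open SQLAlchemy session (setup not shown)
if spain is None:
    print('No country registered with code ES')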
def plot_triaxial_depths_speed(tag):
'''Plot triaxial accelerometer data for whole deployment, descents, and
ascents
Only x and z axes are plotted since these are associated with stroking
Args
----
tag: pandas.DataFrame
Tag dataframe with accelerometer, depth, and propeller columns
'''
import numpy
from . import plotutils
# TODO return to multiple inputs rather than dataframe
fig, axes = plt.subplots(3, 3, sharex='col', sharey='row')
((ax1, ax4, ax7), (ax2, ax5, ax8), (ax3, ax6, ax9)) = axes
# Create mask of all True for length of depths
all_ind = numpy.arange(0, len(tag), dtype=int)
cols = [('x', tag['Ax_g'], [ax1, ax2, ax3]),
('y', tag['Ay_g'], [ax4, ax5, ax6]),
('z', tag['Az_g'], [ax7, ax8, ax9])]
for label, y, axes in cols:
axes[0].title.set_text('Accelerometer {}-axis'.format(label))
axes[0].plot(range(len(y)), y, color=_colors[0],
linewidth=_linewidth, label='x')
axes[1].title.set_text('Depths')
axes[1] = plotutils.plot_noncontiguous(axes[1], tag['depth'], all_ind,
color=_colors[1])
axes[1].invert_yaxis()
axes[2] = plotutils.plot_noncontiguous(axes[2], tag['propeller'],
all_ind, color=_colors[2],
label='propeller')
plt.show()
return None | Plot triaxial accelerometer data for whole deployment, descents, and
ascents
Only x and z axes are plotted since these are associated with stroking
Args
----
tag: pandas.DataFrame
Tag dataframe with accelerometer, depth, and propeller columns | Below is the instruction that describes the task:
### Input:
Plot triaxial accelerometer data for whole deployment, descents, and
ascents
Only x and z axes are plotted since these are associated with stroking
Args
----
tag: pandas.DataFrame
Tag dataframe with accelerometer, depth, and propeller columns
### Response:
def plot_triaxial_depths_speed(tag):
'''Plot triaxial accelerometer data for whole deployment, descents, and
ascents
Only x and z axes are plotted since these are associated with stroking
Args
----
tag: pandas.DataFrame
Tag dataframe with accelerometer, depth, and propeller columns
'''
import numpy
from . import plotutils
# TODO return to multiple inputs rather than dataframe
fig, axes = plt.subplots(3, 3, sharex='col', sharey='row')
((ax1, ax4, ax7), (ax2, ax5, ax8), (ax3, ax6, ax9)) = axes
# Create mask of all True for length of depths
all_ind = numpy.arange(0, len(tag), dtype=int)
cols = [('x', tag['Ax_g'], [ax1, ax2, ax3]),
('y', tag['Ay_g'], [ax4, ax5, ax6]),
('z', tag['Az_g'], [ax7, ax8, ax9])]
for label, y, axes in cols:
axes[0].title.set_text('Accelerometer {}-axis'.format(label))
axes[0].plot(range(len(y)), y, color=_colors[0],
linewidth=_linewidth, label='x')
axes[1].title.set_text('Depths')
axes[1] = plotutils.plot_noncontiguous(axes[1], tag['depth'], all_ind,
color=_colors[1])
axes[1].invert_yaxis()
axes[2] = plotutils.plot_noncontiguous(axes[2], tag['propeller'],
all_ind, color=_colors[2],
label='propeller')
plt.show()
return None |
def ram_dumper(**kwargs):
"""Dump data to 'memory' for later usage."""
logging.debug("trying to save stuff in memory")
farms = kwargs["farms"]
experiments = kwargs["experiments"]
engine = kwargs["engine"]
try:
engine_name = engine.__name__
except AttributeError:
engine_name = engine.__dict__.__name__
accepted_engines = ["summary_engine",]
if engine_name in accepted_engines:
logging.debug("found the engine that I will try to dump from: "
f"{engine_name}")
for experiment, farm in zip(experiments, farms):
name = experiment.journal.name
project = experiment.journal.project
experiment.memory_dumped[engine_name] = farm
logging.debug(f"farm put into memory_dumped ({project}::{name})") | Dump data to 'memory' for later usage. | Below is the instruction that describes the task:
### Input:
Dump data to 'memory' for later usage.
### Response:
def ram_dumper(**kwargs):
"""Dump data to 'memory' for later usage."""
logging.debug("trying to save stuff in memory")
farms = kwargs["farms"]
experiments = kwargs["experiments"]
engine = kwargs["engine"]
try:
engine_name = engine.__name__
except AttributeError:
engine_name = engine.__dict__.__name__
accepted_engines = ["summary_engine",]
if engine_name in accepted_engines:
logging.debug("found the engine that I will try to dump from: "
f"{engine_name}")
for experiment, farm in zip(experiments, farms):
name = experiment.journal.name
project = experiment.journal.project
experiment.memory_dumped[engine_name] = farm
logging.debug(f"farm put into memory_dumped ({project}::{name})") |
def get_ast_field_name(ast):
"""Return the normalized field name for the given AST node."""
replacements = {
# We always rewrite the following field names into their proper underlying counterparts.
TYPENAME_META_FIELD_NAME: '@class'
}
base_field_name = ast.name.value
normalized_name = replacements.get(base_field_name, base_field_name)
return normalized_name | Return the normalized field name for the given AST node. | Below is the instruction that describes the task:
### Input:
Return the normalized field name for the given AST node.
### Response:
def get_ast_field_name(ast):
"""Return the normalized field name for the given AST node."""
replacements = {
# We always rewrite the following field names into their proper underlying counterparts.
TYPENAME_META_FIELD_NAME: '@class'
}
base_field_name = ast.name.value
normalized_name = replacements.get(base_field_name, base_field_name)
return normalized_name |
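A tiny illustration of get_ast_field_name using stand-in objects; real field nodes come from the graphql library, and TYPENAME_META_FIELD_NAME is assumed here to be the usual '__typename' meta field.
class _Name:
    def __init__(self, value):
        self.value = value
class _FieldNode:
    def __init__(self, value):
        self.name = _Name(value)
get_ast_field_name(_FieldNode('uuid'))        # -> 'uuid' (returned unchanged)
get_ast_field_name(_FieldNode('__typename'))  # -> '@class' via the replacements table, assuming the constant above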
def des_cbc_pkcs5_decrypt(key, data, iv):
"""
Decrypts DES ciphertext using a 56 bit key
:param key:
The encryption key - a byte string 8 bytes long (includes error correction bits)
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector used for encryption - a byte string
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext
"""
if len(key) != 8:
raise ValueError(pretty_message(
'''
key must be 8 bytes (56 bits + 8 parity bits) long - is %s
''',
len(key)
))
if len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
return _decrypt(Security.kSecAttrKeyTypeDES, key, data, iv, Security.kSecPaddingPKCS5Key) | Decrypts DES ciphertext using a 56 bit key
:param key:
The encryption key - a byte string 8 bytes long (includes error correction bits)
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector used for encryption - a byte string
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext | Below is the instruction that describes the task:
### Input:
Decrypts DES ciphertext using a 56 bit key
:param key:
The encryption key - a byte string 8 bytes long (includes error correction bits)
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector used for encryption - a byte string
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext
### Response:
def des_cbc_pkcs5_decrypt(key, data, iv):
"""
Decrypts DES ciphertext using a 56 bit key
:param key:
The encryption key - a byte string 8 bytes long (includes error correction bits)
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector used for encryption - a byte string
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext
"""
if len(key) != 8:
raise ValueError(pretty_message(
'''
key must be 8 bytes (56 bits + 8 parity bits) long - is %s
''',
len(key)
))
if len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
return _decrypt(Security.kSecAttrKeyTypeDES, key, data, iv, Security.kSecPaddingPKCS5Key) |
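A usage sketch for des_cbc_pkcs5_decrypt; the key, IV and ciphertext source below are placeholders and must match the values used at encryption time.
key = b'\x01\x02\x03\x04\x05\x06\x07\x08'  # 8-byte DES key (placeholder)
iv = b'\x00' * 8                           # 8-byte IV used for encryption (placeholder)
with open('message.des', 'rb') as fh:      # hypothetical file holding the ciphertext
    ciphertext = fh.read()
plaintext = des_cbc_pkcs5_decrypt(key, ciphertext, iv)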
def _defaultErrorHandler(varBinds, **context):
"""Raise exception on any error if user callback is missing"""
errors = context.get('errors')
if errors:
err = errors[-1]
raise err['error'] | Raise exception on any error if user callback is missing | Below is the instruction that describes the task:
### Input:
Raise exception on any error if user callback is missing
### Response:
def _defaultErrorHandler(varBinds, **context):
"""Raise exception on any error if user callback is missing"""
errors = context.get('errors')
if errors:
err = errors[-1]
raise err['error'] |
def annotate(self, gpl, annotation_column, gpl_on="ID", gsm_on="ID_REF",
in_place=False):
"""Annotate GSM with provided GPL
Args:
gpl (:obj:`pandas.DataFrame`): A Platform or DataFrame to annotate with
annotation_column (str`): Column in a table for annotation
gpl_on (:obj:`str`): Use this column in GSM to merge. Defaults to "ID".
gsm_on (:obj:`str`): Use this column in GPL to merge.
Defaults to "ID_REF".
in_place (:obj:`bool`): Substitute table in GSM by new annotated
table. Defaults to False.
Returns:
:obj:`pandas.DataFrame` or :obj:`None`: Annotated table or None
Raises:
TypeError: GPL should be GPL or pandas.DataFrame
"""
if isinstance(gpl, GPL):
annotation_table = gpl.table
elif isinstance(gpl, DataFrame):
annotation_table = gpl
else:
raise TypeError("gpl should be a GPL object or a pandas.DataFrame")
# annotate by merging
annotated = self.table.merge(
annotation_table[[gpl_on, annotation_column]], left_on=gsm_on,
right_on=gpl_on)
del annotated[gpl_on]
if in_place:
self.table = annotated
return None
else:
return annotated | Annotate GSM with provided GPL
Args:
gpl (:obj:`pandas.DataFrame`): A Platform or DataFrame to annotate with
annotation_column (str`): Column in a table for annotation
gpl_on (:obj:`str`): Use this column in GSM to merge. Defaults to "ID".
gsm_on (:obj:`str`): Use this column in GPL to merge.
Defaults to "ID_REF".
in_place (:obj:`bool`): Substitute table in GSM by new annotated
table. Defaults to False.
Returns:
:obj:`pandas.DataFrame` or :obj:`None`: Annotated table or None
Raises:
TypeError: GPL should be GPL or pandas.DataFrame | Below is the instruction that describes the task:
### Input:
Annotate GSM with provided GPL
Args:
gpl (:obj:`pandas.DataFrame`): A Platform or DataFrame to annotate with
annotation_column (str`): Column in a table for annotation
gpl_on (:obj:`str`): Use this column in GSM to merge. Defaults to "ID".
gsm_on (:obj:`str`): Use this column in GPL to merge.
Defaults to "ID_REF".
in_place (:obj:`bool`): Substitute table in GSM by new annotated
table. Defaults to False.
Returns:
:obj:`pandas.DataFrame` or :obj:`None`: Annotated table or None
Raises:
TypeError: GPL should be GPL or pandas.DataFrame
### Response:
def annotate(self, gpl, annotation_column, gpl_on="ID", gsm_on="ID_REF",
in_place=False):
"""Annotate GSM with provided GPL
Args:
gpl (:obj:`pandas.DataFrame`): A Platform or DataFrame to annotate with
annotation_column (str`): Column in a table for annotation
gpl_on (:obj:`str`): Use this column in GSM to merge. Defaults to "ID".
gsm_on (:obj:`str`): Use this column in GPL to merge.
Defaults to "ID_REF".
in_place (:obj:`bool`): Substitute table in GSM by new annotated
table. Defaults to False.
Returns:
:obj:`pandas.DataFrame` or :obj:`None`: Annotated table or None
Raises:
TypeError: GPL should be GPL or pandas.DataFrame
"""
if isinstance(gpl, GPL):
annotation_table = gpl.table
elif isinstance(gpl, DataFrame):
annotation_table = gpl
else:
raise TypeError("gpl should be a GPL object or a pandas.DataFrame")
# annotate by merging
annotated = self.table.merge(
annotation_table[[gpl_on, annotation_column]], left_on=gsm_on,
right_on=gpl_on)
del annotated[gpl_on]
if in_place:
self.table = annotated
return None
else:
return annotated |
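A minimal sketch of annotate with a plain pandas.DataFrame as the platform table; gsm stands for an already-parsed GSM object whose table has an ID_REF column (its construction, e.g. via GEOparse, is not shown here).
import pandas as pd
gpl_table = pd.DataFrame({'ID': ['p1', 'p2'],
                          'GENE_SYMBOL': ['TP53', 'BRCA1']})
annotated = gsm.annotate(gpl_table, annotation_column='GENE_SYMBOL')  # merges on ID_REF/ID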
def export(self, xformat='csv'):
"""action: export annotations to CSV."""
if self.annot is None: # remove if buttons are disabled
self.parent.statusBar().showMessage('No score file loaded')
return
if xformat == 'csv':
filename = splitext(self.annot.xml_file)[0] + '.csv'
filename, _ = QFileDialog.getSaveFileName(self, 'Export stages',
filename,
'Sleep stages (*.csv)')
if 'remlogic' in xformat:
filename = splitext(self.annot.xml_file)[0] + '.txt'
filename, _ = QFileDialog.getSaveFileName(self, 'Export stages',
filename,
'Sleep stages (*.txt)')
if filename == '':
return
self.annot.export(filename, xformat=xformat) | action: export annotations to CSV. | Below is the instruction that describes the task:
### Input:
action: export annotations to CSV.
### Response:
def export(self, xformat='csv'):
"""action: export annotations to CSV."""
if self.annot is None: # remove if buttons are disabled
self.parent.statusBar().showMessage('No score file loaded')
return
if xformat == 'csv':
filename = splitext(self.annot.xml_file)[0] + '.csv'
filename, _ = QFileDialog.getSaveFileName(self, 'Export stages',
filename,
'Sleep stages (*.csv)')
if 'remlogic' in xformat:
filename = splitext(self.annot.xml_file)[0] + '.txt'
filename, _ = QFileDialog.getSaveFileName(self, 'Export stages',
filename,
'Sleep stages (*.txt)')
if filename == '':
return
self.annot.export(filename, xformat=xformat) |
def cos_values(period=360):
"""
Provides an infinite source of values representing a cosine wave (from -1
to +1) which repeats every *period* values. For example, to produce a
"siren" effect with a couple of LEDs that repeats once a second::
from gpiozero import PWMLED
from gpiozero.tools import cos_values, scaled, inverted
from signal import pause
red = PWMLED(2)
blue = PWMLED(3)
red.source_delay = 0.01
blue.source_delay = red.source_delay
red.source = scaled(cos_values(100), 0, 1, -1, 1)
blue.source = inverted(red)
pause()
If you require a different range than -1 to +1, see :func:`scaled`.
"""
angles = (2 * pi * i / period for i in range(period))
for a in cycle(angles):
yield cos(a) | Provides an infinite source of values representing a cosine wave (from -1
to +1) which repeats every *period* values. For example, to produce a
"siren" effect with a couple of LEDs that repeats once a second::
from gpiozero import PWMLED
from gpiozero.tools import cos_values, scaled, inverted
from signal import pause
red = PWMLED(2)
blue = PWMLED(3)
red.source_delay = 0.01
blue.source_delay = red.source_delay
red.source = scaled(cos_values(100), 0, 1, -1, 1)
blue.source = inverted(red)
pause()
If you require a different range than -1 to +1, see :func:`scaled`. | Below is the instruction that describes the task:
### Input:
Provides an infinite source of values representing a cosine wave (from -1
to +1) which repeats every *period* values. For example, to produce a
"siren" effect with a couple of LEDs that repeats once a second::
from gpiozero import PWMLED
from gpiozero.tools import cos_values, scaled, inverted
from signal import pause
red = PWMLED(2)
blue = PWMLED(3)
red.source_delay = 0.01
blue.source_delay = red.source_delay
red.source = scaled(cos_values(100), 0, 1, -1, 1)
blue.source = inverted(red)
pause()
If you require a different range than -1 to +1, see :func:`scaled`.
### Response:
def cos_values(period=360):
"""
Provides an infinite source of values representing a cosine wave (from -1
to +1) which repeats every *period* values. For example, to produce a
"siren" effect with a couple of LEDs that repeats once a second::
from gpiozero import PWMLED
from gpiozero.tools import cos_values, scaled, inverted
from signal import pause
red = PWMLED(2)
blue = PWMLED(3)
red.source_delay = 0.01
blue.source_delay = red.source_delay
red.source = scaled(cos_values(100), 0, 1, -1, 1)
blue.source = inverted(red)
pause()
If you require a different range than -1 to +1, see :func:`scaled`.
"""
angles = (2 * pi * i / period for i in range(period))
for a in cycle(angles):
yield cos(a) |
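As a quick numeric check, the first period of cos_values(4) samples the angles 0, pi/2, pi and 3*pi/2:
from itertools import islice
list(islice(cos_values(4), 4))
# ~[1.0, 0.0, -1.0, -0.0]  (the zeros are tiny floating-point values, not exact)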
def writer(fo,
schema,
records,
codec='null',
sync_interval=1000 * SYNC_SIZE,
metadata=None,
validator=None,
sync_marker=None):
"""Write records to fo (stream) according to schema
Parameters
----------
fo: file-like
Output stream
records: iterable
Records to write. This is commonly a list of the dictionary
representation of the records, but it can be any iterable
codec: string, optional
Compression codec, can be 'null', 'deflate' or 'snappy' (if installed)
sync_interval: int, optional
Size of sync interval
metadata: dict, optional
Header metadata
validator: None, True or a function
Validator function. If None (the default) - no validation. If True then
fastavro.validation.validate will be used. If it's a function, it
should have the same signature as fastavro.writer.validate and raise an
exception on error.
sync_marker: bytes, optional
A byte string used as the avro sync marker. If not provided, a random
byte string will be used.
Example::
from fastavro import writer, parse_schema
schema = {
'doc': 'A weather reading.',
'name': 'Weather',
'namespace': 'test',
'type': 'record',
'fields': [
{'name': 'station', 'type': 'string'},
{'name': 'time', 'type': 'long'},
{'name': 'temp', 'type': 'int'},
],
}
parsed_schema = parse_schema(schema)
records = [
{u'station': u'011990-99999', u'temp': 0, u'time': 1433269388},
{u'station': u'011990-99999', u'temp': 22, u'time': 1433270389},
{u'station': u'011990-99999', u'temp': -11, u'time': 1433273379},
{u'station': u'012650-99999', u'temp': 111, u'time': 1433275478},
]
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
Given an existing avro file, it's possible to append to it by re-opening
the file in `a+b` mode. If the file is only opened in `ab` mode, we aren't
able to read some of the existing header information and an error will be
raised. For example::
# Write initial records
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
# Write some more records
with open('weather.avro', 'a+b') as out:
writer(out, parsed_schema, more_records)
"""
# Sanity check that records is not a single dictionary (as that is a common
# mistake and the exception that gets raised is not helpful)
if isinstance(records, dict):
raise ValueError('"records" argument should be an iterable, not dict')
output = Writer(
fo,
schema,
codec,
sync_interval,
metadata,
validator,
sync_marker,
)
for record in records:
output.write(record)
output.flush() | Write records to fo (stream) according to schema
Parameters
----------
fo: file-like
Output stream
records: iterable
Records to write. This is commonly a list of the dictionary
representation of the records, but it can be any iterable
codec: string, optional
Compression codec, can be 'null', 'deflate' or 'snappy' (if installed)
sync_interval: int, optional
Size of sync interval
metadata: dict, optional
Header metadata
validator: None, True or a function
Validator function. If None (the default) - no validation. If True then
fastavro.validation.validate will be used. If it's a function, it
should have the same signature as fastavro.writer.validate and raise an
exception on error.
sync_marker: bytes, optional
A byte string used as the avro sync marker. If not provided, a random
byte string will be used.
Example::
from fastavro import writer, parse_schema
schema = {
'doc': 'A weather reading.',
'name': 'Weather',
'namespace': 'test',
'type': 'record',
'fields': [
{'name': 'station', 'type': 'string'},
{'name': 'time', 'type': 'long'},
{'name': 'temp', 'type': 'int'},
],
}
parsed_schema = parse_schema(schema)
records = [
{u'station': u'011990-99999', u'temp': 0, u'time': 1433269388},
{u'station': u'011990-99999', u'temp': 22, u'time': 1433270389},
{u'station': u'011990-99999', u'temp': -11, u'time': 1433273379},
{u'station': u'012650-99999', u'temp': 111, u'time': 1433275478},
]
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
Given an existing avro file, it's possible to append to it by re-opening
the file in `a+b` mode. If the file is only opened in `ab` mode, we aren't
able to read some of the existing header information and an error will be
raised. For example::
# Write initial records
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
# Write some more records
with open('weather.avro', 'a+b') as out:
writer(out, parsed_schema, more_records) | Below is the instruction that describes the task:
### Input:
Write records to fo (stream) according to schema
Parameters
----------
fo: file-like
Output stream
records: iterable
Records to write. This is commonly a list of the dictionary
representation of the records, but it can be any iterable
codec: string, optional
Compression codec, can be 'null', 'deflate' or 'snappy' (if installed)
sync_interval: int, optional
Size of sync interval
metadata: dict, optional
Header metadata
validator: None, True or a function
Validator function. If None (the default) - no validation. If True then
fastavro.validation.validate will be used. If it's a function, it
should have the same signature as fastavro.writer.validate and raise an
exception on error.
sync_marker: bytes, optional
A byte string used as the avro sync marker. If not provided, a random
byte string will be used.
Example::
from fastavro import writer, parse_schema
schema = {
'doc': 'A weather reading.',
'name': 'Weather',
'namespace': 'test',
'type': 'record',
'fields': [
{'name': 'station', 'type': 'string'},
{'name': 'time', 'type': 'long'},
{'name': 'temp', 'type': 'int'},
],
}
parsed_schema = parse_schema(schema)
records = [
{u'station': u'011990-99999', u'temp': 0, u'time': 1433269388},
{u'station': u'011990-99999', u'temp': 22, u'time': 1433270389},
{u'station': u'011990-99999', u'temp': -11, u'time': 1433273379},
{u'station': u'012650-99999', u'temp': 111, u'time': 1433275478},
]
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
Given an existing avro file, it's possible to append to it by re-opening
the file in `a+b` mode. If the file is only opened in `ab` mode, we aren't
able to read some of the existing header information and an error will be
raised. For example::
# Write initial records
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
# Write some more records
with open('weather.avro', 'a+b') as out:
writer(out, parsed_schema, more_records)
### Response:
def writer(fo,
schema,
records,
codec='null',
sync_interval=1000 * SYNC_SIZE,
metadata=None,
validator=None,
sync_marker=None):
"""Write records to fo (stream) according to schema
Parameters
----------
fo: file-like
Output stream
records: iterable
Records to write. This is commonly a list of the dictionary
representation of the records, but it can be any iterable
codec: string, optional
Compression codec, can be 'null', 'deflate' or 'snappy' (if installed)
sync_interval: int, optional
Size of sync interval
metadata: dict, optional
Header metadata
validator: None, True or a function
Validator function. If None (the default) - no validation. If True then
fastavro.validation.validate will be used. If it's a function, it
should have the same signature as fastavro.writer.validate and raise an
exception on error.
sync_marker: bytes, optional
A byte string used as the avro sync marker. If not provided, a random
byte string will be used.
Example::
from fastavro import writer, parse_schema
schema = {
'doc': 'A weather reading.',
'name': 'Weather',
'namespace': 'test',
'type': 'record',
'fields': [
{'name': 'station', 'type': 'string'},
{'name': 'time', 'type': 'long'},
{'name': 'temp', 'type': 'int'},
],
}
parsed_schema = parse_schema(schema)
records = [
{u'station': u'011990-99999', u'temp': 0, u'time': 1433269388},
{u'station': u'011990-99999', u'temp': 22, u'time': 1433270389},
{u'station': u'011990-99999', u'temp': -11, u'time': 1433273379},
{u'station': u'012650-99999', u'temp': 111, u'time': 1433275478},
]
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
Given an existing avro file, it's possible to append to it by re-opening
the file in `a+b` mode. If the file is only opened in `ab` mode, we aren't
able to read some of the existing header information and an error will be
raised. For example::
# Write initial records
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
# Write some more records
with open('weather.avro', 'a+b') as out:
writer(out, parsed_schema, more_records)
"""
# Sanity check that records is not a single dictionary (as that is a common
# mistake and the exception that gets raised is not helpful)
if isinstance(records, dict):
raise ValueError('"records" argument should be an iterable, not dict')
output = Writer(
fo,
schema,
codec,
sync_interval,
metadata,
validator,
sync_marker,
)
for record in records:
output.write(record)
output.flush() |
def calculate_bmi(closed, submitted):
"""
BMI is the ratio of the number of closed items to the number of total items
submitted in a particular period of analysis. The items can be issues, pull
requests and such
:param closed: dataframe returned from get_timeseries() containing closed items
:param submitted: dataframe returned from get_timeseries() containing total items
:returns: a dataframe with "date" and "bmi" columns where the date column is also
the index.
bmi is the ratio of the number of items closed by the total
number of items submitted in a "period" of analysis
"""
if sorted(closed.keys()) != sorted(submitted.keys()):
raise AttributeError("The buckets supplied are not congruent!")
dates = closed.index.values
closed_values = closed['value']
submitted_values = submitted['value']
ratios = []
for x, y in zip(closed_values, submitted_values):
if y == 0:
ratios.append(0.0)
else:
ratios.append(float("%.2f" % (x / y)))
df = pd.DataFrame.from_records({"date": dates, "bmi": ratios}, index="date")
return df.fillna(0) | BMI is the ratio of the number of closed items to the number of total items
submitted in a particular period of analysis. The items can be issues, pull
requests and such
:param closed: dataframe returned from get_timeseries() containing closed items
:param submitted: dataframe returned from get_timeseries() containing total items
:returns: a dataframe with "date" and "bmi" columns where the date column is also
the index.
bmi is the ratio of the number of items closed by the total
number of items submitted in a "period" of analysis | Below is the instruction that describes the task:
### Input:
BMI is the ratio of the number of closed items to the number of total items
submitted in a particular period of analysis. The items can be issues, pull
requests and such
:param closed: dataframe returned from get_timeseries() containing closed items
:param submitted: dataframe returned from get_timeseries() containing total items
:returns: a dataframe with "date" and "bmi" columns where the date column is also
the index.
bmi is the ratio of the number of items closed by the total
number of items submitted in a "period" of analysis
### Response:
def calculate_bmi(closed, submitted):
"""
BMI is the ratio of the number of closed items to the number of total items
submitted in a particular period of analysis. The items can be issues, pull
requests and such
:param closed: dataframe returned from get_timeseries() containing closed items
:param submitted: dataframe returned from get_timeseries() containing total items
:returns: a dataframe with "date" and "bmi" columns where the date column is also
the index.
bmi is the ratio of the number of items closed by the total
number of items submitted in a "period" of analysis
"""
if sorted(closed.keys()) != sorted(submitted.keys()):
raise AttributeError("The buckets supplied are not congruent!")
dates = closed.index.values
closed_values = closed['value']
submitted_values = submitted['value']
ratios = []
for x, y in zip(closed_values, submitted_values):
if y == 0:
ratios.append(0.0)
else:
ratios.append(float("%.2f" % (x / y)))
df = pd.DataFrame.from_records({"date": dates, "bmi": ratios}, index="date")
return df.fillna(0) |
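A small worked example for calculate_bmi with hand-built stand-ins for the get_timeseries() output; note how the zero-division guard maps 3/0 to 0.0.
import pandas as pd
idx = pd.to_datetime(['2024-01-01', '2024-02-01'])
closed = pd.DataFrame({'value': [2, 3]}, index=idx)
submitted = pd.DataFrame({'value': [4, 0]}, index=idx)
calculate_bmi(closed, submitted)
# bmi column: 0.5 for the first date, 0.0 for the second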
def get_hosting_devices_for_agent(self, context, host):
"""Fetches routers that a Cisco cfg agent is managing.
This function is supposed to be called when the agent has started,
is ready to take on assignments and before any callbacks to fetch
logical resources are issued.
:param context: contains user information
:param host: originator of callback
:returns: dict of hosting devices managed by the cfg agent
"""
agent_ids = self._dmplugin.get_cfg_agents(context, active=None,
filters={'host': [host]})
if agent_ids:
return [self._dmplugin.get_device_info_for_agent(context, hd_db)
for hd_db in self._dmplugin.get_hosting_devices_db(
context, filters={'cfg_agent_id': [agent_ids[0].id]})]
return [] | Fetches routers that a Cisco cfg agent is managing.
This function is supposed to be called when the agent has started,
is ready to take on assignments and before any callbacks to fetch
logical resources are issued.
:param context: contains user information
:param host: originator of callback
:returns: dict of hosting devices managed by the cfg agent | Below is the instruction that describes the task:
### Input:
Fetches routers that a Cisco cfg agent is managing.
This function is supposed to be called when the agent has started,
is ready to take on assignments and before any callbacks to fetch
logical resources are issued.
:param context: contains user information
:param host: originator of callback
:returns: dict of hosting devices managed by the cfg agent
### Response:
def get_hosting_devices_for_agent(self, context, host):
"""Fetches routers that a Cisco cfg agent is managing.
This function is supposed to be called when the agent has started,
is ready to take on assignments and before any callbacks to fetch
logical resources are issued.
:param context: contains user information
:param host: originator of callback
:returns: dict of hosting devices managed by the cfg agent
"""
agent_ids = self._dmplugin.get_cfg_agents(context, active=None,
filters={'host': [host]})
if agent_ids:
return [self._dmplugin.get_device_info_for_agent(context, hd_db)
for hd_db in self._dmplugin.get_hosting_devices_db(
context, filters={'cfg_agent_id': [agent_ids[0].id]})]
return [] |
def get_collections(self, username="", calculate_size=False, ext_preload=False, offset=0, limit=10):
"""Fetch collection folders
:param username: The user to list folders for, if omitted the authenticated user is used
:param calculate_size: The option to include the content count per each collection folder
:param ext_preload: Include first 5 deviations from the folder
:param offset: the pagination offset
:param limit: the pagination limit
"""
if not username and self.standard_grant_type == "authorization_code":
response = self._req('/collections/folders', {
"calculate_size":calculate_size,
"ext_preload":ext_preload,
"offset":offset,
"limit":limit
})
else:
if not username:
raise DeviantartError("No username defined.")
else:
response = self._req('/collections/folders', {
"username":username,
"calculate_size":calculate_size,
"ext_preload":ext_preload,
"offset":offset,
"limit":limit
})
folders = []
for item in response['results']:
f = {}
f['folderid'] = item['folderid']
f['name'] = item['name']
if "size" in item:
f['size'] = item['size']
if "deviations" in item:
f['deviations'] = []
for deviation_item in item['deviations']:
d = Deviation()
d.from_dict(deviation_item)
f['deviations'].append(d)
folders.append(f)
return {
"results" : folders,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
} | Fetch collection folders
:param username: The user to list folders for, if omitted the authenticated user is used
:param calculate_size: The option to include the content count per each collection folder
:param ext_preload: Include first 5 deviations from the folder
:param offset: the pagination offset
:param limit: the pagination limit | Below is the instruction that describes the task:
### Input:
Fetch collection folders
:param username: The user to list folders for, if omitted the authenticated user is used
:param calculate_size: The option to include the content count per each collection folder
:param ext_preload: Include first 5 deviations from the folder
:param offset: the pagination offset
:param limit: the pagination limit
### Response:
def get_collections(self, username="", calculate_size=False, ext_preload=False, offset=0, limit=10):
"""Fetch collection folders
:param username: The user to list folders for, if omitted the authenticated user is used
:param calculate_size: The option to include the content count per each collection folder
:param ext_preload: Include first 5 deviations from the folder
:param offset: the pagination offset
:param limit: the pagination limit
"""
if not username and self.standard_grant_type == "authorization_code":
response = self._req('/collections/folders', {
"calculate_size":calculate_size,
"ext_preload":ext_preload,
"offset":offset,
"limit":limit
})
else:
if not username:
raise DeviantartError("No username defined.")
else:
response = self._req('/collections/folders', {
"username":username,
"calculate_size":calculate_size,
"ext_preload":ext_preload,
"offset":offset,
"limit":limit
})
folders = []
for item in response['results']:
f = {}
f['folderid'] = item['folderid']
f['name'] = item['name']
if "size" in item:
f['size'] = item['size']
if "deviations" in item:
f['deviations'] = []
for deviation_item in item['deviations']:
d = Deviation()
d.from_dict(deviation_item)
f['deviations'].append(d)
folders.append(f)
return {
"results" : folders,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
} |
def getText(cls,
parent=None,
windowTitle='Get Text',
label='',
text='',
plain=True,
wrapped=True):
"""
Prompts the user for a text entry using the text edit class.
:param parent | <QWidget>
windowTitle | <str>
label | <str>
text | <str>
plain | <bool> | return plain text or not
:return (<str> text, <bool> accepted)
"""
# create the dialog
dlg = QDialog(parent)
dlg.setWindowTitle(windowTitle)
# create the layout
layout = QVBoxLayout()
# create the label
if label:
lbl = QLabel(dlg)
lbl.setText(label)
layout.addWidget(lbl)
# create the widget
widget = cls(dlg)
widget.setText(text)
if not wrapped:
widget.setLineWrapMode(XTextEdit.NoWrap)
layout.addWidget(widget)
# create the buttons
btns = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
Qt.Horizontal,
dlg)
layout.addWidget(btns)
dlg.setLayout(layout)
dlg.adjustSize()
# create connections
btns.accepted.connect(dlg.accept)
btns.rejected.connect(dlg.reject)
if dlg.exec_():
if plain:
return (widget.toPlainText(), True)
else:
return (widget.toHtml(), True)
else:
return ('', False) | Prompts the user for a text entry using the text edit class.
:param parent | <QWidget>
windowTitle | <str>
label | <str>
text | <str>
plain | <bool> | return plain text or not
:return (<str> text, <bool> accepted) | Below is the instruction that describes the task:
### Input:
Prompts the user for a text entry using the text edit class.
:param parent | <QWidget>
windowTitle | <str>
label | <str>
text | <str>
plain | <bool> | return plain text or not
:return (<str> text, <bool> accepted)
### Response:
def getText(cls,
parent=None,
windowTitle='Get Text',
label='',
text='',
plain=True,
wrapped=True):
"""
Prompts the user for a text entry using the text edit class.
:param parent | <QWidget>
windowTitle | <str>
label | <str>
text | <str>
plain | <bool> | return plain text or not
:return (<str> text, <bool> accepted)
"""
# create the dialog
dlg = QDialog(parent)
dlg.setWindowTitle(windowTitle)
# create the layout
layout = QVBoxLayout()
# create the label
if label:
lbl = QLabel(dlg)
lbl.setText(label)
layout.addWidget(lbl)
# create the widget
widget = cls(dlg)
widget.setText(text)
if not wrapped:
widget.setLineWrapMode(XTextEdit.NoWrap)
layout.addWidget(widget)
# create the buttons
btns = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
Qt.Horizontal,
dlg)
layout.addWidget(btns)
dlg.setLayout(layout)
dlg.adjustSize()
# create connections
btns.accepted.connect(dlg.accept)
btns.rejected.connect(dlg.reject)
if dlg.exec_():
if plain:
return (widget.toPlainText(), True)
else:
return (widget.toHtml(), True)
else:
return ('', False) |
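A usage sketch for the getText classmethod, assuming it is defined on XTextEdit (as the XTextEdit.NoWrap reference above suggests) and that a QApplication is already running.
text, accepted = XTextEdit.getText(windowTitle='Comment',
                                   label='Enter a comment:',
                                   plain=True)
if accepted:
    print(text)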
def dwell_axes(self, axes):
'''
Sets motors to low current, for when they are not moving.
Dwell for XYZA axes is only called after HOMING
Dwell for BC axes is called after both HOMING and MOVING
axes:
String containing the axes to set to low current (eg: 'XYZABC')
'''
axes = ''.join(set(axes) & set(AXES) - set(DISABLE_AXES))
dwelling_currents = {
ax: self._dwelling_current_settings['now'][ax]
for ax in axes
if self._active_axes[ax] is True
}
if dwelling_currents:
self._save_current(dwelling_currents, axes_active=False) | Sets motors to low current, for when they are not moving.
Dwell for XYZA axes is only called after HOMING
Dwell for BC axes is called after both HOMING and MOVING
axes:
String containing the axes to set to low current (eg: 'XYZABC') | Below is the instruction that describes the task:
### Input:
Sets motors to low current, for when they are not moving.
Dwell for XYZA axes is only called after HOMING
Dwell for BC axes is called after both HOMING and MOVING
axes:
String containing the axes to set to low current (eg: 'XYZABC')
### Response:
def dwell_axes(self, axes):
'''
Sets motors to low current, for when they are not moving.
Dwell for XYZA axes is only called after HOMING
Dwell for BC axes is called after both HOMING and MOVING
axes:
String containing the axes to set to low current (eg: 'XYZABC')
'''
axes = ''.join(set(axes) & set(AXES) - set(DISABLE_AXES))
dwelling_currents = {
ax: self._dwelling_current_settings['now'][ax]
for ax in axes
if self._active_axes[ax] is True
}
if dwelling_currents:
self._save_current(dwelling_currents, axes_active=False) |
def frontogenesis(thta, u, v, dx, dy, dim_order='yx'):
r"""Calculate the 2D kinematic frontogenesis of a temperature field.
The implementation is a form of the Petterssen Frontogenesis and uses the formula
outlined in [Bluestein1993]_ pg.248-253.
.. math:: F=\frac{1}{2}\left|\nabla \theta\right|[D \cos(2\beta)-\delta]
* :math:`F` is 2D kinematic frontogenesis
* :math:`\theta` is potential temperature
* :math:`D` is the total deformation
* :math:`\beta` is the angle between the axis of dilatation and the isentropes
* :math:`\delta` is the divergence
Parameters
----------
thta : (M, N) ndarray
Potential temperature
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
2D Frontogenesis in [temperature units]/m/s
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
Conversion factor to go from [temperature units]/m/s to [temperature units/100km/3h]
:math:`1.08e4*1.e5`
"""
# Get gradients of potential temperature in both x and y
ddy_thta = first_derivative(thta, delta=dy, axis=-2)
ddx_thta = first_derivative(thta, delta=dx, axis=-1)
# Compute the magnitude of the potential temperature gradient
mag_thta = np.sqrt(ddx_thta**2 + ddy_thta**2)
# Get the shearing, stretching, and total deformation of the wind field
shrd = shearing_deformation(u, v, dx, dy, dim_order=dim_order)
strd = stretching_deformation(u, v, dx, dy, dim_order=dim_order)
tdef = total_deformation(u, v, dx, dy, dim_order=dim_order)
# Get the divergence of the wind field
div = divergence(u, v, dx, dy, dim_order=dim_order)
# Compute the angle (beta) between the wind field and the gradient of potential temperature
psi = 0.5 * np.arctan2(shrd, strd)
beta = np.arcsin((-ddx_thta * np.cos(psi) - ddy_thta * np.sin(psi)) / mag_thta)
return 0.5 * mag_thta * (tdef * np.cos(2 * beta) - div) | r"""Calculate the 2D kinematic frontogenesis of a temperature field.
The implementation is a form of the Petterssen Frontogenesis and uses the formula
outlined in [Bluestein1993]_ pg.248-253.
.. math:: F=\frac{1}{2}\left|\nabla \theta\right|[D \cos(2\beta)-\delta]
* :math:`F` is 2D kinematic frontogenesis
* :math:`\theta` is potential temperature
* :math:`D` is the total deformation
* :math:`\beta` is the angle between the axis of dilatation and the isentropes
* :math:`\delta` is the divergence
Parameters
----------
thta : (M, N) ndarray
Potential temperature
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
2D Frontogenesis in [temperature units]/m/s
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
Conversion factor to go from [temperature units]/m/s to [temperature units/100km/3h]
:math:`1.08e4*1.e5` | Below is the instruction that describes the task:
### Input:
r"""Calculate the 2D kinematic frontogenesis of a temperature field.
The implementation is a form of the Petterssen Frontogenesis and uses the formula
outlined in [Bluestein1993]_ pg.248-253.
.. math:: F=\frac{1}{2}\left|\nabla \theta\right|[D cos(2\beta)-\delta]
* :math:`F` is 2D kinematic frontogenesis
* :math:`\theta` is potential temperature
* :math:`D` is the total deformation
* :math:`\beta` is the angle between the axis of dilatation and the isentropes
* :math:`\delta` is the divergence
Parameters
----------
thta : (M, N) ndarray
Potential temperature
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
2D Frontogenesis in [temperature units]/m/s
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
Conversion factor to go from [temperature units]/m/s to [temperature units/100km/3h]
:math:`1.08e4*1.e5`
### Response:
def frontogenesis(thta, u, v, dx, dy, dim_order='yx'):
r"""Calculate the 2D kinematic frontogenesis of a temperature field.
The implementation is a form of the Petterssen Frontogenesis and uses the formula
outlined in [Bluestein1993]_ pg.248-253.
.. math:: F=\frac{1}{2}\left|\nabla \theta\right|[D cos(2\beta)-\delta]
* :math:`F` is 2D kinematic frontogenesis
* :math:`\theta` is potential temperature
* :math:`D` is the total deformation
* :math:`\beta` is the angle between the axis of dilatation and the isentropes
* :math:`\delta` is the divergence
Parameters
----------
thta : (M, N) ndarray
Potential temperature
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
2D Frontogenesis in [temperature units]/m/s
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
Conversion factor to go from [temperature units]/m/s to [temperature units/100km/3h]
:math:`1.08e4*1.e5`
"""
# Get gradients of potential temperature in both x and y
ddy_thta = first_derivative(thta, delta=dy, axis=-2)
ddx_thta = first_derivative(thta, delta=dx, axis=-1)
# Compute the magnitude of the potential temperature gradient
mag_thta = np.sqrt(ddx_thta**2 + ddy_thta**2)
# Get the shearing, stretching, and total deformation of the wind field
shrd = shearing_deformation(u, v, dx, dy, dim_order=dim_order)
strd = stretching_deformation(u, v, dx, dy, dim_order=dim_order)
tdef = total_deformation(u, v, dx, dy, dim_order=dim_order)
# Get the divergence of the wind field
div = divergence(u, v, dx, dy, dim_order=dim_order)
# Compute the angle (beta) between the wind field and the gradient of potential temperature
psi = 0.5 * np.arctan2(shrd, strd)
beta = np.arcsin((-ddx_thta * np.cos(psi) - ddy_thta * np.sin(psi)) / mag_thta)
return 0.5 * mag_thta * (tdef * np.cos(2 * beta) - div) |
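A minimal usage sketch for the frontogenesis function above, not a definitive recipe: it assumes the function and the derivative/deformation helpers it calls are importable (they mirror MetPy's kinematic routines), and the grid values below are synthetic.
import numpy as np
# Synthetic 5x5 (y, x) grid: potential temperature in K, winds in m/s
thta = np.linspace(280.0, 300.0, 25).reshape(5, 5)
u = np.full((5, 5), 10.0)
v = np.zeros((5, 5))
dx = dy = 10000.0                                # 10 km spacing in metres
fronto = frontogenesis(thta, u, v, dx, dy)       # [K]/m/s
fronto_3h = fronto * 1.08e4 * 1.e5               # [K]/100km/3h, per the Notes
print(fronto_3h.shape)                           # (5, 5)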
def add_image_info_cb(self, viewer, channel, image_info):
"""Almost the same as add_image_cb(), except that the image
may not be loaded in memory.
"""
chname = channel.name
name = image_info.name
self.logger.debug("name=%s" % (name))
# Updates of any extant information
try:
image = channel.get_loaded_image(name)
except KeyError:
# images that are not yet loaded will show "N/A" for keywords
image = None
self.add_image_cb(viewer, chname, image, image_info) | Almost the same as add_image_cb(), except that the image
may not be loaded in memory. | Below is the instruction that describes the task:
### Input:
Almost the same as add_image_cb(), except that the image
may not be loaded in memory.
### Response:
def add_image_info_cb(self, viewer, channel, image_info):
"""Almost the same as add_image_cb(), except that the image
may not be loaded in memory.
"""
chname = channel.name
name = image_info.name
self.logger.debug("name=%s" % (name))
# Updates of any extant information
try:
image = channel.get_loaded_image(name)
except KeyError:
# images that are not yet loaded will show "N/A" for keywords
image = None
self.add_image_cb(viewer, chname, image, image_info) |
def read_i2c_block_data(self, addr, cmd, length=32):
"""Perform a read from the specified cmd register of device. Length number
of bytes (default of 32) will be read and returned as a bytearray.
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Build ctypes values to marshall between ioctl and Python.
reg = c_uint8(cmd)
result = create_string_buffer(length)
# Build ioctl request.
request = make_i2c_rdwr_data([
(addr, 0, 1, pointer(reg)), # Write cmd register.
(addr, I2C_M_RD, length, cast(result, POINTER(c_uint8))) # Read data.
])
# Make ioctl call and return result data.
ioctl(self._device.fileno(), I2C_RDWR, request)
return bytearray(result.raw) | Perform a read from the specified cmd register of device. Length number
of bytes (default of 32) will be read and returned as a bytearray. | Below is the instruction that describes the task:
### Input:
Perform a read from the specified cmd register of device. Length number
of bytes (default of 32) will be read and returned as a bytearray.
### Response:
def read_i2c_block_data(self, addr, cmd, length=32):
"""Perform a read from the specified cmd register of device. Length number
of bytes (default of 32) will be read and returned as a bytearray.
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Build ctypes values to marshall between ioctl and Python.
reg = c_uint8(cmd)
result = create_string_buffer(length)
# Build ioctl request.
request = make_i2c_rdwr_data([
(addr, 0, 1, pointer(reg)), # Write cmd register.
(addr, I2C_M_RD, length, cast(result, POINTER(c_uint8))) # Read data.
])
# Make ioctl call and return result data.
ioctl(self._device.fileno(), I2C_RDWR, request)
return bytearray(result.raw) |
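A hedged usage sketch for the block read above, assuming the class is exposed as an SMBus-style object (as in Adafruit's PureIO smbus) with the usual open/close lifecycle; the bus number, device address and register are illustrative only.
bus = SMBus(1)                                          # open /dev/i2c-1
block = bus.read_i2c_block_data(0x68, 0x3B, length=14)  # e.g. 14 sensor registers
print(['0x%02x' % b for b in block])
bus.close()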
def _initSymbols(ptc):
"""
Helper function to initialize the single character constants
and other symbols needed.
"""
ptc.timeSep = [ u':' ]
ptc.dateSep = [ u'/' ]
ptc.meridian = [ u'AM', u'PM' ]
ptc.usesMeridian = True
ptc.uses24 = False
if pyicu and ptc.usePyICU:
am = u''
pm = u''
ts = ''
# ICU doesn't seem to provide directly the
# date or time separator - so we have to
# figure it out
o = ptc.icu_tf['short']
s = ptc.timeFormats['short']
ptc.usesMeridian = u'a' in s
ptc.uses24 = u'H' in s
# '11:45 AM' or '11:45'
s = o.format(datetime.datetime(2003, 10, 30, 11, 45))
# ': AM' or ':'
s = s.replace('11', '').replace('45', '')
if len(s) > 0:
ts = s[0]
if ptc.usesMeridian:
# '23:45 AM' or '23:45'
am = s[1:].strip()
s = o.format(datetime.datetime(2003, 10, 30, 23, 45))
if ptc.uses24:
s = s.replace('23', '')
else:
s = s.replace('11', '')
# 'PM' or ''
pm = s.replace('45', '').replace(ts, '').strip()
ptc.timeSep = [ ts ]
ptc.meridian = [ am, pm ]
o = ptc.icu_df['short']
s = o.format(datetime.datetime(2003, 10, 30, 11, 45))
s = s.replace('10', '').replace('30', '').replace('03', '').replace('2003', '')
if len(s) > 0:
ds = s[0]
else:
ds = '/'
ptc.dateSep = [ ds ]
s = ptc.dateFormats['short']
l = s.lower().split(ds)
dp_order = []
for s in l:
if len(s) > 0:
dp_order.append(s[:1])
ptc.dp_order = dp_order
else:
ptc.timeSep = ptc.locale.timeSep
ptc.dateSep = ptc.locale.dateSep
ptc.meridian = ptc.locale.meridian
ptc.usesMeridian = ptc.locale.usesMeridian
ptc.uses24 = ptc.locale.uses24
ptc.dp_order = ptc.locale.dp_order
# build am and pm lists to contain
# original case, lowercase and first-char
# versions of the meridian text
if len(ptc.meridian) > 0:
am = ptc.meridian[0]
ptc.am = [ am ]
if len(am) > 0:
ptc.am.append(am[0])
am = am.lower()
ptc.am.append(am)
ptc.am.append(am[0])
else:
am = ''
ptc.am = [ '', '' ]
if len(ptc.meridian) > 1:
pm = ptc.meridian[1]
ptc.pm = [ pm ]
if len(pm) > 0:
ptc.pm.append(pm[0])
pm = pm.lower()
ptc.pm.append(pm)
ptc.pm.append(pm[0])
else:
pm = ''
ptc.pm = [ '', '' ] | Helper function to initialize the single character constants
and other symbols needed. | Below is the instruction that describes the task:
### Input:
Helper function to initialize the single character constants
and other symbols needed.
### Response:
def _initSymbols(ptc):
"""
Helper function to initialize the single character constants
and other symbols needed.
"""
ptc.timeSep = [ u':' ]
ptc.dateSep = [ u'/' ]
ptc.meridian = [ u'AM', u'PM' ]
ptc.usesMeridian = True
ptc.uses24 = False
if pyicu and ptc.usePyICU:
am = u''
pm = u''
ts = ''
# ICU doesn't seem to provide directly the
# date or time separator - so we have to
# figure it out
o = ptc.icu_tf['short']
s = ptc.timeFormats['short']
ptc.usesMeridian = u'a' in s
ptc.uses24 = u'H' in s
# '11:45 AM' or '11:45'
s = o.format(datetime.datetime(2003, 10, 30, 11, 45))
# ': AM' or ':'
s = s.replace('11', '').replace('45', '')
if len(s) > 0:
ts = s[0]
if ptc.usesMeridian:
# '23:45 AM' or '23:45'
am = s[1:].strip()
s = o.format(datetime.datetime(2003, 10, 30, 23, 45))
if ptc.uses24:
s = s.replace('23', '')
else:
s = s.replace('11', '')
# 'PM' or ''
pm = s.replace('45', '').replace(ts, '').strip()
ptc.timeSep = [ ts ]
ptc.meridian = [ am, pm ]
o = ptc.icu_df['short']
s = o.format(datetime.datetime(2003, 10, 30, 11, 45))
s = s.replace('10', '').replace('30', '').replace('03', '').replace('2003', '')
if len(s) > 0:
ds = s[0]
else:
ds = '/'
ptc.dateSep = [ ds ]
s = ptc.dateFormats['short']
l = s.lower().split(ds)
dp_order = []
for s in l:
if len(s) > 0:
dp_order.append(s[:1])
ptc.dp_order = dp_order
else:
ptc.timeSep = ptc.locale.timeSep
ptc.dateSep = ptc.locale.dateSep
ptc.meridian = ptc.locale.meridian
ptc.usesMeridian = ptc.locale.usesMeridian
ptc.uses24 = ptc.locale.uses24
ptc.dp_order = ptc.locale.dp_order
# build am and pm lists to contain
# original case, lowercase and first-char
# versions of the meridian text
if len(ptc.meridian) > 0:
am = ptc.meridian[0]
ptc.am = [ am ]
if len(am) > 0:
ptc.am.append(am[0])
am = am.lower()
ptc.am.append(am)
ptc.am.append(am[0])
else:
am = ''
ptc.am = [ '', '' ]
if len(ptc.meridian) > 1:
pm = ptc.meridian[1]
ptc.pm = [ pm ]
if len(pm) > 0:
ptc.pm.append(pm[0])
pm = pm.lower()
ptc.pm.append(pm)
ptc.pm.append(pm[0])
else:
pm = ''
ptc.pm = [ '', '' ] |
def create(cls, name, port=179, external_distance=20, internal_distance=200,
local_distance=200, subnet_distance=None):
"""
Create a custom BGP Profile
:param str name: name of profile
:param int port: port for BGP process
:param int external_distance: external administrative distance; (1-255)
:param int internal_distance: internal administrative distance (1-255)
:param int local_distance: local administrative distance (aggregation) (1-255)
:param list subnet_distance: configure specific subnet's with respective distances
:type tuple subnet_distance: (subnet element(Network), distance(int))
:raises CreateElementFailed: reason for failure
:return: instance with meta
:rtype: BGPProfile
"""
json = {'name': name,
'external': external_distance,
'internal': internal_distance,
'local': local_distance,
'port': port}
if subnet_distance:
d = [{'distance': distance, 'subnet': subnet.href}
for subnet, distance in subnet_distance]
json.update(distance_entry=d)
return ElementCreator(cls, json) | Create a custom BGP Profile
:param str name: name of profile
:param int port: port for BGP process
:param int external_distance: external administrative distance; (1-255)
:param int internal_distance: internal administrative distance (1-255)
:param int local_distance: local administrative distance (aggregation) (1-255)
:param list subnet_distance: configure specific subnet's with respective distances
:type tuple subnet_distance: (subnet element(Network), distance(int))
:raises CreateElementFailed: reason for failure
:return: instance with meta
:rtype: BGPProfile | Below is the instruction that describes the task:
### Input:
Create a custom BGP Profile
:param str name: name of profile
:param int port: port for BGP process
:param int external_distance: external administrative distance; (1-255)
:param int internal_distance: internal administrative distance (1-255)
:param int local_distance: local administrative distance (aggregation) (1-255)
:param list subnet_distance: configure specific subnet's with respective distances
:type tuple subnet_distance: (subnet element(Network), distance(int))
:raises CreateElementFailed: reason for failure
:return: instance with meta
:rtype: BGPProfile
### Response:
def create(cls, name, port=179, external_distance=20, internal_distance=200,
local_distance=200, subnet_distance=None):
"""
Create a custom BGP Profile
:param str name: name of profile
:param int port: port for BGP process
:param int external_distance: external administrative distance; (1-255)
:param int internal_distance: internal administrative distance (1-255)
:param int local_distance: local administrative distance (aggregation) (1-255)
:param list subnet_distance: configure specific subnet's with respective distances
:type tuple subnet_distance: (subnet element(Network), distance(int))
:raises CreateElementFailed: reason for failure
:return: instance with meta
:rtype: BGPProfile
"""
json = {'name': name,
'external': external_distance,
'internal': internal_distance,
'local': local_distance,
'port': port}
if subnet_distance:
d = [{'distance': distance, 'subnet': subnet.href}
for subnet, distance in subnet_distance]
json.update(distance_entry=d)
return ElementCreator(cls, json) |
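A usage sketch in the style of the SMC API this classmethod appears to belong to; the Network element and the distance values are examples, and the subnet element is assumed to exist already.
internal_net = Network('net-10.0.0.0/8')         # assumed existing subnet element
profile = BGPProfile.create(
    name='custom-bgp-profile',
    port=179,
    external_distance=20,
    internal_distance=200,
    local_distance=200,
    subnet_distance=[(internal_net, 100)])
print(profile.href)                              # meta returned by ElementCreator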
def _add_nonce(self, response):
"""
Store a nonce from a response we received.
:param twisted.web.iweb.IResponse response: The HTTP response.
:return: The response, unmodified.
"""
nonce = response.headers.getRawHeaders(
REPLAY_NONCE_HEADER, [None])[0]
with LOG_JWS_ADD_NONCE(raw_nonce=nonce) as action:
if nonce is None:
raise errors.MissingNonce(response)
else:
try:
decoded_nonce = Header._fields['nonce'].decode(
nonce.decode('ascii')
)
action.add_success_fields(nonce=decoded_nonce)
except DeserializationError as error:
raise errors.BadNonce(nonce, error)
self._nonces.add(decoded_nonce)
return response | Store a nonce from a response we received.
:param twisted.web.iweb.IResponse response: The HTTP response.
:return: The response, unmodified. | Below is the instruction that describes the task:
### Input:
Store a nonce from a response we received.
:param twisted.web.iweb.IResponse response: The HTTP response.
:return: The response, unmodified.
### Response:
def _add_nonce(self, response):
"""
Store a nonce from a response we received.
:param twisted.web.iweb.IResponse response: The HTTP response.
:return: The response, unmodified.
"""
nonce = response.headers.getRawHeaders(
REPLAY_NONCE_HEADER, [None])[0]
with LOG_JWS_ADD_NONCE(raw_nonce=nonce) as action:
if nonce is None:
raise errors.MissingNonce(response)
else:
try:
decoded_nonce = Header._fields['nonce'].decode(
nonce.decode('ascii')
)
action.add_success_fields(nonce=decoded_nonce)
except DeserializationError as error:
raise errors.BadNonce(nonce, error)
self._nonces.add(decoded_nonce)
return response |
def load_data(limit=0, split=0.8):
"""Load data from the IMDB dataset."""
# Partition off part of the train data for evaluation
train_data, _ = thinc.extra.datasets.imdb()
random.shuffle(train_data)
train_data = train_data[-limit:]
texts, labels = zip(*train_data)
cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels]
split = int(len(train_data) * split)
return (texts[:split], cats[:split]), (texts[split:], cats[split:]) | Load data from the IMDB dataset. | Below is the instruction that describes the task:
### Input:
Load data from the IMDB dataset.
### Response:
def load_data(limit=0, split=0.8):
"""Load data from the IMDB dataset."""
# Partition off part of the train data for evaluation
train_data, _ = thinc.extra.datasets.imdb()
random.shuffle(train_data)
train_data = train_data[-limit:]
texts, labels = zip(*train_data)
cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels]
split = int(len(train_data) * split)
return (texts[:split], cats[:split]), (texts[split:], cats[split:]) |
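A quick usage sketch; the first call downloads the IMDB corpus through thinc.extra.datasets, so it needs network access.
(train_texts, train_cats), (dev_texts, dev_cats) = load_data(limit=2000, split=0.8)
print(len(train_texts), 'training /', len(dev_texts), 'evaluation examples')
print(train_cats[0])   # e.g. {'POSITIVE': True, 'NEGATIVE': False}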
def GetStat(self):
"""Retrieves information about the file entry.
Returns:
VFSStat: a stat object or None if not available.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object | Retrieves information about the file entry.
Returns:
VFSStat: a stat object or None if not available. | Below is the instruction that describes the task:
### Input:
Retrieves information about the file entry.
Returns:
VFSStat: a stat object or None if not available.
### Response:
def GetStat(self):
"""Retrieves information about the file entry.
Returns:
VFSStat: a stat object or None if not available.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object |
def get_cov(config):
"""Returns the coverage object of pytest-cov."""
# Check with hasplugin to avoid getplugin exception in older pytest.
if config.pluginmanager.hasplugin('_cov'):
plugin = config.pluginmanager.getplugin('_cov')
if plugin.cov_controller:
return plugin.cov_controller.cov
return None | Returns the coverage object of pytest-cov. | Below is the instruction that describes the task:
### Input:
Returns the coverage object of pytest-cov.
### Response:
def get_cov(config):
"""Returns the coverage object of pytest-cov."""
# Check with hasplugin to avoid getplugin exception in older pytest.
if config.pluginmanager.hasplugin('_cov'):
plugin = config.pluginmanager.getplugin('_cov')
if plugin.cov_controller:
return plugin.cov_controller.cov
return None |
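A hedged sketch of a typical call site inside a pytest plugin hook; the attribute access on the returned coverage.Coverage object is illustrative.
def pytest_configure(config):
    cov = get_cov(config)
    if cov is not None:
        # pytest-cov is active and has already built a coverage.Coverage object
        print('coverage data file:', cov.config.data_file)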
def _get_location(self, state, hash_id):
"""
Get previously saved location
A location is composed of: address, pc, finding, at_init, condition
"""
return state.context.setdefault('{:s}.locations'.format(self.name), {})[hash_id] | Get previously saved location
A location is composed of: address, pc, finding, at_init, condition | Below is the instruction that describes the task:
### Input:
Get previously saved location
A location is composed of: address, pc, finding, at_init, condition
### Response:
def _get_location(self, state, hash_id):
"""
Get previously saved location
A location is composed of: address, pc, finding, at_init, condition
"""
return state.context.setdefault('{:s}.locations'.format(self.name), {})[hash_id] |
async def async_connect(self):
"""Connect to the ASUS-WRT Telnet server."""
self._reader, self._writer = await asyncio.open_connection(
self._host, self._port)
with (await self._io_lock):
try:
await asyncio.wait_for(self._reader.readuntil(b'login: '), 9)
except asyncio.streams.IncompleteReadError:
_LOGGER.error(
"Unable to read from router on %s:%s" % (
self._host, self._port))
return
except TimeoutError:
_LOGGER.error("Host timeout.")
self._writer.write((self._username + '\n').encode('ascii'))
await self._reader.readuntil(b'Password: ')
self._writer.write((self._password + '\n').encode('ascii'))
self._prompt_string = (await self._reader.readuntil(
b'#')).split(b'\n')[-1]
self._connected = True | Connect to the ASUS-WRT Telnet server. | Below is the instruction that describes the task:
### Input:
Connect to the ASUS-WRT Telnet server.
### Response:
async def async_connect(self):
"""Connect to the ASUS-WRT Telnet server."""
self._reader, self._writer = await asyncio.open_connection(
self._host, self._port)
with (await self._io_lock):
try:
await asyncio.wait_for(self._reader.readuntil(b'login: '), 9)
except asyncio.streams.IncompleteReadError:
_LOGGER.error(
"Unable to read from router on %s:%s" % (
self._host, self._port))
return
except TimeoutError:
_LOGGER.error("Host timeout.")
self._writer.write((self._username + '\n').encode('ascii'))
await self._reader.readuntil(b'Password: ')
self._writer.write((self._password + '\n').encode('ascii'))
self._prompt_string = (await self._reader.readuntil(
b'#')).split(b'\n')[-1]
self._connected = True |
def _generate_symbol(path, width, height, command='C'):
"""Sequence generator for SVG path."""
if len(path) == 0:
return
# Initial point.
yield 'M'
yield path[0].anchor[1] * width
yield path[0].anchor[0] * height
yield command
# Closed path or open path
points = (zip(path, path[1:] + path[0:1]) if path.is_closed()
else zip(path, path[1:]))
# Rest of the points.
for p1, p2 in points:
yield p1.leaving[1] * width
yield p1.leaving[0] * height
yield p2.preceding[1] * width
yield p2.preceding[0] * height
yield p2.anchor[1] * width
yield p2.anchor[0] * height
if path.is_closed():
yield 'Z' | Sequence generator for SVG path. | Below is the instruction that describes the task:
### Input:
Sequence generator for SVG path.
### Response:
def _generate_symbol(path, width, height, command='C'):
"""Sequence generator for SVG path."""
if len(path) == 0:
return
# Initial point.
yield 'M'
yield path[0].anchor[1] * width
yield path[0].anchor[0] * height
yield command
# Closed path or open path
points = (zip(path, path[1:] + path[0:1]) if path.is_closed()
else zip(path, path[1:]))
# Rest of the points.
for p1, p2 in points:
yield p1.leaving[1] * width
yield p1.leaving[0] * height
yield p2.preceding[1] * width
yield p2.preceding[0] * height
yield p2.anchor[1] * width
yield p2.anchor[0] * height
if path.is_closed():
yield 'Z' |
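A sketch of how the generated tokens might be assembled into an SVG path attribute; `path` is assumed to be the bezier-path object the generator expects (indexable points with .anchor/.leaving/.preceding and an is_closed() method).
tokens = _generate_symbol(path, width=32, height=32)
d_attr = ' '.join(str(t) for t in tokens)
svg = '<path d="{}" fill="black"/>'.format(d_attr)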
def _prepare_batch_request(self):
"""Prepares headers and body for a batch request.
:rtype: tuple (dict, str)
:returns: The pair of headers and body of the batch request to be sent.
:raises: :class:`ValueError` if no requests have been deferred.
"""
if len(self._requests) == 0:
raise ValueError("No deferred requests")
multi = MIMEMultipart()
for method, uri, headers, body in self._requests:
subrequest = MIMEApplicationHTTP(method, uri, headers, body)
multi.attach(subrequest)
# The `email` package expects to deal with "native" strings
if six.PY3: # pragma: NO COVER Python3
buf = io.StringIO()
else:
buf = io.BytesIO()
generator = Generator(buf, False, 0)
generator.flatten(multi)
payload = buf.getvalue()
# Strip off redundant header text
_, body = payload.split("\n\n", 1)
return dict(multi._headers), body | Prepares headers and body for a batch request.
:rtype: tuple (dict, str)
:returns: The pair of headers and body of the batch request to be sent.
:raises: :class:`ValueError` if no requests have been deferred. | Below is the instruction that describes the task:
### Input:
Prepares headers and body for a batch request.
:rtype: tuple (dict, str)
:returns: The pair of headers and body of the batch request to be sent.
:raises: :class:`ValueError` if no requests have been deferred.
### Response:
def _prepare_batch_request(self):
"""Prepares headers and body for a batch request.
:rtype: tuple (dict, str)
:returns: The pair of headers and body of the batch request to be sent.
:raises: :class:`ValueError` if no requests have been deferred.
"""
if len(self._requests) == 0:
raise ValueError("No deferred requests")
multi = MIMEMultipart()
for method, uri, headers, body in self._requests:
subrequest = MIMEApplicationHTTP(method, uri, headers, body)
multi.attach(subrequest)
# The `email` package expects to deal with "native" strings
if six.PY3: # pragma: NO COVER Python3
buf = io.StringIO()
else:
buf = io.BytesIO()
generator = Generator(buf, False, 0)
generator.flatten(multi)
payload = buf.getvalue()
# Strip off redundant header text
_, body = payload.split("\n\n", 1)
return dict(multi._headers), body |
def dump_commands(commands, directory=None, sub_dir=None):
"""
Dump SQL commands to .sql files.
:param commands: List of SQL commands
:param directory: Directory to dump commands to
:param sub_dir: Sub directory
:return: Directory failed commands were dumped to
"""
print('\t' + str(len(commands)), 'failed commands')
# Create dump_dir directory
if directory and os.path.isfile(directory):
dump_dir = set_dump_directory(os.path.dirname(directory), sub_dir)
return_dir = dump_dir
elif directory:
dump_dir = set_dump_directory(directory, sub_dir)
return_dir = dump_dir
else:
dump_dir = TemporaryDirectory().name
return_dir = TemporaryDirectory()
# Create list of (path, content) tuples
command_filepath = [(fail, os.path.join(dump_dir, str(count) + '.sql')) for count, fail in enumerate(commands)]
# Dump failed commands to text file in the same directory as the commands
# Utilizes the multiprocessing module if it is available
timer = Timer()
if MULTIPROCESS:
pool = Pool(cpu_count())
pool.map(write_text_tup, command_filepath)
pool.close()
print('\tDumped ', len(command_filepath), 'commands\n\t\tTime : {0}'.format(timer.end),
'\n\t\tMethod : (multiprocessing)\n\t\tDirectory : {0}'.format(dump_dir))
else:
for tup in command_filepath:
write_text_tup(tup)
print('\tDumped ', len(command_filepath), 'commands\n\t\tTime : {0}'.format(timer.end),
'\n\t\tMethod : (sequential)\n\t\tDirectory : {0}'.format(dump_dir))
# Return base directory of dumped commands
return return_dir | Dump SQL commands to .sql files.
:param commands: List of SQL commands
:param directory: Directory to dump commands to
:param sub_dir: Sub directory
:return: Directory failed commands were dumped to | Below is the instruction that describes the task:
### Input:
Dump SQL commands to .sql files.
:param commands: List of SQL commands
:param directory: Directory to dump commands to
:param sub_dir: Sub directory
:return: Directory failed commands were dumped to
### Response:
def dump_commands(commands, directory=None, sub_dir=None):
"""
Dump SQL commands to .sql files.
:param commands: List of SQL commands
:param directory: Directory to dump commands to
:param sub_dir: Sub directory
:return: Directory failed commands were dumped to
"""
print('\t' + str(len(commands)), 'failed commands')
# Create dump_dir directory
if directory and os.path.isfile(directory):
dump_dir = set_dump_directory(os.path.dirname(directory), sub_dir)
return_dir = dump_dir
elif directory:
dump_dir = set_dump_directory(directory, sub_dir)
return_dir = dump_dir
else:
dump_dir = TemporaryDirectory().name
return_dir = TemporaryDirectory()
# Create list of (path, content) tuples
command_filepath = [(fail, os.path.join(dump_dir, str(count) + '.sql')) for count, fail in enumerate(commands)]
# Dump failed commands to text file in the same directory as the commands
# Utilizes the multiprocessing module if it is available
timer = Timer()
if MULTIPROCESS:
pool = Pool(cpu_count())
pool.map(write_text_tup, command_filepath)
pool.close()
print('\tDumped ', len(command_filepath), 'commands\n\t\tTime : {0}'.format(timer.end),
'\n\t\tMethod : (multiprocessing)\n\t\tDirectory : {0}'.format(dump_dir))
else:
for tup in command_filepath:
write_text_tup(tup)
print('\tDumped ', len(command_filepath), 'commands\n\t\tTime : {0}'.format(timer.end),
'\n\t\tMethod : (sequential)\n\t\tDirectory : {0}'.format(dump_dir))
# Return base directory of dumped commands
return return_dir |
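A minimal sketch of dumping a couple of failed statements; the paths are examples, and Timer/MULTIPROCESS come from the same module as the function above.
failed = ['INSERT INTO t VALUES (1);', 'UPDATE t SET x = 2 WHERE id = 7;']
dump_dir = dump_commands(failed, directory='/tmp/sql_dumps', sub_dir='failed')
print('dumped to', dump_dir)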
def to_constant(expression):
"""
Iff the expression can be simplified to a Constant get the actual concrete value.
This discards/ignores any taint
"""
value = simplify(expression)
if isinstance(value, Expression) and value.taint:
raise ValueError("Can not simplify tainted values to constant")
if isinstance(value, Constant):
return value.value
elif isinstance(value, Array):
if expression.index_max:
ba = bytearray()
for i in range(expression.index_max):
value_i = simplify(value[i])
if not isinstance(value_i, Constant):
break
ba.append(value_i.value)
else:
return bytes(ba)
return expression
return value | Iff the expression can be simplified to a Constant get the actual concrete value.
This discards/ignores any taint | Below is the instruction that describes the task:
### Input:
Iff the expression can be simplified to a Constant get the actual concrete value.
This discards/ignores any taint
### Response:
def to_constant(expression):
"""
Iff the expression can be simplified to a Constant get the actual concrete value.
This discards/ignores any taint
"""
value = simplify(expression)
if isinstance(value, Expression) and value.taint:
raise ValueError("Can not simplify tainted values to constant")
if isinstance(value, Constant):
return value.value
elif isinstance(value, Array):
if expression.index_max:
ba = bytearray()
for i in range(expression.index_max):
value_i = simplify(value[i])
if not isinstance(value_i, Constant):
break
ba.append(value_i.value)
else:
return bytes(ba)
return expression
return value |
def draw_pathfinder_trajectory(
self,
trajectory,
color="#ff0000",
offset=None,
scale=(1, 1),
show_dt=False,
dt_offset=0.0,
**kwargs
):
"""
Special helper function for drawing trajectories generated by
robotpy-pathfinder
:param trajectory: A list of pathfinder segment objects
:param offset: If specified, should be x/y tuple to add to the path
relative to the robot coordinates
:param scale: Multiply all points by this (x,y) tuple
:param show_dt: draw text every N seconds along path, or False
:param dt_offset: add this to each dt shown
:param kwargs: Keyword options to pass to tkinter.create_line
"""
# pathfinder x/y coordinates are switched
pts = [(pt.x, -pt.y) for pt in trajectory]
robot_coordinates = offset if offset else True
self.draw_line(
pts,
color=color,
robot_coordinates=robot_coordinates,
relative_to_first=True,
arrow=True,
scale=scale,
)
if show_dt:
dt = trajectory[0].dt
def _defer_text():
# defer this execution to save effort when drawing
px_per_ft = UserRenderer._global_ui.field.px_per_ft
line = self._elements[-1]
for i in range(0, len(pts), int(show_dt / dt)):
text = "t=%.2f" % (dt_offset + i * dt,)
el = TextElement(
text, line.pts[i], 0, "#000000", int(px_per_ft * 0.5)
)
UserRenderer._global_ui.field.add_moving_element(el)
self._elements.append(el)
self._run(_defer_text) | Special helper function for drawing trajectories generated by
robotpy-pathfinder
:param trajectory: A list of pathfinder segment objects
:param offset: If specified, should be x/y tuple to add to the path
relative to the robot coordinates
:param scale: Multiply all points by this (x,y) tuple
:param show_dt: draw text every N seconds along path, or False
:param dt_offset: add this to each dt shown
:param kwargs: Keyword options to pass to tkinter.create_line | Below is the instruction that describes the task:
### Input:
Special helper function for drawing trajectories generated by
robotpy-pathfinder
:param trajectory: A list of pathfinder segment objects
:param offset: If specified, should be x/y tuple to add to the path
relative to the robot coordinates
:param scale: Multiply all points by this (x,y) tuple
:param show_dt: draw text every N seconds along path, or False
:param dt_offset: add this to each dt shown
:param kwargs: Keyword options to pass to tkinter.create_line
### Response:
def draw_pathfinder_trajectory(
self,
trajectory,
color="#ff0000",
offset=None,
scale=(1, 1),
show_dt=False,
dt_offset=0.0,
**kwargs
):
"""
Special helper function for drawing trajectories generated by
robotpy-pathfinder
:param trajectory: A list of pathfinder segment objects
:param offset: If specified, should be x/y tuple to add to the path
relative to the robot coordinates
:param scale: Multiply all points by this (x,y) tuple
:param show_dt: draw text every N seconds along path, or False
:param dt_offset: add this to each dt shown
:param kwargs: Keyword options to pass to tkinter.create_line
"""
# pathfinder x/y coordinates are switched
pts = [(pt.x, -pt.y) for pt in trajectory]
robot_coordinates = offset if offset else True
self.draw_line(
pts,
color=color,
robot_coordinates=robot_coordinates,
relative_to_first=True,
arrow=True,
scale=scale,
)
if show_dt:
dt = trajectory[0].dt
def _defer_text():
# defer this execution to save effort when drawing
px_per_ft = UserRenderer._global_ui.field.px_per_ft
line = self._elements[-1]
for i in range(0, len(pts), int(show_dt / dt)):
text = "t=%.2f" % (dt_offset + i * dt,)
el = TextElement(
text, line.pts[i], 0, "#000000", int(px_per_ft * 0.5)
)
UserRenderer._global_ui.field.add_moving_element(el)
self._elements.append(el)
self._run(_defer_text) |
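A hedged call sketch: `renderer` is assumed to be the UserRenderer instance and `trajectory` a list of pathfinder segments generated elsewhere (e.g. by robotpy-pathfinder).
renderer.draw_pathfinder_trajectory(
    trajectory,
    color='#00ff00',
    offset=(1.0, 0.0),    # shift the path 1 ft ahead of the robot origin
    show_dt=1.0,          # label the path once per simulated second
    dt_offset=0.0)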
def _bootstrap_fedora(name, **kwargs):
'''
Bootstrap a Fedora container
'''
dst = _make_container_root(name)
if not kwargs.get('version', False):
if __grains__['os'].lower() == 'fedora':
version = __grains__['osrelease']
else:
version = '21'
else:
version = kwargs['version']
cmd = ('yum -y --releasever={0} --nogpg --installroot={1} '
'--disablerepo="*" --enablerepo=fedora install systemd passwd yum '
'fedora-release vim-minimal'.format(version, dst))
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
if ret['retcode'] != 0:
_build_failed(dst, name)
return ret | Bootstrap a Fedora container | Below is the instruction that describes the task:
### Input:
Bootstrap a Fedora container
### Response:
def _bootstrap_fedora(name, **kwargs):
'''
Bootstrap a Fedora container
'''
dst = _make_container_root(name)
if not kwargs.get('version', False):
if __grains__['os'].lower() == 'fedora':
version = __grains__['osrelease']
else:
version = '21'
else:
version = kwargs['version']
cmd = ('yum -y --releasever={0} --nogpg --installroot={1} '
'--disablerepo="*" --enablerepo=fedora install systemd passwd yum '
'fedora-release vim-minimal'.format(version, dst))
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
if ret['retcode'] != 0:
_build_failed(dst, name)
return ret |
def parse_segdict_key(self, key):
"""
Return ifo and name from the segdict key.
"""
splt = key.split(':')
if len(splt) == 2:
return splt[0], splt[1]
else:
err_msg = "Key should be of the format 'ifo:name', got %s." %(key,)
raise ValueError(err_msg) | Return ifo and name from the segdict key. | Below is the instruction that describes the task:
### Input:
Return ifo and name from the segdict key.
### Response:
def parse_segdict_key(self, key):
"""
Return ifo and name from the segdict key.
"""
splt = key.split(':')
if len(splt) == 2:
return splt[0], splt[1]
else:
err_msg = "Key should be of the format 'ifo:name', got %s." %(key,)
raise ValueError(err_msg) |
def show_firmware_version_output_show_firmware_version_switchid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_firmware_version = ET.Element("show_firmware_version")
config = show_firmware_version
output = ET.SubElement(show_firmware_version, "output")
show_firmware_version = ET.SubElement(output, "show-firmware-version")
switchid = ET.SubElement(show_firmware_version, "switchid")
switchid.text = kwargs.pop('switchid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_firmware_version_output_show_firmware_version_switchid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_firmware_version = ET.Element("show_firmware_version")
config = show_firmware_version
output = ET.SubElement(show_firmware_version, "output")
show_firmware_version = ET.SubElement(output, "show-firmware-version")
switchid = ET.SubElement(show_firmware_version, "switchid")
switchid.text = kwargs.pop('switchid')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def action_factory(name, parameter=False):
"""Factory method for creating new actions (w/wo parameters).
:param name: Name of the action (prefix with your module name).
:param parameter: Determines if action should take parameters or not.
Default is ``False``.
"""
if parameter:
return partial(ParameterizedActionNeed, name)
else:
return ActionNeed(name) | Factory method for creating new actions (w/wo parameters).
:param name: Name of the action (prefix with your module name).
:param parameter: Determines if action should take parameters or not.
Default is ``False``. | Below is the instruction that describes the task:
### Input:
Factory method for creating new actions (w/wo parameters).
:param name: Name of the action (prefix with your module name).
:param parameter: Determines if action should take parameters or not.
Default is ``False``.
### Response:
def action_factory(name, parameter=False):
"""Factory method for creating new actions (w/wo parameters).
:param name: Name of the action (prefix with your module name).
:param parameter: Determines if action should take parameters or not.
Default is ``False``.
"""
if parameter:
return partial(ParameterizedActionNeed, name)
else:
return ActionNeed(name) |
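A short usage sketch showing both factory modes; the action names are illustrative.
admin_access = action_factory('myapp-admin-access')                 # plain ActionNeed
read_record = action_factory('myapp-read-record', parameter=True)   # parameterized
need = read_record(42)   # ParameterizedActionNeed('myapp-read-record', 42)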
def _prep_bins():
"""
Support for running straight out of a cloned source directory instead
of an installed distribution
"""
from os import path
from sys import platform, maxsize
from shutil import copy
bit_suffix = "-x86_64" if maxsize > 2**32 else "-x86"
package_root = path.abspath(path.dirname(__file__))
prebuilt_path = path.join(package_root, "prebuilt", platform + bit_suffix)
config = {"MANIFEST_DIR": prebuilt_path}
try:
execfile(path.join(prebuilt_path, "manifest.pycfg"), config)
except IOError:
return # there are no prebuilts for this platform - nothing to do
files = map(lambda x: path.join(prebuilt_path, x), config["FILES"])
for prebuilt_file in files:
try:
copy(path.join(prebuilt_path, prebuilt_file), package_root)
except IOError:
pass | Support for running straight out of a cloned source directory instead
of an installed distribution | Below is the instruction that describes the task:
### Input:
Support for running straight out of a cloned source directory instead
of an installed distribution
### Response:
def _prep_bins():
"""
Support for running straight out of a cloned source directory instead
of an installed distribution
"""
from os import path
from sys import platform, maxsize
from shutil import copy
bit_suffix = "-x86_64" if maxsize > 2**32 else "-x86"
package_root = path.abspath(path.dirname(__file__))
prebuilt_path = path.join(package_root, "prebuilt", platform + bit_suffix)
config = {"MANIFEST_DIR": prebuilt_path}
try:
execfile(path.join(prebuilt_path, "manifest.pycfg"), config)
except IOError:
return # there are no prebuilts for this platform - nothing to do
files = map(lambda x: path.join(prebuilt_path, x), config["FILES"])
for prebuilt_file in files:
try:
copy(path.join(prebuilt_path, prebuilt_file), package_root)
except IOError:
pass |
def set_level(self,
level,
console_only=False):
"""
Defines the logging level (from standard logging module) for log messages.
:param level: Level of logging for the file logger.
:param console_only: [Optional] If True then the file logger will not be affected.
"""
self.queue.put(dill.dumps(SetLevelCommand(level=level,
console_only=console_only))) | Defines the logging level (from standard logging module) for log messages.
:param level: Level of logging for the file logger.
:param console_only: [Optional] If True then the file logger will not be affected. | Below is the instruction that describes the task:
### Input:
Defines the logging level (from standard logging module) for log messages.
:param level: Level of logging for the file logger.
:param console_only: [Optional] If True then the file logger will not be affected.
### Response:
def set_level(self,
level,
console_only=False):
"""
Defines the logging level (from standard logging module) for log messages.
:param level: Level of logging for the file logger.
:param console_only: [Optional] If True then the file logger will not be affected.
"""
self.queue.put(dill.dumps(SetLevelCommand(level=level,
console_only=console_only))) |
def copyNodeList(self):
"""Do a recursive copy of the node list. Use
xmlDocCopyNodeList() if possible to ensure string interning. """
ret = libxml2mod.xmlCopyNodeList(self._o)
if ret is None:raise treeError('xmlCopyNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | Do a recursive copy of the node list. Use
xmlDocCopyNodeList() if possible to ensure string interning. | Below is the the instruction that describes the task:
### Input:
Do a recursive copy of the node list. Use
xmlDocCopyNodeList() if possible to ensure string interning.
### Response:
def copyNodeList(self):
"""Do a recursive copy of the node list. Use
xmlDocCopyNodeList() if possible to ensure string interning. """
ret = libxml2mod.xmlCopyNodeList(self._o)
if ret is None:raise treeError('xmlCopyNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp |
def dsort(self, order):
r"""
Sort rows.
:param order: Sort order
:type order: :ref:`CsvColFilter`
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.dsort
:raises:
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
"""
# Make order conforming to a list of dictionaries
order = order if isinstance(order, list) else [order]
norder = [{item: "A"} if not isinstance(item, dict) else item for item in order]
# Verify that all columns exist in file
self._in_header([list(item.keys())[0] for item in norder])
# Get column indexes
clist = []
for nitem in norder:
for key, value in nitem.items():
clist.append(
(
key
if isinstance(key, int)
else self._header_upper.index(key.upper()),
value.upper() == "D",
)
)
# From the Python documentation:
# "Starting with Python 2.3, the sort() method is guaranteed to be
# stable. A sort is stable if it guarantees not to change the
# relative order of elements that compare equal - this is helpful
# for sorting in multiple passes (for example, sort by department,
# then by salary grade)."
# This means that the sorts have to be done from "minor" column to
# "major" column
for (cindex, rvalue) in reversed(clist):
fpointer = operator.itemgetter(cindex)
self._data.sort(key=fpointer, reverse=rvalue) | r"""
Sort rows.
:param order: Sort order
:type order: :ref:`CsvColFilter`
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.dsort
:raises:
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]] | Below is the instruction that describes the task:
### Input:
r"""
Sort rows.
:param order: Sort order
:type order: :ref:`CsvColFilter`
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.dsort
:raises:
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
### Response:
def dsort(self, order):
r"""
Sort rows.
:param order: Sort order
:type order: :ref:`CsvColFilter`
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.dsort
:raises:
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
"""
# Make order conforming to a list of dictionaries
order = order if isinstance(order, list) else [order]
norder = [{item: "A"} if not isinstance(item, dict) else item for item in order]
# Verify that all columns exist in file
self._in_header([list(item.keys())[0] for item in norder])
# Get column indexes
clist = []
for nitem in norder:
for key, value in nitem.items():
clist.append(
(
key
if isinstance(key, int)
else self._header_upper.index(key.upper()),
value.upper() == "D",
)
)
# From the Python documentation:
# "Starting with Python 2.3, the sort() method is guaranteed to be
# stable. A sort is stable if it guarantees not to change the
# relative order of elements that compare equal - this is helpful
# for sorting in multiple passes (for example, sort by department,
# then by salary grade)."
# This means that the sorts have to be done from "minor" column to
# "major" column
for (cindex, rvalue) in reversed(clist):
fpointer = operator.itemgetter(cindex)
self._data.sort(key=fpointer, reverse=rvalue) |
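A usage sketch assuming a CsvFile loaded from a file with Department and Salary columns; the file name is an example.
obj = CsvFile(fname='employees.csv')
obj.dsort(['Department', {'Salary': 'D'}])   # ascending Department, then descending Salary
obj.dsort([0, {2: 'D'}])                     # column positions (0-based) work as well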
def _escapify(label):
"""Escape the characters in label which need it.
@returns: the escaped string
@rtype: string"""
text = ''
for c in label:
if c in _escaped:
text += '\\' + c
elif ord(c) > 0x20 and ord(c) < 0x7F:
text += c
else:
text += '\\%03d' % ord(c)
return text | Escape the characters in label which need it.
@returns: the escaped string
@rtype: string | Below is the instruction that describes the task:
### Input:
Escape the characters in label which need it.
@returns: the escaped string
@rtype: string
### Response:
def _escapify(label):
"""Escape the characters in label which need it.
@returns: the escaped string
@rtype: string"""
text = ''
for c in label:
if c in _escaped:
text += '\\' + c
elif ord(c) > 0x20 and ord(c) < 0x7F:
text += c
else:
text += '\\%03d' % ord(c)
return text |
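A self-contained sketch of the escaping rule; the module-level _escaped set is not shown above, so a plausible value (the characters dnspython-style code escapes) is assumed here.
_escaped = '"().;\\@$'             # assumed definition of the module-level set
print(_escapify('my "label"'))     # -> my\032\"label\"  (the space becomes \032)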
def deduplicate(directories: List[str], recursive: bool,
dummy_run: bool) -> None:
"""
De-duplicate files within one or more directories. Remove files
that are identical to ones already considered.
Args:
directories: list of directories to process
recursive: process subdirectories (recursively)?
dummy_run: say what it'll do, but don't do it
"""
# -------------------------------------------------------------------------
# Catalogue files by their size
# -------------------------------------------------------------------------
files_by_size = {} # type: Dict[int, List[str]] # maps size to list of filenames # noqa
num_considered = 0
for filename in gen_filenames(directories, recursive=recursive):
if not os.path.isfile(filename):
continue
size = os.stat(filename)[stat.ST_SIZE]
a = files_by_size.setdefault(size, [])
a.append(filename)
num_considered += 1
log.debug("files_by_size =\n{}", pformat(files_by_size))
# -------------------------------------------------------------------------
# By size, look for duplicates using a hash of the first part only
# -------------------------------------------------------------------------
log.info("Finding potential duplicates...")
potential_duplicate_sets = []
potential_count = 0
sizes = list(files_by_size.keys())
sizes.sort()
for k in sizes:
files_of_this_size = files_by_size[k]
out_files = [] # type: List[str]
# ... list of all files having >1 file per hash, for this size
hashes = {} # type: Dict[str, Union[bool, str]]
# ... key is a hash; value is either True or a filename
if len(files_of_this_size) == 1:
continue
log.info("Testing {} files of size {}...", len(files_of_this_size), k)
for filename in files_of_this_size:
if not os.path.isfile(filename):
continue
log.debug("Quick-scanning file: {}", filename)
with open(filename, 'rb') as fd:
hasher = md5()
hasher.update(fd.read(INITIAL_HASH_SIZE))
hash_value = hasher.digest()
if hash_value in hashes:
# We have discovered the SECOND OR SUBSEQUENT hash match.
first_file_or_true = hashes[hash_value]
if first_file_or_true is not True:
# We have discovered the SECOND file;
# first_file_or_true contains the name of the FIRST.
out_files.append(first_file_or_true)
hashes[hash_value] = True
out_files.append(filename)
else:
# We have discovered the FIRST file with this hash.
hashes[hash_value] = filename
if out_files:
potential_duplicate_sets.append(out_files)
potential_count = potential_count + len(out_files)
del files_by_size
log.info("Found {} sets of potential duplicates, based on hashing the "
"first {} bytes of each...", potential_count, INITIAL_HASH_SIZE)
log.debug("potential_duplicate_sets =\n{}",
pformat(potential_duplicate_sets))
# -------------------------------------------------------------------------
# Within each set, check for duplicates using a hash of the entire file
# -------------------------------------------------------------------------
log.info("Scanning for real duplicates...")
num_scanned = 0
num_to_scan = sum(len(one_set) for one_set in potential_duplicate_sets)
duplicate_sets = [] # type: List[List[str]]
for one_set in potential_duplicate_sets:
out_files = [] # type: List[str]
hashes = {}
for filename in one_set:
num_scanned += 1
log.info("Scanning file [{}/{}]: {}",
num_scanned, num_to_scan, filename)
with open(filename, 'rb') as fd:
hasher = md5()
while True:
r = fd.read(MAIN_READ_CHUNK_SIZE)
if len(r) == 0:
break
hasher.update(r)
hash_value = hasher.digest()
if hash_value in hashes:
if not out_files:
out_files.append(hashes[hash_value])
out_files.append(filename)
else:
hashes[hash_value] = filename
if len(out_files):
duplicate_sets.append(out_files)
log.debug("duplicate_sets = \n{}", pformat(duplicate_sets))
num_originals = 0
num_deleted = 0
for d in duplicate_sets:
print("Original is: {}".format(d[0]))
num_originals += 1
for f in d[1:]:
if dummy_run:
print("Would delete: {}".format(f))
else:
print("Deleting: {}".format(f))
os.remove(f)
num_deleted += 1
print()
num_unique = num_considered - (num_originals + num_deleted)
print(
"{action} {d} duplicates, leaving {o} originals (and {u} unique files "
"not touched; {c} files considered in total)".format(
action="Would delete" if dummy_run else "Deleted",
d=num_deleted,
o=num_originals,
u=num_unique,
c=num_considered
)
) | De-duplicate files within one or more directories. Remove files
that are identical to ones already considered.
Args:
directories: list of directories to process
recursive: process subdirectories (recursively)?
dummy_run: say what it'll do, but don't do it | Below is the instruction that describes the task:
### Input:
De-duplicate files within one or more directories. Remove files
that are identical to ones already considered.
Args:
directories: list of directories to process
recursive: process subdirectories (recursively)?
dummy_run: say what it'll do, but don't do it
### Response:
def deduplicate(directories: List[str], recursive: bool,
dummy_run: bool) -> None:
"""
De-duplicate files within one or more directories. Remove files
that are identical to ones already considered.
Args:
directories: list of directories to process
recursive: process subdirectories (recursively)?
dummy_run: say what it'll do, but don't do it
"""
# -------------------------------------------------------------------------
# Catalogue files by their size
# -------------------------------------------------------------------------
files_by_size = {} # type: Dict[int, List[str]] # maps size to list of filenames # noqa
num_considered = 0
for filename in gen_filenames(directories, recursive=recursive):
if not os.path.isfile(filename):
continue
size = os.stat(filename)[stat.ST_SIZE]
a = files_by_size.setdefault(size, [])
a.append(filename)
num_considered += 1
log.debug("files_by_size =\n{}", pformat(files_by_size))
# -------------------------------------------------------------------------
# By size, look for duplicates using a hash of the first part only
# -------------------------------------------------------------------------
log.info("Finding potential duplicates...")
potential_duplicate_sets = []
potential_count = 0
sizes = list(files_by_size.keys())
sizes.sort()
for k in sizes:
files_of_this_size = files_by_size[k]
out_files = [] # type: List[str]
# ... list of all files having >1 file per hash, for this size
hashes = {} # type: Dict[str, Union[bool, str]]
# ... key is a hash; value is either True or a filename
if len(files_of_this_size) == 1:
continue
log.info("Testing {} files of size {}...", len(files_of_this_size), k)
for filename in files_of_this_size:
if not os.path.isfile(filename):
continue
log.debug("Quick-scanning file: {}", filename)
with open(filename, 'rb') as fd:
hasher = md5()
hasher.update(fd.read(INITIAL_HASH_SIZE))
hash_value = hasher.digest()
if hash_value in hashes:
# We have discovered the SECOND OR SUBSEQUENT hash match.
first_file_or_true = hashes[hash_value]
if first_file_or_true is not True:
# We have discovered the SECOND file;
# first_file_or_true contains the name of the FIRST.
out_files.append(first_file_or_true)
hashes[hash_value] = True
out_files.append(filename)
else:
# We have discovered the FIRST file with this hash.
hashes[hash_value] = filename
if out_files:
potential_duplicate_sets.append(out_files)
potential_count = potential_count + len(out_files)
del files_by_size
log.info("Found {} sets of potential duplicates, based on hashing the "
"first {} bytes of each...", potential_count, INITIAL_HASH_SIZE)
log.debug("potential_duplicate_sets =\n{}",
pformat(potential_duplicate_sets))
# -------------------------------------------------------------------------
# Within each set, check for duplicates using a hash of the entire file
# -------------------------------------------------------------------------
log.info("Scanning for real duplicates...")
num_scanned = 0
num_to_scan = sum(len(one_set) for one_set in potential_duplicate_sets)
duplicate_sets = [] # type: List[List[str]]
for one_set in potential_duplicate_sets:
out_files = [] # type: List[str]
hashes = {}
for filename in one_set:
num_scanned += 1
log.info("Scanning file [{}/{}]: {}",
num_scanned, num_to_scan, filename)
with open(filename, 'rb') as fd:
hasher = md5()
while True:
r = fd.read(MAIN_READ_CHUNK_SIZE)
if len(r) == 0:
break
hasher.update(r)
hash_value = hasher.digest()
if hash_value in hashes:
if not out_files:
out_files.append(hashes[hash_value])
out_files.append(filename)
else:
hashes[hash_value] = filename
if len(out_files):
duplicate_sets.append(out_files)
log.debug("duplicate_sets = \n{}", pformat(duplicate_sets))
num_originals = 0
num_deleted = 0
for d in duplicate_sets:
print("Original is: {}".format(d[0]))
num_originals += 1
for f in d[1:]:
if dummy_run:
print("Would delete: {}".format(f))
else:
print("Deleting: {}".format(f))
os.remove(f)
num_deleted += 1
print()
num_unique = num_considered - (num_originals + num_deleted)
print(
"{action} {d} duplicates, leaving {o} originals (and {u} unique files "
"not touched; {c} files considered in total)".format(
action="Would delete" if dummy_run else "Deleted",
d=num_deleted,
o=num_originals,
u=num_unique,
c=num_considered
)
) |
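A minimal dry-run sketch; the directories are examples. With dummy_run=True nothing is deleted, only reported.
deduplicate(['/data/photos', '/data/backups'], recursive=True, dummy_run=True)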
def replace(self, key, value, expire=0, noreply=None):
"""
The memcached "replace" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the value was stored and False if it wasn't (because the key didn't
already exist).
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'replace', {key: value}, expire, noreply)[key] | The memcached "replace" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the value was stored and False if it wasn't (because the key didn't
already exist). | Below is the instruction that describes the task:
### Input:
The memcached "replace" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the value was stored and False if it wasn't (because the key didn't
already exist).
### Response:
def replace(self, key, value, expire=0, noreply=None):
"""
The memcached "replace" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the value was stored and False if it wasn't (because the key didn't
already exist).
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'replace', {key: value}, expire, noreply)[key] |
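A usage sketch with pymemcache, which this method appears to come from; the server address is an example, and noreply=False is passed so the boolean result is actually returned.
from pymemcache.client.base import Client
client = Client(('localhost', 11211))
client.set('greeting', 'hello', expire=60)
updated = client.replace('greeting', 'hi', expire=60, noreply=False)   # True - key existed
missed = client.replace('absent', 'x', noreply=False)                  # False - not stored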
def add_colortable(self, fobj, name):
r"""Add a color table from a file to the registry.
Parameters
----------
fobj : file-like object
The file to read the color table from
name : str
The name under which the color table will be stored
"""
self[name] = read_colortable(fobj)
self[name + '_r'] = self[name][::-1] | r"""Add a color table from a file to the registry.
Parameters
----------
fobj : file-like object
The file to read the color table from
name : str
The name under which the color table will be stored | Below is the the instruction that describes the task:
### Input:
r"""Add a color table from a file to the registry.
Parameters
----------
fobj : file-like object
The file to read the color table from
name : str
The name under which the color table will be stored
### Response:
def add_colortable(self, fobj, name):
r"""Add a color table from a file to the registry.
Parameters
----------
fobj : file-like object
The file to read the color table from
name : str
The name under which the color table will be stored
"""
self[name] = read_colortable(fobj)
self[name + '_r'] = self[name][::-1] |
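
A short registration sketch, assuming the registry above is MetPy's metpy.plots.colortables and that my_colors.tbl is a color-table file you supply (both assumptions):

from metpy.plots import colortables   # assumed location of the registry instance

with open('my_colors.tbl') as fobj:   # hypothetical color table file
    colortables.add_colortable(fobj, 'my_colors')

colors = colortables['my_colors']              # the color list just registered
reversed_colors = colortables['my_colors_r']   # reversed copy added automatically
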
def range(self, count):
"""
Create a list of colors evenly spaced along this scale's domain.
:param int count: The number of colors to return.
:rtype: list
:returns: A list of spectra.Color objects.
"""
if count <= 1:
raise ValueError("Range size must be greater than 1.")
dom = self._domain
distance = dom[-1] - dom[0]
props = [ self(dom[0] + distance * float(x)/(count-1))
for x in range(count) ]
return props | Create a list of colors evenly spaced along this scale's domain.
:param int count: The number of colors to return.
:rtype: list
:returns: A list of spectra.Color objects. | Below is the the instruction that describes the task:
### Input:
Create a list of colors evenly spaced along this scale's domain.
:param int count: The number of colors to return.
:rtype: list
:returns: A list of spectra.Color objects.
### Response:
def range(self, count):
"""
Create a list of colors evenly spaced along this scale's domain.
:param int count: The number of colors to return.
:rtype: list
:returns: A list of spectra.Color objects.
"""
if count <= 1:
raise ValueError("Range size must be greater than 1.")
dom = self._domain
distance = dom[-1] - dom[0]
props = [ self(dom[0] + distance * float(x)/(count-1))
for x in range(count) ]
return props |
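
A usage sketch, assuming the scale above comes from the `spectra` package (an assumption based on the docstring's mention of spectra.Color objects):

import spectra   # assumed package providing the Scale/Color classes

start, end = spectra.html("#CB3B1C"), spectra.html("#36B64F")
swatches = spectra.scale([start, end]).range(5)   # five evenly spaced spectra.Color objects
print([c.hexcode for c in swatches])
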
def set_custom_serializer(self, _type, serializer):
"""
Assign a serializer for the type.
:param _type: (Type), the target type of the serializer
:param serializer: (Serializer), Custom Serializer constructor function
"""
validate_type(_type)
validate_serializer(serializer, StreamSerializer)
self._custom_serializers[_type] = serializer | Assign a serializer for the type.
:param _type: (Type), the target type of the serializer
:param serializer: (Serializer), Custom Serializer constructor function | Below is the the instruction that describes the task:
### Input:
Assign a serializer for the type.
:param _type: (Type), the target type of the serializer
:param serializer: (Serializer), Custom Serializer constructor function
### Response:
def set_custom_serializer(self, _type, serializer):
"""
Assign a serializer for the type.
:param _type: (Type), the target type of the serializer
:param serializer: (Serializer), Custom Serializer constructor function
"""
validate_type(_type)
validate_serializer(serializer, StreamSerializer)
self._custom_serializers[_type] = serializer |
def batch_write(self, tablename, return_capacity=None,
return_item_collection_metrics=NONE):
"""
Perform a batch write on a table
Parameters
----------
tablename : str
Name of the table to write to
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : (NONE, SIZE), optional
SIZE will return statistics about item collections that were
modified.
Examples
--------
.. code-block:: python
with connection.batch_write('mytable') as batch:
batch.put({'id': 'id1', 'foo': 'bar'})
batch.delete({'id': 'oldid'})
"""
return_capacity = self._default_capacity(return_capacity)
return BatchWriter(self, tablename, return_capacity=return_capacity,
return_item_collection_metrics=return_item_collection_metrics) | Perform a batch write on a table
Parameters
----------
tablename : str
Name of the table to write to
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : (NONE, SIZE), optional
SIZE will return statistics about item collections that were
modified.
Examples
--------
.. code-block:: python
with connection.batch_write('mytable') as batch:
batch.put({'id': 'id1', 'foo': 'bar'})
batch.delete({'id': 'oldid'}) | Below is the the instruction that describes the task:
### Input:
Perform a batch write on a table
Parameters
----------
tablename : str
Name of the table to write to
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : (NONE, SIZE), optional
SIZE will return statistics about item collections that were
modified.
Examples
--------
.. code-block:: python
with connection.batch_write('mytable') as batch:
batch.put({'id': 'id1', 'foo': 'bar'})
batch.delete({'id': 'oldid'})
### Response:
def batch_write(self, tablename, return_capacity=None,
return_item_collection_metrics=NONE):
"""
Perform a batch write on a table
Parameters
----------
tablename : str
Name of the table to write to
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : (NONE, SIZE), optional
SIZE will return statistics about item collections that were
modified.
Examples
--------
.. code-block:: python
with connection.batch_write('mytable') as batch:
batch.put({'id': 'id1', 'foo': 'bar'})
batch.delete({'id': 'oldid'})
"""
return_capacity = self._default_capacity(return_capacity)
return BatchWriter(self, tablename, return_capacity=return_capacity,
return_item_collection_metrics=return_item_collection_metrics) |
def get_sentence_ngrams(mention, attrib="words", n_min=1, n_max=1, lower=True):
"""Get the ngrams that are in the Sentence of the given Mention, not including itself.
Note that if a candidate is passed in, all of its Mentions will be
searched.
:param mention: The Mention whose Sentence is being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a *generator* of ngrams
"""
spans = _to_spans(mention)
for span in spans:
for ngram in get_left_ngrams(
span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
):
yield ngram
for ngram in get_right_ngrams(
span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
):
yield ngram | Get the ngrams that are in the Sentence of the given Mention, not including itself.
Note that if a candidate is passed in, all of its Mentions will be
searched.
:param mention: The Mention whose Sentence is being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a *generator* of ngrams | Below is the the instruction that describes the task:
### Input:
Get the ngrams that are in the Sentence of the given Mention, not including itself.
Note that if a candidate is passed in, all of its Mentions will be
searched.
:param mention: The Mention whose Sentence is being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a *generator* of ngrams
### Response:
def get_sentence_ngrams(mention, attrib="words", n_min=1, n_max=1, lower=True):
"""Get the ngrams that are in the Sentence of the given Mention, not including itself.
Note that if a candidate is passed in, all of its Mentions will be
searched.
:param mention: The Mention whose Sentence is being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a *generator* of ngrams
"""
spans = _to_spans(mention)
for span in spans:
for ngram in get_left_ngrams(
span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
):
yield ngram
for ngram in get_right_ngrams(
span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
):
yield ngram |
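
As a quick illustration, the generator is normally consumed directly; `cand` below is a hypothetical name for an already-extracted Fonduer mention or candidate:

# unigrams and bigrams of lemmas from the surrounding sentence, excluding the mention itself
bigrams = list(get_sentence_ngrams(cand, attrib="lemmas", n_min=1, n_max=2, lower=True))
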
def fromxml(node):
"""Static method returning an MetaField instance (any subclass of AbstractMetaField) from the given XML description. Node can be a string or an etree._Element."""
if not isinstance(node,ElementTree._Element): #pylint: disable=protected-access
node = parsexmlstring(node)
if node.tag.lower() != 'meta':
raise Exception("Expected meta tag but got '" + node.tag + "' instead")
key = node.attrib['id']
if node.text:
value = node.text
else:
value = None
operator = 'set'
if 'operator' in node.attrib:
operator= node.attrib['operator']
if operator == 'set':
cls = SetMetaField
elif operator == 'unset':
cls = UnsetMetaField
elif operator == 'copy':
cls = CopyMetaField
elif operator == 'parameter':
cls = ParameterMetaField
return cls(key, value) | Static method returning an MetaField instance (any subclass of AbstractMetaField) from the given XML description. Node can be a string or an etree._Element. | Below is the the instruction that describes the task:
### Input:
Static method returning an MetaField instance (any subclass of AbstractMetaField) from the given XML description. Node can be a string or an etree._Element.
### Response:
def fromxml(node):
"""Static method returning an MetaField instance (any subclass of AbstractMetaField) from the given XML description. Node can be a string or an etree._Element."""
if not isinstance(node,ElementTree._Element): #pylint: disable=protected-access
node = parsexmlstring(node)
if node.tag.lower() != 'meta':
raise Exception("Expected meta tag but got '" + node.tag + "' instead")
key = node.attrib['id']
if node.text:
value = node.text
else:
value = None
operator = 'set'
if 'operator' in node.attrib:
operator= node.attrib['operator']
if operator == 'set':
cls = SetMetaField
elif operator == 'unset':
cls = UnsetMetaField
elif operator == 'copy':
cls = CopyMetaField
elif operator == 'parameter':
cls = ParameterMetaField
return cls(key, value) |
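
A small sketch of the input it expects, assuming the surrounding module supplies the referenced parsexmlstring helper and the SetMetaField/UnsetMetaField classes (used above but not shown), and that the function is normally reached as a static method of the metafield base class. Note that an operator value outside the four recognized ones would leave cls unbound and raise a NameError on the final line.

field = fromxml('<meta id="language">eng</meta>')             # SetMetaField('language', 'eng')
cleared = fromxml('<meta id="language" operator="unset" />')  # UnsetMetaField('language', None)
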
def to_match(self):
"""Return a unicode object with the MATCH representation of this Variable."""
self.validate()
# We don't want the dollar sign as part of the variable name.
variable_with_no_dollar_sign = self.variable_name[1:]
match_variable_name = '{%s}' % (six.text_type(variable_with_no_dollar_sign),)
# We can't directly pass a Date or DateTime object, so we have to pass it as a string
# and then parse it inline. For date format parameter meanings, see:
# http://docs.oracle.com/javase/7/docs/api/java/text/SimpleDateFormat.html
# For the semantics of the date() OrientDB SQL function, see:
# http://orientdb.com/docs/last/SQL-Functions.html#date
if GraphQLDate.is_same_type(self.inferred_type):
return u'date(%s, "%s")' % (match_variable_name, STANDARD_DATE_FORMAT)
elif GraphQLDateTime.is_same_type(self.inferred_type):
return u'date(%s, "%s")' % (match_variable_name, STANDARD_DATETIME_FORMAT)
else:
return match_variable_name | Return a unicode object with the MATCH representation of this Variable. | Below is the the instruction that describes the task:
### Input:
Return a unicode object with the MATCH representation of this Variable.
### Response:
def to_match(self):
"""Return a unicode object with the MATCH representation of this Variable."""
self.validate()
# We don't want the dollar sign as part of the variable name.
variable_with_no_dollar_sign = self.variable_name[1:]
match_variable_name = '{%s}' % (six.text_type(variable_with_no_dollar_sign),)
# We can't directly pass a Date or DateTime object, so we have to pass it as a string
# and then parse it inline. For date format parameter meanings, see:
# http://docs.oracle.com/javase/7/docs/api/java/text/SimpleDateFormat.html
# For the semantics of the date() OrientDB SQL function, see:
# http://orientdb.com/docs/last/SQL-Functions.html#date
if GraphQLDate.is_same_type(self.inferred_type):
return u'date(%s, "%s")' % (match_variable_name, STANDARD_DATE_FORMAT)
elif GraphQLDateTime.is_same_type(self.inferred_type):
return u'date(%s, "%s")' % (match_variable_name, STANDARD_DATETIME_FORMAT)
else:
return match_variable_name |
def rpc_get_historic_names_by_address(self, address, offset, count, **con_info):
"""
Get the list of names owned by an address throughout history
Return {'status': True, 'names': [{'name': ..., 'block_id': ..., 'vtxindex': ...}]} on success
Return {'error': ...} on error
"""
if not check_address(address):
return {'error': 'Invalid address', 'http_status': 400}
if not check_offset(offset):
return {'error': 'invalid offset', 'http_status': 400}
if not check_count(count, 10):
return {'error': 'invalid count', 'http_status': 400}
db = get_db_state(self.working_dir)
names = db.get_historic_names_by_address(address, offset, count)
db.close()
if names is None:
names = []
return self.success_response( {'names': names} ) | Get the list of names owned by an address throughout history
Return {'status': True, 'names': [{'name': ..., 'block_id': ..., 'vtxindex': ...}]} on success
Return {'error': ...} on error | Below is the the instruction that describes the task:
### Input:
Get the list of names owned by an address throughout history
Return {'status': True, 'names': [{'name': ..., 'block_id': ..., 'vtxindex': ...}]} on success
Return {'error': ...} on error
### Response:
def rpc_get_historic_names_by_address(self, address, offset, count, **con_info):
"""
Get the list of names owned by an address throughout history
Return {'status': True, 'names': [{'name': ..., 'block_id': ..., 'vtxindex': ...}]} on success
Return {'error': ...} on error
"""
if not check_address(address):
return {'error': 'Invalid address', 'http_status': 400}
if not check_offset(offset):
return {'error': 'invalid offset', 'http_status': 400}
if not check_count(count, 10):
return {'error': 'invalid count', 'http_status': 400}
db = get_db_state(self.working_dir)
names = db.get_historic_names_by_address(address, offset, count)
db.close()
if names is None:
names = []
return self.success_response( {'names': names} ) |
def setup_rules_file():
"""
Copy the udev rules file for Opentrons Modules to opentrons_data directory
and trigger the new rules.
This rules file in opentrons_data is symlinked into udev rules directory
TODO: Move this file to resources and move the symlink to point to
/data/system/
"""
import shutil
import subprocess
rules_file = os.path.join(
os.path.abspath(os.path.dirname(__file__)), '..',
'config', 'modules', '95-opentrons-modules.rules')
shutil.copy2(
rules_file,
'/data/user_storage/opentrons_data/95-opentrons-modules.rules')
res0 = subprocess.run('udevadm control --reload-rules',
shell=True, stdout=subprocess.PIPE).stdout.decode()
if res0:
log.warning(res0.strip())
res1 = subprocess.run('udevadm trigger',
shell=True, stdout=subprocess.PIPE).stdout.decode()
if res1:
log.warning(res1.strip()) | Copy the udev rules file for Opentrons Modules to opentrons_data directory
and trigger the new rules.
This rules file in opentrons_data is symlinked into udev rules directory
TODO: Move this file to resources and move the symlink to point to
/data/system/ | Below is the the instruction that describes the task:
### Input:
Copy the udev rules file for Opentrons Modules to opentrons_data directory
and trigger the new rules.
This rules file in opentrons_data is symlinked into udev rules directory
TODO: Move this file to resources and move the symlink to point to
/data/system/
### Response:
def setup_rules_file():
"""
Copy the udev rules file for Opentrons Modules to opentrons_data directory
and trigger the new rules.
This rules file in opentrons_data is symlinked into udev rules directory
TODO: Move this file to resources and move the symlink to point to
/data/system/
"""
import shutil
import subprocess
rules_file = os.path.join(
os.path.abspath(os.path.dirname(__file__)), '..',
'config', 'modules', '95-opentrons-modules.rules')
shutil.copy2(
rules_file,
'/data/user_storage/opentrons_data/95-opentrons-modules.rules')
res0 = subprocess.run('udevadm control --reload-rules',
shell=True, stdout=subprocess.PIPE).stdout.decode()
if res0:
log.warning(res0.strip())
res1 = subprocess.run('udevadm trigger',
shell=True, stdout=subprocess.PIPE).stdout.decode()
if res1:
log.warning(res1.strip()) |
def is_enabled(self, cls):
"""Return whether the given component class is enabled."""
if cls not in self.enabled:
self.enabled[cls] = self.is_component_enabled(cls)
return self.enabled[cls] | Return whether the given component class is enabled. | Below is the the instruction that describes the task:
### Input:
Return whether the given component class is enabled.
### Response:
def is_enabled(self, cls):
"""Return whether the given component class is enabled."""
if cls not in self.enabled:
self.enabled[cls] = self.is_component_enabled(cls)
return self.enabled[cls] |
def _prepare_photometry_input(data, error, mask, wcs, unit):
"""
Parse the inputs to `aperture_photometry`.
`aperture_photometry` accepts a wide range of inputs, e.g. ``data``
could be a numpy array, a Quantity array, or a fits HDU. This
requires some parsing and validation to ensure that all inputs are
complete and consistent. For example, the data could carry a unit
and the wcs itself, so we need to check that it is consistent with
the unit and wcs given as input parameters.
"""
if isinstance(data, fits.HDUList):
for i in range(len(data)):
if data[i].data is not None:
warnings.warn("Input data is a HDUList object, photometry is "
"run only for the {0} HDU."
.format(i), AstropyUserWarning)
data = data[i]
break
if isinstance(data, (fits.PrimaryHDU, fits.ImageHDU)):
header = data.header
data = data.data
if 'BUNIT' in header:
bunit = u.Unit(header['BUNIT'], parse_strict='warn')
if isinstance(bunit, u.UnrecognizedUnit):
warnings.warn('The BUNIT in the header of the input data is '
'not parseable as a valid unit.',
AstropyUserWarning)
else:
data = u.Quantity(data, unit=bunit)
if wcs is None:
try:
wcs = WCS(header)
except Exception:
# A valid WCS was not found in the header. Let the calling
# application raise an exception if it needs a WCS.
pass
data = np.asanyarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
if unit is not None:
unit = u.Unit(unit, parse_strict='warn')
if isinstance(unit, u.UnrecognizedUnit):
warnings.warn('The input unit is not parseable as a valid '
'unit.', AstropyUserWarning)
unit = None
if isinstance(data, u.Quantity):
if unit is not None and data.unit != unit:
warnings.warn('The input unit does not agree with the data '
'unit.', AstropyUserWarning)
else:
if unit is not None:
data = u.Quantity(data, unit=unit)
if error is not None:
if isinstance(error, u.Quantity):
if unit is not None and error.unit != unit:
warnings.warn('The input unit does not agree with the error '
'unit.', AstropyUserWarning)
if np.isscalar(error.value):
error = u.Quantity(np.broadcast_arrays(error, data),
unit=error.unit)[0]
else:
if np.isscalar(error):
error = np.broadcast_arrays(error, data)[0]
if unit is not None:
error = u.Quantity(error, unit=unit)
error = np.asanyarray(error)
if error.shape != data.shape:
raise ValueError('error and data must have the same shape.')
if mask is not None:
mask = np.asanyarray(mask)
if mask.shape != data.shape:
raise ValueError('mask and data must have the same shape.')
return data, error, mask, wcs | Parse the inputs to `aperture_photometry`.
`aperture_photometry` accepts a wide range of inputs, e.g. ``data``
could be a numpy array, a Quantity array, or a fits HDU. This
requires some parsing and validation to ensure that all inputs are
complete and consistent. For example, the data could carry a unit
and the wcs itself, so we need to check that it is consistent with
the unit and wcs given as input parameters. | Below is the the instruction that describes the task:
### Input:
Parse the inputs to `aperture_photometry`.
`aperture_photometry` accepts a wide range of inputs, e.g. ``data``
could be a numpy array, a Quantity array, or a fits HDU. This
requires some parsing and validation to ensure that all inputs are
complete and consistent. For example, the data could carry a unit
and the wcs itself, so we need to check that it is consistent with
the unit and wcs given as input parameters.
### Response:
def _prepare_photometry_input(data, error, mask, wcs, unit):
"""
Parse the inputs to `aperture_photometry`.
`aperture_photometry` accepts a wide range of inputs, e.g. ``data``
could be a numpy array, a Quantity array, or a fits HDU. This
requires some parsing and validation to ensure that all inputs are
complete and consistent. For example, the data could carry a unit
and the wcs itself, so we need to check that it is consistent with
the unit and wcs given as input parameters.
"""
if isinstance(data, fits.HDUList):
for i in range(len(data)):
if data[i].data is not None:
warnings.warn("Input data is a HDUList object, photometry is "
"run only for the {0} HDU."
.format(i), AstropyUserWarning)
data = data[i]
break
if isinstance(data, (fits.PrimaryHDU, fits.ImageHDU)):
header = data.header
data = data.data
if 'BUNIT' in header:
bunit = u.Unit(header['BUNIT'], parse_strict='warn')
if isinstance(bunit, u.UnrecognizedUnit):
warnings.warn('The BUNIT in the header of the input data is '
'not parseable as a valid unit.',
AstropyUserWarning)
else:
data = u.Quantity(data, unit=bunit)
if wcs is None:
try:
wcs = WCS(header)
except Exception:
# A valid WCS was not found in the header. Let the calling
# application raise an exception if it needs a WCS.
pass
data = np.asanyarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
if unit is not None:
unit = u.Unit(unit, parse_strict='warn')
if isinstance(unit, u.UnrecognizedUnit):
warnings.warn('The input unit is not parseable as a valid '
'unit.', AstropyUserWarning)
unit = None
if isinstance(data, u.Quantity):
if unit is not None and data.unit != unit:
warnings.warn('The input unit does not agree with the data '
'unit.', AstropyUserWarning)
else:
if unit is not None:
data = u.Quantity(data, unit=unit)
if error is not None:
if isinstance(error, u.Quantity):
if unit is not None and error.unit != unit:
warnings.warn('The input unit does not agree with the error '
'unit.', AstropyUserWarning)
if np.isscalar(error.value):
error = u.Quantity(np.broadcast_arrays(error, data),
unit=error.unit)[0]
else:
if np.isscalar(error):
error = np.broadcast_arrays(error, data)[0]
if unit is not None:
error = u.Quantity(error, unit=unit)
error = np.asanyarray(error)
if error.shape != data.shape:
raise ValueError('error and data must have the same shape.')
if mask is not None:
mask = np.asanyarray(mask)
if mask.shape != data.shape:
raise ValueError('mask and data must have the same shape.')
return data, error, mask, wcs |
def always(self, method, path=None, headers=None, text=None, json=None):
'''
Sends response every time matching parameters are found util :func:`Server.reset` is called
:type method: str
:param method: request method: ``'GET'``, ``'POST'``, etc. can be some custom string
:type path: str
:param path: request path including query parameters
:type headers: dict
:param headers: dictionary of headers to expect. If omitted any headers will do
:type text: str
:param text: request text to expect. If ommited any text will match
:type json: dict
:param json: request json to expect. If ommited any json will match,
if present text param will be ignored
:rtype: Rule
:returns: newly created expectation rule
'''
rule = Rule(method, path, headers, text, json)
return self._add_rule_to(rule, self._always_rules) | Sends response every time matching parameters are found util :func:`Server.reset` is called
:type method: str
:param method: request method: ``'GET'``, ``'POST'``, etc. can be some custom string
:type path: str
:param path: request path including query parameters
:type headers: dict
:param headers: dictionary of headers to expect. If omitted any headers will do
:type text: str
:param text: request text to expect. If ommited any text will match
:type json: dict
:param json: request json to expect. If ommited any json will match,
if present text param will be ignored
:rtype: Rule
:returns: newly created expectation rule | Below is the the instruction that describes the task:
### Input:
Sends response every time matching parameters are found util :func:`Server.reset` is called
:type method: str
:param method: request method: ``'GET'``, ``'POST'``, etc. can be some custom string
:type path: str
:param path: request path including query parameters
:type headers: dict
:param headers: dictionary of headers to expect. If omitted any headers will do
:type text: str
:param text: request text to expect. If ommited any text will match
:type json: dict
:param json: request json to expect. If ommited any json will match,
if present text param will be ignored
:rtype: Rule
:returns: newly created expectation rule
### Response:
def always(self, method, path=None, headers=None, text=None, json=None):
'''
Sends response every time matching parameters are found util :func:`Server.reset` is called
:type method: str
:param method: request method: ``'GET'``, ``'POST'``, etc. can be some custom string
:type path: str
:param path: request path including query parameters
:type headers: dict
:param headers: dictionary of headers to expect. If omitted any headers will do
:type text: str
:param text: request text to expect. If ommited any text will match
:type json: dict
:param json: request json to expect. If ommited any json will match,
if present text param will be ignored
:rtype: Rule
:returns: newly created expectation rule
'''
rule = Rule(method, path, headers, text, json)
return self._add_rule_to(rule, self._always_rules) |
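
A usage sketch, with `server` standing in for an instance of the mock-server class this method belongs to (a hypothetical name; the class itself is not shown in the row):

rule = server.always('GET', path='/ping')                       # match every GET /ping until reset()
login = server.always('POST', path='/login', json={'u': 'me'})  # match POSTs whose JSON body equals this
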
def to_jd(year, month, day):
'''Determine Julian day from Persian date'''
if year >= 0:
y = 474
else:
y = 473
epbase = year - y
epyear = 474 + (epbase % 2820)
if month <= 7:
m = (month - 1) * 31
else:
m = (month - 1) * 30 + 6
return day + m + trunc(((epyear * 682) - 110) / 2816) + (epyear - 1) * 365 + trunc(epbase / 2820) * 1029983 + (EPOCH - 1) | Determine Julian day from Persian date | Below is the the instruction that describes the task:
### Input:
Determine Julian day from Persian date
### Response:
def to_jd(year, month, day):
'''Determine Julian day from Persian date'''
if year >= 0:
y = 474
else:
y = 473
epbase = year - y
epyear = 474 + (epbase % 2820)
if month <= 7:
m = (month - 1) * 31
else:
m = (month - 1) * 30 + 6
return day + m + trunc(((epyear * 682) - 110) / 2816) + (epyear - 1) * 365 + trunc(epbase / 2820) * 1029983 + (EPOCH - 1) |
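
A worked example, with the to_jd above in scope and assuming EPOCH is the usual Persian (Jalali) calendar epoch of JD 1948320.5 (an assumption; the constant is defined elsewhere in the real module). For 1 Farvardin 1395 (Nowruz, 20 March 2016): epbase = 921, epyear = 1395, m = 0, and the sum is 1 + 0 + 337 + 508810 + 0 + 1948319.5 = 2457467.5, the Julian day beginning at midnight UT on 2016-03-20.

from math import trunc

EPOCH = 1948320.5   # assumed epoch value; supplied by the module in practice

print(to_jd(1395, 1, 1))   # -> 2457467.5
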
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
# If handler has been explicitly set to None, don't override.
if request.handler is not None:
request.handler = hypermedia_handler | Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type | Below is the the instruction that describes the task:
### Input:
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
### Response:
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
# If handler has been explicitly set to None, don't override.
if request.handler is not None:
request.handler = hypermedia_handler |
def raw(config): # pragma: no cover
"""Dump the contents of LDAP to console in raw format."""
client = Client()
client.prepare_connection()
audit_api = API(client)
print(audit_api.raw()) | Dump the contents of LDAP to console in raw format. | Below is the the instruction that describes the task:
### Input:
Dump the contents of LDAP to console in raw format.
### Response:
def raw(config): # pragma: no cover
"""Dump the contents of LDAP to console in raw format."""
client = Client()
client.prepare_connection()
audit_api = API(client)
print(audit_api.raw()) |
def make_crossroad_router(source, drain=False):
''' legacy crossroad implementation. deprecated
'''
sink_observer = None
def on_sink_subscribe(observer):
nonlocal sink_observer
sink_observer = observer
def dispose():
nonlocal sink_observer
sink_observer = None
return dispose
def route_crossroad(request):
def on_response_subscribe(observer):
def on_next_source(i):
if type(i) is cyclotron.Drain:
observer.on_completed()
else:
observer.on_next(i)
source_disposable = source.subscribe(
on_next=on_next_source,
on_error=lambda e: observer.on_error(e),
on_completed=lambda: observer.on_completed()
)
def on_next_request(i):
if sink_observer is not None:
sink_observer.on_next(i)
def on_request_completed():
if sink_observer is not None:
if drain is True:
sink_observer.on_next(cyclotron.Drain())
else:
sink_observer.on_completed()
request_disposable = request.subscribe(
on_next=on_next_request,
on_error=observer.on_error,
on_completed=on_request_completed
)
def dispose():
source_disposable.dispose()
request_disposable.dispose()
return dispose
return Observable.create(on_response_subscribe)
return Observable.create(on_sink_subscribe), route_crossroad | legacy crossroad implementation. deprecated | Below is the the instruction that describes the task:
### Input:
legacy crossroad implementation. deprecated
### Response:
def make_crossroad_router(source, drain=False):
''' legacy crossroad implementation. deprecated
'''
sink_observer = None
def on_sink_subscribe(observer):
nonlocal sink_observer
sink_observer = observer
def dispose():
nonlocal sink_observer
sink_observer = None
return dispose
def route_crossroad(request):
def on_response_subscribe(observer):
def on_next_source(i):
if type(i) is cyclotron.Drain:
observer.on_completed()
else:
observer.on_next(i)
source_disposable = source.subscribe(
on_next=on_next_source,
on_error=lambda e: observer.on_error(e),
on_completed=lambda: observer.on_completed()
)
def on_next_request(i):
if sink_observer is not None:
sink_observer.on_next(i)
def on_request_completed():
if sink_observer is not None:
if drain is True:
sink_observer.on_next(cyclotron.Drain())
else:
sink_observer.on_completed()
request_disposable = request.subscribe(
on_next=on_next_request,
on_error=observer.on_error,
on_completed=on_request_completed
)
def dispose():
source_disposable.dispose()
request_disposable.dispose()
return dispose
return Observable.create(on_response_subscribe)
return Observable.create(on_sink_subscribe), route_crossroad |
def bootstrap_prompt(prompt_kwargs, group):
"""
Bootstrap prompt_toolkit kwargs or use user defined values.
:param prompt_kwargs: The user specified prompt kwargs.
"""
prompt_kwargs = prompt_kwargs or {}
defaults = {
"history": InMemoryHistory(),
"completer": ClickCompleter(group),
"message": u"> ",
}
for key in defaults:
default_value = defaults[key]
if key not in prompt_kwargs:
prompt_kwargs[key] = default_value
return prompt_kwargs | Bootstrap prompt_toolkit kwargs or use user defined values.
:param prompt_kwargs: The user specified prompt kwargs. | Below is the the instruction that describes the task:
### Input:
Bootstrap prompt_toolkit kwargs or use user defined values.
:param prompt_kwargs: The user specified prompt kwargs.
### Response:
def bootstrap_prompt(prompt_kwargs, group):
"""
Bootstrap prompt_toolkit kwargs or use user defined values.
:param prompt_kwargs: The user specified prompt kwargs.
"""
prompt_kwargs = prompt_kwargs or {}
defaults = {
"history": InMemoryHistory(),
"completer": ClickCompleter(group),
"message": u"> ",
}
for key in defaults:
default_value = defaults[key]
if key not in prompt_kwargs:
prompt_kwargs[key] = default_value
return prompt_kwargs |
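
A sketch of a typical call, assuming bootstrap_prompt and its ClickCompleter/InMemoryHistory dependencies are importable from the module this row comes from (it resembles click-repl's internals, which is an assumption):

import click

@click.group()
def cli():
    """Toy command group used only for illustration."""

kwargs = bootstrap_prompt({"message": u"repl> "}, cli)
# kwargs now also carries the default history and completer;
# the user-supplied "message" is kept rather than overwritten.
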
def get_oc_api_token():
"""
Get token of user logged in OpenShift cluster
:return: str, API token
"""
oc_command_exists()
try:
return run_cmd(["oc", "whoami", "-t"], return_output=True).rstrip() # remove '\n'
except subprocess.CalledProcessError as ex:
raise ConuException("oc whoami -t failed: %s" % ex) | Get token of user logged in OpenShift cluster
:return: str, API token | Below is the the instruction that describes the task:
### Input:
Get token of user logged in OpenShift cluster
:return: str, API token
### Response:
def get_oc_api_token():
"""
Get token of user logged in OpenShift cluster
:return: str, API token
"""
oc_command_exists()
try:
return run_cmd(["oc", "whoami", "-t"], return_output=True).rstrip() # remove '\n'
except subprocess.CalledProcessError as ex:
raise ConuException("oc whoami -t failed: %s" % ex) |
def allocate(self, pool, tenant_id=None, **params):
"""Allocates a floating IP to the tenant.
You must provide a pool name or id for which you would like to
allocate a floating IP.
:returns: FloatingIp object corresponding to an allocated floating IP
"""
if not tenant_id:
tenant_id = self.request.user.project_id
create_dict = {'floating_network_id': pool,
'tenant_id': tenant_id}
if 'subnet_id' in params:
create_dict['subnet_id'] = params['subnet_id']
if 'floating_ip_address' in params:
create_dict['floating_ip_address'] = params['floating_ip_address']
if 'description' in params:
create_dict['description'] = params['description']
if 'dns_domain' in params:
create_dict['dns_domain'] = params['dns_domain']
if 'dns_name' in params:
create_dict['dns_name'] = params['dns_name']
fip = self.client.create_floatingip(
{'floatingip': create_dict}).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip) | Allocates a floating IP to the tenant.
You must provide a pool name or id for which you would like to
allocate a floating IP.
:returns: FloatingIp object corresponding to an allocated floating IP | Below is the the instruction that describes the task:
### Input:
Allocates a floating IP to the tenant.
You must provide a pool name or id for which you would like to
allocate a floating IP.
:returns: FloatingIp object corresponding to an allocated floating IP
### Response:
def allocate(self, pool, tenant_id=None, **params):
"""Allocates a floating IP to the tenant.
You must provide a pool name or id for which you would like to
allocate a floating IP.
:returns: FloatingIp object corresponding to an allocated floating IP
"""
if not tenant_id:
tenant_id = self.request.user.project_id
create_dict = {'floating_network_id': pool,
'tenant_id': tenant_id}
if 'subnet_id' in params:
create_dict['subnet_id'] = params['subnet_id']
if 'floating_ip_address' in params:
create_dict['floating_ip_address'] = params['floating_ip_address']
if 'description' in params:
create_dict['description'] = params['description']
if 'dns_domain' in params:
create_dict['dns_domain'] = params['dns_domain']
if 'dns_name' in params:
create_dict['dns_name'] = params['dns_name']
fip = self.client.create_floatingip(
{'floatingip': create_dict}).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip) |
def send(self, *args):
"""Sends a single raw message to the IRC server.
Arguments are automatically joined by spaces. No newlines are allowed.
"""
msg = " ".join(a.nick if isinstance(a, User) else str(a) for a in args)
if "\n" in msg:
raise ValueError("Cannot send() a newline. Args: %s" % repr(args))
_log.debug("%s <-- %s", self.server.host, msg)
self.socket.send(msg + "\r\n") | Sends a single raw message to the IRC server.
Arguments are automatically joined by spaces. No newlines are allowed. | Below is the the instruction that describes the task:
### Input:
Sends a single raw message to the IRC server.
Arguments are automatically joined by spaces. No newlines are allowed.
### Response:
def send(self, *args):
"""Sends a single raw message to the IRC server.
Arguments are automatically joined by spaces. No newlines are allowed.
"""
msg = " ".join(a.nick if isinstance(a, User) else str(a) for a in args)
if "\n" in msg:
raise ValueError("Cannot send() a newline. Args: %s" % repr(args))
_log.debug("%s <-- %s", self.server.host, msg)
self.socket.send(msg + "\r\n") |
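
For illustration, with `conn` standing in for an object exposing the method above over an open IRC socket (a hypothetical name):

conn.send("NICK", "examplebot")
conn.send("PRIVMSG", "#example", ":hello there")   # args are joined by single spaces
# conn.send("QUIT\ngoodbye") would raise ValueError because of the embedded newline
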
def remove_user_from_group(self, username, groupname, raise_on_error=False):
"""Remove a user from a group
Attempts to remove a user from a group
Args
username: The username to remove from the group.
groupname: The group name to be removed from the user.
Returns:
True: Succeeded
False: If unsuccessful
"""
response = self._delete(self.rest_url + "/group/user/direct",params={"username": username, "groupname": groupname})
if response.status_code == 204:
return True
if raise_on_error:
raise RuntimeError(response.json()['message'])
return False | Remove a user from a group
Attempts to remove a user from a group
Args
username: The username to remove from the group.
groupname: The group name to be removed from the user.
Returns:
True: Succeeded
False: If unsuccessful | Below is the the instruction that describes the task:
### Input:
Remove a user from a group
Attempts to remove a user from a group
Args
username: The username to remove from the group.
groupname: The group name to be removed from the user.
Returns:
True: Succeeded
False: If unsuccessful
### Response:
def remove_user_from_group(self, username, groupname, raise_on_error=False):
"""Remove a user from a group
Attempts to remove a user from a group
Args
username: The username to remove from the group.
groupname: The group name to be removed from the user.
Returns:
True: Succeeded
False: If unsuccessful
"""
response = self._delete(self.rest_url + "/group/user/direct",params={"username": username, "groupname": groupname})
if response.status_code == 204:
return True
if raise_on_error:
raise RuntimeError(response.json()['message'])
return False |
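
A usage sketch, with `crowd` standing in for an already-configured client instance exposing the method above (the client class is not shown in the row):

if crowd.remove_user_from_group('jdoe', 'jira-users'):
    print('membership removed')
else:
    print('user was not removed')   # or pass raise_on_error=True to surface the server message
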
def exit_if_no_roles(roles_count, roles_path):
"""
Exit if there were no roles found.
"""
if roles_count == 0:
ui.warn(c.MESSAGES["empty_roles_path"], roles_path)
sys.exit() | Exit if there were no roles found. | Below is the the instruction that describes the task:
### Input:
Exit if there were no roles found.
### Response:
def exit_if_no_roles(roles_count, roles_path):
"""
Exit if there were no roles found.
"""
if roles_count == 0:
ui.warn(c.MESSAGES["empty_roles_path"], roles_path)
sys.exit() |
def get_new_edges(self, level):
"""Get new edges from the pattern graph for the graph search algorithm
The level argument denotes the distance of the new edges from the
starting vertex in the pattern graph.
"""
return (
self.level_edges.get(level, []),
self.level_constraints.get(level, [])
) | Get new edges from the pattern graph for the graph search algorithm
The level argument denotes the distance of the new edges from the
starting vertex in the pattern graph. | Below is the the instruction that describes the task:
### Input:
Get new edges from the pattern graph for the graph search algorithm
The level argument denotes the distance of the new edges from the
starting vertex in the pattern graph.
### Response:
def get_new_edges(self, level):
"""Get new edges from the pattern graph for the graph search algorithm
The level argument denotes the distance of the new edges from the
starting vertex in the pattern graph.
"""
return (
self.level_edges.get(level, []),
self.level_constraints.get(level, [])
) |