code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars)
---|---|---
def exists_using_casper(self, filename):
"""Check for the existence of a package file.
Unlike other DistributionPoint types, JDS and CDP types have no
documented interface for checking whether the server and its
children have a complete copy of a file. The best we can do is
check for an object using the API /packages URL--JSS.Package()
or /scripts and look for matches on the filename.
If this is not enough, this method uses the results of the
casper.jxml page to determine if a package exists. This is an
undocumented feature and as such should probably not be relied
upon. Please note, scripts are not listed per-distributionserver
like packages. For scripts, the best you can do is use the
regular exists method.
It will test for whether the file exists on ALL configured
distribution servers. This may register False if the JDS is busy
syncing them.
"""
casper_results = casper.Casper(self.connection["jss"])
distribution_servers = casper_results.find("distributionservers")
# Step one: Build a list of sets of all package names.
all_packages = []
for distribution_server in distribution_servers:
packages = set()
for package in distribution_server.findall("packages/package"):
packages.add(os.path.basename(package.find("fileURL").text))
all_packages.append(packages)
# Step two: Intersect the sets.
base_set = all_packages.pop()
for packages in all_packages:
base_set = base_set.intersection(packages)
# Step three: Check for membership.
return filename in base_set | Check for the existence of a package file.
Unlike other DistributionPoint types, JDS and CDP types have no
documented interface for checking whether the server and its
children have a complete copy of a file. The best we can do is
check for an object using the API /packages URL--JSS.Package()
or /scripts and look for matches on the filename.
If this is not enough, this method uses the results of the
casper.jxml page to determine if a package exists. This is an
undocumented feature and as such should probably not be relied
upon. Please note, scripts are not listed per-distributionserver
like packages. For scripts, the best you can do is use the
regular exists method.
It will test for whether the file exists on ALL configured
distribution servers. This may register False if the JDS is busy
syncing them. | Below is the instruction that describes the task:
### Input:
Check for the existence of a package file.
Unlike other DistributionPoint types, JDS and CDP types have no
documented interface for checking whether the server and its
children have a complete copy of a file. The best we can do is
check for an object using the API /packages URL--JSS.Package()
or /scripts and look for matches on the filename.
If this is not enough, this method uses the results of the
casper.jxml page to determine if a package exists. This is an
undocumented feature and as such should probably not be relied
upon. Please note, scripts are not listed per-distributionserver
like packages. For scripts, the best you can do is use the
regular exists method.
It will test for whether the file exists on ALL configured
distribution servers. This may register False if the JDS is busy
syncing them.
### Response:
def exists_using_casper(self, filename):
"""Check for the existence of a package file.
Unlike other DistributionPoint types, JDS and CDP types have no
documented interface for checking whether the server and its
children have a complete copy of a file. The best we can do is
check for an object using the API /packages URL--JSS.Package()
or /scripts and look for matches on the filename.
If this is not enough, this method uses the results of the
casper.jxml page to determine if a package exists. This is an
undocumented feature and as such should probably not be relied
upon. Please note, scripts are not listed per-distributionserver
like packages. For scripts, the best you can do is use the
regular exists method.
It will test for whether the file exists on ALL configured
distribution servers. This may register False if the JDS is busy
syncing them.
"""
casper_results = casper.Casper(self.connection["jss"])
distribution_servers = casper_results.find("distributionservers")
# Step one: Build a list of sets of all package names.
all_packages = []
for distribution_server in distribution_servers:
packages = set()
for package in distribution_server.findall("packages/package"):
packages.add(os.path.basename(package.find("fileURL").text))
all_packages.append(packages)
# Step two: Intersect the sets.
base_set = all_packages.pop()
for packages in all_packages:
base_set = base_set.intersection(packages)
# Step three: Check for membership.
return filename in base_set |
def bundle_changed(self, event):
# type: (BundleEvent) -> None
"""
A bundle event has been triggered
:param event: The bundle event
"""
kind = event.get_kind()
bundle = event.get_bundle()
if kind == BundleEvent.STOPPING_PRECLEAN:
# A bundle is gone, remove its factories after the deactivator has
# been called. That way, the deactivator can kill manually started
# components.
self._unregister_bundle_factories(bundle)
elif kind == BundleEvent.STARTED:
# A bundle is starting, register its factories before its activator
# is called. That way, the activator can use the registered
# factories.
self._register_bundle_factories(bundle)
elif kind == BundleEvent.UPDATE_BEGIN:
# A bundle will be updated, store its auto-restart component
self._autorestart_store_components(bundle)
elif kind == BundleEvent.UPDATED:
# Update has finished, restart stored components
self._autorestart_components(bundle)
self._autorestart_clear_components(bundle)
elif kind == BundleEvent.UPDATE_FAILED:
# Update failed, clean the stored components
self._autorestart_clear_components(bundle) | A bundle event has been triggered
:param event: The bundle event | Below is the instruction that describes the task:
### Input:
A bundle event has been triggered
:param event: The bundle event
### Response:
def bundle_changed(self, event):
# type: (BundleEvent) -> None
"""
A bundle event has been triggered
:param event: The bundle event
"""
kind = event.get_kind()
bundle = event.get_bundle()
if kind == BundleEvent.STOPPING_PRECLEAN:
# A bundle is gone, remove its factories after the deactivator has
# been called. That way, the deactivator can kill manually started
# components.
self._unregister_bundle_factories(bundle)
elif kind == BundleEvent.STARTED:
# A bundle is starting, register its factories before its activator
# is called. That way, the activator can use the registered
# factories.
self._register_bundle_factories(bundle)
elif kind == BundleEvent.UPDATE_BEGIN:
# A bundle will be updated, store its auto-restart component
self._autorestart_store_components(bundle)
elif kind == BundleEvent.UPDATED:
# Update has finished, restart stored components
self._autorestart_components(bundle)
self._autorestart_clear_components(bundle)
elif kind == BundleEvent.UPDATE_FAILED:
# Update failed, clean the stored components
self._autorestart_clear_components(bundle) |
def timestr_to_seconds(
x: Union[dt.date, str], *, inverse: bool = False, mod24: bool = False
) -> int:
"""
Given an HH:MM:SS time string ``x``, return the number of seconds
past midnight that it represents.
In keeping with GTFS standards, the hours entry may be greater than
23.
If ``mod24``, then return the number of seconds modulo ``24*3600``.
If ``inverse``, then do the inverse operation.
In this case, if ``mod24`` also, then first take the number of
seconds modulo ``24*3600``.
"""
if not inverse:
try:
hours, mins, seconds = x.split(":")
result = int(hours) * 3600 + int(mins) * 60 + int(seconds)
if mod24:
result %= 24 * 3600
except:
result = np.nan
else:
try:
seconds = int(x)
if mod24:
seconds %= 24 * 3600
hours, remainder = divmod(seconds, 3600)
mins, secs = divmod(remainder, 60)
result = f"{hours:02d}:{mins:02d}:{secs:02d}"
except:
result = np.nan
return result | Given an HH:MM:SS time string ``x``, return the number of seconds
past midnight that it represents.
In keeping with GTFS standards, the hours entry may be greater than
23.
If ``mod24``, then return the number of seconds modulo ``24*3600``.
If ``inverse``, then do the inverse operation.
In this case, if ``mod24`` also, then first take the number of
seconds modulo ``24*3600``. | Below is the instruction that describes the task:
### Input:
Given an HH:MM:SS time string ``x``, return the number of seconds
past midnight that it represents.
In keeping with GTFS standards, the hours entry may be greater than
23.
If ``mod24``, then return the number of seconds modulo ``24*3600``.
If ``inverse``, then do the inverse operation.
In this case, if ``mod24`` also, then first take the number of
seconds modulo ``24*3600``.
### Response:
def timestr_to_seconds(
x: Union[dt.date, str], *, inverse: bool = False, mod24: bool = False
) -> int:
"""
Given an HH:MM:SS time string ``x``, return the number of seconds
past midnight that it represents.
In keeping with GTFS standards, the hours entry may be greater than
23.
If ``mod24``, then return the number of seconds modulo ``24*3600``.
If ``inverse``, then do the inverse operation.
In this case, if ``mod24`` also, then first take the number of
seconds modulo ``24*3600``.
"""
if not inverse:
try:
hours, mins, seconds = x.split(":")
result = int(hours) * 3600 + int(mins) * 60 + int(seconds)
if mod24:
result %= 24 * 3600
except:
result = np.nan
else:
try:
seconds = int(x)
if mod24:
seconds %= 24 * 3600
hours, remainder = divmod(seconds, 3600)
mins, secs = divmod(remainder, 60)
result = f"{hours:02d}:{mins:02d}:{secs:02d}"
except:
result = np.nan
return result |
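A few worked calls may help, since the hours field is allowed to exceed 23 (assuming the function and its numpy import are available as above):

```python
timestr_to_seconds("26:03:04")                       # 26*3600 + 3*60 + 4 = 93784
timestr_to_seconds("26:03:04", mod24=True)           # 93784 % 86400 = 7384
timestr_to_seconds(93784, inverse=True)              # '26:03:04'
timestr_to_seconds(93784, inverse=True, mod24=True)  # '02:03:04'
timestr_to_seconds("not a time")                     # nan (falls into the except branch)
```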
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])
if self._comp_dict is None or self._comp_dict_file != args['library']:
self._comp_dict_file = args['library']
self._comp_dict = make_catalog_comp_dict(sources=self._comp_dict_file,
basedir=NAME_FACTORY.base_dict['basedir'])
else:
print ("Using cached catalog dict from %s" % args['library'])
catalog_info_dict = self._comp_dict['catalog_info_dict']
comp_info_dict = self._comp_dict['comp_info_dict']
n_src_per_job = args['nsrc']
if args['make_xml']:
SrcmapsCatalog_SG._make_xml_files(catalog_info_dict, comp_info_dict)
for catalog_name, catalog_info in catalog_info_dict.items():
n_cat_src = len(catalog_info.catalog.table)
n_job = int(math.ceil(float(n_cat_src) / n_src_per_job))
for comp in components:
zcut = "zmax%i" % comp.zmax
key = comp.make_key('{ebin_name}_{evtype_name}')
name_keys = dict(zcut=zcut,
sourcekey=catalog_name,
ebin=comp.ebin_name,
psftype=comp.evtype_name,
coordsys=comp.coordsys,
irf_ver=NAME_FACTORY.irf_ver(),
mktime='none',
fullpath=True)
for i_job in range(n_job):
full_key = "%s_%02i" % (key, i_job)
srcmin = i_job * n_src_per_job
srcmax = min(srcmin + n_src_per_job, n_cat_src)
outfile = NAME_FACTORY.srcmaps(
**name_keys).replace('.fits', "_%02i.fits" % (i_job))
logfile = make_nfs_path(outfile.replace('.fits', '.log'))
job_configs[full_key] = dict(cmap=NAME_FACTORY.ccube(**name_keys),
expcube=NAME_FACTORY.ltcube(**name_keys),
irfs=NAME_FACTORY.irfs(**name_keys),
bexpmap=NAME_FACTORY.bexpcube(**name_keys),
outfile=outfile,
logfile=logfile,
srcmdl=catalog_info.srcmdl_name,
evtype=comp.evtype,
srcmin=srcmin,
srcmax=srcmax)
return job_configs | Hook to build job configurations | Below is the instruction that describes the task:
### Input:
Hook to build job configurations
### Response:
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])
if self._comp_dict is None or self._comp_dict_file != args['library']:
self._comp_dict_file = args['library']
self._comp_dict = make_catalog_comp_dict(sources=self._comp_dict_file,
basedir=NAME_FACTORY.base_dict['basedir'])
else:
print ("Using cached catalog dict from %s" % args['library'])
catalog_info_dict = self._comp_dict['catalog_info_dict']
comp_info_dict = self._comp_dict['comp_info_dict']
n_src_per_job = args['nsrc']
if args['make_xml']:
SrcmapsCatalog_SG._make_xml_files(catalog_info_dict, comp_info_dict)
for catalog_name, catalog_info in catalog_info_dict.items():
n_cat_src = len(catalog_info.catalog.table)
n_job = int(math.ceil(float(n_cat_src) / n_src_per_job))
for comp in components:
zcut = "zmax%i" % comp.zmax
key = comp.make_key('{ebin_name}_{evtype_name}')
name_keys = dict(zcut=zcut,
sourcekey=catalog_name,
ebin=comp.ebin_name,
psftype=comp.evtype_name,
coordsys=comp.coordsys,
irf_ver=NAME_FACTORY.irf_ver(),
mktime='none',
fullpath=True)
for i_job in range(n_job):
full_key = "%s_%02i" % (key, i_job)
srcmin = i_job * n_src_per_job
srcmax = min(srcmin + n_src_per_job, n_cat_src)
outfile = NAME_FACTORY.srcmaps(
**name_keys).replace('.fits', "_%02i.fits" % (i_job))
logfile = make_nfs_path(outfile.replace('.fits', '.log'))
job_configs[full_key] = dict(cmap=NAME_FACTORY.ccube(**name_keys),
expcube=NAME_FACTORY.ltcube(**name_keys),
irfs=NAME_FACTORY.irfs(**name_keys),
bexpmap=NAME_FACTORY.bexpcube(**name_keys),
outfile=outfile,
logfile=logfile,
srcmdl=catalog_info.srcmdl_name,
evtype=comp.evtype,
srcmin=srcmin,
srcmax=srcmax)
return job_configs |
def take(list_, index_list):
"""
Selects a subset of a list based on a list of indices.
This is similar to np.take, but pure python.
Args:
list_ (list): some indexable object
index_list (list, slice, int): some indexing object
Returns:
list or scalar: subset of the list
CommandLine:
python -m utool.util_list --test-take
SeeAlso:
ut.dict_take
ut.dict_subset
ut.none_take
ut.compress
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list_ = [0, 1, 2, 3]
>>> index_list = [2, 0]
>>> result = take(list_, index_list)
>>> print(result)
[2, 0]
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list_ = [0, 1, 2, 3]
>>> index = 2
>>> result = take(list_, index)
>>> print(result)
2
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list_ = [0, 1, 2, 3]
>>> index = slice(1, None, 2)
>>> result = take(list_, index)
>>> print(result)
[1, 3]
"""
try:
return [list_[index] for index in index_list]
except TypeError:
return list_[index_list] | Selects a subset of a list based on a list of indices.
This is similar to np.take, but pure python.
Args:
list_ (list): some indexable object
index_list (list, slice, int): some indexing object
Returns:
list or scalar: subset of the list
CommandLine:
python -m utool.util_list --test-take
SeeAlso:
ut.dict_take
ut.dict_subset
ut.none_take
ut.compress
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list_ = [0, 1, 2, 3]
>>> index_list = [2, 0]
>>> result = take(list_, index_list)
>>> print(result)
[2, 0]
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list_ = [0, 1, 2, 3]
>>> index = 2
>>> result = take(list_, index)
>>> print(result)
2
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list_ = [0, 1, 2, 3]
>>> index = slice(1, None, 2)
>>> result = take(list_, index)
>>> print(result)
[1, 3] | Below is the instruction that describes the task:
### Input:
Selects a subset of a list based on a list of indices.
This is similar to np.take, but pure python.
Args:
list_ (list): some indexable object
index_list (list, slice, int): some indexing object
Returns:
list or scalar: subset of the list
CommandLine:
python -m utool.util_list --test-take
SeeAlso:
ut.dict_take
ut.dict_subset
ut.none_take
ut.compress
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list_ = [0, 1, 2, 3]
>>> index_list = [2, 0]
>>> result = take(list_, index_list)
>>> print(result)
[2, 0]
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list_ = [0, 1, 2, 3]
>>> index = 2
>>> result = take(list_, index)
>>> print(result)
2
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list_ = [0, 1, 2, 3]
>>> index = slice(1, None, 2)
>>> result = take(list_, index)
>>> print(result)
[1, 3]
### Response:
def take(list_, index_list):
"""
Selects a subset of a list based on a list of indices.
This is similar to np.take, but pure python.
Args:
list_ (list): some indexable object
index_list (list, slice, int): some indexing object
Returns:
list or scalar: subset of the list
CommandLine:
python -m utool.util_list --test-take
SeeAlso:
ut.dict_take
ut.dict_subset
ut.none_take
ut.compress
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list_ = [0, 1, 2, 3]
>>> index_list = [2, 0]
>>> result = take(list_, index_list)
>>> print(result)
[2, 0]
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list_ = [0, 1, 2, 3]
>>> index = 2
>>> result = take(list_, index)
>>> print(result)
2
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list_ = [0, 1, 2, 3]
>>> index = slice(1, None, 2)
>>> result = take(list_, index)
>>> print(result)
[1, 3]
"""
try:
return [list_[index] for index in index_list]
except TypeError:
return list_[index_list] |
def validate(cls, mapper_spec):
"""Inherit docs."""
super(RawDatastoreInputReader, cls).validate(mapper_spec)
params = _get_params(mapper_spec)
entity_kind = params[cls.ENTITY_KIND_PARAM]
if "." in entity_kind:
logging.warning(
". detected in entity kind %s specified for reader %s."
"Assuming entity kind contains the dot.",
entity_kind, cls.__name__)
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
for f in filters:
if f[1] != "=":
raise BadReaderParamsError(
"Only equality filters are supported: %s", f) | Inherit docs. | Below is the the instruction that describes the task:
### Input:
Inherit docs.
### Response:
def validate(cls, mapper_spec):
"""Inherit docs."""
super(RawDatastoreInputReader, cls).validate(mapper_spec)
params = _get_params(mapper_spec)
entity_kind = params[cls.ENTITY_KIND_PARAM]
if "." in entity_kind:
logging.warning(
". detected in entity kind %s specified for reader %s."
"Assuming entity kind contains the dot.",
entity_kind, cls.__name__)
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
for f in filters:
if f[1] != "=":
raise BadReaderParamsError(
"Only equality filters are supported: %s", f) |
def get_input(input_func, input_str):
"""
Get input from the user given an input function and an input string
"""
val = input_func("Please enter your {0}: ".format(input_str))
while not val or not len(val.strip()):
val = input_func("You didn't enter a valid {0}, please try again: ".format(input_str))
return val | Get input from the user given an input function and an input string | Below is the instruction that describes the task:
### Input:
Get input from the user given an input function and an input string
### Response:
def get_input(input_func, input_str):
"""
Get input from the user given an input function and an input string
"""
val = input_func("Please enter your {0}: ".format(input_str))
while not val or not len(val.strip()):
val = input_func("You didn't enter a valid {0}, please try again: ".format(input_str))
return val |
def attowiki_distro_path():
"""return the absolute complete path where attowiki is located
.. todo:: use pkg_resources ?
"""
attowiki_path = os.path.abspath(__file__)
if attowiki_path[-1] != '/':
attowiki_path = attowiki_path[:attowiki_path.rfind('/')]
else:
attowiki_path = attowiki_path[:attowiki_path[:-1].rfind('/')]
return attowiki_path | return the absolute complete path where attowiki is located
.. todo:: use pkg_resources ? | Below is the instruction that describes the task:
### Input:
return the absolute complete path where attowiki is located
.. todo:: use pkg_resources ?
### Response:
def attowiki_distro_path():
"""return the absolute complete path where attowiki is located
.. todo:: use pkg_resources ?
"""
attowiki_path = os.path.abspath(__file__)
if attowiki_path[-1] != '/':
attowiki_path = attowiki_path[:attowiki_path.rfind('/')]
else:
attowiki_path = attowiki_path[:attowiki_path[:-1].rfind('/')]
return attowiki_path |
def _is_utf_8(txt):
"""
Check a string is utf-8 encoded
:param bytes txt: utf-8 string
:return: Whether the string\
is utf-8 encoded or not
:rtype: bool
"""
assert isinstance(txt, six.binary_type)
try:
_ = six.text_type(txt, 'utf-8')
except (TypeError, UnicodeDecodeError, UnicodeEncodeError):  # malformed byte sequences raise UnicodeDecodeError
return False
else:
return True | Check a string is utf-8 encoded
:param bytes txt: utf-8 string
:return: Whether the string\
is utf-8 encoded or not
:rtype: bool | Below is the instruction that describes the task:
### Input:
Check a string is utf-8 encoded
:param bytes txt: utf-8 string
:return: Whether the string\
is utf-8 encoded or not
:rtype: bool
### Response:
def _is_utf_8(txt):
"""
Check a string is utf-8 encoded
:param bytes txt: utf-8 string
:return: Whether the string\
is utf-8 encoded or not
:rtype: bool
"""
assert isinstance(txt, six.binary_type)
try:
_ = six.text_type(txt, 'utf-8')
except (TypeError, UnicodeDecodeError, UnicodeEncodeError):  # malformed byte sequences raise UnicodeDecodeError
return False
else:
return True |
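A short usage sketch (the helper asserts that it receives bytes, so text must be encoded first):

```python
_is_utf_8(b"plain ascii")              # True  - ASCII is a subset of UTF-8
_is_utf_8("héllo".encode("utf-8"))     # True  - valid multi-byte UTF-8
_is_utf_8("héllo".encode("latin-1"))   # False - 0xE9 cannot start a valid UTF-8 sequence here
_is_utf_8("héllo")                     # AssertionError on Python 3: str, not bytes
```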
def reply_webapi(self, text, attachments=None, as_user=True, in_thread=None):
"""
Send a reply to the sender using Web API
(This function supports formatted message
when using a bot integration)
If the message was sent in a thread, answer in a thread by default.
"""
if in_thread is None:
in_thread = 'thread_ts' in self.body
if in_thread:
self.send_webapi(text, attachments=attachments, as_user=as_user, thread_ts=self.thread_ts)
else:
text = self.gen_reply(text)
self.send_webapi(text, attachments=attachments, as_user=as_user) | Send a reply to the sender using Web API
(This function supports formatted message
when using a bot integration)
If the message was sent in a thread, answer in a thread by default. | Below is the instruction that describes the task:
### Input:
Send a reply to the sender using Web API
(This function supports formatted message
when using a bot integration)
If the message was sent in a thread, answer in a thread by default.
### Response:
def reply_webapi(self, text, attachments=None, as_user=True, in_thread=None):
"""
Send a reply to the sender using Web API
(This function supports formatted message
when using a bot integration)
If the message was sent in a thread, answer in a thread by default.
"""
if in_thread is None:
in_thread = 'thread_ts' in self.body
if in_thread:
self.send_webapi(text, attachments=attachments, as_user=as_user, thread_ts=self.thread_ts)
else:
text = self.gen_reply(text)
self.send_webapi(text, attachments=attachments, as_user=as_user) |
def _dump_cml_molecule(f, molecule):
"""Dump a single molecule to a CML file
Arguments:
| ``f`` -- a file-like object
| ``molecule`` -- a Molecule instance
"""
extra = getattr(molecule, "extra", {})
attr_str = " ".join("%s='%s'" % (key, value) for key, value in extra.items())
f.write(" <molecule id='%s' %s>\n" % (molecule.title, attr_str))
f.write(" <atomArray>\n")
atoms_extra = getattr(molecule, "atoms_extra", {})
for counter, number, coordinate in zip(range(molecule.size), molecule.numbers, molecule.coordinates/angstrom):
atom_extra = atoms_extra.get(counter, {})
attr_str = " ".join("%s='%s'" % (key, value) for key, value in atom_extra.items())
f.write(" <atom id='a%i' elementType='%s' x3='%s' y3='%s' z3='%s' %s />\n" % (
counter, periodic[number].symbol, coordinate[0], coordinate[1],
coordinate[2], attr_str,
))
f.write(" </atomArray>\n")
if molecule.graph is not None:
bonds_extra = getattr(molecule, "bonds_extra", {})
f.write(" <bondArray>\n")
for edge in molecule.graph.edges:
bond_extra = bonds_extra.get(edge, {})
attr_str = " ".join("%s='%s'" % (key, value) for key, value in bond_extra.items())
i1, i2 = edge
f.write(" <bond atomRefs2='a%i a%i' %s />\n" % (i1, i2, attr_str))
f.write(" </bondArray>\n")
f.write(" </molecule>\n") | Dump a single molecule to a CML file
Arguments:
| ``f`` -- a file-like object
| ``molecule`` -- a Molecule instance | Below is the instruction that describes the task:
### Input:
Dump a single molecule to a CML file
Arguments:
| ``f`` -- a file-like object
| ``molecule`` -- a Molecule instance
### Response:
def _dump_cml_molecule(f, molecule):
"""Dump a single molecule to a CML file
Arguments:
| ``f`` -- a file-like object
| ``molecule`` -- a Molecule instance
"""
extra = getattr(molecule, "extra", {})
attr_str = " ".join("%s='%s'" % (key, value) for key, value in extra.items())
f.write(" <molecule id='%s' %s>\n" % (molecule.title, attr_str))
f.write(" <atomArray>\n")
atoms_extra = getattr(molecule, "atoms_extra", {})
for counter, number, coordinate in zip(range(molecule.size), molecule.numbers, molecule.coordinates/angstrom):
atom_extra = atoms_extra.get(counter, {})
attr_str = " ".join("%s='%s'" % (key, value) for key, value in atom_extra.items())
f.write(" <atom id='a%i' elementType='%s' x3='%s' y3='%s' z3='%s' %s />\n" % (
counter, periodic[number].symbol, coordinate[0], coordinate[1],
coordinate[2], attr_str,
))
f.write(" </atomArray>\n")
if molecule.graph is not None:
bonds_extra = getattr(molecule, "bonds_extra", {})
f.write(" <bondArray>\n")
for edge in molecule.graph.edges:
bond_extra = bonds_extra.get(edge, {})
attr_str = " ".join("%s='%s'" % (key, value) for key, value in bond_extra.items())
i1, i2 = edge
f.write(" <bond atomRefs2='a%i a%i' %s />\n" % (i1, i2, attr_str))
f.write(" </bondArray>\n")
f.write(" </molecule>\n") |
def appt_exists(self, complex: str, house: str, appt: str) -> bool:
"""
Shortcut to check if appt exists in our database.
"""
try:
self.check_appt(complex, house, appt)
except exceptions.RumetrApptNotFound:
return False
return True | Shortcut to check if appt exists in our database. | Below is the the instruction that describes the task:
### Input:
Shortcut to check if appt exists in our database.
### Response:
def appt_exists(self, complex: str, house: str, appt: str) -> bool:
"""
Shortcut to check if appt exists in our database.
"""
try:
self.check_appt(complex, house, appt)
except exceptions.RumetrApptNotFound:
return False
return True |
def _kill_worker_threads(self):
"""This function coerces the consumer/worker threads to kill
themselves. When called by the queuing thread, one death token will
be placed on the queue for each thread. Each worker thread is always
looking for the death token. When it encounters it, it immediately
runs to completion without drawing anything more off the queue.
This is a blocking call. The thread using this function will wait for
all the worker threads to die."""
for x in range(self.number_of_threads):
self.task_queue.put((None, None))
self.logger.debug("waiting for standard worker threads to stop")
for t in self.thread_list:
t.join() | This function coerces the consumer/worker threads to kill
themselves. When called by the queuing thread, one death token will
be placed on the queue for each thread. Each worker thread is always
looking for the death token. When it encounters it, it immediately
runs to completion without drawing anything more off the queue.
This is a blocking call. The thread using this function will wait for
all the worker threads to die. | Below is the instruction that describes the task:
### Input:
This function coerces the consumer/worker threads to kill
themselves. When called by the queuing thread, one death token will
be placed on the queue for each thread. Each worker thread is always
looking for the death token. When it encounters it, it immediately
runs to completion without drawing anything more off the queue.
This is a blocking call. The thread using this function will wait for
all the worker threads to die.
### Response:
def _kill_worker_threads(self):
"""This function coerces the consumer/worker threads to kill
themselves. When called by the queuing thread, one death token will
be placed on the queue for each thread. Each worker thread is always
looking for the death token. When it encounters it, it immediately
runs to completion without drawing anything more off the queue.
This is a blocking call. The thread using this function will wait for
all the worker threads to die."""
for x in range(self.number_of_threads):
self.task_queue.put((None, None))
self.logger.debug("waiting for standard worker threads to stop")
for t in self.thread_list:
t.join() |
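The docstring describes the classic poison-pill shutdown pattern. A standalone sketch of the same idea with the standard library (names here are illustrative, not the original class's):

```python
import queue
import threading

task_queue = queue.Queue()
DEATH_TOKEN = (None, None)  # same sentinel shape as the snippet above

def worker():
    while True:
        job = task_queue.get()
        if job == DEATH_TOKEN:   # death token found: run to completion immediately
            break
        func, args = job
        func(*args)

threads = [threading.Thread(target=worker) for _ in range(3)]
for t in threads:
    t.start()

task_queue.put((print, ("hello from a worker",)))

for _ in threads:                # one death token per worker thread
    task_queue.put(DEATH_TOKEN)
for t in threads:                # blocking: wait for every worker to exit
    t.join()
```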
def measure_power(self, hz, duration, tag, offset=30):
"""Measure power consumption of the attached device.
Because it takes some time for the device to calm down after the usb
connection is cut, an offset is set for each measurement. The default
is 30s. The total time taken to measure will be (duration + offset).
Args:
hz: Number of samples to take per second.
duration: Number of seconds to take samples for in each step.
offset: The number of seconds of initial data to discard.
tag: A string that's the name of the collected data group.
Returns:
A MonsoonData object with the measured power data.
"""
num = duration * hz
oset = offset * hz
data = None
self.usb("auto")
time.sleep(1)
with self.dut.handle_usb_disconnect():
time.sleep(1)
try:
data = self.take_samples(hz, num, sample_offset=oset)
if not data:
raise MonsoonError(
"No data was collected in measurement %s." % tag)
data.tag = tag
self.dut.log.info("Measurement summary: %s", repr(data))
return data
finally:
self.mon.StopDataCollection()
self.log.info("Finished taking samples, reconnecting to dut.")
self.usb("on")
self.dut.adb.wait_for_device(timeout=DEFAULT_TIMEOUT_USB_ON)
# Wait for device to come back online.
time.sleep(10)
self.dut.log.info("Dut reconnected.") | Measure power consumption of the attached device.
Because it takes some time for the device to calm down after the usb
connection is cut, an offset is set for each measurement. The default
is 30s. The total time taken to measure will be (duration + offset).
Args:
hz: Number of samples to take per second.
duration: Number of seconds to take samples for in each step.
offset: The number of seconds of initial data to discard.
tag: A string that's the name of the collected data group.
Returns:
A MonsoonData object with the measured power data. | Below is the instruction that describes the task:
### Input:
Measure power consumption of the attached device.
Because it takes some time for the device to calm down after the usb
connection is cut, an offset is set for each measurement. The default
is 30s. The total time taken to measure will be (duration + offset).
Args:
hz: Number of samples to take per second.
duration: Number of seconds to take samples for in each step.
offset: The number of seconds of initial data to discard.
tag: A string that's the name of the collected data group.
Returns:
A MonsoonData object with the measured power data.
### Response:
def measure_power(self, hz, duration, tag, offset=30):
"""Measure power consumption of the attached device.
Because it takes some time for the device to calm down after the usb
connection is cut, an offset is set for each measurement. The default
is 30s. The total time taken to measure will be (duration + offset).
Args:
hz: Number of samples to take per second.
duration: Number of seconds to take samples for in each step.
offset: The number of seconds of initial data to discard.
tag: A string that's the name of the collected data group.
Returns:
A MonsoonData object with the measured power data.
"""
num = duration * hz
oset = offset * hz
data = None
self.usb("auto")
time.sleep(1)
with self.dut.handle_usb_disconnect():
time.sleep(1)
try:
data = self.take_samples(hz, num, sample_offset=oset)
if not data:
raise MonsoonError(
"No data was collected in measurement %s." % tag)
data.tag = tag
self.dut.log.info("Measurement summary: %s", repr(data))
return data
finally:
self.mon.StopDataCollection()
self.log.info("Finished taking samples, reconnecting to dut.")
self.usb("on")
self.dut.adb.wait_for_device(timeout=DEFAULT_TIMEOUT_USB_ON)
# Wait for device to come back online.
time.sleep(10)
self.dut.log.info("Dut reconnected.") |
def has_pkgs_signed_with(self, allowed_keys):
"""
Check signature of packages installed in image.
Raises exception when
* rpm binary is not installed in image
* parsing of rpm fails
* there are packages in image that are not signed with one of allowed keys
:param allowed_keys: list of allowed keys
:return: bool
"""
if not allowed_keys or not isinstance(allowed_keys, list):
raise ConuException("allowed_keys must be a list")
command = ['rpm', '-qa', '--qf', '%{name} %{SIGPGP:pgpsig}\n']
cont = self.run_via_binary(command=command)
try:
out = cont.logs_unicode()[:-1].split('\n')
check_signatures(out, allowed_keys)
finally:
cont.stop()
cont.delete()
return True | Check signature of packages installed in image.
Raises exception when
* rpm binary is not installed in image
* parsing of rpm fails
* there are packages in image that are not signed with one of allowed keys
:param allowed_keys: list of allowed keys
:return: bool | Below is the instruction that describes the task:
### Input:
Check signature of packages installed in image.
Raises exception when
* rpm binary is not installed in image
* parsing of rpm fails
* there are packages in image that are not signed with one of allowed keys
:param allowed_keys: list of allowed keys
:return: bool
### Response:
def has_pkgs_signed_with(self, allowed_keys):
"""
Check signature of packages installed in image.
Raises exception when
* rpm binary is not installed in image
* parsing of rpm fails
* there are packages in image that are not signed with one of allowed keys
:param allowed_keys: list of allowed keys
:return: bool
"""
if not allowed_keys or not isinstance(allowed_keys, list):
raise ConuException("allowed_keys must be a list")
command = ['rpm', '-qa', '--qf', '%{name} %{SIGPGP:pgpsig}\n']
cont = self.run_via_binary(command=command)
try:
out = cont.logs_unicode()[:-1].split('\n')
check_signatures(out, allowed_keys)
finally:
cont.stop()
cont.delete()
return True |
def check_permissions(self, request):
"""
Check if the request should be permitted.
Raises an appropriate exception if the request is not permitted.
"""
permissions = [permission() for permission in self.permission_classes]
for permission in permissions:
if not permission.has_permission(request):
raise PermissionDenied() | Check if the request should be permitted.
Raises an appropriate exception if the request is not permitted. | Below is the instruction that describes the task:
### Input:
Check if the request should be permitted.
Raises an appropriate exception if the request is not permitted.
### Response:
def check_permissions(self, request):
"""
Check if the request should be permitted.
Raises an appropriate exception if the request is not permitted.
"""
permissions = [permission() for permission in self.permission_classes]
for permission in permissions:
if not permission.has_permission(request):
raise PermissionDenied() |
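For context, a hedged sketch of what a permission class consumed by this loop might look like; the names below are illustrative, not taken from the original framework:

```python
class PermissionDenied(Exception):
    pass

class IsAuthenticated:
    """Allow a request only when a user is attached to it."""
    def has_permission(self, request):
        return getattr(request, "user", None) is not None

class View:
    permission_classes = [IsAuthenticated]

    def check_permissions(self, request):
        # Same shape as the method above: instantiate, then fail fast.
        for permission in [cls() for cls in self.permission_classes]:
            if not permission.has_permission(request):
                raise PermissionDenied()

class Request:
    def __init__(self, user=None):
        self.user = user

View().check_permissions(Request(user="alice"))   # passes silently
# View().check_permissions(Request())             # would raise PermissionDenied
```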
def encrypt_assertion(self, statement, enc_key, template, key_type='des-192', node_xpath=None):
"""
Will encrypt an assertion
:param statement: A XML document that contains the assertion to encrypt
:param enc_key: File name of a file containing the encryption key
:param template: A template for the encryption part to be added.
:param key_type: The type of session key to use.
:return: The encrypted text
"""
return self.crypto.encrypt_assertion(
statement, enc_key, template, key_type, node_xpath) | Will encrypt an assertion
:param statement: A XML document that contains the assertion to encrypt
:param enc_key: File name of a file containing the encryption key
:param template: A template for the encryption part to be added.
:param key_type: The type of session key to use.
:return: The encrypted text | Below is the instruction that describes the task:
### Input:
Will encrypt an assertion
:param statement: A XML document that contains the assertion to encrypt
:param enc_key: File name of a file containing the encryption key
:param template: A template for the encryption part to be added.
:param key_type: The type of session key to use.
:return: The encrypted text
### Response:
def encrypt_assertion(self, statement, enc_key, template, key_type='des-192', node_xpath=None):
"""
Will encrypt an assertion
:param statement: A XML document that contains the assertion to encrypt
:param enc_key: File name of a file containing the encryption key
:param template: A template for the encryption part to be added.
:param key_type: The type of session key to use.
:return: The encrypted text
"""
return self.crypto.encrypt_assertion(
statement, enc_key, template, key_type, node_xpath) |
def state_in_ec(self, ec_index):
'''Get the state of the component in an execution context.
@param ec_index The index of the execution context to check the state
in. This index is into the total array of contexts,
that is both owned and participating contexts. If the
value of ec_index is greater than the length of @ref
owned_ecs, that length is subtracted from ec_index and
the result used as an index into @ref
participating_ecs.
'''
with self._mutex:
if ec_index >= len(self.owned_ecs):
ec_index -= len(self.owned_ecs)
if ec_index >= len(self.participating_ecs):
raise exceptions.BadECIndexError(ec_index)
return self.participating_ec_states[ec_index]
else:
return self.owned_ec_states[ec_index] | Get the state of the component in an execution context.
@param ec_index The index of the execution context to check the state
in. This index is into the total array of contexts,
that is both owned and participating contexts. If the
value of ec_index is greater than the length of @ref
owned_ecs, that length is subtracted from ec_index and
the result used as an index into @ref
participating_ecs. | Below is the instruction that describes the task:
### Input:
Get the state of the component in an execution context.
@param ec_index The index of the execution context to check the state
in. This index is into the total array of contexts,
that is both owned and participating contexts. If the
value of ec_index is greater than the length of @ref
owned_ecs, that length is subtracted from ec_index and
the result used as an index into @ref
participating_ecs.
### Response:
def state_in_ec(self, ec_index):
'''Get the state of the component in an execution context.
@param ec_index The index of the execution context to check the state
in. This index is into the total array of contexts,
that is both owned and participating contexts. If the
value of ec_index is greater than the length of @ref
owned_ecs, that length is subtracted from ec_index and
the result used as an index into @ref
participating_ecs.
'''
with self._mutex:
if ec_index >= len(self.owned_ecs):
ec_index -= len(self.owned_ecs)
if ec_index >= len(self.participating_ecs):
raise exceptions.BadECIndexError(ec_index)
return self.participating_ec_states[ec_index]
else:
return self.owned_ec_states[ec_index] |
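The index arithmetic treats owned and participating contexts as one concatenated sequence. A toy illustration of that lookup rule (the data is made up):

```python
owned_ec_states = ["ACTIVE", "INACTIVE"]   # overall indices 0 and 1
participating_ec_states = ["ERROR"]        # overall index 2

def state_in_ec(ec_index):
    if ec_index >= len(owned_ec_states):
        ec_index -= len(owned_ec_states)   # re-base into the participating list
        if ec_index >= len(participating_ec_states):
            raise IndexError("bad EC index")
        return participating_ec_states[ec_index]
    return owned_ec_states[ec_index]

print(state_in_ec(1))   # 'INACTIVE'  (owned)
print(state_in_ec(2))   # 'ERROR'     (participating, local index 0)
```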
def dynamics_from_bundle_bs(b, times, compute=None, return_roche_euler=False, **kwargs):
"""
Parse parameters in the bundle and call :func:`dynamics`.
See :func:`dynamics` for more detailed information.
NOTE: you must either provide compute (the label) OR all relevant options
as kwargs (ltte)
Args:
b: (Bundle) the bundle with a set hierarchy
times: (list or array) times at which to run the dynamics
stepsize: (float, optional) stepsize for the integration
[default: 0.01]
orbiterror: (float, optional) orbiterror for the integration
[default: 1e-16]
ltte: (bool, default False) whether to account for light travel time effects.
Returns:
t, xs, ys, zs, vxs, vys, vzs. t is a numpy array of all times,
the remaining are a list of numpy arrays (a numpy array per
star - in order given by b.hierarchy.get_stars()) for the cartesian
positions and velocities of each star at those same times.
"""
stepsize = 0.01
orbiterror = 1e-16
computeps = b.get_compute(compute, check_visible=False, force_ps=True)
ltte = computeps.get_value('ltte', check_visible=False, **kwargs)
hier = b.hierarchy
starrefs = hier.get_stars()
orbitrefs = hier.get_orbits()
def mean_anom(t0, t0_perpass, period):
# TODO: somehow make this into a constraint where t0 and mean anom
# are both in the compute options if dynamic_method==nbody
# (one is constrained from the other and the orbit.... nvm, this gets ugly)
return 2 * np.pi * (t0 - t0_perpass) / period
masses = [b.get_value('mass', u.solMass, component=component, context='component') * c.G.to('AU3 / (Msun d2)').value for component in starrefs] # GM
smas = [b.get_value('sma', u.AU, component=component, context='component') for component in orbitrefs]
eccs = [b.get_value('ecc', component=component, context='component') for component in orbitrefs]
incls = [b.get_value('incl', u.rad, component=component, context='component') for component in orbitrefs]
per0s = [b.get_value('per0', u.rad, component=component, context='component') for component in orbitrefs]
long_ans = [b.get_value('long_an', u.rad, component=component, context='component') for component in orbitrefs]
t0_perpasses = [b.get_value('t0_perpass', u.d, component=component, context='component') for component in orbitrefs]
periods = [b.get_value('period', u.d, component=component, context='component') for component in orbitrefs]
vgamma = b.get_value('vgamma', context='system', unit=u.solRad/u.d)
t0 = b.get_value('t0', context='system', unit=u.d)
# mean_anoms = [mean_anom(t0, t0_perpass, period) for t0_perpass, period in zip(t0_perpasses, periods)]
mean_anoms = [b.get_value('mean_anom', u.rad, component=component, context='component') for component in orbitrefs]
return dynamics_bs(times, masses, smas, eccs, incls, per0s, long_ans, \
mean_anoms, t0, vgamma, stepsize, orbiterror, ltte,
return_roche_euler=return_roche_euler) | Parse parameters in the bundle and call :func:`dynamics`.
See :func:`dynamics` for more detailed information.
NOTE: you must either provide compute (the label) OR all relevant options
as kwargs (ltte)
Args:
b: (Bundle) the bundle with a set hierarchy
times: (list or array) times at which to run the dynamics
stepsize: (float, optional) stepsize for the integration
[default: 0.01]
orbiterror: (float, optional) orbiterror for the integration
[default: 1e-16]
ltte: (bool, default False) whether to account for light travel time effects.
Returns:
t, xs, ys, zs, vxs, vys, vzs. t is a numpy array of all times,
the remaining are a list of numpy arrays (a numpy array per
star - in order given by b.hierarchy.get_stars()) for the cartesian
positions and velocities of each star at those same times. | Below is the instruction that describes the task:
### Input:
Parse parameters in the bundle and call :func:`dynamics`.
See :func:`dynamics` for more detailed information.
NOTE: you must either provide compute (the label) OR all relevant options
as kwargs (ltte)
Args:
b: (Bundle) the bundle with a set hierarchy
times: (list or array) times at which to run the dynamics
stepsize: (float, optional) stepsize for the integration
[default: 0.01]
orbiterror: (float, optional) orbiterror for the integration
[default: 1e-16]
ltte: (bool, default False) whether to account for light travel time effects.
Returns:
t, xs, ys, zs, vxs, vys, vzs. t is a numpy array of all times,
the remaining are a list of numpy arrays (a numpy array per
star - in order given by b.hierarchy.get_stars()) for the cartesian
positions and velocities of each star at those same times.
### Response:
def dynamics_from_bundle_bs(b, times, compute=None, return_roche_euler=False, **kwargs):
"""
Parse parameters in the bundle and call :func:`dynamics`.
See :func:`dynamics` for more detailed information.
NOTE: you must either provide compute (the label) OR all relevant options
as kwargs (ltte)
Args:
b: (Bundle) the bundle with a set hierarchy
times: (list or array) times at which to run the dynamics
stepsize: (float, optional) stepsize for the integration
[default: 0.01]
orbiterror: (float, optional) orbiterror for the integration
[default: 1e-16]
ltte: (bool, default False) whether to account for light travel time effects.
Returns:
t, xs, ys, zs, vxs, vys, vzs. t is a numpy array of all times,
the remaining are a list of numpy arrays (a numpy array per
star - in order given by b.hierarchy.get_stars()) for the cartesian
positions and velocities of each star at those same times.
"""
stepsize = 0.01
orbiterror = 1e-16
computeps = b.get_compute(compute, check_visible=False, force_ps=True)
ltte = computeps.get_value('ltte', check_visible=False, **kwargs)
hier = b.hierarchy
starrefs = hier.get_stars()
orbitrefs = hier.get_orbits()
def mean_anom(t0, t0_perpass, period):
# TODO: somehow make this into a constraint where t0 and mean anom
# are both in the compute options if dynamic_method==nbody
# (one is constrained from the other and the orbit.... nvm, this gets ugly)
return 2 * np.pi * (t0 - t0_perpass) / period
masses = [b.get_value('mass', u.solMass, component=component, context='component') * c.G.to('AU3 / (Msun d2)').value for component in starrefs] # GM
smas = [b.get_value('sma', u.AU, component=component, context='component') for component in orbitrefs]
eccs = [b.get_value('ecc', component=component, context='component') for component in orbitrefs]
incls = [b.get_value('incl', u.rad, component=component, context='component') for component in orbitrefs]
per0s = [b.get_value('per0', u.rad, component=component, context='component') for component in orbitrefs]
long_ans = [b.get_value('long_an', u.rad, component=component, context='component') for component in orbitrefs]
t0_perpasses = [b.get_value('t0_perpass', u.d, component=component, context='component') for component in orbitrefs]
periods = [b.get_value('period', u.d, component=component, context='component') for component in orbitrefs]
vgamma = b.get_value('vgamma', context='system', unit=u.solRad/u.d)
t0 = b.get_value('t0', context='system', unit=u.d)
# mean_anoms = [mean_anom(t0, t0_perpass, period) for t0_perpass, period in zip(t0_perpasses, periods)]
mean_anoms = [b.get_value('mean_anom', u.rad, component=component, context='component') for component in orbitrefs]
return dynamics_bs(times, masses, smas, eccs, incls, per0s, long_ans, \
mean_anoms, t0, vgamma, stepsize, orbiterror, ltte,
return_roche_euler=return_roche_euler) |
def parse_application_name(setup_filename):
"""Parse a setup.py file for the name.
Returns:
name, or None
"""
with open(setup_filename, 'rt') as setup_file:
fst = RedBaron(setup_file.read())
name = None  # ensure a defined value when no name= keyword is found
for node in fst:
if (
node.type == 'atomtrailers' and
str(node.name) == 'setup'
):
for call in node.call:
if str(call.name) == 'name':
value = call.value
if hasattr(value, 'to_python'):
value = value.to_python()
name = str(value)
break
if name:
break
return name | Parse a setup.py file for the name.
Returns:
name, or None | Below is the instruction that describes the task:
### Input:
Parse a setup.py file for the name.
Returns:
name, or None
### Response:
def parse_application_name(setup_filename):
"""Parse a setup.py file for the name.
Returns:
name, or None
"""
with open(setup_filename, 'rt') as setup_file:
fst = RedBaron(setup_file.read())
name = None  # ensure a defined value when no name= keyword is found
for node in fst:
if (
node.type == 'atomtrailers' and
str(node.name) == 'setup'
):
for call in node.call:
if str(call.name) == 'name':
value = call.value
if hasattr(value, 'to_python'):
value = value.to_python()
name = str(value)
break
if name:
break
return name |
def open_ext_pack_file(self, path):
"""Attempts to open an extension pack file in preparation for
installation.
in path of type str
The path of the extension pack tarball. This can optionally be
followed by a "::SHA-256=hex-digit" of the tarball.
return file_p of type :class:`IExtPackFile`
The interface of the extension pack file object.
"""
if not isinstance(path, basestring):
raise TypeError("path can only be an instance of type basestring")
file_p = self._call("openExtPackFile",
in_p=[path])
file_p = IExtPackFile(file_p)
return file_p | Attempts to open an extension pack file in preparation for
installation.
in path of type str
The path of the extension pack tarball. This can optionally be
followed by a "::SHA-256=hex-digit" of the tarball.
return file_p of type :class:`IExtPackFile`
The interface of the extension pack file object. | Below is the instruction that describes the task:
### Input:
Attempts to open an extension pack file in preparation for
installation.
in path of type str
The path of the extension pack tarball. This can optionally be
followed by a "::SHA-256=hex-digit" of the tarball.
return file_p of type :class:`IExtPackFile`
The interface of the extension pack file object.
### Response:
def open_ext_pack_file(self, path):
"""Attempts to open an extension pack file in preparation for
installation.
in path of type str
The path of the extension pack tarball. This can optionally be
followed by a "::SHA-256=hex-digit" of the tarball.
return file_p of type :class:`IExtPackFile`
The interface of the extension pack file object.
"""
if not isinstance(path, basestring):
raise TypeError("path can only be an instance of type basestring")
file_p = self._call("openExtPackFile",
in_p=[path])
file_p = IExtPackFile(file_p)
return file_p |
def getlist(self, key, default=[]):
"""
Returns: The list of values for <key> if <key> is in the dictionary,
else <default>. If <default> is not provided, an empty list is
returned.
"""
if key in self:
return [node.value for node in self._map[key]]
return default | Returns: The list of values for <key> if <key> is in the dictionary,
else <default>. If <default> is not provided, an empty list is
returned. | Below is the instruction that describes the task:
### Input:
Returns: The list of values for <key> if <key> is in the dictionary,
else <default>. If <default> is not provided, an empty list is
returned.
### Response:
def getlist(self, key, default=[]):
"""
Returns: The list of values for <key> if <key> is in the dictionary,
else <default>. If <default> is not provided, an empty list is
returned.
"""
if key in self:
return [node.value for node in self._map[key]]
return default |
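The surrounding class is not shown, but the behaviour it implies (one key mapping to a list of value-holding nodes) can be sketched with a tiny stand-in, purely for illustration:

```python
from collections import namedtuple

_Node = namedtuple("_Node", "value")

class TinyMultiDict:
    """Minimal stand-in: each key maps to a list of nodes with a .value."""
    def __init__(self, pairs):
        self._map = {}
        for key, value in pairs:
            self._map.setdefault(key, []).append(_Node(value))

    def __contains__(self, key):
        return key in self._map

    def getlist(self, key, default=[]):
        if key in self:
            return [node.value for node in self._map[key]]
        return default

d = TinyMultiDict([("a", 1), ("a", 2), ("b", 3)])
print(d.getlist("a"))              # [1, 2]
print(d.getlist("missing"))        # []
print(d.getlist("missing", [0]))   # [0]
```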
def sort(expr, field = None, keytype=None, ascending=True):
"""
Sorts the vector.
If the field parameter is provided then the sort
operators on a vector of structs where the sort key
is the field of the struct.
Args:
expr (WeldObject)
field (Int)
"""
weld_obj = WeldObject(encoder_, decoder_)
expr_var = weld_obj.update(expr)
if isinstance(expr, WeldObject):
expr_var = expr.obj_id
weld_obj.dependencies[expr_var] = expr
if field is not None:
key_str = "x.$%s" % field
else:
key_str = "x"
if not ascending:
# The type is not necessarily f64.
key_str = key_str + "* %s(-1)" % keytype
weld_template = """
sort(%(expr)s, |x| %(key)s)
"""
weld_obj.weld_code = weld_template % {"expr":expr_var, "key":key_str}
return weld_obj | Sorts the vector.
If the field parameter is provided then the sort
operators on a vector of structs where the sort key
is the field of the struct.
Args:
expr (WeldObject)
field (Int) | Below is the instruction that describes the task:
### Input:
Sorts the vector.
If the field parameter is provided then the sort
operators on a vector of structs where the sort key
is the field of the struct.
Args:
expr (WeldObject)
field (Int)
### Response:
def sort(expr, field = None, keytype=None, ascending=True):
"""
Sorts the vector.
If the field parameter is provided then the sort
operators on a vector of structs where the sort key
is the field of the struct.
Args:
expr (WeldObject)
field (Int)
"""
weld_obj = WeldObject(encoder_, decoder_)
expr_var = weld_obj.update(expr)
if isinstance(expr, WeldObject):
expr_var = expr.obj_id
weld_obj.dependencies[expr_var] = expr
if field is not None:
key_str = "x.$%s" % field
else:
key_str = "x"
if not ascending:
# The type is not necessarily f64.
key_str = key_str + "* %s(-1)" % keytype
weld_template = """
sort(%(expr)s, |x| %(key)s)
"""
weld_obj.weld_code = weld_template % {"expr":expr_var, "key":key_str}
return weld_obj |
def list_subdomains(self, limit=None, offset=None):
"""
Returns a list of all subdomains for this domain.
"""
return self.manager.list_subdomains(self, limit=limit, offset=offset) | Returns a list of all subdomains for this domain. | Below is the the instruction that describes the task:
### Input:
Returns a list of all subdomains for this domain.
### Response:
def list_subdomains(self, limit=None, offset=None):
"""
Returns a list of all subdomains for this domain.
"""
return self.manager.list_subdomains(self, limit=limit, offset=offset) |
def from_str(cls, coordinate):
"""Build a TikZCoordinate object from a string."""
m = cls._coordinate_str_regex.match(coordinate)
if m is None:
raise ValueError('invalid coordinate string')
if m.group(1) == '++':
relative = True
else:
relative = False
return TikZCoordinate(
float(m.group(2)), float(m.group(4)), relative=relative) | Build a TikZCoordinate object from a string. | Below is the the instruction that describes the task:
### Input:
Build a TikZCoordinate object from a string.
### Response:
def from_str(cls, coordinate):
"""Build a TikZCoordinate object from a string."""
m = cls._coordinate_str_regex.match(coordinate)
if m is None:
raise ValueError('invalid coordinate string')
if m.group(1) == '++':
relative = True
else:
relative = False
return TikZCoordinate(
float(m.group(2)), float(m.group(4)), relative=relative) |
def saveData(self, dataOutputFile, categoriesOutputFile):
"""
Save the processed data and the associated category mapping.
@param dataOutputFile (str) Location to save data
@param categoriesOutputFile (str) Location to save category map
@return (str) Path to the saved data file iff
saveData() is successful.
"""
if self.records is None:
return False
if not dataOutputFile.endswith("csv"):
raise TypeError("data output file must be csv.")
if not categoriesOutputFile.endswith("json"):
raise TypeError("category output file must be json")
# Ensure directory exists
dataOutputDirectory = os.path.dirname(dataOutputFile)
if not os.path.exists(dataOutputDirectory):
os.makedirs(dataOutputDirectory)
categoriesOutputDirectory = os.path.dirname(categoriesOutputFile)
if not os.path.exists(categoriesOutputDirectory):
os.makedirs(categoriesOutputDirectory)
with open(dataOutputFile, "w") as f:
# Header
writer = csv.DictWriter(f, fieldnames=self.fieldNames)
writer.writeheader()
# Types
writer.writerow(self.types)
# Special characters
writer.writerow(self.specials)
for data in self.records:
for record in data:
writer.writerow(record)
with open(categoriesOutputFile, "w") as f:
f.write(json.dumps(self.categoryToId,
sort_keys=True,
indent=4,
separators=(",", ": ")))
return dataOutputFile | Save the processed data and the associated category mapping.
@param dataOutputFile (str) Location to save data
@param categoriesOutputFile (str) Location to save category map
@return (str) Path to the saved data file iff
saveData() is successful. | Below is the instruction that describes the task:
### Input:
Save the processed data and the associated category mapping.
@param dataOutputFile (str) Location to save data
@param categoriesOutputFile (str) Location to save category map
@return (str) Path to the saved data file iff
saveData() is successful.
### Response:
def saveData(self, dataOutputFile, categoriesOutputFile):
"""
Save the processed data and the associated category mapping.
@param dataOutputFile (str) Location to save data
@param categoriesOutputFile (str) Location to save category map
@return (str) Path to the saved data file iff
saveData() is successful.
"""
if self.records is None:
return False
if not dataOutputFile.endswith("csv"):
raise TypeError("data output file must be csv.")
if not categoriesOutputFile.endswith("json"):
raise TypeError("category output file must be json")
# Ensure directory exists
dataOutputDirectory = os.path.dirname(dataOutputFile)
if not os.path.exists(dataOutputDirectory):
os.makedirs(dataOutputDirectory)
categoriesOutputDirectory = os.path.dirname(categoriesOutputFile)
if not os.path.exists(categoriesOutputDirectory):
os.makedirs(categoriesOutputDirectory)
with open(dataOutputFile, "w") as f:
# Header
writer = csv.DictWriter(f, fieldnames=self.fieldNames)
writer.writeheader()
# Types
writer.writerow(self.types)
# Special characters
writer.writerow(self.specials)
for data in self.records:
for record in data:
writer.writerow(record)
with open(categoriesOutputFile, "w") as f:
f.write(json.dumps(self.categoryToId,
sort_keys=True,
indent=4,
separators=(",", ": ")))
return dataOutputFile |
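The writer above emits a three-row preamble (field names, field types, special flags) before the records, the layout NuPIC-style CSV readers expect; that framing is an inference from the code, and the field metadata below is made up for illustration:

```python
import csv
import io

fieldNames = ["_token", "_category", "_sequenceId"]
types = {"_token": "string", "_category": "list", "_sequenceId": "int"}
specials = {"_token": "", "_category": "C", "_sequenceId": "S"}

buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=fieldNames)
writer.writeheader()        # row 1: field names
writer.writerow(types)      # row 2: per-field types
writer.writerow(specials)   # row 3: special flags (C = category, S = sequence id)
writer.writerow({"_token": "hello", "_category": "0", "_sequenceId": 0})
print(buf.getvalue())
```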
def _cram_to_fastq_region(cram_file, work_dir, base_name, region, data):
"""Convert CRAM to fastq in a specified region.
"""
ref_file = tz.get_in(["reference", "fasta", "base"], data)
resources = config_utils.get_resources("bamtofastq", data["config"])
cores = tz.get_in(["config", "algorithm", "num_cores"], data, 1)
max_mem = config_utils.convert_to_bytes(resources.get("memory", "1G")) * cores
rext = "-%s" % region.replace(":", "_").replace("-", "_") if region else "full"
out_s, out_p1, out_p2, out_o1, out_o2 = [os.path.join(work_dir, "%s%s-%s.fq.gz" %
(base_name, rext, fext))
for fext in ["s1", "p1", "p2", "o1", "o2"]]
if not utils.file_exists(out_p1):
with file_transaction(data, out_s, out_p1, out_p2, out_o1, out_o2) as \
(tx_out_s, tx_out_p1, tx_out_p2, tx_out_o1, tx_out_o2):
cram_file = objectstore.cl_input(cram_file)
sortprefix = "%s-sort" % utils.splitext_plus(tx_out_s)[0]
cmd = ("bamtofastq filename={cram_file} inputformat=cram T={sortprefix} "
"gz=1 collate=1 colsbs={max_mem} exclude=SECONDARY,SUPPLEMENTARY "
"F={tx_out_p1} F2={tx_out_p2} S={tx_out_s} O={tx_out_o1} O2={tx_out_o2} "
"reference={ref_file}")
if region:
cmd += " ranges='{region}'"
do.run(cmd.format(**locals()), "CRAM to fastq %s" % region if region else "")
    return [[out_p1, out_p2, out_s]] | Convert CRAM to fastq in a specified region. | Below is the instruction that describes the task:
### Input:
Convert CRAM to fastq in a specified region.
### Response:
def _cram_to_fastq_region(cram_file, work_dir, base_name, region, data):
"""Convert CRAM to fastq in a specified region.
"""
ref_file = tz.get_in(["reference", "fasta", "base"], data)
resources = config_utils.get_resources("bamtofastq", data["config"])
cores = tz.get_in(["config", "algorithm", "num_cores"], data, 1)
max_mem = config_utils.convert_to_bytes(resources.get("memory", "1G")) * cores
rext = "-%s" % region.replace(":", "_").replace("-", "_") if region else "full"
out_s, out_p1, out_p2, out_o1, out_o2 = [os.path.join(work_dir, "%s%s-%s.fq.gz" %
(base_name, rext, fext))
for fext in ["s1", "p1", "p2", "o1", "o2"]]
if not utils.file_exists(out_p1):
with file_transaction(data, out_s, out_p1, out_p2, out_o1, out_o2) as \
(tx_out_s, tx_out_p1, tx_out_p2, tx_out_o1, tx_out_o2):
cram_file = objectstore.cl_input(cram_file)
sortprefix = "%s-sort" % utils.splitext_plus(tx_out_s)[0]
cmd = ("bamtofastq filename={cram_file} inputformat=cram T={sortprefix} "
"gz=1 collate=1 colsbs={max_mem} exclude=SECONDARY,SUPPLEMENTARY "
"F={tx_out_p1} F2={tx_out_p2} S={tx_out_s} O={tx_out_o1} O2={tx_out_o2} "
"reference={ref_file}")
if region:
cmd += " ranges='{region}'"
do.run(cmd.format(**locals()), "CRAM to fastq %s" % region if region else "")
return [[out_p1, out_p2, out_s]] |
def ladders(session, game_id):
"""Get a list of ladder IDs."""
if isinstance(game_id, str):
game_id = lookup_game_id(game_id)
lobbies = get_lobbies(session, game_id)
ladder_ids = set()
for lobby in lobbies:
ladder_ids |= set(lobby['ladders'])
    return list(ladder_ids) | Get a list of ladder IDs. | Below is the instruction that describes the task:
### Input:
Get a list of ladder IDs.
### Response:
def ladders(session, game_id):
"""Get a list of ladder IDs."""
if isinstance(game_id, str):
game_id = lookup_game_id(game_id)
lobbies = get_lobbies(session, game_id)
ladder_ids = set()
for lobby in lobbies:
ladder_ids |= set(lobby['ladders'])
return list(ladder_ids) |
def ProcessAllReadyRequests(self):
"""Processes all requests that are due to run.
Returns:
The number of processed requests.
"""
request_dict = data_store.REL_DB.ReadFlowRequestsReadyForProcessing(
self.rdf_flow.client_id,
self.rdf_flow.flow_id,
next_needed_request=self.rdf_flow.next_request_to_process)
if not request_dict:
return 0
processed = 0
while self.rdf_flow.next_request_to_process in request_dict:
request, responses = request_dict[self.rdf_flow.next_request_to_process]
self.RunStateMethod(request.next_state, request, responses)
self.rdf_flow.next_request_to_process += 1
processed += 1
self.completed_requests.append(request)
if processed and self.IsRunning() and not self.outstanding_requests:
self.RunStateMethod("End")
if (self.rdf_flow.flow_state == self.rdf_flow.FlowState.RUNNING and
not self.outstanding_requests):
self.MarkDone()
self.PersistState()
if not self.IsRunning():
# All requests and responses can now be deleted.
self._ClearAllRequestsAndResponses()
return processed | Processes all requests that are due to run.
Returns:
      The number of processed requests. | Below is the instruction that describes the task:
### Input:
Processes all requests that are due to run.
Returns:
The number of processed requests.
### Response:
def ProcessAllReadyRequests(self):
"""Processes all requests that are due to run.
Returns:
The number of processed requests.
"""
request_dict = data_store.REL_DB.ReadFlowRequestsReadyForProcessing(
self.rdf_flow.client_id,
self.rdf_flow.flow_id,
next_needed_request=self.rdf_flow.next_request_to_process)
if not request_dict:
return 0
processed = 0
while self.rdf_flow.next_request_to_process in request_dict:
request, responses = request_dict[self.rdf_flow.next_request_to_process]
self.RunStateMethod(request.next_state, request, responses)
self.rdf_flow.next_request_to_process += 1
processed += 1
self.completed_requests.append(request)
if processed and self.IsRunning() and not self.outstanding_requests:
self.RunStateMethod("End")
if (self.rdf_flow.flow_state == self.rdf_flow.FlowState.RUNNING and
not self.outstanding_requests):
self.MarkDone()
self.PersistState()
if not self.IsRunning():
# All requests and responses can now be deleted.
self._ClearAllRequestsAndResponses()
return processed |
def register_column(self,
column,
expr,
deltas=None,
checkpoints=None,
odo_kwargs=None):
"""Explicitly map a single bound column to a collection of blaze
expressions. The expressions need to have ``timestamp`` and ``as_of``
columns.
Parameters
----------
column : BoundColumn
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze`
"""
self._table_expressions[column] = ExprData(
expr,
deltas,
checkpoints,
odo_kwargs,
) | Explicitly map a single bound column to a collection of blaze
expressions. The expressions need to have ``timestamp`` and ``as_of``
columns.
Parameters
----------
column : BoundColumn
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
    :func:`zipline.pipeline.loaders.blaze.from_blaze` | Below is the instruction that describes the task:
### Input:
Explicitly map a single bound column to a collection of blaze
expressions. The expressions need to have ``timestamp`` and ``as_of``
columns.
Parameters
----------
column : BoundColumn
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze`
### Response:
def register_column(self,
column,
expr,
deltas=None,
checkpoints=None,
odo_kwargs=None):
"""Explicitly map a single bound column to a collection of blaze
expressions. The expressions need to have ``timestamp`` and ``as_of``
columns.
Parameters
----------
column : BoundColumn
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze`
"""
self._table_expressions[column] = ExprData(
expr,
deltas,
checkpoints,
odo_kwargs,
) |
def validator(
*fields: str, pre: bool = False, whole: bool = False, always: bool = False, check_fields: bool = True
) -> Callable[[AnyCallable], classmethod]:
"""
Decorate methods on the class indicating that they should be used to validate fields
:param fields: which field(s) the method should be called on
:param pre: whether or not this validator should be called before the standard validators (else after)
:param whole: for complex objects (sets, lists etc.) whether to validate individual elements or the whole object
:param always: whether this method and other validators should be called even if the value is missing
:param check_fields: whether to check that the fields actually exist on the model
"""
if not fields:
raise ConfigError('validator with no fields specified')
elif isinstance(fields[0], FunctionType):
raise ConfigError(
"validators should be used with fields and keyword arguments, not bare. "
"E.g. usage should be `@validator('<field_name>', ...)`"
)
def dec(f: AnyCallable) -> classmethod:
# avoid validators with duplicated names since without this validators can be overwritten silently
# which generally isn't the intended behaviour, don't run in ipython - see #312
if not in_ipython(): # pragma: no branch
ref = f.__module__ + '.' + f.__qualname__
if ref in _FUNCS:
raise ConfigError(f'duplicate validator function "{ref}"')
_FUNCS.add(ref)
f_cls = classmethod(f)
f_cls.__validator_config = fields, Validator(f, pre, whole, always, check_fields) # type: ignore
return f_cls
return dec | Decorate methods on the class indicating that they should be used to validate fields
:param fields: which field(s) the method should be called on
:param pre: whether or not this validator should be called before the standard validators (else after)
:param whole: for complex objects (sets, lists etc.) whether to validate individual elements or the whole object
:param always: whether this method and other validators should be called even if the value is missing
    :param check_fields: whether to check that the fields actually exist on the model | Below is the instruction that describes the task:
### Input:
Decorate methods on the class indicating that they should be used to validate fields
:param fields: which field(s) the method should be called on
:param pre: whether or not this validator should be called before the standard validators (else after)
:param whole: for complex objects (sets, lists etc.) whether to validate individual elements or the whole object
:param always: whether this method and other validators should be called even if the value is missing
:param check_fields: whether to check that the fields actually exist on the model
### Response:
def validator(
*fields: str, pre: bool = False, whole: bool = False, always: bool = False, check_fields: bool = True
) -> Callable[[AnyCallable], classmethod]:
"""
Decorate methods on the class indicating that they should be used to validate fields
:param fields: which field(s) the method should be called on
:param pre: whether or not this validator should be called before the standard validators (else after)
:param whole: for complex objects (sets, lists etc.) whether to validate individual elements or the whole object
:param always: whether this method and other validators should be called even if the value is missing
:param check_fields: whether to check that the fields actually exist on the model
"""
if not fields:
raise ConfigError('validator with no fields specified')
elif isinstance(fields[0], FunctionType):
raise ConfigError(
"validators should be used with fields and keyword arguments, not bare. "
"E.g. usage should be `@validator('<field_name>', ...)`"
)
def dec(f: AnyCallable) -> classmethod:
# avoid validators with duplicated names since without this validators can be overwritten silently
# which generally isn't the intended behaviour, don't run in ipython - see #312
if not in_ipython(): # pragma: no branch
ref = f.__module__ + '.' + f.__qualname__
if ref in _FUNCS:
raise ConfigError(f'duplicate validator function "{ref}"')
_FUNCS.add(ref)
f_cls = classmethod(f)
f_cls.__validator_config = fields, Validator(f, pre, whole, always, check_fields) # type: ignore
return f_cls
return dec |
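A brief usage sketch on a pydantic-style model (the model class and field below are hypothetical):

class User(BaseModel):
    name: str

    @validator('name')
    def name_not_blank(cls, v):
        # runs after the standard validators for 'name'
        if not v.strip():
            raise ValueError('name must not be blank')
        return v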
def write_yara(self, output_file):
"""
Write out yara signatures to a file.
"""
fout = open(output_file, 'wb')
fout.write('\n')
for iocid in self.yara_signatures:
signature = self.yara_signatures[iocid]
fout.write(signature)
fout.write('\n')
fout.close()
        return True | Write out yara signatures to a file. | Below is the instruction that describes the task:
### Input:
Write out yara signatures to a file.
### Response:
def write_yara(self, output_file):
"""
Write out yara signatures to a file.
"""
fout = open(output_file, 'wb')
fout.write('\n')
for iocid in self.yara_signatures:
signature = self.yara_signatures[iocid]
fout.write(signature)
fout.write('\n')
fout.close()
return True |
def ip_rtm_config_route_static_route_oif_vrf_static_route_oif_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm")
route = ET.SubElement(rtm_config, "route")
static_route_oif_vrf = ET.SubElement(route, "static-route-oif-vrf")
static_route_next_vrf_dest_key = ET.SubElement(static_route_oif_vrf, "static-route-next-vrf-dest")
static_route_next_vrf_dest_key.text = kwargs.pop('static_route_next_vrf_dest')
next_hop_vrf_key = ET.SubElement(static_route_oif_vrf, "next-hop-vrf")
next_hop_vrf_key.text = kwargs.pop('next_hop_vrf')
static_route_oif_type_key = ET.SubElement(static_route_oif_vrf, "static-route-oif-type")
static_route_oif_type_key.text = kwargs.pop('static_route_oif_type')
static_route_oif_name = ET.SubElement(static_route_oif_vrf, "static-route-oif-name")
static_route_oif_name.text = kwargs.pop('static_route_oif_name')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def ip_rtm_config_route_static_route_oif_vrf_static_route_oif_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm")
route = ET.SubElement(rtm_config, "route")
static_route_oif_vrf = ET.SubElement(route, "static-route-oif-vrf")
static_route_next_vrf_dest_key = ET.SubElement(static_route_oif_vrf, "static-route-next-vrf-dest")
static_route_next_vrf_dest_key.text = kwargs.pop('static_route_next_vrf_dest')
next_hop_vrf_key = ET.SubElement(static_route_oif_vrf, "next-hop-vrf")
next_hop_vrf_key.text = kwargs.pop('next_hop_vrf')
static_route_oif_type_key = ET.SubElement(static_route_oif_vrf, "static-route-oif-type")
static_route_oif_type_key.text = kwargs.pop('static_route_oif_type')
static_route_oif_name = ET.SubElement(static_route_oif_vrf, "static-route-oif-name")
static_route_oif_name.text = kwargs.pop('static_route_oif_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def create_archaius(self):
"""Create S3 bucket for Archaius."""
utils.banner("Creating S3")
        s3.init_properties(env=self.env, app=self.app) | Create S3 bucket for Archaius. | Below is the instruction that describes the task:
### Input:
Create S3 bucket for Archaius.
### Response:
def create_archaius(self):
"""Create S3 bucket for Archaius."""
utils.banner("Creating S3")
s3.init_properties(env=self.env, app=self.app) |
def copy(self):
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.identifiers = object.__new__(self.identifiers.__class__)
rv.identifiers.__dict__.update(self.identifiers.__dict__)
        return rv | Create a copy of the current one. | Below is the instruction that describes the task:
### Input:
Create a copy of the current one.
### Response:
def copy(self):
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.identifiers = object.__new__(self.identifiers.__class__)
rv.identifiers.__dict__.update(self.identifiers.__dict__)
return rv |
def split_action_id (id):
""" Splits an id in the toolset and specific rule parts. E.g.
'gcc.compile.c++' returns ('gcc', 'compile.c++')
"""
assert isinstance(id, basestring)
split = id.split ('.', 1)
toolset = split [0]
name = ''
if len (split) > 1:
name = split [1]
return (toolset, name) | Splits an id in the toolset and specific rule parts. E.g.
        'gcc.compile.c++' returns ('gcc', 'compile.c++') | Below is the instruction that describes the task:
### Input:
Splits an id in the toolset and specific rule parts. E.g.
'gcc.compile.c++' returns ('gcc', 'compile.c++')
### Response:
def split_action_id (id):
""" Splits an id in the toolset and specific rule parts. E.g.
'gcc.compile.c++' returns ('gcc', 'compile.c++')
"""
assert isinstance(id, basestring)
split = id.split ('.', 1)
toolset = split [0]
name = ''
if len (split) > 1:
name = split [1]
return (toolset, name) |
def routeevent(self, path, routinemethod, container = None, host = None, vhost = None, method = [b'GET', b'HEAD']):
'''
Route specified path to a routine factory
:param path: path to match, can be a regular expression
:param routinemethod: factory function routinemethod(event), event is the HttpRequestEvent
:param container: routine container. If None, default to self for bound method, or event.connection if not
:param host: if specified, only response to request to specified host
:param vhost: if specified, only response to request to specified vhost.
If not specified, response to dispatcher default vhost.
:param method: if specified, response to specified methods
'''
regm = re.compile(path + b'$')
if vhost is None:
vhost = self.vhost
if container is None:
container = getattr(routinemethod, '__self__', None)
def ismatch(event):
# Check vhost
if vhost is not None and getattr(event.createby, 'vhost', '') != vhost:
return False
# First parse the path
# RFC said we should accept absolute path
psplit = urlsplit(event.path)
if psplit.path[:1] != b'/':
# For security reason, ignore unrecognized path
return False
if psplit.netloc and host is not None and host != psplit.netloc:
# Maybe a proxy request, ignore it
return False
if getattr(event.createby, 'unquoteplus', True):
realpath = unquote_plus_to_bytes(psplit.path)
else:
realpath = unquote_to_bytes(psplit.path)
m = regm.match(realpath)
if m is None:
return False
event.realpath = realpath
event.querystring = psplit.query
event.path_match = m
return True
def func(event, scheduler):
try:
if event.canignore:
# Already processed
return
event.canignore = True
c = event.connection if container is None else container
c.subroutine(routinemethod(event), False)
except Exception:
pass
for m in method:
self.registerHandler(HttpRequestEvent.createMatcher(host, None, m, _ismatch = ismatch), func) | Route specified path to a routine factory
:param path: path to match, can be a regular expression
:param routinemethod: factory function routinemethod(event), event is the HttpRequestEvent
:param container: routine container. If None, default to self for bound method, or event.connection if not
:param host: if specified, only response to request to specified host
:param vhost: if specified, only response to request to specified vhost.
If not specified, response to dispatcher default vhost.
        :param method: if specified, response to specified methods | Below is the instruction that describes the task:
### Input:
Route specified path to a routine factory
:param path: path to match, can be a regular expression
:param routinemethod: factory function routinemethod(event), event is the HttpRequestEvent
:param container: routine container. If None, default to self for bound method, or event.connection if not
:param host: if specified, only response to request to specified host
:param vhost: if specified, only response to request to specified vhost.
If not specified, response to dispatcher default vhost.
:param method: if specified, response to specified methods
### Response:
def routeevent(self, path, routinemethod, container = None, host = None, vhost = None, method = [b'GET', b'HEAD']):
'''
Route specified path to a routine factory
:param path: path to match, can be a regular expression
:param routinemethod: factory function routinemethod(event), event is the HttpRequestEvent
:param container: routine container. If None, default to self for bound method, or event.connection if not
:param host: if specified, only response to request to specified host
:param vhost: if specified, only response to request to specified vhost.
If not specified, response to dispatcher default vhost.
:param method: if specified, response to specified methods
'''
regm = re.compile(path + b'$')
if vhost is None:
vhost = self.vhost
if container is None:
container = getattr(routinemethod, '__self__', None)
def ismatch(event):
# Check vhost
if vhost is not None and getattr(event.createby, 'vhost', '') != vhost:
return False
# First parse the path
# RFC said we should accept absolute path
psplit = urlsplit(event.path)
if psplit.path[:1] != b'/':
# For security reason, ignore unrecognized path
return False
if psplit.netloc and host is not None and host != psplit.netloc:
# Maybe a proxy request, ignore it
return False
if getattr(event.createby, 'unquoteplus', True):
realpath = unquote_plus_to_bytes(psplit.path)
else:
realpath = unquote_to_bytes(psplit.path)
m = regm.match(realpath)
if m is None:
return False
event.realpath = realpath
event.querystring = psplit.query
event.path_match = m
return True
def func(event, scheduler):
try:
if event.canignore:
# Already processed
return
event.canignore = True
c = event.connection if container is None else container
c.subroutine(routinemethod(event), False)
except Exception:
pass
for m in method:
self.registerHandler(HttpRequestEvent.createMatcher(host, None, m, _ismatch = ismatch), func) |
def saveVarsInMat(filename, varNamesStr, outOf=None, **opts):
"""Hacky convinience function to dump a couple of python variables in a
.mat file. See `awmstools.saveVars`.
"""
from mlabwrap import mlab
filename, varnames, outOf = __saveVarsHelper(
filename, varNamesStr, outOf, '.mat', **opts)
try:
for varname in varnames:
mlab._set(varname, outOf[varname])
mlab._do("save('%s','%s')" % (filename, "', '".join(varnames)), nout=0)
finally:
assert varnames
        mlab._do("clear('%s')" % "', '".join(varnames), nout=0) | Hacky convenience function to dump a couple of python variables in a
    .mat file. See `awmstools.saveVars`. | Below is the instruction that describes the task:
### Input:
Hacky convenience function to dump a couple of python variables in a
.mat file. See `awmstools.saveVars`.
### Response:
def saveVarsInMat(filename, varNamesStr, outOf=None, **opts):
"""Hacky convinience function to dump a couple of python variables in a
.mat file. See `awmstools.saveVars`.
"""
from mlabwrap import mlab
filename, varnames, outOf = __saveVarsHelper(
filename, varNamesStr, outOf, '.mat', **opts)
try:
for varname in varnames:
mlab._set(varname, outOf[varname])
mlab._do("save('%s','%s')" % (filename, "', '".join(varnames)), nout=0)
finally:
assert varnames
mlab._do("clear('%s')" % "', '".join(varnames), nout=0) |
def LoadImage(filename):
'''return an image from the images/ directory'''
app_dir = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(app_dir, 'images', filename)
    return Tkinter.PhotoImage(file=path) | return an image from the images/ directory | Below is the instruction that describes the task:
### Input:
return an image from the images/ directory
### Response:
def LoadImage(filename):
'''return an image from the images/ directory'''
app_dir = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(app_dir, 'images', filename)
return Tkinter.PhotoImage(file=path) |
def from_xml(cls, xml):
""" Returns a new Text from the given XML string.
"""
s = parse_string(xml)
        return Sentence(s.split("\n")[0], token=s.tags, language=s.language) | Returns a new Text from the given XML string. | Below is the instruction that describes the task:
### Input:
Returns a new Text from the given XML string.
### Response:
def from_xml(cls, xml):
""" Returns a new Text from the given XML string.
"""
s = parse_string(xml)
return Sentence(s.split("\n")[0], token=s.tags, language=s.language) |
def rule_command_cmdlist_interface_h_interface_ge_leaf_interface_gigabitethernet_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_h = ET.SubElement(cmdlist, "interface-h")
interface_ge_leaf = ET.SubElement(interface_h, "interface-ge-leaf")
interface = ET.SubElement(interface_ge_leaf, "interface")
gigabitethernet_leaf = ET.SubElement(interface, "gigabitethernet-leaf")
gigabitethernet_leaf.text = kwargs.pop('gigabitethernet_leaf')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def rule_command_cmdlist_interface_h_interface_ge_leaf_interface_gigabitethernet_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_h = ET.SubElement(cmdlist, "interface-h")
interface_ge_leaf = ET.SubElement(interface_h, "interface-ge-leaf")
interface = ET.SubElement(interface_ge_leaf, "interface")
gigabitethernet_leaf = ET.SubElement(interface, "gigabitethernet-leaf")
gigabitethernet_leaf.text = kwargs.pop('gigabitethernet_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def transformed(self, t):
"""
Transforms an m-dimensional Rect using t, an nxn matrix that can
transform vectors in the form: [x, y, z, …, 1].
The Rect is padded to n dimensions.
"""
assert t.shape[0] == t.shape[1]
extra_dimensions = t.shape[0] - self.dimensions - 1
def transform(a):
return t.dot(np.concatenate(
(a, [0] * extra_dimensions, [1]),
axis=0
))[:self.dimensions]
return Rect(transform(self.mins), transform(self.maxes)) | Transforms an m-dimensional Rect using t, an nxn matrix that can
transform vectors in the form: [x, y, z, …, 1].
        The Rect is padded to n dimensions. | Below is the instruction that describes the task:
### Input:
Transforms an m-dimensional Rect using t, an nxn matrix that can
transform vectors in the form: [x, y, z, …, 1].
The Rect is padded to n dimensions.
### Response:
def transformed(self, t):
"""
Transforms an m-dimensional Rect using t, an nxn matrix that can
transform vectors in the form: [x, y, z, …, 1].
The Rect is padded to n dimensions.
"""
assert t.shape[0] == t.shape[1]
extra_dimensions = t.shape[0] - self.dimensions - 1
def transform(a):
return t.dot(np.concatenate(
(a, [0] * extra_dimensions, [1]),
axis=0
))[:self.dimensions]
return Rect(transform(self.mins), transform(self.maxes)) |
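A small worked example, assuming Rect(mins, maxes) wraps two numpy arrays; a 2-D rectangle is shifted by a 3x3 homogeneous translation matrix:

import numpy as np
r = Rect(np.array([0.0, 0.0]), np.array([2.0, 1.0]))   # assumed constructor signature
t = np.array([[1.0, 0.0, 5.0],
              [0.0, 1.0, 3.0],
              [0.0, 0.0, 1.0]])                         # translate by (5, 3)
moved = r.transformed(t)                                # mins -> [5. 3.], maxes -> [7. 4.]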
def _expand(self, normalization, csphase, **kwargs):
"""Expand the grid into real spherical harmonics."""
if normalization.lower() == '4pi':
norm = 1
elif normalization.lower() == 'schmidt':
norm = 2
elif normalization.lower() == 'unnorm':
norm = 3
elif normalization.lower() == 'ortho':
norm = 4
else:
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Input value was {:s}."
.format(repr(normalization))
)
cilm = _shtools.SHExpandDH(self.data, norm=norm, csphase=csphase,
sampling=self.sampling,
**kwargs)
coeffs = SHCoeffs.from_array(cilm,
normalization=normalization.lower(),
csphase=csphase, copy=False)
        return coeffs | Expand the grid into real spherical harmonics. | Below is the instruction that describes the task:
### Input:
Expand the grid into real spherical harmonics.
### Response:
def _expand(self, normalization, csphase, **kwargs):
"""Expand the grid into real spherical harmonics."""
if normalization.lower() == '4pi':
norm = 1
elif normalization.lower() == 'schmidt':
norm = 2
elif normalization.lower() == 'unnorm':
norm = 3
elif normalization.lower() == 'ortho':
norm = 4
else:
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Input value was {:s}."
.format(repr(normalization))
)
cilm = _shtools.SHExpandDH(self.data, norm=norm, csphase=csphase,
sampling=self.sampling,
**kwargs)
coeffs = SHCoeffs.from_array(cilm,
normalization=normalization.lower(),
csphase=csphase, copy=False)
return coeffs |
def set_members(self, name, members, mode=None):
"""Configures the array of member interfaces for the Port-Channel
Args:
name(str): The Port-Channel interface name to configure the member
interfaces
members(list): The list of Ethernet interfaces that should be
member interfaces
mode(str): The LACP mode to configure the member interfaces to.
            Valid values are 'on', 'passive', 'active'. When there are
existing channel-group members and their lacp mode differs
from this attribute, all of those members will be removed and
then re-added using the specified lacp mode. If this attribute
is omitted, the existing lacp mode will be used for new
member additions.
Returns:
True if the operation succeeds otherwise False
"""
commands = list()
grpid = re.search(r'(\d+)', name).group()
current_members = self.get_members(name)
lacp_mode = self.get_lacp_mode(name)
if mode and mode != lacp_mode:
lacp_mode = mode
self.set_lacp_mode(grpid, lacp_mode)
# remove members from the current port-channel interface
for member in set(current_members).difference(members):
commands.append('interface %s' % member)
commands.append('no channel-group %s' % grpid)
# add new member interfaces to the port-channel interface
for member in set(members).difference(current_members):
commands.append('interface %s' % member)
commands.append('channel-group %s mode %s' % (grpid, lacp_mode))
return self.configure(commands) if commands else True | Configures the array of member interfaces for the Port-Channel
Args:
name(str): The Port-Channel interface name to configure the member
interfaces
members(list): The list of Ethernet interfaces that should be
member interfaces
mode(str): The LACP mode to configure the member interfaces to.
Valid values are 'on, 'passive', 'active'. When there are
existing channel-group members and their lacp mode differs
from this attribute, all of those members will be removed and
then re-added using the specified lacp mode. If this attribute
is omitted, the existing lacp mode will be used for new
member additions.
Returns:
            True if the operation succeeds otherwise False | Below is the instruction that describes the task:
### Input:
Configures the array of member interfaces for the Port-Channel
Args:
name(str): The Port-Channel interface name to configure the member
interfaces
members(list): The list of Ethernet interfaces that should be
member interfaces
mode(str): The LACP mode to configure the member interfaces to.
Valid values are 'on, 'passive', 'active'. When there are
existing channel-group members and their lacp mode differs
from this attribute, all of those members will be removed and
then re-added using the specified lacp mode. If this attribute
is omitted, the existing lacp mode will be used for new
member additions.
Returns:
True if the operation succeeds otherwise False
### Response:
def set_members(self, name, members, mode=None):
"""Configures the array of member interfaces for the Port-Channel
Args:
name(str): The Port-Channel interface name to configure the member
interfaces
members(list): The list of Ethernet interfaces that should be
member interfaces
mode(str): The LACP mode to configure the member interfaces to.
            Valid values are 'on', 'passive', 'active'. When there are
existing channel-group members and their lacp mode differs
from this attribute, all of those members will be removed and
then re-added using the specified lacp mode. If this attribute
is omitted, the existing lacp mode will be used for new
member additions.
Returns:
True if the operation succeeds otherwise False
"""
commands = list()
grpid = re.search(r'(\d+)', name).group()
current_members = self.get_members(name)
lacp_mode = self.get_lacp_mode(name)
if mode and mode != lacp_mode:
lacp_mode = mode
self.set_lacp_mode(grpid, lacp_mode)
# remove members from the current port-channel interface
for member in set(current_members).difference(members):
commands.append('interface %s' % member)
commands.append('no channel-group %s' % grpid)
# add new member interfaces to the port-channel interface
for member in set(members).difference(current_members):
commands.append('interface %s' % member)
commands.append('channel-group %s mode %s' % (grpid, lacp_mode))
return self.configure(commands) if commands else True |
def list(self, args, unknown):
"""List all addons that can be launched
:param args: arguments from the launch parser
:type args: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: None
"""
pm = plugins.PluginManager.get()
plugs = pm.get_all_plugins()
if not plugs:
print "No standalone addons found!"
return
print "Addons:"
for p in plugs:
if isinstance(p, plugins.JB_StandalonePlugin):
print "\t%s" % p.__class__.__name__ | List all addons that can be launched
:param args: arguments from the launch parser
:type args: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
        :raises: None | Below is the instruction that describes the task:
### Input:
List all addons that can be launched
:param args: arguments from the launch parser
:type args: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: None
### Response:
def list(self, args, unknown):
"""List all addons that can be launched
:param args: arguments from the launch parser
:type args: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: None
"""
pm = plugins.PluginManager.get()
plugs = pm.get_all_plugins()
if not plugs:
print "No standalone addons found!"
return
print "Addons:"
for p in plugs:
if isinstance(p, plugins.JB_StandalonePlugin):
print "\t%s" % p.__class__.__name__ |
def getElementsByClassName(self, className, root='root', useIndex=True):
'''
getElementsByClassName - Searches and returns all elements containing a given class name.
@param className <str> - A one-word class name
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.
@param useIndex <bool> If useIndex is True and class names are indexed [see constructor] only the index will be used. Otherwise a full search is performed.
'''
(root, isFromRoot) = self._handleRootArg(root)
if useIndex is True and self.indexClassNames is True:
elements = self._classNameMap.get(className, [])
if isFromRoot is False:
_hasTagInParentLine = self._hasTagInParentLine
elements = [x for x in elements if _hasTagInParentLine(x, root)]
return TagCollection(elements)
return AdvancedHTMLParser.getElementsByClassName(self, className, root) | getElementsByClassName - Searches and returns all elements containing a given class name.
@param className <str> - A one-word class name
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.
            @param useIndex <bool> If useIndex is True and class names are indexed [see constructor] only the index will be used. Otherwise a full search is performed. | Below is the instruction that describes the task:
### Input:
getElementsByClassName - Searches and returns all elements containing a given class name.
@param className <str> - A one-word class name
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.
@param useIndex <bool> If useIndex is True and class names are indexed [see constructor] only the index will be used. Otherwise a full search is performed.
### Response:
def getElementsByClassName(self, className, root='root', useIndex=True):
'''
getElementsByClassName - Searches and returns all elements containing a given class name.
@param className <str> - A one-word class name
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.
@param useIndex <bool> If useIndex is True and class names are indexed [see constructor] only the index will be used. Otherwise a full search is performed.
'''
(root, isFromRoot) = self._handleRootArg(root)
if useIndex is True and self.indexClassNames is True:
elements = self._classNameMap.get(className, [])
if isFromRoot is False:
_hasTagInParentLine = self._hasTagInParentLine
elements = [x for x in elements if _hasTagInParentLine(x, root)]
return TagCollection(elements)
return AdvancedHTMLParser.getElementsByClassName(self, className, root) |
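A short usage sketch; the indexed parser class and its parseStr() helper are assumed to come from the AdvancedHTMLParser package (treat both as assumptions):

parser = IndexedAdvancedHTMLParser()                      # assumed subclass defining this method
parser.parseStr('<div class="item">a</div><div class="item">b</div>')
items = parser.getElementsByClassName('item')             # served from the class-name index
print(len(items))                                         # 2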
def copy_from(self, other):
"""Copy properties from another ChemicalEntity
"""
# Need to copy all attributes, fields, relations
self.__attributes__ = {k: v.copy() for k, v in other.__attributes__.items()}
self.__fields__ = {k: v.copy() for k, v in other.__fields__.items()}
self.__relations__ = {k: v.copy() for k, v in other.__relations__.items()}
self.maps = {k: m.copy() for k, m in other.maps.items()}
        self.dimensions = other.dimensions.copy() | Copy properties from another ChemicalEntity | Below is the instruction that describes the task:
### Input:
Copy properties from another ChemicalEntity
### Response:
def copy_from(self, other):
"""Copy properties from another ChemicalEntity
"""
# Need to copy all attributes, fields, relations
self.__attributes__ = {k: v.copy() for k, v in other.__attributes__.items()}
self.__fields__ = {k: v.copy() for k, v in other.__fields__.items()}
self.__relations__ = {k: v.copy() for k, v in other.__relations__.items()}
self.maps = {k: m.copy() for k, m in other.maps.items()}
self.dimensions = other.dimensions.copy() |
def max_normal_germline_depth(in_file, params, somatic_info):
"""Calculate threshold for excluding potential heterozygotes based on normal depth.
"""
bcf_in = pysam.VariantFile(in_file)
depths = []
for rec in bcf_in:
stats = _is_possible_loh(rec, bcf_in, params, somatic_info)
if tz.get_in(["normal", "depth"], stats):
depths.append(tz.get_in(["normal", "depth"], stats))
if depths:
        return np.median(depths) * NORMAL_FILTER_PARAMS["max_depth_percent"] | Calculate threshold for excluding potential heterozygotes based on normal depth. | Below is the instruction that describes the task:
### Input:
Calculate threshold for excluding potential heterozygotes based on normal depth.
### Response:
def max_normal_germline_depth(in_file, params, somatic_info):
"""Calculate threshold for excluding potential heterozygotes based on normal depth.
"""
bcf_in = pysam.VariantFile(in_file)
depths = []
for rec in bcf_in:
stats = _is_possible_loh(rec, bcf_in, params, somatic_info)
if tz.get_in(["normal", "depth"], stats):
depths.append(tz.get_in(["normal", "depth"], stats))
if depths:
return np.median(depths) * NORMAL_FILTER_PARAMS["max_depth_percent"] |
def import_image_from_image(self, image, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from another image, like the ``FROM`` Dockerfile
parameter.
Args:
image (str): Image name to import from
repository (str): The repository to create
tag (str): The tag to apply
"""
return self.import_image(
image=image, repository=repository, tag=tag, changes=changes
) | Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from another image, like the ``FROM`` Dockerfile
parameter.
Args:
image (str): Image name to import from
repository (str): The repository to create
            tag (str): The tag to apply | Below is the instruction that describes the task:
### Input:
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from another image, like the ``FROM`` Dockerfile
parameter.
Args:
image (str): Image name to import from
repository (str): The repository to create
tag (str): The tag to apply
### Response:
def import_image_from_image(self, image, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from another image, like the ``FROM`` Dockerfile
parameter.
Args:
image (str): Image name to import from
repository (str): The repository to create
tag (str): The tag to apply
"""
return self.import_image(
image=image, repository=repository, tag=tag, changes=changes
) |
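A one-line usage sketch with a docker-py style APIClient (image and repository names are illustrative):

import docker
client = docker.APIClient(base_url='unix://var/run/docker.sock')
client.import_image_from_image('busybox:latest', repository='example/busybox', tag='imported')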
async def parseResults(self, api_data):
""" See CoverSource.parseResults. """
results = []
# get xml results list
xml_text = api_data.decode("utf-8")
xml_root = xml.etree.ElementTree.fromstring(xml_text)
status = xml_root.get("status")
if status != "ok":
raise Exception("Unexpected Last.fm response status: %s" % (status))
img_elements = xml_root.findall("album/image")
# build results from xml
thumbnail_url = None
thumbnail_size = None
for img_element in img_elements:
img_url = img_element.text
if not img_url:
# last.fm returns empty image tag for size it does not have
continue
lfm_size = img_element.get("size")
if lfm_size == "mega":
check_metadata = CoverImageMetadata.SIZE
else:
check_metadata = CoverImageMetadata.NONE
try:
size = __class__.SIZES[lfm_size]
except KeyError:
continue
if (size[0] <= MAX_THUMBNAIL_SIZE) and ((thumbnail_size is None) or (size[0] < thumbnail_size)):
thumbnail_url = img_url
thumbnail_size = size[0]
format = os.path.splitext(img_url)[1][1:].lower()
format = SUPPORTED_IMG_FORMATS[format]
results.append(LastFmCoverSourceResult(img_url,
size,
format,
thumbnail_url=thumbnail_url,
source=self,
check_metadata=check_metadata))
    return results | See CoverSource.parseResults. | Below is the instruction that describes the task:
### Input:
See CoverSource.parseResults.
### Response:
async def parseResults(self, api_data):
""" See CoverSource.parseResults. """
results = []
# get xml results list
xml_text = api_data.decode("utf-8")
xml_root = xml.etree.ElementTree.fromstring(xml_text)
status = xml_root.get("status")
if status != "ok":
raise Exception("Unexpected Last.fm response status: %s" % (status))
img_elements = xml_root.findall("album/image")
# build results from xml
thumbnail_url = None
thumbnail_size = None
for img_element in img_elements:
img_url = img_element.text
if not img_url:
# last.fm returns empty image tag for size it does not have
continue
lfm_size = img_element.get("size")
if lfm_size == "mega":
check_metadata = CoverImageMetadata.SIZE
else:
check_metadata = CoverImageMetadata.NONE
try:
size = __class__.SIZES[lfm_size]
except KeyError:
continue
if (size[0] <= MAX_THUMBNAIL_SIZE) and ((thumbnail_size is None) or (size[0] < thumbnail_size)):
thumbnail_url = img_url
thumbnail_size = size[0]
format = os.path.splitext(img_url)[1][1:].lower()
format = SUPPORTED_IMG_FORMATS[format]
results.append(LastFmCoverSourceResult(img_url,
size,
format,
thumbnail_url=thumbnail_url,
source=self,
check_metadata=check_metadata))
return results |
def iterfields(klass):
"""Iterate over the input class members and yield its TypedFields.
Args:
klass: A class (usually an Entity subclass).
Yields:
(class attribute name, TypedField instance) tuples.
"""
is_field = lambda x: isinstance(x, TypedField)
for name, field in inspect.getmembers(klass, predicate=is_field):
yield name, field | Iterate over the input class members and yield its TypedFields.
Args:
klass: A class (usually an Entity subclass).
Yields:
        (class attribute name, TypedField instance) tuples. | Below is the instruction that describes the task:
### Input:
Iterate over the input class members and yield its TypedFields.
Args:
klass: A class (usually an Entity subclass).
Yields:
(class attribute name, TypedField instance) tuples.
### Response:
def iterfields(klass):
"""Iterate over the input class members and yield its TypedFields.
Args:
klass: A class (usually an Entity subclass).
Yields:
(class attribute name, TypedField instance) tuples.
"""
is_field = lambda x: isinstance(x, TypedField)
for name, field in inspect.getmembers(klass, predicate=is_field):
yield name, field |
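A minimal sketch of iterating a hypothetical Entity subclass (the TypedField construction shown is purely illustrative):

class Address(Entity):                  # hypothetical subclass
    street = TypedField("Street")       # illustrative field definitions
    city = TypedField("City")

for name, field in iterfields(Address):
    print(name, type(field).__name__)   # e.g. city TypedField, street TypedField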
def subject(self) -> Optional[UnstructuredHeader]:
"""The ``Subject`` header."""
try:
return cast(UnstructuredHeader, self[b'subject'][0])
except (KeyError, IndexError):
        return None | The ``Subject`` header. | Below is the instruction that describes the task:
### Input:
The ``Subject`` header.
### Response:
def subject(self) -> Optional[UnstructuredHeader]:
"""The ``Subject`` header."""
try:
return cast(UnstructuredHeader, self[b'subject'][0])
except (KeyError, IndexError):
return None |
def typestring(obj):
"""Make a string for the object's type
Parameters
----------
obj : obj
Python object.
Returns
-------
`str`
String representation of the object's type. This is the type's
importable namespace.
Examples
--------
>>> import docutils.nodes
>>> para = docutils.nodes.paragraph()
>>> typestring(para)
'docutils.nodes.paragraph'
"""
obj_type = type(obj)
return '.'.join((obj_type.__module__, obj_type.__name__)) | Make a string for the object's type
Parameters
----------
obj : obj
Python object.
Returns
-------
`str`
String representation of the object's type. This is the type's
importable namespace.
Examples
--------
>>> import docutils.nodes
>>> para = docutils.nodes.paragraph()
>>> typestring(para)
    'docutils.nodes.paragraph' | Below is the instruction that describes the task:
### Input:
Make a string for the object's type
Parameters
----------
obj : obj
Python object.
Returns
-------
`str`
String representation of the object's type. This is the type's
importable namespace.
Examples
--------
>>> import docutils.nodes
>>> para = docutils.nodes.paragraph()
>>> typestring(para)
'docutils.nodes.paragraph'
### Response:
def typestring(obj):
"""Make a string for the object's type
Parameters
----------
obj : obj
Python object.
Returns
-------
`str`
String representation of the object's type. This is the type's
importable namespace.
Examples
--------
>>> import docutils.nodes
>>> para = docutils.nodes.paragraph()
>>> typestring(para)
'docutils.nodes.paragraph'
"""
obj_type = type(obj)
return '.'.join((obj_type.__module__, obj_type.__name__)) |
def clone(self):
"""Create a complete copy of the stream.
:returns: A new MaterialStream object."""
result = copy.copy(self)
result._compound_mfrs = copy.deepcopy(self._compound_mfrs)
return result | Create a complete copy of the stream.
        :returns: A new MaterialStream object. | Below is the instruction that describes the task:
### Input:
Create a complete copy of the stream.
:returns: A new MaterialStream object.
### Response:
def clone(self):
"""Create a complete copy of the stream.
:returns: A new MaterialStream object."""
result = copy.copy(self)
result._compound_mfrs = copy.deepcopy(self._compound_mfrs)
return result |
def nmtoken_from_string(text):
"""
Returns a Nmtoken from a string.
It is useful to produce XHTML valid values for the 'name'
attribute of an anchor.
CAUTION: the function is surjective: 2 different texts might lead to
the same result. This is improbable on a single page.
Nmtoken is the type that is a mixture of characters supported in
attributes such as 'name' in HTML 'a' tag. For example,
    <a name="Articles%20%26%20Preprints"> should be transformed to
<a name="Articles372037263720Preprints"> using this function.
http://www.w3.org/TR/2000/REC-xml-20001006#NT-Nmtoken
Also note that this function filters more characters than
specified by the definition of Nmtoken ('CombiningChar' and
'Extender' charsets are filtered out).
"""
text = text.replace('-', '--')
return ''.join([(((not char.isalnum() and char not in [
'.', '-', '_', ':'
]) and str(ord(char))) or char) for char in text]) | Returns a Nmtoken from a string.
It is useful to produce XHTML valid values for the 'name'
attribute of an anchor.
CAUTION: the function is surjective: 2 different texts might lead to
the same result. This is improbable on a single page.
Nmtoken is the type that is a mixture of characters supported in
attributes such as 'name' in HTML 'a' tag. For example,
    <a name="Articles%20%26%20Preprints"> should be transformed to
<a name="Articles372037263720Preprints"> using this function.
http://www.w3.org/TR/2000/REC-xml-20001006#NT-Nmtoken
Also note that this function filters more characters than
specified by the definition of Nmtoken ('CombiningChar' and
    'Extender' charsets are filtered out). | Below is the instruction that describes the task:
### Input:
Returns a Nmtoken from a string.
It is useful to produce XHTML valid values for the 'name'
attribute of an anchor.
CAUTION: the function is surjective: 2 different texts might lead to
the same result. This is improbable on a single page.
Nmtoken is the type that is a mixture of characters supported in
attributes such as 'name' in HTML 'a' tag. For example,
<a name="Articles%20%26%20Preprints"> should be transformed to
<a name="Articles372037263720Preprints"> using this function.
http://www.w3.org/TR/2000/REC-xml-20001006#NT-Nmtoken
Also note that this function filters more characters than
specified by the definition of Nmtoken ('CombiningChar' and
'Extender' charsets are filtered out).
### Response:
def nmtoken_from_string(text):
"""
Returns a Nmtoken from a string.
It is useful to produce XHTML valid values for the 'name'
attribute of an anchor.
CAUTION: the function is surjective: 2 different texts might lead to
the same result. This is improbable on a single page.
Nmtoken is the type that is a mixture of characters supported in
attributes such as 'name' in HTML 'a' tag. For example,
    <a name="Articles%20%26%20Preprints"> should be transformed to
<a name="Articles372037263720Preprints"> using this function.
http://www.w3.org/TR/2000/REC-xml-20001006#NT-Nmtoken
Also note that this function filters more characters than
specified by the definition of Nmtoken ('CombiningChar' and
'Extender' charsets are filtered out).
"""
text = text.replace('-', '--')
return ''.join([(((not char.isalnum() and char not in [
'.', '-', '_', ':'
]) and str(ord(char))) or char) for char in text]) |
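A worked example matching the docstring: every character outside the allowed set is replaced by its ordinal, so '%' becomes '37' and a space becomes '32':

print(nmtoken_from_string("Articles%20%26%20Preprints"))
# Articles372037263720Preprints
print(nmtoken_from_string("a-b c"))
# a--b32c   (dashes are doubled first, then the space becomes 32)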
def p_statement_break(p):
'''statement : BREAK SEMI
| BREAK expr SEMI'''
if len(p) == 3:
p[0] = ast.Break(None, lineno=p.lineno(1))
else:
p[0] = ast.Break(p[2], lineno=p.lineno(1)) | statement : BREAK SEMI
                 | BREAK expr SEMI | Below is the instruction that describes the task:
### Input:
statement : BREAK SEMI
| BREAK expr SEMI
### Response:
def p_statement_break(p):
'''statement : BREAK SEMI
| BREAK expr SEMI'''
if len(p) == 3:
p[0] = ast.Break(None, lineno=p.lineno(1))
else:
p[0] = ast.Break(p[2], lineno=p.lineno(1)) |
def path(self):
"""str: URL path for the model's APIs."""
return "/projects/%s/datasets/%s/models/%s" % (
self._proto.project_id,
self._proto.dataset_id,
self._proto.model_id,
        ) | str: URL path for the model's APIs. | Below is the instruction that describes the task:
### Input:
str: URL path for the model's APIs.
### Response:
def path(self):
"""str: URL path for the model's APIs."""
return "/projects/%s/datasets/%s/models/%s" % (
self._proto.project_id,
self._proto.dataset_id,
self._proto.model_id,
) |
def split_into(max_num_chunks, list_to_chunk):
"""
Yields the list with a max total size of max_num_chunks
"""
max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
    return chunks_of(max_chunk_size, list_to_chunk) | Yields the list with a max total size of max_num_chunks | Below is the instruction that describes the task:
### Input:
Yields the list with a max total size of max_num_chunks
### Response:
def split_into(max_num_chunks, list_to_chunk):
"""
Yields the list with a max total size of max_num_chunks
"""
max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
return chunks_of(max_chunk_size, list_to_chunk) |
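A quick sketch of the intended behaviour, assuming chunks_of(n, lst) yields consecutive slices of length at most n:

list(split_into(3, list(range(10))))
# max_chunk_size = ceil(10 / 3) = 4
# -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]   (assuming chunks_of slices in order)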
def ranking_metric_tensor(exprs, method, permutation_num, pos, neg, classes,
ascending, rs=np.random.RandomState()):
"""Build shuffled ranking matrix when permutation_type eq to phenotype.
:param exprs: gene_expression DataFrame, gene_name indexed.
:param str method: calculate correlation or ranking. methods including:
1. 'signal_to_noise'.
2. 't_test'.
3. 'ratio_of_classes' (also referred to as fold change).
4. 'diff_of_classes'.
5. 'log2_ratio_of_classes'.
    :param int permutation_num: how many times of classes is being shuffled
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of
dataframe belongs to what class of phenotype.
:param bool ascending: bool. Sort ascending vs. descending.
:return:
returns two 2d ndarray with shape (nperm, gene_num).
| cor_mat_indices: the indices of sorted and permutated (exclude last row) ranking matrix.
| cor_mat: sorted and permutated (exclude last row) ranking matrix.
"""
# S: samples, G: gene number
G, S = exprs.shape
# genes = exprs.index.values
expr_mat = exprs.values.T
perm_cor_tensor = np.tile(expr_mat, (permutation_num+1,1,1))
# random shuffle on the first dim, last matrix is not shuffled
for arr in perm_cor_tensor[:-1]: rs.shuffle(arr)
classes = np.array(classes)
pos = classes == pos
neg = classes == neg
pos_cor_mean = perm_cor_tensor[:,pos,:].mean(axis=1)
neg_cor_mean = perm_cor_tensor[:,neg,:].mean(axis=1)
pos_cor_std = perm_cor_tensor[:,pos,:].std(axis=1, ddof=1)
neg_cor_std = perm_cor_tensor[:,neg,:].std(axis=1, ddof=1)
if method == 'signal_to_noise':
cor_mat = (pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std)
elif method == 't_test':
denom = 1.0/G
cor_mat = (pos_cor_mean - neg_cor_mean)/ np.sqrt(denom*pos_cor_std**2 + denom*neg_cor_std**2)
elif method == 'ratio_of_classes':
cor_mat = pos_cor_mean / neg_cor_mean
elif method == 'diff_of_classes':
cor_mat = pos_cor_mean - neg_cor_mean
elif method == 'log2_ratio_of_classes':
cor_mat = np.log2(pos_cor_mean / neg_cor_mean)
else:
logging.error("Please provide correct method name!!!")
sys.exit(0)
    # return matrix[nperm+1, perm_cors]
cor_mat_ind = cor_mat.argsort()
# ndarray: sort in place
cor_mat.sort()
# genes_mat = genes.take(cor_mat_ind)
if ascending: return cor_mat_ind, cor_mat
# descending order of ranking and genes
# return genes_mat[:,::-1], cor_mat[:,::-1]
return cor_mat_ind[:, ::-1], cor_mat[:, ::-1] | Build shuffled ranking matrix when permutation_type eq to phenotype.
:param exprs: gene_expression DataFrame, gene_name indexed.
:param str method: calculate correlation or ranking. methods including:
1. 'signal_to_noise'.
2. 't_test'.
3. 'ratio_of_classes' (also referred to as fold change).
4. 'diff_of_classes'.
5. 'log2_ratio_of_classes'.
    :param int permutation_num: how many times of classes is being shuffled
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of
dataframe belongs to what class of phenotype.
:param bool ascending: bool. Sort ascending vs. descending.
:return:
returns two 2d ndarray with shape (nperm, gene_num).
| cor_mat_indices: the indices of sorted and permutated (exclude last row) ranking matrix.
    | cor_mat: sorted and permutated (exclude last row) ranking matrix. | Below is the instruction that describes the task:
### Input:
Build shuffled ranking matrix when permutation_type eq to phenotype.
:param exprs: gene_expression DataFrame, gene_name indexed.
:param str method: calculate correlation or ranking. methods including:
1. 'signal_to_noise'.
2. 't_test'.
3. 'ratio_of_classes' (also referred to as fold change).
4. 'diff_of_classes'.
5. 'log2_ratio_of_classes'.
:param int permutation_num: how many times of classes is being shuffled
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of
dataframe belongs to what class of phenotype.
:param bool ascending: bool. Sort ascending vs. descending.
:return:
returns two 2d ndarray with shape (nperm, gene_num).
| cor_mat_indices: the indices of sorted and permutated (exclude last row) ranking matrix.
| cor_mat: sorted and permutated (exclude last row) ranking matrix.
### Response:
def ranking_metric_tensor(exprs, method, permutation_num, pos, neg, classes,
ascending, rs=np.random.RandomState()):
"""Build shuffled ranking matrix when permutation_type eq to phenotype.
:param exprs: gene_expression DataFrame, gene_name indexed.
:param str method: calculate correlation or ranking. methods including:
1. 'signal_to_noise'.
2. 't_test'.
3. 'ratio_of_classes' (also referred to as fold change).
4. 'diff_of_classes'.
5. 'log2_ratio_of_classes'.
    :param int permutation_num: how many times the class labels are shuffled
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of
dataframe belongs to what class of phenotype.
:param bool ascending: bool. Sort ascending vs. descending.
:return:
returns two 2d ndarray with shape (nperm, gene_num).
| cor_mat_indices: the indices of sorted and permutated (exclude last row) ranking matrix.
| cor_mat: sorted and permutated (exclude last row) ranking matrix.
"""
# S: samples, G: gene number
G, S = exprs.shape
# genes = exprs.index.values
expr_mat = exprs.values.T
perm_cor_tensor = np.tile(expr_mat, (permutation_num+1,1,1))
# random shuffle on the first dim, last matrix is not shuffled
for arr in perm_cor_tensor[:-1]: rs.shuffle(arr)
classes = np.array(classes)
pos = classes == pos
neg = classes == neg
pos_cor_mean = perm_cor_tensor[:,pos,:].mean(axis=1)
neg_cor_mean = perm_cor_tensor[:,neg,:].mean(axis=1)
pos_cor_std = perm_cor_tensor[:,pos,:].std(axis=1, ddof=1)
neg_cor_std = perm_cor_tensor[:,neg,:].std(axis=1, ddof=1)
if method == 'signal_to_noise':
cor_mat = (pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std)
elif method == 't_test':
denom = 1.0/G
cor_mat = (pos_cor_mean - neg_cor_mean)/ np.sqrt(denom*pos_cor_std**2 + denom*neg_cor_std**2)
elif method == 'ratio_of_classes':
cor_mat = pos_cor_mean / neg_cor_mean
elif method == 'diff_of_classes':
cor_mat = pos_cor_mean - neg_cor_mean
elif method == 'log2_ratio_of_classes':
cor_mat = np.log2(pos_cor_mean / neg_cor_mean)
else:
logging.error("Please provide correct method name!!!")
sys.exit(0)
    # return matrix[nperm+1, perm_cors]
cor_mat_ind = cor_mat.argsort()
# ndarray: sort in place
cor_mat.sort()
# genes_mat = genes.take(cor_mat_ind)
if ascending: return cor_mat_ind, cor_mat
# descending order of ranking and genes
# return genes_mat[:,::-1], cor_mat[:,::-1]
return cor_mat_ind[:, ::-1], cor_mat[:, ::-1] |
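For orientation, a minimal usage sketch of ranking_metric_tensor as defined above; the toy expression table, class labels, and permutation count below are invented for illustration, and pandas/numpy are assumed to be importable.

import numpy as np
import pandas as pd

# Toy data: 5 genes x 6 samples, two phenotype classes of 3 samples each.
rs = np.random.RandomState(0)
exprs = pd.DataFrame(rs.rand(5, 6),
                     index=['gene%d' % i for i in range(5)],
                     columns=['sample%d' % i for i in range(6)])
classes = ['pos', 'pos', 'pos', 'neg', 'neg', 'neg']

indices, ranked = ranking_metric_tensor(exprs, method='signal_to_noise',
                                        permutation_num=10, pos='pos', neg='neg',
                                        classes=classes, ascending=False, rs=rs)
print(indices.shape, ranked.shape)   # (11, 5) (11, 5): 10 permutations plus the unshuffled row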
def ws_db004(self, value=None):
""" Corresponds to IDD Field `ws_db004`
Mean wind speed coincident with 0.4% dry-bulb temperature
Args:
value (float): value for IDD Field `ws_db004`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
                raise ValueError('value {} needs to be of type float '
'for field `ws_db004`'.format(value))
self._ws_db004 = value | Corresponds to IDD Field `ws_db004`
Mean wind speed coincident with 0.4% dry-bulb temperature
Args:
value (float): value for IDD Field `ws_db004`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | Below is the the instruction that describes the task:
### Input:
Corresponds to IDD Field `ws_db004`
Mean wind speed coincident with 0.4% dry-bulb temperature
Args:
value (float): value for IDD Field `ws_db004`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
### Response:
def ws_db004(self, value=None):
""" Corresponds to IDD Field `ws_db004`
Mean wind speed coincident with 0.4% dry-bulb temperature
Args:
value (float): value for IDD Field `ws_db004`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
                raise ValueError('value {} needs to be of type float '
'for field `ws_db004`'.format(value))
self._ws_db004 = value |
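The validation above is easy to exercise in isolation; the helper below is a hypothetical standalone rewrite of just that try/except (its name and signature are not part of the original class).

def _validate_float(value, field_name):
    """Coerce to float or raise ValueError; a missing value (None) passes through."""
    if value is None:
        return None
    try:
        return float(value)
    except ValueError:
        raise ValueError('value {} needs to be of type float '
                         'for field `{}`'.format(value, field_name))

print(_validate_float('4.9', 'ws_db004'))   # 4.9
print(_validate_float(None, 'ws_db004'))    # None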
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
:param alias: string, an alias name to be set for the DataFrame.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
"""
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx) | Returns a new :class:`DataFrame` with an alias set.
:param alias: string, an alias name to be set for the DataFrame.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)] | Below is the the instruction that describes the task:
### Input:
Returns a new :class:`DataFrame` with an alias set.
:param alias: string, an alias name to be set for the DataFrame.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
### Response:
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
:param alias: string, an alias name to be set for the DataFrame.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
"""
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx) |
def reshape_text(buffer, from_row, to_row):
"""
Reformat text, taking the width into account.
`to_row` is included.
(Vi 'gq' operator.)
"""
lines = buffer.text.splitlines(True)
lines_before = lines[:from_row]
lines_after = lines[to_row + 1:]
lines_to_reformat = lines[from_row:to_row + 1]
if lines_to_reformat:
# Take indentation from the first line.
length = re.search(r'^\s*', lines_to_reformat[0]).end()
indent = lines_to_reformat[0][:length].replace('\n', '')
# Now, take all the 'words' from the lines to be reshaped.
words = ''.join(lines_to_reformat).split()
# And reshape.
width = (buffer.text_width or 80) - len(indent)
reshaped_text = [indent]
current_width = 0
for w in words:
if current_width:
if len(w) + current_width + 1 > width:
reshaped_text.append('\n')
reshaped_text.append(indent)
current_width = 0
else:
reshaped_text.append(' ')
current_width += 1
reshaped_text.append(w)
current_width += len(w)
if reshaped_text[-1] != '\n':
reshaped_text.append('\n')
# Apply result.
buffer.document = Document(
text=''.join(lines_before + reshaped_text + lines_after),
cursor_position=len(''.join(lines_before + reshaped_text))) | Reformat text, taking the width into account.
`to_row` is included.
(Vi 'gq' operator.) | Below is the the instruction that describes the task:
### Input:
Reformat text, taking the width into account.
`to_row` is included.
(Vi 'gq' operator.)
### Response:
def reshape_text(buffer, from_row, to_row):
"""
Reformat text, taking the width into account.
`to_row` is included.
(Vi 'gq' operator.)
"""
lines = buffer.text.splitlines(True)
lines_before = lines[:from_row]
lines_after = lines[to_row + 1:]
lines_to_reformat = lines[from_row:to_row + 1]
if lines_to_reformat:
# Take indentation from the first line.
length = re.search(r'^\s*', lines_to_reformat[0]).end()
indent = lines_to_reformat[0][:length].replace('\n', '')
# Now, take all the 'words' from the lines to be reshaped.
words = ''.join(lines_to_reformat).split()
# And reshape.
width = (buffer.text_width or 80) - len(indent)
reshaped_text = [indent]
current_width = 0
for w in words:
if current_width:
if len(w) + current_width + 1 > width:
reshaped_text.append('\n')
reshaped_text.append(indent)
current_width = 0
else:
reshaped_text.append(' ')
current_width += 1
reshaped_text.append(w)
current_width += len(w)
if reshaped_text[-1] != '\n':
reshaped_text.append('\n')
# Apply result.
buffer.document = Document(
text=''.join(lines_before + reshaped_text + lines_after),
cursor_position=len(''.join(lines_before + reshaped_text))) |
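The heart of reshape_text is the greedy word-wrap loop; the standalone sketch below replays that loop on a plain string, independent of the Buffer/Document classes assumed above (simplified: the width is not reduced by the indent length).

import re

def greedy_wrap(text, width=24):
    # Take indentation from the original text, then re-flow its words greedily.
    indent = re.search(r'^\s*', text).group().replace('\n', '')
    words = text.split()
    parts, current = [indent], 0
    for w in words:
        if current and len(w) + current + 1 > width:
            parts.append('\n')
            parts.append(indent)
            current = 0
        elif current:
            parts.append(' ')
            current += 1
        parts.append(w)
        current += len(w)
    return ''.join(parts)

print(greedy_wrap('    one two three four five six seven eight nine ten'))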
def get_recipe_instances_for_badges(self, badges):
"""
Takes a list of badge slugs and returns a tuple: ``(valid, invalid)``.
"""
from .exceptions import BadgeNotFound
valid, invalid = [], []
if not isinstance(badges, (list, tuple)):
badges = [badges]
for badge in badges:
try:
recipe = self.get_recipe_instance(badge)
valid.append(recipe)
except BadgeNotFound:
logger.debug('✘ Badge "%s" has not been registered', badge)
invalid.append(badge)
return (valid, invalid) | Takes a list of badge slugs and returns a tuple: ``(valid, invalid)``. | Below is the the instruction that describes the task:
### Input:
Takes a list of badge slugs and returns a tuple: ``(valid, invalid)``.
### Response:
def get_recipe_instances_for_badges(self, badges):
"""
Takes a list of badge slugs and returns a tuple: ``(valid, invalid)``.
"""
from .exceptions import BadgeNotFound
valid, invalid = [], []
if not isinstance(badges, (list, tuple)):
badges = [badges]
for badge in badges:
try:
recipe = self.get_recipe_instance(badge)
valid.append(recipe)
except BadgeNotFound:
logger.debug('✘ Badge "%s" has not been registered', badge)
invalid.append(badge)
return (valid, invalid) |
def aes_encrypt(self, plain, sec_key, enable_b64=True):
"""
        Encrypt data with ``aes``, then ``base64``-encode the encrypted data
        - ``sec_key`` encrypts ``msg``; finally choose ``whether to base64-encode the data``
        - msg length must be a multiple of 16; shorter input is padded with ascii '\\0'
        .. warning::
            msg length must be a multiple of 16; shorter input is padded with ascii '\\0'
:param plain:
:type plain: str
:param sec_key:
:type sec_key: str
:param enable_b64:
:type enable_b64: bool
:return:
:rtype:
"""
plain = helper.to_str(plain)
sec_key = helper.to_str(sec_key)
        # If msg length is not a multiple of 16, pad it with '\0'
        plain += '\0' * (self.bs - len(plain) % self.bs)
        # Encrypt using the generated key and iv
        plain = helper.to_bytes(plain)
        cipher = self.aes_obj(sec_key).encrypt(plain)
        # Optionally return the data base64-encoded
cip = base64.b64encode(cipher) if enable_b64 else cipher
        return helper.to_str(cip) | Encrypt data with ``aes``, then ``base64``-encode the encrypted data
        - ``sec_key`` encrypts ``msg``; finally choose ``whether to base64-encode the data``
        - msg length must be a multiple of 16; shorter input is padded with ascii '\\0'
        .. warning::
            msg length must be a multiple of 16; shorter input is padded with ascii '\\0'
:param plain:
:type plain: str
:param sec_key:
:type sec_key: str
:param enable_b64:
:type enable_b64: bool
:return:
:rtype: | Below is the the instruction that describes the task:
### Input:
        Encrypt data with ``aes``, then ``base64``-encode the encrypted data
        - ``sec_key`` encrypts ``msg``; finally choose ``whether to base64-encode the data``
        - msg length must be a multiple of 16; shorter input is padded with ascii '\\0'
        .. warning::
            msg length must be a multiple of 16; shorter input is padded with ascii '\\0'
:param plain:
:type plain: str
:param sec_key:
:type sec_key: str
:param enable_b64:
:type enable_b64: bool
:return:
:rtype:
### Response:
def aes_encrypt(self, plain, sec_key, enable_b64=True):
"""
        Encrypt data with ``aes``, then ``base64``-encode the encrypted data
        - ``sec_key`` encrypts ``msg``; finally choose ``whether to base64-encode the data``
        - msg length must be a multiple of 16; shorter input is padded with ascii '\\0'
        .. warning::
            msg length must be a multiple of 16; shorter input is padded with ascii '\\0'
:param plain:
:type plain: str
:param sec_key:
:type sec_key: str
:param enable_b64:
:type enable_b64: bool
:return:
:rtype:
"""
plain = helper.to_str(plain)
sec_key = helper.to_str(sec_key)
        # If msg length is not a multiple of 16, pad it with '\0'
        plain += '\0' * (self.bs - len(plain) % self.bs)
        # Encrypt using the generated key and iv
        plain = helper.to_bytes(plain)
        cipher = self.aes_obj(sec_key).encrypt(plain)
        # Optionally return the data base64-encoded
cip = base64.b64encode(cipher) if enable_b64 else cipher
return helper.to_str(cip) |
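The one non-obvious step above is the zero-padding to the AES block size; the snippet below isolates that arithmetic (a block size of 16 is assumed, no encryption is performed) and shows that already-aligned input still gains a full extra block, matching the code above.

bs = 16  # AES block size in bytes, playing the role of self.bs above

for plain in ('hello', 'exactly sixteen!'):
    padded = plain + '\0' * (bs - len(plain) % bs)
    print(repr(plain), '->', len(padded), 'bytes')
# 'hello'            -> 16 bytes
# 'exactly sixteen!' -> 32 bytes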
def auto_schedule_hosting_devices(self, plugin, context, agent_host):
"""Schedules unassociated hosting devices to Cisco cfg agent.
Schedules hosting devices to agent running on <agent_host>.
"""
query = context.session.query(bc.Agent)
query = query.filter_by(agent_type=c_constants.AGENT_TYPE_CFG,
host=agent_host, admin_state_up=True)
try:
cfg_agent_db = query.one()
except (exc.MultipleResultsFound, exc.NoResultFound):
LOG.debug('No enabled Cisco cfg agent on host %s', agent_host)
return
if cfg_agentschedulers_db.CfgAgentSchedulerDbMixin.is_agent_down(
cfg_agent_db.heartbeat_timestamp):
LOG.warning('Cisco cfg agent %s is not alive',
cfg_agent_db.id)
return cfg_agent_db | Schedules unassociated hosting devices to Cisco cfg agent.
Schedules hosting devices to agent running on <agent_host>. | Below is the the instruction that describes the task:
### Input:
Schedules unassociated hosting devices to Cisco cfg agent.
Schedules hosting devices to agent running on <agent_host>.
### Response:
def auto_schedule_hosting_devices(self, plugin, context, agent_host):
"""Schedules unassociated hosting devices to Cisco cfg agent.
Schedules hosting devices to agent running on <agent_host>.
"""
query = context.session.query(bc.Agent)
query = query.filter_by(agent_type=c_constants.AGENT_TYPE_CFG,
host=agent_host, admin_state_up=True)
try:
cfg_agent_db = query.one()
except (exc.MultipleResultsFound, exc.NoResultFound):
LOG.debug('No enabled Cisco cfg agent on host %s', agent_host)
return
if cfg_agentschedulers_db.CfgAgentSchedulerDbMixin.is_agent_down(
cfg_agent_db.heartbeat_timestamp):
LOG.warning('Cisco cfg agent %s is not alive',
cfg_agent_db.id)
return cfg_agent_db |
def eq_(result, expected, msg=None):
"""
Shadow of the Nose builtin which presents easier to read multiline output.
"""
params = {'expected': expected, 'result': result}
aka = """
--------------------------------- aka -----------------------------------------
Expected:
%(expected)r
Got:
%(result)r
""" % params
default_msg = """
Expected:
%(expected)s
Got:
%(result)s
""" % params
if (
(repr(result) != six.text_type(result)) or
(repr(expected) != six.text_type(expected))
):
default_msg += aka
assertion_msg = msg or default_msg
# This assert will bubble up to Nose's failure handling, which at some
# point calls explicit str() - which will UnicodeDecodeError on any non
# ASCII text.
# To work around this, we make sure Unicode strings become bytestrings
# beforehand, with explicit encode.
if isinstance(assertion_msg, six.text_type):
assertion_msg = assertion_msg.encode('utf-8')
assert result == expected, assertion_msg | Shadow of the Nose builtin which presents easier to read multiline output. | Below is the the instruction that describes the task:
### Input:
Shadow of the Nose builtin which presents easier to read multiline output.
### Response:
def eq_(result, expected, msg=None):
"""
Shadow of the Nose builtin which presents easier to read multiline output.
"""
params = {'expected': expected, 'result': result}
aka = """
--------------------------------- aka -----------------------------------------
Expected:
%(expected)r
Got:
%(result)r
""" % params
default_msg = """
Expected:
%(expected)s
Got:
%(result)s
""" % params
if (
(repr(result) != six.text_type(result)) or
(repr(expected) != six.text_type(expected))
):
default_msg += aka
assertion_msg = msg or default_msg
# This assert will bubble up to Nose's failure handling, which at some
# point calls explicit str() - which will UnicodeDecodeError on any non
# ASCII text.
# To work around this, we make sure Unicode strings become bytestrings
# beforehand, with explicit encode.
if isinstance(assertion_msg, six.text_type):
assertion_msg = assertion_msg.encode('utf-8')
assert result == expected, assertion_msg |
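A quick illustration of the failure message, reusing the eq_ helper defined above (six is assumed to be importable, as the helper itself requires; under Python 3 the final encode step, written for Python 2 / Nose, makes the message print as a UTF-8 byte string).

eq_(2 + 2, 4)                        # passes silently
try:
    eq_({'a': 1}, {'a': 2})
except AssertionError as error:      # fails with the indented Expected/Got block
    print(error)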
def rgb2gray(image_rgb_array):
"""!
@brief Returns image as 1-dimension (gray colored) matrix, where one element of list describes pixel.
@details Luma coding is used for transformation and that is calculated directly from gamma-compressed primary intensities as a weighted sum:
\f[Y = 0.2989R + 0.587G + 0.114B\f]
@param[in] image_rgb_array (list): Image represented by RGB list.
@return (list) Image as gray colored matrix, where one element of list describes pixel.
@code
colored_image = read_image(file_name);
gray_image = rgb2gray(colored_image);
@endcode
@see read_image()
"""
    image_gray_array = [0.0] * len(image_rgb_array)
    for index in range(0, len(image_rgb_array), 1):
        image_gray_array[index] = float(image_rgb_array[index][0]) * 0.2989 + float(image_rgb_array[index][1]) * 0.5870 + float(image_rgb_array[index][2]) * 0.1140
    return image_gray_array
@brief Returns image as 1-dimension (gray colored) matrix, where one element of list describes pixel.
@details Luma coding is used for transformation and that is calculated directly from gamma-compressed primary intensities as a weighted sum:
\f[Y = 0.2989R + 0.587G + 0.114B\f]
@param[in] image_rgb_array (list): Image represented by RGB list.
@return (list) Image as gray colored matrix, where one element of list describes pixel.
@code
colored_image = read_image(file_name);
gray_image = rgb2gray(colored_image);
@endcode
@see read_image() | Below is the the instruction that describes the task:
### Input:
!
@brief Returns image as 1-dimension (gray colored) matrix, where one element of list describes pixel.
@details Luma coding is used for transformation and that is calculated directly from gamma-compressed primary intensities as a weighted sum:
\f[Y = 0.2989R + 0.587G + 0.114B\f]
@param[in] image_rgb_array (list): Image represented by RGB list.
@return (list) Image as gray colored matrix, where one element of list describes pixel.
@code
colored_image = read_image(file_name);
gray_image = rgb2gray(colored_image);
@endcode
@see read_image()
### Response:
def rgb2gray(image_rgb_array):
"""!
@brief Returns image as 1-dimension (gray colored) matrix, where one element of list describes pixel.
@details Luma coding is used for transformation and that is calculated directly from gamma-compressed primary intensities as a weighted sum:
\f[Y = 0.2989R + 0.587G + 0.114B\f]
@param[in] image_rgb_array (list): Image represented by RGB list.
@return (list) Image as gray colored matrix, where one element of list describes pixel.
@code
colored_image = read_image(file_name);
gray_image = rgb2gray(colored_image);
@endcode
@see read_image()
"""
    image_gray_array = [0.0] * len(image_rgb_array)
    for index in range(0, len(image_rgb_array), 1):
        image_gray_array[index] = float(image_rgb_array[index][0]) * 0.2989 + float(image_rgb_array[index][1]) * 0.5870 + float(image_rgb_array[index][2]) * 0.1140
    return image_gray_array
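A tiny check of the luma weights with two hand-made pixels (0-255 RGB triples are assumed here; the function itself does not care about the scale).

pixels = [(255, 0, 0), (255, 255, 255)]   # pure red, pure white
print(rgb2gray(pixels))                   # approximately [76.22, 254.97]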
def telegram():
'''Install Telegram desktop client for linux (x64).
    More info:
https://telegram.org
https://desktop.telegram.org/
'''
if not exists('~/bin/Telegram', msg='Download and install Telegram:'):
run('mkdir -p /tmp/telegram')
run('cd /tmp/telegram && wget https://telegram.org/dl/desktop/linux')
run('cd /tmp/telegram && tar xf linux')
with warn_only():
run('mv /tmp/telegram/Telegram ~/bin')
run('rm -rf /tmp/telegram')
else:
print('skip download, dir ~/bin/Telegram already exists')
run('ln -snf ~/bin/Telegram/Telegram ~/bin/telegram',
msg="\nCreate executable 'telegram':") | Install Telegram desktop client for linux (x64).
    More info:
https://telegram.org
https://desktop.telegram.org/ | Below is the the instruction that describes the task:
### Input:
Install Telegram desktop client for linux (x64).
    More info:
https://telegram.org
https://desktop.telegram.org/
### Response:
def telegram():
'''Install Telegram desktop client for linux (x64).
    More info:
https://telegram.org
https://desktop.telegram.org/
'''
if not exists('~/bin/Telegram', msg='Download and install Telegram:'):
run('mkdir -p /tmp/telegram')
run('cd /tmp/telegram && wget https://telegram.org/dl/desktop/linux')
run('cd /tmp/telegram && tar xf linux')
with warn_only():
run('mv /tmp/telegram/Telegram ~/bin')
run('rm -rf /tmp/telegram')
else:
print('skip download, dir ~/bin/Telegram already exists')
run('ln -snf ~/bin/Telegram/Telegram ~/bin/telegram',
msg="\nCreate executable 'telegram':") |
def set_fan_mode(self, mode):
"""Set the fan mode"""
self.set_service_value(
self.thermostat_fan_service,
'Mode',
'NewMode',
mode)
self.set_cache_value('fanmode', mode) | Set the fan mode | Below is the the instruction that describes the task:
### Input:
Set the fan mode
### Response:
def set_fan_mode(self, mode):
"""Set the fan mode"""
self.set_service_value(
self.thermostat_fan_service,
'Mode',
'NewMode',
mode)
self.set_cache_value('fanmode', mode) |
def fit_polynomial(pixel_data, mask, clip=True):
'''Return an "image" which is a polynomial fit to the pixel data
Fit the image to the polynomial Ax**2+By**2+Cxy+Dx+Ey+F
pixel_data - a two-dimensional numpy array to be fitted
mask - a mask of pixels whose intensities should be considered in the
least squares fit
clip - if True, clip the output array so that pixels less than zero
in the fitted image are zero and pixels that are greater than
one are one.
'''
mask = np.logical_and(mask,pixel_data > 0)
if not np.any(mask):
return pixel_data
x,y = np.mgrid[0:pixel_data.shape[0],0:pixel_data.shape[1]]
x2 = x*x
y2 = y*y
xy = x*y
o = np.ones(pixel_data.shape)
a = np.array([x[mask],y[mask],x2[mask],y2[mask],xy[mask],o[mask]])
coeffs = scipy.linalg.lstsq(a.transpose(),pixel_data[mask])[0]
output_pixels = np.sum([coeff * index for coeff, index in
zip(coeffs, [x,y,x2,y2,xy,o])],0)
if clip:
output_pixels[output_pixels > 1] = 1
output_pixels[output_pixels < 0] = 0
return output_pixels | Return an "image" which is a polynomial fit to the pixel data
Fit the image to the polynomial Ax**2+By**2+Cxy+Dx+Ey+F
pixel_data - a two-dimensional numpy array to be fitted
mask - a mask of pixels whose intensities should be considered in the
least squares fit
clip - if True, clip the output array so that pixels less than zero
in the fitted image are zero and pixels that are greater than
one are one. | Below is the the instruction that describes the task:
### Input:
Return an "image" which is a polynomial fit to the pixel data
Fit the image to the polynomial Ax**2+By**2+Cxy+Dx+Ey+F
pixel_data - a two-dimensional numpy array to be fitted
mask - a mask of pixels whose intensities should be considered in the
least squares fit
clip - if True, clip the output array so that pixels less than zero
in the fitted image are zero and pixels that are greater than
one are one.
### Response:
def fit_polynomial(pixel_data, mask, clip=True):
'''Return an "image" which is a polynomial fit to the pixel data
Fit the image to the polynomial Ax**2+By**2+Cxy+Dx+Ey+F
pixel_data - a two-dimensional numpy array to be fitted
mask - a mask of pixels whose intensities should be considered in the
least squares fit
clip - if True, clip the output array so that pixels less than zero
in the fitted image are zero and pixels that are greater than
one are one.
'''
mask = np.logical_and(mask,pixel_data > 0)
if not np.any(mask):
return pixel_data
x,y = np.mgrid[0:pixel_data.shape[0],0:pixel_data.shape[1]]
x2 = x*x
y2 = y*y
xy = x*y
o = np.ones(pixel_data.shape)
a = np.array([x[mask],y[mask],x2[mask],y2[mask],xy[mask],o[mask]])
coeffs = scipy.linalg.lstsq(a.transpose(),pixel_data[mask])[0]
output_pixels = np.sum([coeff * index for coeff, index in
zip(coeffs, [x,y,x2,y2,xy,o])],0)
if clip:
output_pixels[output_pixels > 1] = 1
output_pixels[output_pixels < 0] = 0
return output_pixels |
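A short sketch of fit_polynomial on synthetic data; the gradient image, noise level, and mask below are made up, and numpy plus scipy must be importable (the function body itself references np and scipy.linalg).

import numpy as np
import scipy.linalg   # used inside fit_polynomial

rng = np.random.RandomState(0)
y, x = np.mgrid[0:32, 0:32] / 32.0
truth = 0.2 + 0.5 * x + 0.3 * y ** 2            # smooth quadratic background
image = truth + 0.02 * rng.randn(32, 32)        # plus a little noise
mask = np.ones_like(image, dtype=bool)

background = fit_polynomial(image, mask, clip=True)
print(np.abs(background - truth).max())         # small, roughly noise-level residual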
def assemble(
iterable, patterns=None, minimum_items=2, case_sensitive=True,
assume_padded_when_ambiguous=False
):
    '''Assemble items in *iterable* into discrete collections.
*patterns* may be specified as a list of regular expressions to limit
the returned collection possibilities. Use this when interested in
collections that only match specific patterns. Each pattern must contain
the expression from :py:data:`DIGITS_PATTERN` exactly once.
A selection of common expressions are available in :py:data:`PATTERNS`.
.. note::
If a pattern is supplied as a string it will be automatically compiled
to a :py:class:`re.RegexObject` instance for convenience.
When *patterns* is not specified, collections are formed by examining all
possible groupings of the items in *iterable* based around common numerical
components.
*minimum_items* dictates the minimum number of items a collection must have
in order to be included in the result. The default is 2, filtering out
single item collections.
If *case_sensitive* is False, then items will be treated as part of the same
collection when they only differ in casing. To avoid ambiguity, the
resulting collection will always be lowercase. For example, "item.0001.dpx"
and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx".
.. note::
Any compiled *patterns* will also respect the set case sensitivity.
For certain collections it may be ambiguous whether they are padded or not.
For example, 1000-1010 can be considered either an unpadded collection or a
four padded collection. By default, Clique is conservative and assumes that
the collection is unpadded. To change this behaviour, set
*assume_padded_when_ambiguous* to True and any ambiguous collection will have
a relevant padding set.
.. note::
*assume_padded_when_ambiguous* has no effect on collections that are
unambiguous. For example, 1-100 will always be considered unpadded
regardless of the *assume_padded_when_ambiguous* setting.
Return tuple of two lists (collections, remainder) where 'collections' is a
list of assembled :py:class:`~clique.collection.Collection` instances and
'remainder' is a list of items that did not belong to any collection.
'''
collection_map = defaultdict(set)
collections = []
remainder = []
# Compile patterns.
flags = 0
if not case_sensitive:
flags |= re.IGNORECASE
compiled_patterns = []
if patterns is not None:
if not patterns:
return collections, list(iterable)
for pattern in patterns:
if isinstance(pattern, basestring):
compiled_patterns.append(re.compile(pattern, flags=flags))
else:
compiled_patterns.append(pattern)
else:
compiled_patterns.append(re.compile(DIGITS_PATTERN, flags=flags))
# Process iterable.
for item in iterable:
matched = False
for pattern in compiled_patterns:
for match in pattern.finditer(item):
index = match.group('index')
head = item[:match.start('index')]
tail = item[match.end('index'):]
if not case_sensitive:
head = head.lower()
tail = tail.lower()
padding = match.group('padding')
if padding:
padding = len(index)
else:
padding = 0
key = (head, tail, padding)
collection_map[key].add(int(index))
matched = True
if not matched:
remainder.append(item)
# Form collections.
merge_candidates = []
for (head, tail, padding), indexes in collection_map.items():
collection = Collection(head, tail, padding, indexes)
collections.append(collection)
if collection.padding == 0:
merge_candidates.append(collection)
# Merge together collections that align on padding boundaries. For example,
# 0998-0999 and 1000-1001 can be merged into 0998-1001. Note that only
# indexes within the padding width limit are merged. If a collection is
# entirely merged into another then it will not be included as a separate
# collection in the results.
fully_merged = []
for collection in collections:
if collection.padding == 0:
continue
for candidate in merge_candidates:
if (
candidate.head == collection.head and
candidate.tail == collection.tail
):
merged_index_count = 0
for index in candidate.indexes:
if len(str(abs(index))) == collection.padding:
collection.indexes.add(index)
merged_index_count += 1
if merged_index_count == len(candidate.indexes):
fully_merged.append(candidate)
# Filter out fully merged collections.
collections = [collection for collection in collections
if collection not in fully_merged]
# Filter out collections that do not have at least as many indexes as
# minimum_items. In addition, add any members of a filtered collection,
# which are not members of an unfiltered collection, to the remainder.
filtered = []
remainder_candidates = []
for collection in collections:
if len(collection.indexes) >= minimum_items:
filtered.append(collection)
else:
for member in collection:
remainder_candidates.append(member)
for candidate in remainder_candidates:
# Check if candidate has already been added to remainder to avoid
# duplicate entries.
if candidate in remainder:
continue
has_membership = False
for collection in filtered:
if candidate in collection:
has_membership = True
break
if not has_membership:
remainder.append(candidate)
# Set padding for all ambiguous collections according to the
# assume_padded_when_ambiguous setting.
if assume_padded_when_ambiguous:
for collection in filtered:
if (
not collection.padding and collection.indexes
):
indexes = list(collection.indexes)
first_index_width = len(str(indexes[0]))
last_index_width = len(str(indexes[-1]))
if first_index_width == last_index_width:
collection.padding = first_index_width
    return filtered, remainder | Assemble items in *iterable* into discrete collections.
*patterns* may be specified as a list of regular expressions to limit
the returned collection possibilities. Use this when interested in
collections that only match specific patterns. Each pattern must contain
the expression from :py:data:`DIGITS_PATTERN` exactly once.
A selection of common expressions are available in :py:data:`PATTERNS`.
.. note::
If a pattern is supplied as a string it will be automatically compiled
to a :py:class:`re.RegexObject` instance for convenience.
When *patterns* is not specified, collections are formed by examining all
possible groupings of the items in *iterable* based around common numerical
components.
*minimum_items* dictates the minimum number of items a collection must have
in order to be included in the result. The default is 2, filtering out
single item collections.
If *case_sensitive* is False, then items will be treated as part of the same
collection when they only differ in casing. To avoid ambiguity, the
resulting collection will always be lowercase. For example, "item.0001.dpx"
and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx".
.. note::
Any compiled *patterns* will also respect the set case sensitivity.
For certain collections it may be ambiguous whether they are padded or not.
For example, 1000-1010 can be considered either an unpadded collection or a
four padded collection. By default, Clique is conservative and assumes that
the collection is unpadded. To change this behaviour, set
*assume_padded_when_ambiguous* to True and any ambiguous collection will have
a relevant padding set.
.. note::
*assume_padded_when_ambiguous* has no effect on collections that are
unambiguous. For example, 1-100 will always be considered unpadded
regardless of the *assume_padded_when_ambiguous* setting.
Return tuple of two lists (collections, remainder) where 'collections' is a
list of assembled :py:class:`~clique.collection.Collection` instances and
'remainder' is a list of items that did not belong to any collection. | Below is the the instruction that describes the task:
### Input:
Assemble items in *iterable* into discrete collections.
*patterns* may be specified as a list of regular expressions to limit
the returned collection possibilities. Use this when interested in
collections that only match specific patterns. Each pattern must contain
the expression from :py:data:`DIGITS_PATTERN` exactly once.
A selection of common expressions are available in :py:data:`PATTERNS`.
.. note::
If a pattern is supplied as a string it will be automatically compiled
to a :py:class:`re.RegexObject` instance for convenience.
When *patterns* is not specified, collections are formed by examining all
possible groupings of the items in *iterable* based around common numerical
components.
*minimum_items* dictates the minimum number of items a collection must have
in order to be included in the result. The default is 2, filtering out
single item collections.
If *case_sensitive* is False, then items will be treated as part of the same
collection when they only differ in casing. To avoid ambiguity, the
resulting collection will always be lowercase. For example, "item.0001.dpx"
and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx".
.. note::
Any compiled *patterns* will also respect the set case sensitivity.
For certain collections it may be ambiguous whether they are padded or not.
For example, 1000-1010 can be considered either an unpadded collection or a
four padded collection. By default, Clique is conservative and assumes that
the collection is unpadded. To change this behaviour, set
*assume_padded_when_ambiguous* to True and any ambiguous collection will have
a relevant padding set.
.. note::
*assume_padded_when_ambiguous* has no effect on collections that are
unambiguous. For example, 1-100 will always be considered unpadded
regardless of the *assume_padded_when_ambiguous* setting.
Return tuple of two lists (collections, remainder) where 'collections' is a
list of assembled :py:class:`~clique.collection.Collection` instances and
'remainder' is a list of items that did not belong to any collection.
### Response:
def assemble(
iterable, patterns=None, minimum_items=2, case_sensitive=True,
assume_padded_when_ambiguous=False
):
    '''Assemble items in *iterable* into discrete collections.
*patterns* may be specified as a list of regular expressions to limit
the returned collection possibilities. Use this when interested in
collections that only match specific patterns. Each pattern must contain
the expression from :py:data:`DIGITS_PATTERN` exactly once.
A selection of common expressions are available in :py:data:`PATTERNS`.
.. note::
If a pattern is supplied as a string it will be automatically compiled
to a :py:class:`re.RegexObject` instance for convenience.
When *patterns* is not specified, collections are formed by examining all
possible groupings of the items in *iterable* based around common numerical
components.
*minimum_items* dictates the minimum number of items a collection must have
in order to be included in the result. The default is 2, filtering out
single item collections.
If *case_sensitive* is False, then items will be treated as part of the same
collection when they only differ in casing. To avoid ambiguity, the
resulting collection will always be lowercase. For example, "item.0001.dpx"
and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx".
.. note::
Any compiled *patterns* will also respect the set case sensitivity.
For certain collections it may be ambiguous whether they are padded or not.
For example, 1000-1010 can be considered either an unpadded collection or a
four padded collection. By default, Clique is conservative and assumes that
the collection is unpadded. To change this behaviour, set
*assume_padded_when_ambiguous* to True and any ambiguous collection will have
a relevant padding set.
.. note::
*assume_padded_when_ambiguous* has no effect on collections that are
unambiguous. For example, 1-100 will always be considered unpadded
regardless of the *assume_padded_when_ambiguous* setting.
Return tuple of two lists (collections, remainder) where 'collections' is a
list of assembled :py:class:`~clique.collection.Collection` instances and
'remainder' is a list of items that did not belong to any collection.
'''
collection_map = defaultdict(set)
collections = []
remainder = []
# Compile patterns.
flags = 0
if not case_sensitive:
flags |= re.IGNORECASE
compiled_patterns = []
if patterns is not None:
if not patterns:
return collections, list(iterable)
for pattern in patterns:
if isinstance(pattern, basestring):
compiled_patterns.append(re.compile(pattern, flags=flags))
else:
compiled_patterns.append(pattern)
else:
compiled_patterns.append(re.compile(DIGITS_PATTERN, flags=flags))
# Process iterable.
for item in iterable:
matched = False
for pattern in compiled_patterns:
for match in pattern.finditer(item):
index = match.group('index')
head = item[:match.start('index')]
tail = item[match.end('index'):]
if not case_sensitive:
head = head.lower()
tail = tail.lower()
padding = match.group('padding')
if padding:
padding = len(index)
else:
padding = 0
key = (head, tail, padding)
collection_map[key].add(int(index))
matched = True
if not matched:
remainder.append(item)
# Form collections.
merge_candidates = []
for (head, tail, padding), indexes in collection_map.items():
collection = Collection(head, tail, padding, indexes)
collections.append(collection)
if collection.padding == 0:
merge_candidates.append(collection)
# Merge together collections that align on padding boundaries. For example,
# 0998-0999 and 1000-1001 can be merged into 0998-1001. Note that only
# indexes within the padding width limit are merged. If a collection is
# entirely merged into another then it will not be included as a separate
# collection in the results.
fully_merged = []
for collection in collections:
if collection.padding == 0:
continue
for candidate in merge_candidates:
if (
candidate.head == collection.head and
candidate.tail == collection.tail
):
merged_index_count = 0
for index in candidate.indexes:
if len(str(abs(index))) == collection.padding:
collection.indexes.add(index)
merged_index_count += 1
if merged_index_count == len(candidate.indexes):
fully_merged.append(candidate)
# Filter out fully merged collections.
collections = [collection for collection in collections
if collection not in fully_merged]
# Filter out collections that do not have at least as many indexes as
# minimum_items. In addition, add any members of a filtered collection,
# which are not members of an unfiltered collection, to the remainder.
filtered = []
remainder_candidates = []
for collection in collections:
if len(collection.indexes) >= minimum_items:
filtered.append(collection)
else:
for member in collection:
remainder_candidates.append(member)
for candidate in remainder_candidates:
# Check if candidate has already been added to remainder to avoid
# duplicate entries.
if candidate in remainder:
continue
has_membership = False
for collection in filtered:
if candidate in collection:
has_membership = True
break
if not has_membership:
remainder.append(candidate)
# Set padding for all ambiguous collections according to the
# assume_padded_when_ambiguous setting.
if assume_padded_when_ambiguous:
for collection in filtered:
if (
not collection.padding and collection.indexes
):
indexes = list(collection.indexes)
first_index_width = len(str(indexes[0]))
last_index_width = len(str(indexes[-1]))
if first_index_width == last_index_width:
collection.padding = first_index_width
return filtered, remainder |
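For context, this assemble function is the core of the clique package, where the same call is exposed at the package level; the example below assumes clique is installed and that Collection's string form renders the padded pattern.

import clique

collections, remainder = clique.assemble([
    'file.0001.jpg', 'file.0002.jpg', 'file.0003.jpg', 'notes.txt',
])
for collection in collections:
    print(collection)        # e.g. file.%04d.jpg [1-3]
print(remainder)             # ['notes.txt']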
def find_lt(array, x):
"""
Find rightmost value less than x.
:type array: list
    :param array: an iterable object that supports indexing
:param x: a comparable value
Example::
>>> find_lt([0, 1, 2, 3], 2.5)
2
    **Chinese documentation**
    Find the largest number that is less than x.
"""
i = bisect.bisect_left(array, x)
if i:
return array[i - 1]
raise ValueError | Find rightmost value less than x.
:type array: list
    :param array: an iterable object that supports indexing
:param x: a comparable value
Example::
>>> find_lt([0, 1, 2, 3], 2.5)
2
    **Chinese documentation**
    Find the largest number that is less than x. | Below is the the instruction that describes the task:
### Input:
Find rightmost value less than x.
:type array: list
    :param array: an iterable object that supports indexing
:param x: a comparable value
Example::
>>> find_lt([0, 1, 2, 3], 2.5)
2
    **Chinese documentation**
    Find the largest number that is less than x.
### Response:
def find_lt(array, x):
"""
Find rightmost value less than x.
:type array: list
    :param array: an iterable object that supports indexing
:param x: a comparable value
Example::
>>> find_lt([0, 1, 2, 3], 2.5)
2
    **Chinese documentation**
    Find the largest number that is less than x.
"""
i = bisect.bisect_left(array, x)
if i:
return array[i - 1]
raise ValueError |
def serial_udb_extra_f5_send(self, sue_YAWKP_AILERON, sue_YAWKD_AILERON, sue_ROLLKP, sue_ROLLKD, sue_YAW_STABILIZATION_AILERON, sue_AILERON_BOOST, force_mavlink1=False):
'''
Backwards compatible version of SERIAL_UDB_EXTRA F5: format
        sue_YAWKP_AILERON : Serial UDB YAWKP_AILERON Gain for Proportional control of navigation (float)
sue_YAWKD_AILERON : Serial UDB YAWKD_AILERON Gain for Rate control of navigation (float)
sue_ROLLKP : Serial UDB Extra ROLLKP Gain for Proportional control of roll stabilization (float)
sue_ROLLKD : Serial UDB Extra ROLLKD Gain for Rate control of roll stabilization (float)
sue_YAW_STABILIZATION_AILERON : YAW_STABILIZATION_AILERON Proportional control (float)
sue_AILERON_BOOST : Gain For Boosting Manual Aileron control When Plane Stabilized (float)
'''
return self.send(self.serial_udb_extra_f5_encode(sue_YAWKP_AILERON, sue_YAWKD_AILERON, sue_ROLLKP, sue_ROLLKD, sue_YAW_STABILIZATION_AILERON, sue_AILERON_BOOST), force_mavlink1=force_mavlink1) | Backwards compatible version of SERIAL_UDB_EXTRA F5: format
        sue_YAWKP_AILERON : Serial UDB YAWKP_AILERON Gain for Proportional control of navigation (float)
sue_YAWKD_AILERON : Serial UDB YAWKD_AILERON Gain for Rate control of navigation (float)
sue_ROLLKP : Serial UDB Extra ROLLKP Gain for Proportional control of roll stabilization (float)
sue_ROLLKD : Serial UDB Extra ROLLKD Gain for Rate control of roll stabilization (float)
sue_YAW_STABILIZATION_AILERON : YAW_STABILIZATION_AILERON Proportional control (float)
sue_AILERON_BOOST : Gain For Boosting Manual Aileron control When Plane Stabilized (float) | Below is the the instruction that describes the task:
### Input:
Backwards compatible version of SERIAL_UDB_EXTRA F5: format
        sue_YAWKP_AILERON : Serial UDB YAWKP_AILERON Gain for Proportional control of navigation (float)
sue_YAWKD_AILERON : Serial UDB YAWKD_AILERON Gain for Rate control of navigation (float)
sue_ROLLKP : Serial UDB Extra ROLLKP Gain for Proportional control of roll stabilization (float)
sue_ROLLKD : Serial UDB Extra ROLLKD Gain for Rate control of roll stabilization (float)
sue_YAW_STABILIZATION_AILERON : YAW_STABILIZATION_AILERON Proportional control (float)
sue_AILERON_BOOST : Gain For Boosting Manual Aileron control When Plane Stabilized (float)
### Response:
def serial_udb_extra_f5_send(self, sue_YAWKP_AILERON, sue_YAWKD_AILERON, sue_ROLLKP, sue_ROLLKD, sue_YAW_STABILIZATION_AILERON, sue_AILERON_BOOST, force_mavlink1=False):
'''
Backwards compatible version of SERIAL_UDB_EXTRA F5: format
        sue_YAWKP_AILERON : Serial UDB YAWKP_AILERON Gain for Proportional control of navigation (float)
sue_YAWKD_AILERON : Serial UDB YAWKD_AILERON Gain for Rate control of navigation (float)
sue_ROLLKP : Serial UDB Extra ROLLKP Gain for Proportional control of roll stabilization (float)
sue_ROLLKD : Serial UDB Extra ROLLKD Gain for Rate control of roll stabilization (float)
sue_YAW_STABILIZATION_AILERON : YAW_STABILIZATION_AILERON Proportional control (float)
sue_AILERON_BOOST : Gain For Boosting Manual Aileron control When Plane Stabilized (float)
'''
return self.send(self.serial_udb_extra_f5_encode(sue_YAWKP_AILERON, sue_YAWKD_AILERON, sue_ROLLKP, sue_ROLLKD, sue_YAW_STABILIZATION_AILERON, sue_AILERON_BOOST), force_mavlink1=force_mavlink1) |
def search(self, query, page=0, order=7, category=0, multipage=False):
"""
Searches TPB for query and returns a list of paginated Torrents capable
of changing query, categories and orders.
"""
search = Search(self.base_url, query, page, order, category)
if multipage:
search.multipage()
return search | Searches TPB for query and returns a list of paginated Torrents capable
of changing query, categories and orders. | Below is the the instruction that describes the task:
### Input:
Searches TPB for query and returns a list of paginated Torrents capable
of changing query, categories and orders.
### Response:
def search(self, query, page=0, order=7, category=0, multipage=False):
"""
Searches TPB for query and returns a list of paginated Torrents capable
of changing query, categories and orders.
"""
search = Search(self.base_url, query, page, order, category)
if multipage:
search.multipage()
return search |
def search(self, category, term='', index=0, count=100):
"""Search for an item in a category.
Args:
category (str): The search category to use. Standard Sonos search
categories are 'artists', 'albums', 'tracks', 'playlists',
'genres', 'stations', 'tags'. Not all are available for each
music service. Call available_search_categories for a list for
this service.
term (str): The term to search for.
index (int): The starting index. Default 0.
count (int): The maximum number of items to return. Default 100.
Returns:
~collections.OrderedDict: The search results, or `None`.
See also:
The Sonos `search API <http://musicpartners.sonos.com/node/86>`_
"""
search_category = self._get_search_prefix_map().get(category, None)
if search_category is None:
raise MusicServiceException(
"%s does not support the '%s' search category" % (
self.service_name, category))
response = self.soap_client.call(
'search',
[
('id', search_category), ('term', term), ('index', index),
('count', count)])
return parse_response(self, response, category) | Search for an item in a category.
Args:
category (str): The search category to use. Standard Sonos search
categories are 'artists', 'albums', 'tracks', 'playlists',
'genres', 'stations', 'tags'. Not all are available for each
music service. Call available_search_categories for a list for
this service.
term (str): The term to search for.
index (int): The starting index. Default 0.
count (int): The maximum number of items to return. Default 100.
Returns:
~collections.OrderedDict: The search results, or `None`.
See also:
The Sonos `search API <http://musicpartners.sonos.com/node/86>`_ | Below is the the instruction that describes the task:
### Input:
Search for an item in a category.
Args:
category (str): The search category to use. Standard Sonos search
categories are 'artists', 'albums', 'tracks', 'playlists',
'genres', 'stations', 'tags'. Not all are available for each
music service. Call available_search_categories for a list for
this service.
term (str): The term to search for.
index (int): The starting index. Default 0.
count (int): The maximum number of items to return. Default 100.
Returns:
~collections.OrderedDict: The search results, or `None`.
See also:
The Sonos `search API <http://musicpartners.sonos.com/node/86>`_
### Response:
def search(self, category, term='', index=0, count=100):
"""Search for an item in a category.
Args:
category (str): The search category to use. Standard Sonos search
categories are 'artists', 'albums', 'tracks', 'playlists',
'genres', 'stations', 'tags'. Not all are available for each
music service. Call available_search_categories for a list for
this service.
term (str): The term to search for.
index (int): The starting index. Default 0.
count (int): The maximum number of items to return. Default 100.
Returns:
~collections.OrderedDict: The search results, or `None`.
See also:
The Sonos `search API <http://musicpartners.sonos.com/node/86>`_
"""
search_category = self._get_search_prefix_map().get(category, None)
if search_category is None:
raise MusicServiceException(
"%s does not support the '%s' search category" % (
self.service_name, category))
response = self.soap_client.call(
'search',
[
('id', search_category), ('term', term), ('index', index),
('count', count)])
return parse_response(self, response, category) |
def image_file(path=None, zscript='self[1:]', xscript='[0,1]', yscript='d[0]', g=None, **kwargs):
"""
    Loads a data file and plots it with color. Data file must have columns of the
same length!
Parameters
----------
path=None
Path to data file.
zscript='self[1:]'
Determines how to get data from the columns
xscript='[0,1]', yscript='d[0]'
Determine the x and y arrays used for setting the axes bounds
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.image.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
"""
if 'delimiter' in kwargs: delimiter = kwargs.pop('delimiter')
else: delimiter = None
d = _data.load(paths=path, delimiter = delimiter)
if d is None or len(d) == 0: return
# allows the user to overwrite the defaults
default_kwargs = dict(xlabel = str(xscript),
ylabel = str(yscript),
title = d.path,
clabel = str(zscript))
default_kwargs.update(kwargs)
# get the data
X = d(xscript, g)
Y = d(yscript, g)
Z = _n.array(d(zscript, g))
# Z = Z.transpose()
# plot!
    image_data(Z, X, Y, **default_kwargs) | Loads a data file and plots it with color. Data file must have columns of the
same length!
Parameters
----------
path=None
Path to data file.
zscript='self[1:]'
Determines how to get data from the columns
xscript='[0,1]', yscript='d[0]'
Determine the x and y arrays used for setting the axes bounds
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.image.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts. | Below is the the instruction that describes the task:
### Input:
    Loads a data file and plots it with color. Data file must have columns of the
same length!
Parameters
----------
path=None
Path to data file.
zscript='self[1:]'
Determines how to get data from the columns
xscript='[0,1]', yscript='d[0]'
Determine the x and y arrays used for setting the axes bounds
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.image.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
### Response:
def image_file(path=None, zscript='self[1:]', xscript='[0,1]', yscript='d[0]', g=None, **kwargs):
"""
    Loads a data file and plots it with color. Data file must have columns of the
same length!
Parameters
----------
path=None
Path to data file.
zscript='self[1:]'
Determines how to get data from the columns
xscript='[0,1]', yscript='d[0]'
Determine the x and y arrays used for setting the axes bounds
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.image.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
"""
if 'delimiter' in kwargs: delimiter = kwargs.pop('delimiter')
else: delimiter = None
d = _data.load(paths=path, delimiter = delimiter)
if d is None or len(d) == 0: return
# allows the user to overwrite the defaults
default_kwargs = dict(xlabel = str(xscript),
ylabel = str(yscript),
title = d.path,
clabel = str(zscript))
default_kwargs.update(kwargs)
# get the data
X = d(xscript, g)
Y = d(yscript, g)
Z = _n.array(d(zscript, g))
# Z = Z.transpose()
# plot!
image_data(Z, X, Y, **default_kwargs) |
def trylock(self):
"Try to acquire lock and return True; if cannot acquire the lock at this moment, return False."
if self.locked:
return True
if self.lockroutine:
return False
waiter = self.scheduler.send(LockEvent(self.context, self.key, self))
if waiter:
return False
else:
self.locked = True
return True | Try to acquire lock and return True; if cannot acquire the lock at this moment, return False. | Below is the the instruction that describes the task:
### Input:
Try to acquire lock and return True; if cannot acquire the lock at this moment, return False.
### Response:
def trylock(self):
"Try to acquire lock and return True; if cannot acquire the lock at this moment, return False."
if self.locked:
return True
if self.lockroutine:
return False
waiter = self.scheduler.send(LockEvent(self.context, self.key, self))
if waiter:
return False
else:
self.locked = True
return True |
def newDocProp(self, name, value):
"""Create a new property carried by a document. """
ret = libxml2mod.xmlNewDocProp(self._o, name, value)
if ret is None:raise treeError('xmlNewDocProp() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp | Create a new property carried by a document. | Below is the the instruction that describes the task:
### Input:
Create a new property carried by a document.
### Response:
def newDocProp(self, name, value):
"""Create a new property carried by a document. """
ret = libxml2mod.xmlNewDocProp(self._o, name, value)
if ret is None:raise treeError('xmlNewDocProp() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp |
def execute_command(self, verb, verb_arguments):
"""Executes command (ex. add) via a dedicated http object.
Async APIs may take minutes to complete. Therefore, callers are
encouraged to leverage concurrent.futures (or similar) to place long
        running commands on separate threads.
Args:
verb (str): Method to execute on the component (ex. get, list).
verb_arguments (dict): key-value pairs to be passed to _build_request.
Returns:
dict: An async operation Service Response.
"""
request = self._build_request(verb, verb_arguments)
return self._execute(request) | Executes command (ex. add) via a dedicated http object.
Async APIs may take minutes to complete. Therefore, callers are
encouraged to leverage concurrent.futures (or similar) to place long
        running commands on separate threads.
Args:
verb (str): Method to execute on the component (ex. get, list).
verb_arguments (dict): key-value pairs to be passed to _build_request.
Returns:
dict: An async operation Service Response. | Below is the the instruction that describes the task:
### Input:
Executes command (ex. add) via a dedicated http object.
Async APIs may take minutes to complete. Therefore, callers are
encouraged to leverage concurrent.futures (or similar) to place long
        running commands on separate threads.
Args:
verb (str): Method to execute on the component (ex. get, list).
verb_arguments (dict): key-value pairs to be passed to _build_request.
Returns:
dict: An async operation Service Response.
### Response:
def execute_command(self, verb, verb_arguments):
"""Executes command (ex. add) via a dedicated http object.
Async APIs may take minutes to complete. Therefore, callers are
encouraged to leverage concurrent.futures (or similar) to place long
        running commands on separate threads.
Args:
verb (str): Method to execute on the component (ex. get, list).
verb_arguments (dict): key-value pairs to be passed to _build_request.
Returns:
dict: An async operation Service Response.
"""
request = self._build_request(verb, verb_arguments)
return self._execute(request) |
def load_rocstories_dataset(dataset_path):
""" Output a list of tuples(story, 1st continuation, 2nd continuation, label) """
with open(dataset_path, encoding='utf_8') as f:
f = csv.reader(f)
output = []
next(f) # skip the first line
for line in tqdm(f):
output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1])-1))
return output | Output a list of tuples(story, 1st continuation, 2nd continuation, label) | Below is the the instruction that describes the task:
### Input:
Output a list of tuples(story, 1st continuation, 2nd continuation, label)
### Response:
def load_rocstories_dataset(dataset_path):
""" Output a list of tuples(story, 1st continuation, 2nd continuation, label) """
with open(dataset_path, encoding='utf_8') as f:
f = csv.reader(f)
output = []
next(f) # skip the first line
for line in tqdm(f):
output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1])-1))
return output |
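The loader above expects the ROCStories cloze CSV layout: a header row, then an id, four story sentences, two candidate endings, and a 1/2 label per row. A small self-made file (column names invented) exercises it; tqdm must be importable because the function uses it.

import csv

rows = [
    ['id', 's1', 's2', 's3', 's4', 'ending1', 'ending2', 'label'],
    ['0001', 'Tom was hungry.', 'He opened the fridge.', 'It was empty.',
     'He grabbed his keys.', 'He drove to the store.', 'He went to sleep.', '1'],
]
with open('rocstories_sample.csv', 'w', encoding='utf_8', newline='') as f:
    csv.writer(f).writerows(rows)

print(load_rocstories_dataset('rocstories_sample.csv'))
# [('Tom was hungry. He opened the fridge. It was empty. He grabbed his keys.',
#   'He drove to the store.', 'He went to sleep.', 0)]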
def as_table(self, name=None):
""" Return an alias to a table
"""
if name is None:
name = self._id
return alias(self.subquery(), name=name) | Return an alias to a table | Below is the the instruction that describes the task:
### Input:
Return an alias to a table
### Response:
def as_table(self, name=None):
""" Return an alias to a table
"""
if name is None:
name = self._id
return alias(self.subquery(), name=name) |
def load_directory(self, directory, ext=None):
"""Load RiveScript documents from a directory.
:param str directory: The directory of RiveScript documents to load
replies from.
:param []str ext: List of file extensions to consider as RiveScript
documents. The default is ``[".rive", ".rs"]``.
"""
self._say("Loading from directory: " + directory)
if ext is None:
# Use the default extensions - .rive is preferable.
ext = ['.rive', '.rs']
elif type(ext) == str:
# Backwards compatibility for ext being a string value.
ext = [ext]
if not os.path.isdir(directory):
self._warn("Error: " + directory + " is not a directory.")
return
for root, subdirs, files in os.walk(directory):
for file in files:
for extension in ext:
if file.lower().endswith(extension):
# Load this file.
self.load_file(os.path.join(root, file))
break | Load RiveScript documents from a directory.
:param str directory: The directory of RiveScript documents to load
replies from.
:param []str ext: List of file extensions to consider as RiveScript
documents. The default is ``[".rive", ".rs"]``. | Below is the the instruction that describes the task:
### Input:
Load RiveScript documents from a directory.
:param str directory: The directory of RiveScript documents to load
replies from.
:param []str ext: List of file extensions to consider as RiveScript
documents. The default is ``[".rive", ".rs"]``.
### Response:
def load_directory(self, directory, ext=None):
"""Load RiveScript documents from a directory.
:param str directory: The directory of RiveScript documents to load
replies from.
:param []str ext: List of file extensions to consider as RiveScript
documents. The default is ``[".rive", ".rs"]``.
"""
self._say("Loading from directory: " + directory)
if ext is None:
# Use the default extensions - .rive is preferable.
ext = ['.rive', '.rs']
elif type(ext) == str:
# Backwards compatibility for ext being a string value.
ext = [ext]
if not os.path.isdir(directory):
self._warn("Error: " + directory + " is not a directory.")
return
for root, subdirs, files in os.walk(directory):
for file in files:
for extension in ext:
if file.lower().endswith(extension):
# Load this file.
self.load_file(os.path.join(root, file))
break |
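A hedged usage sketch, assuming the standard RiveScript entry point from the same library and an illustrative brain directory:

from rivescript import RiveScript

bot = RiveScript()
bot.load_directory('./brain', ext=['.rive'])
bot.sort_replies()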
def reply_regexp(self, user, regexp):
"""Prepares a trigger for the regular expression engine.
:param str user: The user ID invoking a reply.
:param str regexp: The original trigger text to be turned into a regexp.
:return regexp: The final regexp object."""
if regexp in self.master._regexc["trigger"]:
# Already compiled this one!
return self.master._regexc["trigger"][regexp]
# If the trigger is simply '*' then the * there needs to become (.*?)
# to match the blank string too.
regexp = re.sub(RE.zero_star, r'<zerowidthstar>', regexp)
# Filter in arrays.
arrays = re.findall(RE.array, regexp)
for array in arrays:
rep = ''
if array in self.master._array:
rep = r'(?:' + '|'.join(self.expand_array(array)) + ')'
regexp = re.sub(r'\@' + re.escape(array) + r'\b', rep, regexp)
# Simple replacements.
regexp = regexp.replace('*', '(.+?)') # Convert * into (.+?)
regexp = regexp.replace('#', '(\d+?)') # Convert # into (\d+?)
regexp = regexp.replace('_', '(\w+?)') # Convert _ into (\w+?)
regexp = re.sub(RE.weight, '', regexp) # Remove {weight} tags, allow spaces before the bracket
regexp = regexp.replace('<zerowidthstar>', r'(.*?)')
# Optionals.
optionals = re.findall(RE.optionals, regexp)
for match in optionals:
parts = match.split("|")
new = []
for p in parts:
p = r'(?:\\s|\\b)+{}(?:\\s|\\b)+'.format(p.strip())
new.append(p)
# If this optional had a star or anything in it, make it
# non-matching.
pipes = '|'.join(new)
pipes = pipes.replace(r'(.+?)', r'(?:.+?)')
pipes = pipes.replace(r'(\d+?)', r'(?:\d+?)')
pipes = pipes.replace(r'([A-Za-z]+?)', r'(?:[A-Za-z]+?)')
regexp = re.sub(r'\s*\[' + re.escape(match) + '\]\s*',
'(?:' + pipes + r'|(?:\\s|\\b))', regexp)
# _ wildcards can't match numbers!
regexp = re.sub(RE.literal_w, r'[^\\s\\d]', regexp)
# Filter in bot variables.
bvars = re.findall(RE.bot_tag, regexp)
for var in bvars:
rep = ''
if var in self.master._var:
rep = self.format_message(self.master._var[var])
regexp = regexp.replace('<bot {var}>'.format(var=var), rep)
# Filter in user variables.
uvars = re.findall(RE.get_tag, regexp)
for var in uvars:
rep = ''
value = self.master.get_uservar(user, var)
if value not in [None, "undefined"]:
rep = utils.strip_nasties(value)
regexp = regexp.replace('<get {var}>'.format(var=var), rep)
# Filter in <input> and <reply> tags. This is a slow process, so only
# do it if we have to!
if '<input' in regexp or '<reply' in regexp:
history = self.master.get_uservar(user, "__history__")
for type in ['input', 'reply']:
tags = re.findall(r'<' + type + r'([0-9])>', regexp)
for index in tags:
rep = self.format_message(history[type][int(index) - 1])
regexp = regexp.replace('<{type}{index}>'.format(type=type, index=index), rep)
regexp = regexp.replace('<{type}>'.format(type=type),
self.format_message(history[type][0]))
# TODO: the Perl version doesn't do just <input>/<reply> in trigs!
if self.utf8:
return re.compile(r'^' + regexp.lower() + r'$', re.UNICODE)
else:
return re.compile(r'^' + regexp.lower() + r'$') | Prepares a trigger for the regular expression engine.
:param str user: The user ID invoking a reply.
:param str regexp: The original trigger text to be turned into a regexp.
:return regexp: The final regexp object. | Below is the the instruction that describes the task:
### Input:
Prepares a trigger for the regular expression engine.
:param str user: The user ID invoking a reply.
:param str regexp: The original trigger text to be turned into a regexp.
:return regexp: The final regexp object.
### Response:
def reply_regexp(self, user, regexp):
"""Prepares a trigger for the regular expression engine.
:param str user: The user ID invoking a reply.
:param str regexp: The original trigger text to be turned into a regexp.
:return regexp: The final regexp object."""
if regexp in self.master._regexc["trigger"]:
# Already compiled this one!
return self.master._regexc["trigger"][regexp]
# If the trigger is simply '*' then the * there needs to become (.*?)
# to match the blank string too.
regexp = re.sub(RE.zero_star, r'<zerowidthstar>', regexp)
# Filter in arrays.
arrays = re.findall(RE.array, regexp)
for array in arrays:
rep = ''
if array in self.master._array:
rep = r'(?:' + '|'.join(self.expand_array(array)) + ')'
regexp = re.sub(r'\@' + re.escape(array) + r'\b', rep, regexp)
# Simple replacements.
regexp = regexp.replace('*', '(.+?)') # Convert * into (.+?)
regexp = regexp.replace('#', '(\d+?)') # Convert # into (\d+?)
regexp = regexp.replace('_', '(\w+?)') # Convert _ into (\w+?)
regexp = re.sub(RE.weight, '', regexp) # Remove {weight} tags, allow spaces before the bracket
regexp = regexp.replace('<zerowidthstar>', r'(.*?)')
# Optionals.
optionals = re.findall(RE.optionals, regexp)
for match in optionals:
parts = match.split("|")
new = []
for p in parts:
p = r'(?:\\s|\\b)+{}(?:\\s|\\b)+'.format(p.strip())
new.append(p)
# If this optional had a star or anything in it, make it
# non-matching.
pipes = '|'.join(new)
pipes = pipes.replace(r'(.+?)', r'(?:.+?)')
pipes = pipes.replace(r'(\d+?)', r'(?:\d+?)')
pipes = pipes.replace(r'([A-Za-z]+?)', r'(?:[A-Za-z]+?)')
regexp = re.sub(r'\s*\[' + re.escape(match) + '\]\s*',
'(?:' + pipes + r'|(?:\\s|\\b))', regexp)
# _ wildcards can't match numbers!
regexp = re.sub(RE.literal_w, r'[^\\s\\d]', regexp)
# Filter in bot variables.
bvars = re.findall(RE.bot_tag, regexp)
for var in bvars:
rep = ''
if var in self.master._var:
rep = self.format_message(self.master._var[var])
regexp = regexp.replace('<bot {var}>'.format(var=var), rep)
# Filter in user variables.
uvars = re.findall(RE.get_tag, regexp)
for var in uvars:
rep = ''
value = self.master.get_uservar(user, var)
if value not in [None, "undefined"]:
rep = utils.strip_nasties(value)
regexp = regexp.replace('<get {var}>'.format(var=var), rep)
# Filter in <input> and <reply> tags. This is a slow process, so only
# do it if we have to!
if '<input' in regexp or '<reply' in regexp:
history = self.master.get_uservar(user, "__history__")
for type in ['input', 'reply']:
tags = re.findall(r'<' + type + r'([0-9])>', regexp)
for index in tags:
rep = self.format_message(history[type][int(index) - 1])
regexp = regexp.replace('<{type}{index}>'.format(type=type, index=index), rep)
regexp = regexp.replace('<{type}>'.format(type=type),
self.format_message(history[type][0]))
# TODO: the Perl version doesn't do just <input>/<reply> in trigs!
if self.utf8:
return re.compile(r'^' + regexp.lower() + r'$', re.UNICODE)
else:
return re.compile(r'^' + regexp.lower() + r'$') |
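Illustrative effect of the trigger compilation above (approximate, not captured output; the actual pattern depends on the loaded brain and user variables):

# "hello *"          -> roughly  ^hello (.+?)$
# "i am # years old" -> roughly  ^i am (\d+?) years old$
# "what is your _"   -> roughly  ^what is your ([^\s\d]+?)$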
def exception_to_signal(sig: Union[SignalException, signal.Signals]):
"""
Rollback any changes done by :py:func:`signal_to_exception`.
"""
if isinstance(sig, SignalException):
signum = sig.signum
else:
signum = sig.value
signal.signal(signum, signal.SIG_DFL) | Rollback any changes done by :py:func:`signal_to_exception`. | Below is the the instruction that describes the task:
### Input:
Rollback any changes done by :py:func:`signal_to_exception`.
### Response:
def exception_to_signal(sig: Union[SignalException, signal.Signals]):
"""
Rollback any changes done by :py:func:`signal_to_exception`.
"""
if isinstance(sig, SignalException):
signum = sig.signum
else:
signum = sig.value
signal.signal(signum, signal.SIG_DFL) |
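A small usage sketch of the intended round trip; either the caught SignalException or the raw signal can be passed:

exception_to_signal(signal.SIGTERM)   # restores the default (SIG_DFL) handler for SIGTERM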
def is_instance_running(self, instance_id):
"""Checks if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise
"""
instance = self._load_instance(instance_id)
if instance.update() == "running":
# If the instance is up&running, ensure it has an IP
# address.
if not instance.ip_address and self.request_floating_ip:
log.debug("Public ip address has to be assigned through "
"elasticluster.")
self._allocate_address(instance)
instance.update()
return True
else:
return False | Checks if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise | Below is the the instruction that describes the task:
### Input:
Checks if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise
### Response:
def is_instance_running(self, instance_id):
"""Checks if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise
"""
instance = self._load_instance(instance_id)
if instance.update() == "running":
# If the instance is up&running, ensure it has an IP
# address.
if not instance.ip_address and self.request_floating_ip:
log.debug("Public ip address has to be assigned through "
"elasticluster.")
self._allocate_address(instance)
instance.update()
return True
else:
return False |
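Hypothetical usage with an already-configured cloud provider object; the instance identifier is illustrative:

if provider.is_instance_running('i-0123456789abcdef0'):
    print('instance is up and has an IP address')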
def organization_subscription_delete(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/organization_subscriptions#delete-organization-subscription"
api_path = "/api/v2/organization_subscriptions/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, method="DELETE", **kwargs) | https://developer.zendesk.com/rest_api/docs/core/organization_subscriptions#delete-organization-subscription | Below is the the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/organization_subscriptions#delete-organization-subscription
### Response:
def organization_subscription_delete(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/organization_subscriptions#delete-organization-subscription"
api_path = "/api/v2/organization_subscriptions/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, method="DELETE", **kwargs) |
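A hedged call sketch; the client construction is assumed and the subscription id is illustrative:

zendesk.organization_subscription_delete(360012345)   # issues DELETE /api/v2/organization_subscriptions/360012345.json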
def inference(self, state_arr, limit=1000):
'''
Inference.
Args:
state_arr: `np.ndarray` of state.
limit: The number of inferencing.
Returns:
`list of `np.ndarray` of an optimal route.
'''
self.__inferencing_flag = True
agent_x, agent_y = np.where(state_arr[0] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
self.__create_enemy(self.__map_arr)
result_list = [(agent_x, agent_y, 0.0)]
result_val_list = [agent_x, agent_y]
for e in range(self.__enemy_num):
result_val_list.append(self.__enemy_pos_list[e][0])
result_val_list.append(self.__enemy_pos_list[e][1])
result_val_list.append(0.0)
result_list.append(tuple(result_val_list))
self.t = 0
while self.t < limit:
next_action_arr = self.extract_possible_actions(state_arr)
next_q_arr = self.function_approximator.inference_q(next_action_arr)
action_arr, q = self.select_action(next_action_arr, next_q_arr)
self.__move_enemy(action_arr)
agent_x, agent_y = np.where(action_arr[0] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
result_val_list = [agent_x, agent_y]
for e in range(self.__enemy_num):
result_val_list.append(self.__enemy_pos_list[e][0])
result_val_list.append(self.__enemy_pos_list[e][1])
try:
result_val_list.append(q[0])
except IndexError:
result_val_list.append(q)
result_list.append(tuple(result_val_list))
# Update State.
state_arr = self.update_state(state_arr, action_arr)
# Episode.
self.t += 1
# Check.
end_flag = self.check_the_end_flag(state_arr)
if end_flag is True:
break
return result_list | Inference.
Args:
state_arr: `np.ndarray` of state.
limit: The number of inferencing.
Returns:
`list of `np.ndarray` of an optimal route. | Below is the the instruction that describes the task:
### Input:
Inference.
Args:
state_arr: `np.ndarray` of state.
limit: The number of inferencing.
Returns:
`list of `np.ndarray` of an optimal route.
### Response:
def inference(self, state_arr, limit=1000):
'''
Inference.
Args:
state_arr: `np.ndarray` of state.
limit: The number of inferencing.
Returns:
`list of `np.ndarray` of an optimal route.
'''
self.__inferencing_flag = True
agent_x, agent_y = np.where(state_arr[0] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
self.__create_enemy(self.__map_arr)
result_list = [(agent_x, agent_y, 0.0)]
result_val_list = [agent_x, agent_y]
for e in range(self.__enemy_num):
result_val_list.append(self.__enemy_pos_list[e][0])
result_val_list.append(self.__enemy_pos_list[e][1])
result_val_list.append(0.0)
result_list.append(tuple(result_val_list))
self.t = 0
while self.t < limit:
next_action_arr = self.extract_possible_actions(state_arr)
next_q_arr = self.function_approximator.inference_q(next_action_arr)
action_arr, q = self.select_action(next_action_arr, next_q_arr)
self.__move_enemy(action_arr)
agent_x, agent_y = np.where(action_arr[0] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
result_val_list = [agent_x, agent_y]
for e in range(self.__enemy_num):
result_val_list.append(self.__enemy_pos_list[e][0])
result_val_list.append(self.__enemy_pos_list[e][1])
try:
result_val_list.append(q[0])
except IndexError:
result_val_list.append(q)
result_list.append(tuple(result_val_list))
# Update State.
state_arr = self.update_state(state_arr, action_arr)
# Episode.
self.t += 1
# Check.
end_flag = self.check_the_end_flag(state_arr)
if end_flag is True:
break
return result_list |
def from_mask_and_sub_grid_size(cls, mask, sub_grid_size=1):
"""Setup a sub-grid of the unmasked pixels, using a mask and a specified sub-grid size. The center of \
every unmasked pixel's sub-pixels give the grid's (y,x) arc-second coordinates.
Parameters
-----------
mask : Mask
The mask whose masked pixels are used to setup the sub-pixel grid_stack.
sub_grid_size : int
The size (sub_grid_size x sub_grid_size) of each unmasked pixels sub-grid.
"""
sub_grid_masked = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(
mask=mask,
pixel_scales=mask.pixel_scales,
sub_grid_size=sub_grid_size)
return SubGrid(sub_grid_masked, mask, sub_grid_size) | Setup a sub-grid of the unmasked pixels, using a mask and a specified sub-grid size. The center of \
every unmasked pixel's sub-pixels give the grid's (y,x) arc-second coordinates.
Parameters
-----------
mask : Mask
The mask whose masked pixels are used to setup the sub-pixel grid_stack.
sub_grid_size : int
The size (sub_grid_size x sub_grid_size) of each unmasked pixels sub-grid. | Below is the the instruction that describes the task:
### Input:
Setup a sub-grid of the unmasked pixels, using a mask and a specified sub-grid size. The center of \
every unmasked pixel's sub-pixels give the grid's (y,x) arc-second coordinates.
Parameters
-----------
mask : Mask
The mask whose masked pixels are used to setup the sub-pixel grid_stack.
sub_grid_size : int
The size (sub_grid_size x sub_grid_size) of each unmasked pixels sub-grid.
### Response:
def from_mask_and_sub_grid_size(cls, mask, sub_grid_size=1):
"""Setup a sub-grid of the unmasked pixels, using a mask and a specified sub-grid size. The center of \
every unmasked pixel's sub-pixels give the grid's (y,x) arc-second coordinates.
Parameters
-----------
mask : Mask
The mask whose masked pixels are used to setup the sub-pixel grid_stack.
sub_grid_size : int
The size (sub_grid_size x sub_grid_size) of each unmasked pixels sub-grid.
"""
sub_grid_masked = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(
mask=mask,
pixel_scales=mask.pixel_scales,
sub_grid_size=sub_grid_size)
return SubGrid(sub_grid_masked, mask, sub_grid_size) |
def generateToken(bits=32):
"""
Generates a random token based on the given parameters.
:return <str>
"""
if bits == 64:
hasher = hashlib.sha256
elif bits == 32:
hasher = hashlib.md5
else:
raise StandardError('Unknown bit level.')
return hasher(nstr(random.getrandbits(256))).hexdigest() | Generates a random token based on the given parameters.
:return <str> | Below is the the instruction that describes the task:
### Input:
Generates a random token based on the given parameters.
:return <str>
### Response:
def generateToken(bits=32):
"""
Generates a random token based on the given parameters.
:return <str>
"""
if bits == 64:
hasher = hashlib.sha256
elif bits == 32:
hasher = hashlib.md5
else:
raise StandardError('Unknown bit level.')
return hasher(nstr(random.getrandbits(256))).hexdigest() |
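Usage sketch; note that bits selects the hash function (md5 for 32, sha256 for 64), so the results are 32 and 64 hex characters respectively:

short_token = generateToken()          # md5 hexdigest, 32 hex characters
long_token = generateToken(bits=64)    # sha256 hexdigest, 64 hex characters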
def MempoolCheck(self):
"""
Checks the Mempool and removes any tx found on the Blockchain
Implemented to resolve https://github.com/CityOfZion/neo-python/issues/703
"""
txs = []
values = self.MemPool.values()
for tx in values:
txs.append(tx)
for tx in txs:
res = self.RemoveTransaction(tx)
if res:
logger.debug("found tx 0x%s on the blockchain ...removed from mempool" % tx.Hash) | Checks the Mempool and removes any tx found on the Blockchain
Implemented to resolve https://github.com/CityOfZion/neo-python/issues/703 | Below is the the instruction that describes the task:
### Input:
Checks the Mempool and removes any tx found on the Blockchain
Implemented to resolve https://github.com/CityOfZion/neo-python/issues/703
### Response:
def MempoolCheck(self):
"""
Checks the Mempool and removes any tx found on the Blockchain
Implemented to resolve https://github.com/CityOfZion/neo-python/issues/703
"""
txs = []
values = self.MemPool.values()
for tx in values:
txs.append(tx)
for tx in txs:
res = self.RemoveTransaction(tx)
if res:
logger.debug("found tx 0x%s on the blockchain ...removed from mempool" % tx.Hash) |
def format_h2(s, format="text", indents=0):
"""
Encloses string in format text
Args, Returns: see format_h1()
>>> print("\\n".join(format_h2("Header 2", indents=2)))
Header 2
--------
>>> print("\\n".join(format_h2("Header 2", "markdown", 2)))
## Header 2
"""
_CHAR = "-"
if format.startswith("text"):
return format_underline(s, _CHAR, indents)
elif format.startswith("markdown"):
return ["## {}".format(s)]
elif format.startswith("rest"):
return format_underline(s, _CHAR, 0) | Encloses string in format text
Args, Returns: see format_h1()
>>> print("\\n".join(format_h2("Header 2", indents=2)))
Header 2
--------
>>> print("\\n".join(format_h2("Header 2", "markdown", 2)))
## Header 2 | Below is the the instruction that describes the task:
### Input:
Encloses string in format text
Args, Returns: see format_h1()
>>> print("\\n".join(format_h2("Header 2", indents=2)))
Header 2
--------
>>> print("\\n".join(format_h2("Header 2", "markdown", 2)))
## Header 2
### Response:
def format_h2(s, format="text", indents=0):
"""
Encloses string in format text
Args, Returns: see format_h1()
>>> print("\\n".join(format_h2("Header 2", indents=2)))
Header 2
--------
>>> print("\\n".join(format_h2("Header 2", "markdown", 2)))
## Header 2
"""
_CHAR = "-"
if format.startswith("text"):
return format_underline(s, _CHAR, indents)
elif format.startswith("markdown"):
return ["## {}".format(s)]
elif format.startswith("rest"):
return format_underline(s, _CHAR, 0) |
def to_dict(self):
"""
MobID representation as dict
"""
material = {'Data1': self.Data1,
'Data2': self.Data2,
'Data3': self.Data3,
'Data4': list(self.Data4)
}
return {'material':material,
'length': self.length,
'instanceHigh': self.instanceHigh,
'instanceMid': self.instanceMid,
'instanceLow': self.instanceLow,
'SMPTELabel': list(self.SMPTELabel)
} | MobID representation as dict | Below is the the instruction that describes the task:
### Input:
MobID representation as dict
### Response:
def to_dict(self):
"""
MobID representation as dict
"""
material = {'Data1': self.Data1,
'Data2': self.Data2,
'Data3': self.Data3,
'Data4': list(self.Data4)
}
return {'material':material,
'length': self.length,
'instanceHigh': self.instanceHigh,
'instanceMid': self.instanceMid,
'instanceLow': self.instanceLow,
'SMPTELabel': list(self.SMPTELabel)
} |
def from_voxels(voxels):
"""
Converts a voxel list to an ndarray.
Arguments:
voxels (tuple[]): A list of coordinates indicating coordinates of
populated voxels in an ndarray.
Returns:
numpy.ndarray The result of the transformation.
"""
dimensions = len(voxels[0])
# Size each axis to the largest coordinate (+1, since coordinates are zero-based indices).
size = []
for d in range(dimensions):
    size.append(max([i[d] for i in voxels]) + 1)
result = numpy.zeros(size)
for v in voxels:
    result[tuple(v)] = 1
return result | Converts a voxel list to an ndarray.
Arguments:
voxels (tuple[]): A list of coordinates indicating coordinates of
populated voxels in an ndarray.
Returns:
numpy.ndarray The result of the transformation. | Below is the the instruction that describes the task:
### Input:
Converts a voxel list to an ndarray.
Arguments:
voxels (tuple[]): A list of coordinates indicating coordinates of
populated voxels in an ndarray.
Returns:
numpy.ndarray The result of the transformation.
### Response:
def from_voxels(voxels):
"""
Converts a voxel list to an ndarray.
Arguments:
voxels (tuple[]): A list of coordinates indicating coordinates of
populated voxels in an ndarray.
Returns:
numpy.ndarray The result of the transformation.
"""
dimensions = len(voxels[0])
# Size each axis to the largest coordinate (+1, since coordinates are zero-based indices).
size = []
for d in range(dimensions):
    size.append(max([i[d] for i in voxels]) + 1)
result = numpy.zeros(size)
for v in voxels:
    result[tuple(v)] = 1
return result |
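A small behavioral sketch (with the sizing fix applied above):

voxels = [(0, 0, 0), (1, 2, 3)]
arr = from_voxels(voxels)
# arr.shape == (2, 3, 4) and arr[1, 2, 3] == 1.0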
def enable_mp_crash_reporting():
"""
Monkey-patch the multiprocessing.Process class with our own CrashReportingProcess.
Any subsequent imports of multiprocessing.Process will reference CrashReportingProcess instead.
This function must be called before any imports to mulitprocessing in order for the monkey-patching to work.
"""
global mp_crash_reporting_enabled
multiprocessing.Process = multiprocessing.process.Process = CrashReportingProcess
mp_crash_reporting_enabled = True | Monkey-patch the multiprocessing.Process class with our own CrashReportingProcess.
Any subsequent imports of multiprocessing.Process will reference CrashReportingProcess instead.
This function must be called before any imports to mulitprocessing in order for the monkey-patching to work. | Below is the the instruction that describes the task:
### Input:
Monkey-patch the multiprocessing.Process class with our own CrashReportingProcess.
Any subsequent imports of multiprocessing.Process will reference CrashReportingProcess instead.
This function must be called before any imports to mulitprocessing in order for the monkey-patching to work.
### Response:
def enable_mp_crash_reporting():
"""
Monkey-patch the multiprocessing.Process class with our own CrashReportingProcess.
Any subsequent imports of multiprocessing.Process will reference CrashReportingProcess instead.
This function must be called before any imports to mulitprocessing in order for the monkey-patching to work.
"""
global mp_crash_reporting_enabled
multiprocessing.Process = multiprocessing.process.Process = CrashReportingProcess
mp_crash_reporting_enabled = True |
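Usage sketch; per the docstring, call it before anything else imports modules that use multiprocessing:

enable_mp_crash_reporting()
import my_worker_module   # hypothetical module that spawns multiprocessing.Process objects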
def _init_converters(types_map):
"""Prepares the converters for conversion of java types to python
objects.
types_map: Mapping of java.sql.Types field name to java.sql.Types
field constant value"""
global _converters
_converters = {}
for i in _DEFAULT_CONVERTERS:
const_val = types_map[i]
_converters[const_val] = _DEFAULT_CONVERTERS[i] | Prepares the converters for conversion of java types to python
objects.
types_map: Mapping of java.sql.Types field name to java.sql.Types
field constant value | Below is the the instruction that describes the task:
### Input:
Prepares the converters for conversion of java types to python
objects.
types_map: Mapping of java.sql.Types field name to java.sql.Types
field constant value
### Response:
def _init_converters(types_map):
"""Prepares the converters for conversion of java types to python
objects.
types_map: Mapping of java.sql.Types field name to java.sql.Types
field constant value"""
global _converters
_converters = {}
for i in _DEFAULT_CONVERTERS:
const_val = types_map[i]
_converters[const_val] = _DEFAULT_CONVERTERS[i] |
def export(self, name, columns, points):
"""Export the stats to the Statsd server."""
if name == self.plugins_to_export()[0] and self.buffer != {}:
# One complete loop have been done
logger.debug("Export stats ({}) to RESTful endpoint ({})".format(listkeys(self.buffer),
self.client))
# Export stats
post(self.client, json=self.buffer, allow_redirects=True)
# Reset buffer
self.buffer = {}
# Add current stat to the buffer
self.buffer[name] = dict(zip(columns, points)) | Export the stats to the Statsd server. | Below is the the instruction that describes the task:
### Input:
Export the stats to the Statsd server.
### Response:
def export(self, name, columns, points):
"""Export the stats to the Statsd server."""
if name == self.plugins_to_export()[0] and self.buffer != {}:
# One complete loop have been done
logger.debug("Export stats ({}) to RESTful endpoint ({})".format(listkeys(self.buffer),
self.client))
# Export stats
post(self.client, json=self.buffer, allow_redirects=True)
# Reset buffer
self.buffer = {}
# Add current stat to the buffer
self.buffer[name] = dict(zip(columns, points)) |
def x_values_ref(self, series):
"""
The Excel worksheet reference to the X values for this chart (not
including the column label).
"""
top_row = self.series_table_row_offset(series) + 2
bottom_row = top_row + len(series) - 1
return "Sheet1!$A$%d:$A$%d" % (top_row, bottom_row) | The Excel worksheet reference to the X values for this chart (not
including the column label). | Below is the the instruction that describes the task:
### Input:
The Excel worksheet reference to the X values for this chart (not
including the column label).
### Response:
def x_values_ref(self, series):
"""
The Excel worksheet reference to the X values for this chart (not
including the column label).
"""
top_row = self.series_table_row_offset(series) + 2
bottom_row = top_row + len(series) - 1
return "Sheet1!$A$%d:$A$%d" % (top_row, bottom_row) |
async def run_async(self):
"""
Starts the run loop and manages exceptions and cleanup.
"""
try:
await self.run_loop_async()
except Exception as err: # pylint: disable=broad-except
_logger.error("Run loop failed %r", err)
try:
_logger.info("Shutting down all pumps %r", self.host.guid)
await self.remove_all_pumps_async("Shutdown")
except Exception as err: # pylint: disable=broad-except
raise Exception("Failed to remove all pumps {!r}".format(err)) | Starts the run loop and manages exceptions and cleanup. | Below is the the instruction that describes the task:
### Input:
Starts the run loop and manages exceptions and cleanup.
### Response:
async def run_async(self):
"""
Starts the run loop and manages exceptions and cleanup.
"""
try:
await self.run_loop_async()
except Exception as err: # pylint: disable=broad-except
_logger.error("Run loop failed %r", err)
try:
_logger.info("Shutting down all pumps %r", self.host.guid)
await self.remove_all_pumps_async("Shutdown")
except Exception as err: # pylint: disable=broad-except
raise Exception("Failed to remove all pumps {!r}".format(err)) |
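A minimal driver sketch, assuming partition_manager is an already-constructed instance of the class this coroutine belongs to:

import asyncio

loop = asyncio.get_event_loop()
loop.run_until_complete(partition_manager.run_async())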
def animation_dialog(images, delay_s=1., loop=True, **kwargs):
'''
.. versionadded:: v0.19
Parameters
----------
images : list
Filepaths to images or :class:`gtk.Pixbuf` instances.
delay_s : float, optional
Number of seconds to display each frame.
Default: ``1.0``.
loop : bool, optional
If ``True``, restart animation after last image has been displayed.
Default: ``True``.
Returns
-------
gtk.MessageDialog
Message dialog with animation displayed in `gtk.Image` widget when
dialog is run.
'''
def _as_pixbuf(image):
if isinstance(image, types.StringTypes):
return gtk.gdk.pixbuf_new_from_file(image)
else:
return image
pixbufs = map(_as_pixbuf, images)
# Need this to support background thread execution with GTK.
gtk.gdk.threads_init()
dialog = gtk.MessageDialog(**kwargs)
# Append image to dialog content area.
image = gtk.Image()
content_area = dialog.get_content_area()
content_area.pack_start(image)
content_area.show_all()
stop_animation = threading.Event()
def _stop_animation(*args):
stop_animation.set()
def _animate(dialog):
def __animate():
if loop:
frames = it.cycle(pixbufs)
else:
frames = pixbufs
for pixbuf_i in frames:
gobject.idle_add(image.set_from_pixbuf, pixbuf_i)
if stop_animation.wait(delay_s):
break
thread = threading.Thread(target=__animate)
thread.daemon = True
thread.start()
dialog.connect('destroy', _stop_animation)
dialog.connect('show', _animate)
return dialog | .. versionadded:: v0.19
Parameters
----------
images : list
Filepaths to images or :class:`gtk.Pixbuf` instances.
delay_s : float, optional
Number of seconds to display each frame.
Default: ``1.0``.
loop : bool, optional
If ``True``, restart animation after last image has been displayed.
Default: ``True``.
Returns
-------
gtk.MessageDialog
Message dialog with animation displayed in `gtk.Image` widget when
dialog is run. | Below is the the instruction that describes the task:
### Input:
.. versionadded:: v0.19
Parameters
----------
images : list
Filepaths to images or :class:`gtk.Pixbuf` instances.
delay_s : float, optional
Number of seconds to display each frame.
Default: ``1.0``.
loop : bool, optional
If ``True``, restart animation after last image has been displayed.
Default: ``True``.
Returns
-------
gtk.MessageDialog
Message dialog with animation displayed in `gtk.Image` widget when
dialog is run.
### Response:
def animation_dialog(images, delay_s=1., loop=True, **kwargs):
'''
.. versionadded:: v0.19
Parameters
----------
images : list
Filepaths to images or :class:`gtk.Pixbuf` instances.
delay_s : float, optional
Number of seconds to display each frame.
Default: ``1.0``.
loop : bool, optional
If ``True``, restart animation after last image has been displayed.
Default: ``True``.
Returns
-------
gtk.MessageDialog
Message dialog with animation displayed in `gtk.Image` widget when
dialog is run.
'''
def _as_pixbuf(image):
if isinstance(image, types.StringTypes):
return gtk.gdk.pixbuf_new_from_file(image)
else:
return image
pixbufs = map(_as_pixbuf, images)
# Need this to support background thread execution with GTK.
gtk.gdk.threads_init()
dialog = gtk.MessageDialog(**kwargs)
# Append image to dialog content area.
image = gtk.Image()
content_area = dialog.get_content_area()
content_area.pack_start(image)
content_area.show_all()
stop_animation = threading.Event()
def _stop_animation(*args):
stop_animation.set()
def _animate(dialog):
def __animate():
if loop:
frames = it.cycle(pixbufs)
else:
frames = pixbufs
for pixbuf_i in frames:
gobject.idle_add(image.set_from_pixbuf, pixbuf_i)
if stop_animation.wait(delay_s):
break
thread = threading.Thread(target=__animate)
thread.daemon = True
thread.start()
dialog.connect('destroy', _stop_animation)
dialog.connect('show', _animate)
return dialog |
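A usage sketch; the image paths and dialog keyword arguments are illustrative:

dialog = animation_dialog(['frame-0.png', 'frame-1.png'], delay_s=0.5, buttons=gtk.BUTTONS_OK)
dialog.run()
dialog.destroy()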
def parent_of(self, name):
"""
go to parent of node with name, and set as cur_node. Useful
for creating new paragraphs
"""
if not self._in_tag(name):
return
node = self.cur_node
while node.tag != name:
node = node.getparent()
self.cur_node = node.getparent() | go to parent of node with name, and set as cur_node. Useful
for creating new paragraphs | Below is the the instruction that describes the task:
### Input:
go to parent of node with name, and set as cur_node. Useful
for creating new paragraphs
### Response:
def parent_of(self, name):
"""
go to parent of node with name, and set as cur_node. Useful
for creating new paragraphs
"""
if not self._in_tag(name):
return
node = self.cur_node
while node.tag != name:
node = node.getparent()
self.cur_node = node.getparent() |