code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def merge_pdb_range_pairs(prs):
'''Takes in a list of PDB residue IDs (including insertion codes) specifying ranges and returns a sorted list of merged, sorted ranges.
This works as above but we have to split the residues into pairs as "1A" > "19".
'''
new_prs = []
sprs = [sorted((split_pdb_residue(p[0]), split_pdb_residue(p[1]))) for p in prs]
sprs = sorted(sprs)
merged = False
x = 0
from klab import colortext
while x < len(sprs):
newx = x + 1
new_pair = list(sprs[x])
for y in range(x + 1, len(sprs)):
if new_pair[0] <= (sprs[y][0][0] - 1, sprs[y][0][1]) <= new_pair[1]:
new_pair[0] = min(new_pair[0], sprs[y][0])
new_pair[1] = max(new_pair[1], sprs[y][1])
newx = y + 1
if new_pair not in new_prs:
new_prs.append(new_pair)
x = newx
return new_prs | Takes in a list of PDB residue IDs (including insertion codes) specifying ranges and returns a sorted list of merged, sorted ranges.
This works as above but we have to split the residues into pairs as "1A" > "19". | Below is the instruction that describes the task:
### Input:
Takes in a list of PDB residue IDs (including insertion codes) specifying ranges and returns a sorted list of merged, sorted ranges.
This works as above but we have to split the residues into pairs as "1A" > "19".
### Response:
def merge_pdb_range_pairs(prs):
'''Takes in a list of PDB residue IDs (including insertion codes) specifying ranges and returns a sorted list of merged, sorted ranges.
This works as above but we have to split the residues into pairs as "1A" > "19".
'''
new_prs = []
sprs = [sorted((split_pdb_residue(p[0]), split_pdb_residue(p[1]))) for p in prs]
sprs = sorted(sprs)
merged = False
x = 0
from klab import colortext
while x < len(sprs):
newx = x + 1
new_pair = list(sprs[x])
for y in range(x + 1, len(sprs)):
if new_pair[0] <= (sprs[y][0][0] - 1, sprs[y][0][1]) <= new_pair[1]:
new_pair[0] = min(new_pair[0], sprs[y][0])
new_pair[1] = max(new_pair[1], sprs[y][1])
newx = y + 1
if new_pair not in new_prs:
new_prs.append(new_pair)
x = newx
return new_prs |
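To make the merging logic above concrete, here is a minimal, self-contained sketch of the same idea. The `split_pdb_residue` helper is not shown in the snippet, so the version below is a hypothetical stand-in that turns `"19A"` into `(19, "A")` so that ranges compare numerically instead of lexically:

```python
import re

def split_pdb_residue(res):
    # Hypothetical stand-in for the helper used above: "19A" -> (19, "A"), "7" -> (7, "")
    match = re.match(r"^(-?\d+)(.*)$", str(res))
    return int(match.group(1)), match.group(2)

def merge_ranges(pairs):
    """Merge overlapping or adjacent (start, end) residue-ID ranges."""
    split = sorted(sorted((split_pdb_residue(a), split_pdb_residue(b))) for a, b in pairs)
    merged = []
    for start, end in split:
        if merged and start <= (merged[-1][1][0] + 1, merged[-1][1][1]):
            merged[-1][1] = max(merged[-1][1], end)   # extend the previous range
        else:
            merged.append([start, end])
    return merged

print(merge_ranges([("1", "5"), ("6", "9A"), ("20", "25")]))
# [[(1, ''), (9, 'A')], [(20, ''), (25, '')]]
```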
def unpublish_one_version(self, **args):
'''
Sends a PID update request for the unpublication of one version
of a dataset currently published at the given data node.
Either the handle or the pair of drs_id and version_number
have to be provided, otherwise an exception will occur.
The consumer will of course check the PID request message's
timestamp with the timestamp of the last publication, so that
republications in the mean time are not unpublished.
The unpublication of the files is included in this method.
:param handle: Optional. The handle of the dataset
to be unpublished.
:param drs_id: Optional. The dataset id of the dataset
to be unpublished.
:param version_number: Optional. The version number of
the dataset to be unpublished.
:raises: ArgumentError: If not enough arguments are passed
to identify the dataset, or if no data node was specified
during library init.
'''
# Check args
optional_args = ['handle', 'drs_id', 'version_number']
esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args)
# Check if data node is given
if self.__data_node is None:
msg = 'No data_node given (but it is mandatory for unpublication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Unpublish
assistant = esgfpid.assistant.unpublish.AssistantOneVersion(
drs_id = args['drs_id'],
data_node = self.__data_node,
prefix=self.prefix,
coupler=self.__coupler,
message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string()
)
assistant.unpublish_one_dataset_version(
handle = args['handle'],
version_number = args['version_number']
) | Sends a PID update request for the unpublication of one version
of a dataset currently published at the given data node.
Either the handle or the pair of drs_id and version_number
have to be provided, otherwise an exception will occur.
The consumer will of course check the PID request message's
timestamp with the timestamp of the last publication, so that
republications in the mean time are not unpublished.
The unpublication of the files is included in this method.
:param handle: Optional. The handle of the dataset
to be unpublished.
:param drs_id: Optional. The dataset id of the dataset
to be unpublished.
:param version_number: Optional. The version number of
the dataset to be unpublished.
:raises: ArgumentError: If not enough arguments are passed
to identify the dataset, or if no data node was specified
during library init. | Below is the instruction that describes the task:
### Input:
Sends a PID update request for the unpublication of one version
of a dataset currently published at the given data node.
Either the handle or the pair of drs_id and version_number
have to be provided, otherwise an exception will occur.
The consumer will of course check the PID request message's
timestamp with the timestamp of the last publication, so that
republications in the mean time are not unpublished.
The unpublication of the files is included in this method.
:param handle: Optional. The handle of the dataset
to be unpublished.
:param drs_id: Optional. The dataset id of the dataset
to be unpublished.
:param version_number: Optional. The version number of
the dataset to be unpublished.
:raises: ArgumentError: If not enough arguments are passed
to identify the dataset, or if no data node was specified
during library init.
### Response:
def unpublish_one_version(self, **args):
'''
Sends a PID update request for the unpublication of one version
of a dataset currently published at the given data node.
Either the handle or the pair of drs_id and version_number
have to be provided, otherwise an exception will occur.
The consumer will of course check the PID request message's
timestamp with the timestamp of the last publication, so that
republications in the mean time are not unpublished.
The unpublication of the files is included in this method.
:param handle: Optional. The handle of the dataset
to be unpublished.
:param drs_id: Optional. The dataset id of the dataset
to be unpublished.
:param version_number: Optional. The version number of
the dataset to be unpublished.
:raises: ArgumentError: If not enough arguments are passed
to identify the dataset, or if no data node was specified
during library init.
'''
# Check args
optional_args = ['handle', 'drs_id', 'version_number']
esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args)
# Check if data node is given
if self.__data_node is None:
msg = 'No data_node given (but it is mandatory for unpublication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Unpublish
assistant = esgfpid.assistant.unpublish.AssistantOneVersion(
drs_id = args['drs_id'],
data_node = self.__data_node,
prefix=self.prefix,
coupler=self.__coupler,
message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string()
)
assistant.unpublish_one_dataset_version(
handle = args['handle'],
version_number = args['version_number']
) |
def print_markdown(data, title=None):
"""Print data in GitHub-flavoured Markdown format for issues etc.
data (dict or list of tuples): Label/value pairs.
title (unicode or None): Title, will be rendered as headline 2.
"""
def excl_value(value):
# contains path, i.e. personal info
return isinstance(value, basestring_) and Path(value).exists()
if isinstance(data, dict):
data = list(data.items())
markdown = ["* **{}:** {}".format(l, unicode_(v))
for l, v in data if not excl_value(v)]
if title:
print("\n## {}".format(title))
print('\n{}\n'.format('\n'.join(markdown))) | Print data in GitHub-flavoured Markdown format for issues etc.
data (dict or list of tuples): Label/value pairs.
title (unicode or None): Title, will be rendered as headline 2. | Below is the instruction that describes the task:
### Input:
Print data in GitHub-flavoured Markdown format for issues etc.
data (dict or list of tuples): Label/value pairs.
title (unicode or None): Title, will be rendered as headline 2.
### Response:
def print_markdown(data, title=None):
"""Print data in GitHub-flavoured Markdown format for issues etc.
data (dict or list of tuples): Label/value pairs.
title (unicode or None): Title, will be rendered as headline 2.
"""
def excl_value(value):
# contains path, i.e. personal info
return isinstance(value, basestring_) and Path(value).exists()
if isinstance(data, dict):
data = list(data.items())
markdown = ["* **{}:** {}".format(l, unicode_(v))
for l, v in data if not excl_value(v)]
if title:
print("\n## {}".format(title))
print('\n{}\n'.format('\n'.join(markdown))) |
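For reference, a tiny self-contained illustration of the Markdown this rendering style produces (plain Python 3, without the path-filtering and the Python 2 compatibility shims used above; the labels and values are made up):

```python
data = {"Python version": "3.8.10", "Platform": "Linux-x86_64"}
lines = ["* **{}:** {}".format(label, value) for label, value in data.items()]
print("\n## Environment")
print("\n{}\n".format("\n".join(lines)))
# Output:
# ## Environment
#
# * **Python version:** 3.8.10
# * **Platform:** Linux-x86_64
```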
def read_raster_window(
input_files,
tile,
indexes=None,
resampling="nearest",
src_nodata=None,
dst_nodata=None,
gdal_opts=None
):
"""
Return NumPy arrays from an input raster.
NumPy arrays are reprojected and resampled to tile properties from input
raster. If tile boundaries cross the antimeridian, data on the other side
of the antimeridian will be read and concatenated to the numpy array
accordingly.
Parameters
----------
input_files : string or list
path to a raster file or list of paths to multiple raster files readable by
rasterio.
tile : Tile
a Tile object
indexes : list or int
a list of band numbers; None will read all.
resampling : string
one of "nearest", "average", "bilinear" or "lanczos"
src_nodata : int or float, optional
if not set, the nodata value from the source dataset will be used
dst_nodata : int or float, optional
if not set, the nodata value from the source dataset will be used
gdal_opts : dict
GDAL options passed on to rasterio.Env()
Returns
-------
raster : MaskedArray
"""
with rasterio.Env(
**get_gdal_options(
gdal_opts,
is_remote=path_is_remote(
input_files[0] if isinstance(input_files, list) else input_files, s3=True
)
)
) as env:
logger.debug("reading %s with GDAL options %s", input_files, env.options)
return _read_raster_window(
input_files,
tile,
indexes=indexes,
resampling=resampling,
src_nodata=src_nodata,
dst_nodata=dst_nodata
) | Return NumPy arrays from an input raster.
NumPy arrays are reprojected and resampled to tile properties from input
raster. If tile boundaries cross the antimeridian, data on the other side
of the antimeridian will be read and concatenated to the numpy array
accordingly.
Parameters
----------
input_files : string or list
path to a raster file or list of paths to multiple raster files readable by
rasterio.
tile : Tile
a Tile object
indexes : list or int
a list of band numbers; None will read all.
resampling : string
one of "nearest", "average", "bilinear" or "lanczos"
src_nodata : int or float, optional
if not set, the nodata value from the source dataset will be used
dst_nodata : int or float, optional
if not set, the nodata value from the source dataset will be used
gdal_opts : dict
GDAL options passed on to rasterio.Env()
Returns
-------
raster : MaskedArray | Below is the instruction that describes the task:
### Input:
Return NumPy arrays from an input raster.
NumPy arrays are reprojected and resampled to tile properties from input
raster. If tile boundaries cross the antimeridian, data on the other side
of the antimeridian will be read and concatenated to the numpy array
accordingly.
Parameters
----------
input_files : string or list
path to a raster file or list of paths to multiple raster files readable by
rasterio.
tile : Tile
a Tile object
indexes : list or int
a list of band numbers; None will read all.
resampling : string
one of "nearest", "average", "bilinear" or "lanczos"
src_nodata : int or float, optional
if not set, the nodata value from the source dataset will be used
dst_nodata : int or float, optional
if not set, the nodata value from the source dataset will be used
gdal_opts : dict
GDAL options passed on to rasterio.Env()
Returns
-------
raster : MaskedArray
### Response:
def read_raster_window(
input_files,
tile,
indexes=None,
resampling="nearest",
src_nodata=None,
dst_nodata=None,
gdal_opts=None
):
"""
Return NumPy arrays from an input raster.
NumPy arrays are reprojected and resampled to tile properties from input
raster. If tile boundaries cross the antimeridian, data on the other side
of the antimeridian will be read and concatenated to the numpy array
accordingly.
Parameters
----------
input_files : string or list
path to a raster file or list of paths to multiple raster files readable by
rasterio.
tile : Tile
a Tile object
indexes : list or int
a list of band numbers; None will read all.
resampling : string
one of "nearest", "average", "bilinear" or "lanczos"
src_nodata : int or float, optional
if not set, the nodata value from the source dataset will be used
dst_nodata : int or float, optional
if not set, the nodata value from the source dataset will be used
gdal_opts : dict
GDAL options passed on to rasterio.Env()
Returns
-------
raster : MaskedArray
"""
with rasterio.Env(
**get_gdal_options(
gdal_opts,
is_remote=path_is_remote(
input_files[0] if isinstance(input_files, list) else input_files, s3=True
)
)
) as env:
logger.debug("reading %s with GDAL options %s", input_files, env.options)
return _read_raster_window(
input_files,
tile,
indexes=indexes,
resampling=resampling,
src_nodata=src_nodata,
dst_nodata=dst_nodata
) |
async def _async_listen(self, callback=None):
"""Listen loop."""
while True:
if not self._running:
return
try:
packet = await self.get_json(
URL_LISTEN.format(self._url), timeout=30, exceptions=True)
except asyncio.TimeoutError:
continue
except aiohttp.client_exceptions.ClientError as exc:
_LOGGER.warning("ClientError: %s", exc)
self._sleep_task = self.loop.create_task(asyncio.sleep(30))
try:
await self._sleep_task
except asyncio.CancelledError:
pass
self._sleep_task = None
continue
if isinstance(packet, dict) and QS_CMD in packet:
_LOGGER.debug("callback( %s )", packet)
try:
callback(packet)
except Exception as err: # pylint: disable=broad-except
_LOGGER.error("Exception in callback\nType: %s: %s",
type(err), err)
else:
_LOGGER.debug("unknown packet? %s", packet) | Listen loop. | Below is the the instruction that describes the task:
### Input:
Listen loop.
### Response:
async def _async_listen(self, callback=None):
"""Listen loop."""
while True:
if not self._running:
return
try:
packet = await self.get_json(
URL_LISTEN.format(self._url), timeout=30, exceptions=True)
except asyncio.TimeoutError:
continue
except aiohttp.client_exceptions.ClientError as exc:
_LOGGER.warning("ClientError: %s", exc)
self._sleep_task = self.loop.create_task(asyncio.sleep(30))
try:
await self._sleep_task
except asyncio.CancelledError:
pass
self._sleep_task = None
continue
if isinstance(packet, dict) and QS_CMD in packet:
_LOGGER.debug("callback( %s )", packet)
try:
callback(packet)
except Exception as err: # pylint: disable=broad-except
_LOGGER.error("Exception in callback\nType: %s: %s",
type(err), err)
else:
_LOGGER.debug("unknown packet? %s", packet) |
def node(self, source, args=(), env={}):
"""
Calls node with an inline source.
Returns decoded output of stdout and stderr; decoding determine
by locale.
"""
return self._exec(self.node_bin, source, args=args, env=env) | Calls node with an inline source.
Returns decoded output of stdout and stderr; decoding determine
by locale. | Below is the instruction that describes the task:
### Input:
Calls node with an inline source.
Returns decoded output of stdout and stderr; decoding determine
by locale.
### Response:
def node(self, source, args=(), env={}):
"""
Calls node with an inline source.
Returns decoded output of stdout and stderr; decoding determine
by locale.
"""
return self._exec(self.node_bin, source, args=args, env=env) |
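The `_exec` helper is not shown, so the following is only a sketch of how such an inline-source call is commonly made with `subprocess`, assuming the source is handed to `node` via its `-e`/`--eval` flag and decoded with the locale's preferred encoding:

```python
import locale
import subprocess

def run_node(source, args=(), node_bin="node"):
    """Run `node -e <source>` and return (stdout, stderr) decoded via the locale."""
    proc = subprocess.Popen(
        [node_bin, "-e", source, *args],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    encoding = locale.getpreferredencoding()
    return out.decode(encoding), err.decode(encoding)

# Requires Node.js on PATH:
# run_node("console.log(1 + 1)")  ->  ("2\n", "")
```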
def handle_basic_container_args(options, parser=None):
"""Handle the options specified by add_basic_container_args().
@return: a dict that can be used as kwargs for the ContainerExecutor constructor
"""
dir_modes = {}
error_fn = parser.error if parser else sys.exit
def handle_dir_mode(path, mode):
path = os.path.abspath(path)
if not os.path.isdir(path):
error_fn(
"Cannot specify directory mode for '{}' because it does not exist or is no directory."
.format(path))
if path in dir_modes:
error_fn("Cannot specify multiple directory modes for '{}'.".format(path))
dir_modes[path] = mode
for path in options.hidden_dir:
handle_dir_mode(path, DIR_HIDDEN)
for path in options.read_only_dir:
handle_dir_mode(path, DIR_READ_ONLY)
for path in options.overlay_dir:
handle_dir_mode(path, DIR_OVERLAY)
for path in options.full_access_dir:
handle_dir_mode(path, DIR_FULL_ACCESS)
if options.keep_tmp:
if "/tmp" in dir_modes and not dir_modes["/tmp"] == DIR_FULL_ACCESS:
error_fn("Cannot specify both --keep-tmp and --hidden-dir /tmp.")
dir_modes["/tmp"] = DIR_FULL_ACCESS
elif not "/tmp" in dir_modes:
dir_modes["/tmp"] = DIR_HIDDEN
if not "/" in dir_modes:
dir_modes["/"] = DIR_OVERLAY
if not "/run" in dir_modes:
dir_modes["/run"] = DIR_HIDDEN
if options.container_system_config:
if options.network_access:
logging.warning("The container configuration disables DNS, "
"host lookups will fail despite --network-access. "
"Consider using --keep-system-config.")
else:
# /etc/resolv.conf is necessary for DNS lookups and on many systems is a symlink
# to either /run/resolvconf/resolv.conf or /run/systemd/resolve/sub-resolve.conf,
# so we keep that directory accessible as well.
if not "/run/resolvconf" in dir_modes and os.path.isdir("/run/resolvconf"):
dir_modes["/run/resolvconf"] = DIR_READ_ONLY
if not "/run/systemd/resolve" in dir_modes and os.path.isdir("/run/systemd/resolve"):
dir_modes["/run/systemd/resolve"] = DIR_READ_ONLY
return {
'network_access': options.network_access,
'container_tmpfs': options.tmpfs,
'container_system_config': options.container_system_config,
'dir_modes': dir_modes,
} | Handle the options specified by add_basic_container_args().
@return: a dict that can be used as kwargs for the ContainerExecutor constructor | Below is the instruction that describes the task:
### Input:
Handle the options specified by add_basic_container_args().
@return: a dict that can be used as kwargs for the ContainerExecutor constructor
### Response:
def handle_basic_container_args(options, parser=None):
"""Handle the options specified by add_basic_container_args().
@return: a dict that can be used as kwargs for the ContainerExecutor constructor
"""
dir_modes = {}
error_fn = parser.error if parser else sys.exit
def handle_dir_mode(path, mode):
path = os.path.abspath(path)
if not os.path.isdir(path):
error_fn(
"Cannot specify directory mode for '{}' because it does not exist or is no directory."
.format(path))
if path in dir_modes:
error_fn("Cannot specify multiple directory modes for '{}'.".format(path))
dir_modes[path] = mode
for path in options.hidden_dir:
handle_dir_mode(path, DIR_HIDDEN)
for path in options.read_only_dir:
handle_dir_mode(path, DIR_READ_ONLY)
for path in options.overlay_dir:
handle_dir_mode(path, DIR_OVERLAY)
for path in options.full_access_dir:
handle_dir_mode(path, DIR_FULL_ACCESS)
if options.keep_tmp:
if "/tmp" in dir_modes and not dir_modes["/tmp"] == DIR_FULL_ACCESS:
error_fn("Cannot specify both --keep-tmp and --hidden-dir /tmp.")
dir_modes["/tmp"] = DIR_FULL_ACCESS
elif not "/tmp" in dir_modes:
dir_modes["/tmp"] = DIR_HIDDEN
if not "/" in dir_modes:
dir_modes["/"] = DIR_OVERLAY
if not "/run" in dir_modes:
dir_modes["/run"] = DIR_HIDDEN
if options.container_system_config:
if options.network_access:
logging.warning("The container configuration disables DNS, "
"host lookups will fail despite --network-access. "
"Consider using --keep-system-config.")
else:
# /etc/resolv.conf is necessary for DNS lookups and on many systems is a symlink
# to either /run/resolvconf/resolv.conf or /run/systemd/resolve/sub-resolve.conf,
# so we keep that directory accessible as well.
if not "/run/resolvconf" in dir_modes and os.path.isdir("/run/resolvconf"):
dir_modes["/run/resolvconf"] = DIR_READ_ONLY
if not "/run/systemd/resolve" in dir_modes and os.path.isdir("/run/systemd/resolve"):
dir_modes["/run/systemd/resolve"] = DIR_READ_ONLY
return {
'network_access': options.network_access,
'container_tmpfs': options.tmpfs,
'container_system_config': options.container_system_config,
'dir_modes': dir_modes,
} |
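The defaulting rules for the directory modes are the core of this function; a toy, dependency-free sketch of just that part is shown below (the `DIR_*` constants are stand-ins, not benchexec's real values, and the error checks are omitted):

```python
DIR_HIDDEN, DIR_READ_ONLY, DIR_OVERLAY, DIR_FULL_ACCESS = range(4)  # illustrative only

def default_dir_modes(dir_modes, keep_tmp=False):
    """Apply the same fallbacks as above: /tmp, / and /run get modes unless overridden."""
    dir_modes = dict(dir_modes)
    if keep_tmp:
        dir_modes["/tmp"] = DIR_FULL_ACCESS
    else:
        dir_modes.setdefault("/tmp", DIR_HIDDEN)
    dir_modes.setdefault("/", DIR_OVERLAY)
    dir_modes.setdefault("/run", DIR_HIDDEN)
    return dir_modes

print(default_dir_modes({"/home/me": DIR_READ_ONLY}))
# {'/home/me': 1, '/tmp': 0, '/': 2, '/run': 0}
```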
def id_to_object(self, line):
"""
Resolves an ip adres to a range object, creating it if it doesn't exists.
"""
result = Range.get(line, ignore=404)
if not result:
result = Range(range=line)
result.save()
return result | Resolves an ip adres to a range object, creating it if it doesn't exists. | Below is the instruction that describes the task:
### Input:
Resolves an ip adres to a range object, creating it if it doesn't exists.
### Response:
def id_to_object(self, line):
"""
Resolves an ip adres to a range object, creating it if it doesn't exists.
"""
result = Range.get(line, ignore=404)
if not result:
result = Range(range=line)
result.save()
return result |
def describe_export(self, export_type):
"""
Fetch metadata for an export.
- **export_type** is a string specifying which type of export to look
up.
Returns a :py:class:`dict` containing metadata for the export.
"""
if export_type in TALK_EXPORT_TYPES:
return talk.get_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)[0]
return self.http_get(
self._export_path(export_type),
)[0] | Fetch metadata for an export.
- **export_type** is a string specifying which type of export to look
up.
Returns a :py:class:`dict` containing metadata for the export. | Below is the instruction that describes the task:
### Input:
Fetch metadata for an export.
- **export_type** is a string specifying which type of export to look
up.
Returns a :py:class:`dict` containing metadata for the export.
### Response:
def describe_export(self, export_type):
"""
Fetch metadata for an export.
- **export_type** is a string specifying which type of export to look
up.
Returns a :py:class:`dict` containing metadata for the export.
"""
if export_type in TALK_EXPORT_TYPES:
return talk.get_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)[0]
return self.http_get(
self._export_path(export_type),
)[0] |
def json_get_data(filename):
"""Get data from json file
"""
with open(filename) as fp:
json_data = json.load(fp)
return json_data
return False | Get data from json file | Below is the instruction that describes the task:
### Input:
Get data from json file
### Response:
def json_get_data(filename):
"""Get data from json file
"""
with open(filename) as fp:
json_data = json.load(fp)
return json_data
return False |
def process_sub_shrink(ref, alt_str):
"""Process substution where the string shrink"""
if len(ref) == 0:
raise exceptions.InvalidRecordException("Invalid VCF, empty REF")
elif len(ref) == 1:
if ref[0] == alt_str[0]:
return record.Substitution(record.INS, alt_str)
else:
return record.Substitution(record.INDEL, alt_str)
else:
return record.Substitution(record.INDEL, alt_str) | Process substution where the string shrink | Below is the instruction that describes the task:
### Input:
Process substution where the string shrink
### Response:
def process_sub_shrink(ref, alt_str):
"""Process substution where the string shrink"""
if len(ref) == 0:
raise exceptions.InvalidRecordException("Invalid VCF, empty REF")
elif len(ref) == 1:
if ref[0] == alt_str[0]:
return record.Substitution(record.INS, alt_str)
else:
return record.Substitution(record.INDEL, alt_str)
else:
return record.Substitution(record.INDEL, alt_str) |
def fftw_multi_normxcorr(template_array, stream_array, pad_array, seed_ids,
cores_inner, cores_outer):
"""
Use a C loop rather than a Python loop - in some cases this will be fast.
:type template_array: dict
:param template_array:
:type stream_array: dict
:param stream_array:
:type pad_array: dict
:param pad_array:
:type seed_ids: list
:param seed_ids:
rtype: np.ndarray, list
:return: 3D Array of cross-correlations and list of used channels.
"""
utilslib = _load_cdll('libutils')
utilslib.multi_normxcorr_fftw.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float32,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_long, ctypes.c_long, ctypes.c_long,
np.ctypeslib.ndpointer(dtype=np.float32,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_long,
np.ctypeslib.ndpointer(dtype=np.float32,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_long,
np.ctypeslib.ndpointer(dtype=np.intc,
flags=native_str('C_CONTIGUOUS')),
np.ctypeslib.ndpointer(dtype=np.intc,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_int, ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.intc,
flags=native_str('C_CONTIGUOUS'))]
utilslib.multi_normxcorr_fftw.restype = ctypes.c_int
'''
Arguments are:
templates (stacked [ch_1-t_1, ch_1-t_2, ..., ch_2-t_1, ch_2-t_2, ...])
number of templates
template length
number of channels
image (stacked [ch_1, ch_2, ..., ch_n])
image length
cross-correlations (stacked as per image)
fft-length
used channels (stacked as per templates)
pad array (stacked as per templates)
'''
# pre processing
used_chans = []
template_len = template_array[seed_ids[0]].shape[1]
for seed_id in seed_ids:
used_chans.append(~np.isnan(template_array[seed_id]).any(axis=1))
template_array[seed_id] = (
(template_array[seed_id] -
template_array[seed_id].mean(axis=-1, keepdims=True)) / (
template_array[seed_id].std(axis=-1, keepdims=True) *
template_len))
template_array[seed_id] = np.nan_to_num(template_array[seed_id])
n_channels = len(seed_ids)
n_templates = template_array[seed_ids[0]].shape[0]
image_len = stream_array[seed_ids[0]].shape[0]
fft_len = next_fast_len(template_len + image_len - 1)
template_array = np.ascontiguousarray([template_array[x]
for x in seed_ids],
dtype=np.float32)
for x in seed_ids:
# Check that stream is non-zero and above variance threshold
if not np.all(stream_array[x] == 0) and np.var(stream_array[x]) < 1e-8:
# Apply gain
stream_array *= 1e8
warnings.warn("Low variance found for {0}, applying gain "
"to stabilise correlations".format(x))
stream_array = np.ascontiguousarray([stream_array[x] for x in seed_ids],
dtype=np.float32)
cccs = np.zeros((n_templates, image_len - template_len + 1),
np.float32)
used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc)
pad_array_np = np.ascontiguousarray([pad_array[seed_id]
for seed_id in seed_ids],
dtype=np.intc)
variance_warnings = np.ascontiguousarray(
np.zeros(n_channels), dtype=np.intc)
# call C function
ret = utilslib.multi_normxcorr_fftw(
template_array, n_templates, template_len, n_channels, stream_array,
image_len, cccs, fft_len, used_chans_np, pad_array_np, cores_outer,
cores_inner, variance_warnings)
if ret < 0:
raise MemoryError("Memory allocation failed in correlation C-code")
elif ret not in [0, 999]:
print('Error in C code (possible normalisation error)')
print('Maximum cccs %f at %s' %
(cccs.max(), np.unravel_index(cccs.argmax(), cccs.shape)))
print('Minimum cccs %f at %s' %
(cccs.min(), np.unravel_index(cccs.argmin(), cccs.shape)))
raise CorrelationError("Internal correlation error")
elif ret == 999:
warnings.warn("Some correlations not computed, are there "
"zeros in data? If not, consider increasing gain.")
for i, variance_warning in enumerate(variance_warnings):
if variance_warning and variance_warning > template_len:
warnings.warn("Low variance found in {0} places for {1},"
" check result.".format(variance_warning,
seed_ids[i]))
return cccs, used_chans | Use a C loop rather than a Python loop - in some cases this will be fast.
:type template_array: dict
:param template_array:
:type stream_array: dict
:param stream_array:
:type pad_array: dict
:param pad_array:
:type seed_ids: list
:param seed_ids:
rtype: np.ndarray, list
:return: 3D Array of cross-correlations and list of used channels. | Below is the instruction that describes the task:
### Input:
Use a C loop rather than a Python loop - in some cases this will be fast.
:type template_array: dict
:param template_array:
:type stream_array: dict
:param stream_array:
:type pad_array: dict
:param pad_array:
:type seed_ids: list
:param seed_ids:
rtype: np.ndarray, list
:return: 3D Array of cross-correlations and list of used channels.
### Response:
def fftw_multi_normxcorr(template_array, stream_array, pad_array, seed_ids,
cores_inner, cores_outer):
"""
Use a C loop rather than a Python loop - in some cases this will be fast.
:type template_array: dict
:param template_array:
:type stream_array: dict
:param stream_array:
:type pad_array: dict
:param pad_array:
:type seed_ids: list
:param seed_ids:
rtype: np.ndarray, list
:return: 3D Array of cross-correlations and list of used channels.
"""
utilslib = _load_cdll('libutils')
utilslib.multi_normxcorr_fftw.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float32,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_long, ctypes.c_long, ctypes.c_long,
np.ctypeslib.ndpointer(dtype=np.float32,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_long,
np.ctypeslib.ndpointer(dtype=np.float32,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_long,
np.ctypeslib.ndpointer(dtype=np.intc,
flags=native_str('C_CONTIGUOUS')),
np.ctypeslib.ndpointer(dtype=np.intc,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_int, ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.intc,
flags=native_str('C_CONTIGUOUS'))]
utilslib.multi_normxcorr_fftw.restype = ctypes.c_int
'''
Arguments are:
templates (stacked [ch_1-t_1, ch_1-t_2, ..., ch_2-t_1, ch_2-t_2, ...])
number of templates
template length
number of channels
image (stacked [ch_1, ch_2, ..., ch_n])
image length
cross-correlations (stacked as per image)
fft-length
used channels (stacked as per templates)
pad array (stacked as per templates)
'''
# pre processing
used_chans = []
template_len = template_array[seed_ids[0]].shape[1]
for seed_id in seed_ids:
used_chans.append(~np.isnan(template_array[seed_id]).any(axis=1))
template_array[seed_id] = (
(template_array[seed_id] -
template_array[seed_id].mean(axis=-1, keepdims=True)) / (
template_array[seed_id].std(axis=-1, keepdims=True) *
template_len))
template_array[seed_id] = np.nan_to_num(template_array[seed_id])
n_channels = len(seed_ids)
n_templates = template_array[seed_ids[0]].shape[0]
image_len = stream_array[seed_ids[0]].shape[0]
fft_len = next_fast_len(template_len + image_len - 1)
template_array = np.ascontiguousarray([template_array[x]
for x in seed_ids],
dtype=np.float32)
for x in seed_ids:
# Check that stream is non-zero and above variance threshold
if not np.all(stream_array[x] == 0) and np.var(stream_array[x]) < 1e-8:
# Apply gain
stream_array *= 1e8
warnings.warn("Low variance found for {0}, applying gain "
"to stabilise correlations".format(x))
stream_array = np.ascontiguousarray([stream_array[x] for x in seed_ids],
dtype=np.float32)
cccs = np.zeros((n_templates, image_len - template_len + 1),
np.float32)
used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc)
pad_array_np = np.ascontiguousarray([pad_array[seed_id]
for seed_id in seed_ids],
dtype=np.intc)
variance_warnings = np.ascontiguousarray(
np.zeros(n_channels), dtype=np.intc)
# call C function
ret = utilslib.multi_normxcorr_fftw(
template_array, n_templates, template_len, n_channels, stream_array,
image_len, cccs, fft_len, used_chans_np, pad_array_np, cores_outer,
cores_inner, variance_warnings)
if ret < 0:
raise MemoryError("Memory allocation failed in correlation C-code")
elif ret not in [0, 999]:
print('Error in C code (possible normalisation error)')
print('Maximum cccs %f at %s' %
(cccs.max(), np.unravel_index(cccs.argmax(), cccs.shape)))
print('Minimum cccs %f at %s' %
(cccs.min(), np.unravel_index(cccs.argmin(), cccs.shape)))
raise CorrelationError("Internal correlation error")
elif ret == 999:
warnings.warn("Some correlations not computed, are there "
"zeros in data? If not, consider increasing gain.")
for i, variance_warning in enumerate(variance_warnings):
if variance_warning and variance_warning > template_len:
warnings.warn("Low variance found in {0} places for {1},"
" check result.".format(variance_warning,
seed_ids[i]))
return cccs, used_chans |
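As a sanity reference for what the C routine computes per channel, here is a deliberately slow, plain-NumPy normalised cross-correlation using the same template scaling as above (mean removed, divided by population standard deviation times length), producing values in [-1, 1]. This is an illustration, not the library's implementation:

```python
import numpy as np

def normxcorr_reference(template, image):
    """Naive normalised cross-correlation of one template against one trace."""
    n = len(template)
    t = (template - template.mean()) / (template.std() * n)   # same scaling as above
    out = np.empty(len(image) - n + 1, dtype=np.float32)
    for k in range(out.shape[0]):
        window = image[k:k + n]
        sigma = window.std()
        # Zero out flat data windows instead of dividing by zero
        out[k] = 0.0 if sigma == 0 else np.dot(t, window - window.mean()) / sigma
    return out

rng = np.random.default_rng(42)
trace = rng.normal(size=200)
templ = trace[50:80].copy()
print(normxcorr_reference(templ, trace).argmax())   # 50: the template matches itself
```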
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Stream the entire download in chunks."""
self.StreamMedia(callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers,
use_chunks=True) | Stream the entire download in chunks. | Below is the instruction that describes the task:
### Input:
Stream the entire download in chunks.
### Response:
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Stream the entire download in chunks."""
self.StreamMedia(callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers,
use_chunks=True) |
def showSpec(self, fname):
"""Draws the spectrogram if it is currently None"""
if not self.specPlot.hasImg() and fname is not None:
self.specPlot.fromFile(fname) | Draws the spectrogram if it is currently None | Below is the instruction that describes the task:
### Input:
Draws the spectrogram if it is currently None
### Response:
def showSpec(self, fname):
"""Draws the spectrogram if it is currently None"""
if not self.specPlot.hasImg() and fname is not None:
self.specPlot.fromFile(fname) |
def run(self):
""" Run the installator """
self._display_header("BACKEND CONFIGURATION")
options = {}
while True:
options = {}
backend = self.ask_backend()
if backend == "local":
self._display_info("Backend chosen: local. Testing the configuration.")
options = self._ask_local_config()
if not self.test_local_docker_conf():
self._display_error(
"An error occurred while testing the configuration. Please make sure you are able do run `docker info` in "
"your command line, and environment parameters like DOCKER_HOST are correctly set.")
if self._ask_boolean("Would you like to continue anyway?", False):
break
else:
break
else:
self._display_warning(
"Backend chosen: manual. As it is a really advanced feature, you will have to configure it yourself in "
"the configuration file, at the end of the setup process.")
options = {"backend": backend}
break
self._display_header("MONGODB CONFIGURATION")
mongo_opt = self.configure_mongodb()
options.update(mongo_opt)
self._display_header("TASK DIRECTORY")
task_directory_opt = self.configure_task_directory()
options.update(task_directory_opt)
self._display_header("CONTAINERS")
self.configure_containers(options)
self._display_header("MISC")
misc_opt = self.configure_misc()
options.update(misc_opt)
database = self.try_mongodb_opts(options["mongo_opt"]["host"], options["mongo_opt"]["database"])
self._display_header("BACKUP DIRECTORY")
backup_directory_opt = self.configure_backup_directory()
options.update(backup_directory_opt)
self._display_header("AUTHENTIFICATION")
auth_opts = self.configure_authentication(database)
options.update(auth_opts)
self._display_info("You may want to add additional plugins to the configuration file.")
self._display_header("REMOTE DEBUGGING - IN BROWSER")
self._display_info(
"If you want to activate the remote debugging of task in the users' browser, you have to install separately "
"INGInious-xterm, which is available on Github, according to the parameters you have given for the hostname and the "
"port range given in the configuration of the remote debugging.")
self._display_info(
"You can leave the following question empty to disable this feature; remote debugging will still be available, "
"but not in the browser.")
webterm = self._ask_with_default(
"Please indicate the link to your installation of INGInious-xterm (for example: "
"https://your-hostname.com:8080).", "")
if webterm != "":
options["webterm"] = webterm
self._display_header("END")
file_dir = self._config_path or os.path.join(os.getcwd(), self.configuration_filename())
try:
yaml.dump(options, open(file_dir, "w"))
self._display_info("Successfully written the configuration file")
except:
self._display_error("Cannot write the configuration file on disk. Here is the content of the file")
print(yaml.dump(options)) | Run the installator | Below is the instruction that describes the task:
### Input:
Run the installator
### Response:
def run(self):
""" Run the installator """
self._display_header("BACKEND CONFIGURATION")
options = {}
while True:
options = {}
backend = self.ask_backend()
if backend == "local":
self._display_info("Backend chosen: local. Testing the configuration.")
options = self._ask_local_config()
if not self.test_local_docker_conf():
self._display_error(
"An error occurred while testing the configuration. Please make sure you are able do run `docker info` in "
"your command line, and environment parameters like DOCKER_HOST are correctly set.")
if self._ask_boolean("Would you like to continue anyway?", False):
break
else:
break
else:
self._display_warning(
"Backend chosen: manual. As it is a really advanced feature, you will have to configure it yourself in "
"the configuration file, at the end of the setup process.")
options = {"backend": backend}
break
self._display_header("MONGODB CONFIGURATION")
mongo_opt = self.configure_mongodb()
options.update(mongo_opt)
self._display_header("TASK DIRECTORY")
task_directory_opt = self.configure_task_directory()
options.update(task_directory_opt)
self._display_header("CONTAINERS")
self.configure_containers(options)
self._display_header("MISC")
misc_opt = self.configure_misc()
options.update(misc_opt)
database = self.try_mongodb_opts(options["mongo_opt"]["host"], options["mongo_opt"]["database"])
self._display_header("BACKUP DIRECTORY")
backup_directory_opt = self.configure_backup_directory()
options.update(backup_directory_opt)
self._display_header("AUTHENTIFICATION")
auth_opts = self.configure_authentication(database)
options.update(auth_opts)
self._display_info("You may want to add additional plugins to the configuration file.")
self._display_header("REMOTE DEBUGGING - IN BROWSER")
self._display_info(
"If you want to activate the remote debugging of task in the users' browser, you have to install separately "
"INGInious-xterm, which is available on Github, according to the parameters you have given for the hostname and the "
"port range given in the configuration of the remote debugging.")
self._display_info(
"You can leave the following question empty to disable this feature; remote debugging will still be available, "
"but not in the browser.")
webterm = self._ask_with_default(
"Please indicate the link to your installation of INGInious-xterm (for example: "
"https://your-hostname.com:8080).", "")
if webterm != "":
options["webterm"] = webterm
self._display_header("END")
file_dir = self._config_path or os.path.join(os.getcwd(), self.configuration_filename())
try:
yaml.dump(options, open(file_dir, "w"))
self._display_info("Successfully written the configuration file")
except:
self._display_error("Cannot write the configuration file on disk. Here is the content of the file")
print(yaml.dump(options)) |
def _init():
""" Create global Config object, parse command flags
"""
global config, _data_path, _allowed_config_keys
app_dir = _get_vispy_app_dir()
if app_dir is not None:
_data_path = op.join(app_dir, 'data')
_test_data_path = op.join(app_dir, 'test_data')
else:
_data_path = _test_data_path = None
# All allowed config keys and the types they may have
_allowed_config_keys = {
'data_path': string_types,
'default_backend': string_types,
'gl_backend': string_types,
'gl_debug': (bool,),
'glir_file': string_types+file_types,
'include_path': list,
'logging_level': string_types,
'qt_lib': string_types,
'dpi': (int, type(None)),
'profile': string_types + (type(None),),
'audit_tests': (bool,),
'test_data_path': string_types + (type(None),),
}
# Default values for all config options
default_config_options = {
'data_path': _data_path,
'default_backend': '',
'gl_backend': 'gl2',
'gl_debug': False,
'glir_file': '',
'include_path': [],
'logging_level': 'info',
'qt_lib': 'any',
'dpi': None,
'profile': None,
'audit_tests': False,
'test_data_path': _test_data_path,
}
config = Config(**default_config_options)
try:
config.update(**_load_config())
except Exception as err:
raise Exception('Error while reading vispy config file "%s":\n %s' %
(_get_config_fname(), err.message))
set_log_level(config['logging_level'])
_parse_command_line_arguments() | Create global Config object, parse command flags | Below is the instruction that describes the task:
### Input:
Create global Config object, parse command flags
### Response:
def _init():
""" Create global Config object, parse command flags
"""
global config, _data_path, _allowed_config_keys
app_dir = _get_vispy_app_dir()
if app_dir is not None:
_data_path = op.join(app_dir, 'data')
_test_data_path = op.join(app_dir, 'test_data')
else:
_data_path = _test_data_path = None
# All allowed config keys and the types they may have
_allowed_config_keys = {
'data_path': string_types,
'default_backend': string_types,
'gl_backend': string_types,
'gl_debug': (bool,),
'glir_file': string_types+file_types,
'include_path': list,
'logging_level': string_types,
'qt_lib': string_types,
'dpi': (int, type(None)),
'profile': string_types + (type(None),),
'audit_tests': (bool,),
'test_data_path': string_types + (type(None),),
}
# Default values for all config options
default_config_options = {
'data_path': _data_path,
'default_backend': '',
'gl_backend': 'gl2',
'gl_debug': False,
'glir_file': '',
'include_path': [],
'logging_level': 'info',
'qt_lib': 'any',
'dpi': None,
'profile': None,
'audit_tests': False,
'test_data_path': _test_data_path,
}
config = Config(**default_config_options)
try:
config.update(**_load_config())
except Exception as err:
raise Exception('Error while reading vispy config file "%s":\n %s' %
(_get_config_fname(), err.message))
set_log_level(config['logging_level'])
_parse_command_line_arguments() |
def p_instance_port_arg(self, p):
'instance_port_arg : DOT ID LPAREN identifier RPAREN'
p[0] = PortArg(p[2], p[4], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | instance_port_arg : DOT ID LPAREN identifier RPAREN | Below is the instruction that describes the task:
### Input:
instance_port_arg : DOT ID LPAREN identifier RPAREN
### Response:
def p_instance_port_arg(self, p):
'instance_port_arg : DOT ID LPAREN identifier RPAREN'
p[0] = PortArg(p[2], p[4], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def _build(self, x, prev_state):
"""Connects the core to the graph.
Args:
x: Input `Tensor` of shape `(batch_size, input_size)`.
prev_state: Previous state. This could be a `Tensor`, or a tuple of
`Tensor`s.
Returns:
The tuple `(output, state)` for this core.
Raises:
ValueError: if the `Tensor` `x` does not have rank 2.
"""
x.get_shape().with_rank(2)
self._batch_size = x.get_shape().as_list()[0]
self._dtype = x.dtype
x_zeros = tf.concat(
[x, tf.zeros(
shape=(self._batch_size, 1), dtype=self._dtype)], 1)
x_ones = tf.concat(
[x, tf.ones(
shape=(self._batch_size, 1), dtype=self._dtype)], 1)
# Weights for the halting signal
halting_linear = basic.Linear(name="halting_linear", output_size=1)
body = functools.partial(
self._body, halting_linear=halting_linear, x_ones=x_ones)
cumul_halting_init = tf.zeros(shape=(self._batch_size, 1),
dtype=self._dtype)
iteration_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)
core_output_size = [x.value for x in self._core.output_size]
out_init = tf.zeros(shape=(self._batch_size,) + tuple(core_output_size),
dtype=self._dtype)
cumul_state_init = _nested_zeros_like(prev_state)
remainder_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)
(unused_final_x, final_out, unused_final_state, final_cumul_state,
unused_final_halting, final_iteration, final_remainder) = tf.while_loop(
self._cond, body, [x_zeros, out_init, prev_state, cumul_state_init,
cumul_halting_init, iteration_init, remainder_init])
act_output = basic.Linear(
name="act_output_linear", output_size=self._output_size)(final_out)
return (act_output, (final_iteration, final_remainder)), final_cumul_state | Connects the core to the graph.
Args:
x: Input `Tensor` of shape `(batch_size, input_size)`.
prev_state: Previous state. This could be a `Tensor`, or a tuple of
`Tensor`s.
Returns:
The tuple `(output, state)` for this core.
Raises:
ValueError: if the `Tensor` `x` does not have rank 2. | Below is the instruction that describes the task:
### Input:
Connects the core to the graph.
Args:
x: Input `Tensor` of shape `(batch_size, input_size)`.
prev_state: Previous state. This could be a `Tensor`, or a tuple of
`Tensor`s.
Returns:
The tuple `(output, state)` for this core.
Raises:
ValueError: if the `Tensor` `x` does not have rank 2.
### Response:
def _build(self, x, prev_state):
"""Connects the core to the graph.
Args:
x: Input `Tensor` of shape `(batch_size, input_size)`.
prev_state: Previous state. This could be a `Tensor`, or a tuple of
`Tensor`s.
Returns:
The tuple `(output, state)` for this core.
Raises:
ValueError: if the `Tensor` `x` does not have rank 2.
"""
x.get_shape().with_rank(2)
self._batch_size = x.get_shape().as_list()[0]
self._dtype = x.dtype
x_zeros = tf.concat(
[x, tf.zeros(
shape=(self._batch_size, 1), dtype=self._dtype)], 1)
x_ones = tf.concat(
[x, tf.ones(
shape=(self._batch_size, 1), dtype=self._dtype)], 1)
# Weights for the halting signal
halting_linear = basic.Linear(name="halting_linear", output_size=1)
body = functools.partial(
self._body, halting_linear=halting_linear, x_ones=x_ones)
cumul_halting_init = tf.zeros(shape=(self._batch_size, 1),
dtype=self._dtype)
iteration_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)
core_output_size = [x.value for x in self._core.output_size]
out_init = tf.zeros(shape=(self._batch_size,) + tuple(core_output_size),
dtype=self._dtype)
cumul_state_init = _nested_zeros_like(prev_state)
remainder_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)
(unused_final_x, final_out, unused_final_state, final_cumul_state,
unused_final_halting, final_iteration, final_remainder) = tf.while_loop(
self._cond, body, [x_zeros, out_init, prev_state, cumul_state_init,
cumul_halting_init, iteration_init, remainder_init])
act_output = basic.Linear(
name="act_output_linear", output_size=self._output_size)(final_out)
return (act_output, (final_iteration, final_remainder)), final_cumul_state |
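The bookkeeping inside the `tf.while_loop` follows the adaptive-computation-time (ACT) scheme: per-step halting probabilities are accumulated until a threshold is reached, and the last step is weighted by the remainder. A plain-NumPy sketch of that weighting (an illustration, not the Sonnet module itself):

```python
import numpy as np

def act_step_weights(halting_probs, eps=0.01):
    """Turn per-step halting probabilities into the weights applied to each step."""
    weights, cumulative = [], 0.0
    for p in halting_probs:
        if cumulative + p >= 1.0 - eps:       # halting threshold reached
            weights.append(1.0 - cumulative)  # final step gets the remainder R
            break
        weights.append(p)
        cumulative += p
    return np.asarray(weights)

w = act_step_weights([0.3, 0.4, 0.5])
print(w)                   # [0.3 0.4 0.3]  -> three pondering steps, remainder ~0.3
print(round(w.sum(), 6))   # 1.0            -> weights sum to one once halting occurs
```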
def addLayerNode(self, layerName, bias = None, weights = {}):
"""
Adds a new node to a layer, and puts in new weights. Adds node on the end.
Weights will be random, unless specified.
bias = the new node's bias weight
weights = dict of {connectedLayerName: [weights], ...}
Example:
>>> net = Network() # doctest: +ELLIPSIS
Conx using seed: ...
>>> net.addLayers(2, 5, 1)
>>> net.addLayerNode("hidden", bias = -0.12, weights = {"input": [1, 0], "output": [0]})
"""
self.changeLayerSize(layerName, self[layerName].size + 1)
if bias != None:
self[layerName].weight[-1] = bias
for name in list(weights.keys()):
for c in self.connections:
if c.fromLayer.name == name and c.toLayer.name == layerName:
for i in range(self[name].size):
self[name, layerName].weight[i][-1] = weights[name][i]
elif c.toLayer.name == name and c.fromLayer.name == layerName:
for j in range(self[name].size):
self[layerName, name].weight[-1][j] = weights[name][j] | Adds a new node to a layer, and puts in new weights. Adds node on the end.
Weights will be random, unless specified.
bias = the new node's bias weight
weights = dict of {connectedLayerName: [weights], ...}
Example:
>>> net = Network() # doctest: +ELLIPSIS
Conx using seed: ...
>>> net.addLayers(2, 5, 1)
>>> net.addLayerNode("hidden", bias = -0.12, weights = {"input": [1, 0], "output": [0]}) | Below is the the instruction that describes the task:
### Input:
Adds a new node to a layer, and puts in new weights. Adds node on the end.
Weights will be random, unless specified.
bias = the new node's bias weight
weights = dict of {connectedLayerName: [weights], ...}
Example:
>>> net = Network() # doctest: +ELLIPSIS
Conx using seed: ...
>>> net.addLayers(2, 5, 1)
>>> net.addLayerNode("hidden", bias = -0.12, weights = {"input": [1, 0], "output": [0]})
### Response:
def addLayerNode(self, layerName, bias = None, weights = {}):
"""
Adds a new node to a layer, and puts in new weights. Adds node on the end.
Weights will be random, unless specified.
bias = the new node's bias weight
weights = dict of {connectedLayerName: [weights], ...}
Example:
>>> net = Network() # doctest: +ELLIPSIS
Conx using seed: ...
>>> net.addLayers(2, 5, 1)
>>> net.addLayerNode("hidden", bias = -0.12, weights = {"input": [1, 0], "output": [0]})
"""
self.changeLayerSize(layerName, self[layerName].size + 1)
if bias != None:
self[layerName].weight[-1] = bias
for name in list(weights.keys()):
for c in self.connections:
if c.fromLayer.name == name and c.toLayer.name == layerName:
for i in range(self[name].size):
self[name, layerName].weight[i][-1] = weights[name][i]
elif c.toLayer.name == name and c.fromLayer.name == layerName:
for j in range(self[name].size):
self[layerName, name].weight[-1][j] = weights[name][j] |
def is_active_trip(feed: "Feed", trip_id: str, date: str) -> bool:
"""
Return ``True`` if the ``feed.calendar`` or ``feed.calendar_dates``
says that the trip runs on the given date; return ``False``
otherwise.
Note that a trip that starts on date d, ends after 23:59:59, and
does not start again on date d+1 is considered active on date d and
not active on date d+1.
This subtle point, which is a side effect of the GTFS, can
lead to confusion.
Parameters
----------
feed : Feed
trip_id : string
ID of a trip in ``feed.trips``
date : string
YYYYMMDD date string
Returns
-------
boolean
``True`` if and only if the given trip starts on the given
date.
Notes
-----
- This function is key for getting all trips, routes, etc. that are
active on a given date, so the function needs to be fast
- Assume the following feed attributes are not ``None``:
* ``feed.trips``
"""
service = feed._trips_i.at[trip_id, "service_id"]
# Check feed._calendar_dates_g.
caldg = feed._calendar_dates_g
if caldg is not None:
if (service, date) in caldg.groups:
et = caldg.get_group((service, date))["exception_type"].iat[0]
if et == 1:
return True
else:
# Exception type is 2
return False
# Check feed._calendar_i
cali = feed._calendar_i
if cali is not None:
if service in cali.index:
weekday_str = hp.weekday_to_str(hp.datestr_to_date(date).weekday())
if (
cali.at[service, "start_date"]
<= date
<= cali.at[service, "end_date"]
and cali.at[service, weekday_str] == 1
):
return True
else:
return False
# If you made it here, then something went wrong
return False | Return ``True`` if the ``feed.calendar`` or ``feed.calendar_dates``
says that the trip runs on the given date; return ``False``
otherwise.
Note that a trip that starts on date d, ends after 23:59:59, and
does not start again on date d+1 is considered active on date d and
not active on date d+1.
This subtle point, which is a side effect of the GTFS, can
lead to confusion.
Parameters
----------
feed : Feed
trip_id : string
ID of a trip in ``feed.trips``
date : string
YYYYMMDD date string
Returns
-------
boolean
``True`` if and only if the given trip starts on the given
date.
Notes
-----
- This function is key for getting all trips, routes, etc. that are
active on a given date, so the function needs to be fast
- Assume the following feed attributes are not ``None``:
* ``feed.trips`` | Below is the instruction that describes the task:
### Input:
Return ``True`` if the ``feed.calendar`` or ``feed.calendar_dates``
says that the trip runs on the given date; return ``False``
otherwise.
Note that a trip that starts on date d, ends after 23:59:59, and
does not start again on date d+1 is considered active on date d and
not active on date d+1.
This subtle point, which is a side effect of the GTFS, can
lead to confusion.
Parameters
----------
feed : Feed
trip_id : string
ID of a trip in ``feed.trips``
date : string
YYYYMMDD date string
Returns
-------
boolean
``True`` if and only if the given trip starts on the given
date.
Notes
-----
- This function is key for getting all trips, routes, etc. that are
active on a given date, so the function needs to be fast
- Assume the following feed attributes are not ``None``:
* ``feed.trips``
### Response:
def is_active_trip(feed: "Feed", trip_id: str, date: str) -> bool:
"""
Return ``True`` if the ``feed.calendar`` or ``feed.calendar_dates``
says that the trip runs on the given date; return ``False``
otherwise.
Note that a trip that starts on date d, ends after 23:59:59, and
does not start again on date d+1 is considered active on date d and
not active on date d+1.
This subtle point, which is a side effect of the GTFS, can
lead to confusion.
Parameters
----------
feed : Feed
trip_id : string
ID of a trip in ``feed.trips``
date : string
YYYYMMDD date string
Returns
-------
boolean
``True`` if and only if the given trip starts on the given
date.
Notes
-----
- This function is key for getting all trips, routes, etc. that are
active on a given date, so the function needs to be fast
- Assume the following feed attributes are not ``None``:
* ``feed.trips``
"""
service = feed._trips_i.at[trip_id, "service_id"]
# Check feed._calendar_dates_g.
caldg = feed._calendar_dates_g
if caldg is not None:
if (service, date) in caldg.groups:
et = caldg.get_group((service, date))["exception_type"].iat[0]
if et == 1:
return True
else:
# Exception type is 2
return False
# Check feed._calendar_i
cali = feed._calendar_i
if cali is not None:
if service in cali.index:
weekday_str = hp.weekday_to_str(hp.datestr_to_date(date).weekday())
if (
cali.at[service, "start_date"]
<= date
<= cali.at[service, "end_date"]
and cali.at[service, weekday_str] == 1
):
return True
else:
return False
# If you made it here, then something went wrong
return False |
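The precedence described in the docstring (a calendar_dates exception wins over the weekly calendar) can be reproduced with a small self-contained pandas sketch; the table contents below are invented purely for illustration:

```python
import pandas as pd

calendar = pd.DataFrame({
    "service_id": ["S1"], "start_date": ["20230101"], "end_date": ["20231231"],
    "monday": [1], "tuesday": [1], "wednesday": [1], "thursday": [1],
    "friday": [1], "saturday": [0], "sunday": [0]})
calendar_dates = pd.DataFrame(
    {"service_id": ["S1"], "date": ["20230704"], "exception_type": [2]})  # 2 = removed

def runs_on(service, date):
    exc = calendar_dates.query("service_id == @service and date == @date")
    if not exc.empty:
        return exc["exception_type"].iat[0] == 1          # 1 = added, 2 = removed
    row = calendar.set_index("service_id").loc[service]
    weekday = ["monday", "tuesday", "wednesday", "thursday",
               "friday", "saturday", "sunday"][pd.Timestamp(date).weekday()]
    return bool(row["start_date"] <= date <= row["end_date"] and row[weekday] == 1)

print(runs_on("S1", "20230704"))  # False: removed by a calendar_dates exception
print(runs_on("S1", "20230705"))  # True: a Wednesday inside the service period
```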
def fetch(self, category=CATEGORY_BUG, from_date=DEFAULT_DATETIME):
"""Fetch the bugs from the repository.
The method retrieves, from a Bugzilla repository, the bugs
updated since the given date.
:param category: the category of items to fetch
:param from_date: obtain bugs updated since this date
:returns: a generator of bugs
"""
if not from_date:
from_date = DEFAULT_DATETIME
kwargs = {'from_date': from_date}
items = super().fetch(category, **kwargs)
return items | Fetch the bugs from the repository.
The method retrieves, from a Bugzilla repository, the bugs
updated since the given date.
:param category: the category of items to fetch
:param from_date: obtain bugs updated since this date
:returns: a generator of bugs | Below is the instruction that describes the task:
### Input:
Fetch the bugs from the repository.
The method retrieves, from a Bugzilla repository, the bugs
updated since the given date.
:param category: the category of items to fetch
:param from_date: obtain bugs updated since this date
:returns: a generator of bugs
### Response:
def fetch(self, category=CATEGORY_BUG, from_date=DEFAULT_DATETIME):
"""Fetch the bugs from the repository.
The method retrieves, from a Bugzilla repository, the bugs
updated since the given date.
:param category: the category of items to fetch
:param from_date: obtain bugs updated since this date
:returns: a generator of bugs
"""
if not from_date:
from_date = DEFAULT_DATETIME
kwargs = {'from_date': from_date}
items = super().fetch(category, **kwargs)
return items |
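A hedged usage sketch of how such a backend is typically driven; the import path, constructor arguments and tracker URL below are assumptions about the surrounding Perceval package and are not confirmed by the snippet above:

```python
import datetime

# Assumed import path; adjust to the actual package layout.
from perceval.backends.core.bugzilla import Bugzilla

backend = Bugzilla("https://bugzilla.example.com")               # made-up tracker URL
since = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)
for bug in backend.fetch(from_date=since):                        # generator of bug items
    print(bug)
```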
def _make_overlay(self):
""" (Unstable) Create a new overlay that acts like a chained map: Values
missing in the overlay are copied from the source map. Both maps
share the same meta entries.
Entries that were copied from the source are called 'virtual'. You
can not delete virtual keys, but overwrite them, which turns them
into non-virtual entries. Setting keys on an overlay never affects
its source, but may affect any number of child overlays.
Other than collections.ChainMap or most other implementations, this
approach does not resolve missing keys on demand, but instead
actively copies all values from the source to the overlay and keeps
track of virtual and non-virtual keys internally. This removes any
lookup-overhead. Read-access is as fast as a build-in dict for both
virtual and non-virtual keys.
Changes are propagated recursively and depth-first. A failing
on-change handler in an overlay stops the propagation of virtual
values and may result in an partly updated tree. Take extra care
here and make sure that on-change handlers never fail.
Used by Route.config
"""
# Cleanup dead references
self._overlays[:] = [ref for ref in self._overlays if ref() is not None]
overlay = ConfigDict()
overlay._meta = self._meta
overlay._source = self
self._overlays.append(weakref.ref(overlay))
for key in self:
overlay._set_virtual(key, self[key])
return overlay | (Unstable) Create a new overlay that acts like a chained map: Values
missing in the overlay are copied from the source map. Both maps
share the same meta entries.
Entries that were copied from the source are called 'virtual'. You
can not delete virtual keys, but overwrite them, which turns them
into non-virtual entries. Setting keys on an overlay never affects
its source, but may affect any number of child overlays.
Other than collections.ChainMap or most other implementations, this
approach does not resolve missing keys on demand, but instead
actively copies all values from the source to the overlay and keeps
track of virtual and non-virtual keys internally. This removes any
lookup-overhead. Read-access is as fast as a built-in dict for both
virtual and non-virtual keys.
Changes are propagated recursively and depth-first. A failing
on-change handler in an overlay stops the propagation of virtual
values and may result in a partly updated tree. Take extra care
here and make sure that on-change handlers never fail.
Used by Route.config | Below is the the instruction that describes the task:
### Input:
(Unstable) Create a new overlay that acts like a chained map: Values
missing in the overlay are copied from the source map. Both maps
share the same meta entries.
Entries that were copied from the source are called 'virtual'. You
can not delete virtual keys, but overwrite them, which turns them
into non-virtual entries. Setting keys on an overlay never affects
its source, but may affect any number of child overlays.
Unlike collections.ChainMap or most other implementations, this
approach does not resolve missing keys on demand, but instead
actively copies all values from the source to the overlay and keeps
track of virtual and non-virtual keys internally. This removes any
lookup-overhead. Read-access is as fast as a built-in dict for both
virtual and non-virtual keys.
Changes are propagated recursively and depth-first. A failing
on-change handler in an overlay stops the propagation of virtual
values and may result in a partly updated tree. Take extra care
here and make sure that on-change handlers never fail.
Used by Route.config
### Response:
def _make_overlay(self):
""" (Unstable) Create a new overlay that acts like a chained map: Values
missing in the overlay are copied from the source map. Both maps
share the same meta entries.
Entries that were copied from the source are called 'virtual'. You
can not delete virtual keys, but overwrite them, which turns them
into non-virtual entries. Setting keys on an overlay never affects
its source, but may affect any number of child overlays.
Unlike collections.ChainMap or most other implementations, this
approach does not resolve missing keys on demand, but instead
actively copies all values from the source to the overlay and keeps
track of virtual and non-virtual keys internally. This removes any
lookup-overhead. Read-access is as fast as a built-in dict for both
virtual and non-virtual keys.
Changes are propagated recursively and depth-first. A failing
on-change handler in an overlay stops the propagation of virtual
values and may result in a partly updated tree. Take extra care
here and make sure that on-change handlers never fail.
Used by Route.config
"""
# Cleanup dead references
self._overlays[:] = [ref for ref in self._overlays if ref() is not None]
overlay = ConfigDict()
overlay._meta = self._meta
overlay._source = self
self._overlays.append(weakref.ref(overlay))
for key in self:
overlay._set_virtual(key, self[key])
return overlay |
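To make the "virtual vs. non-virtual key" idea above concrete, here is a small self-contained sketch using plain dicts and a set; it only mimics the copy-on-create behaviour the docstring describes and is not the real ConfigDict class (meta sharing and on-change propagation are omitted).
# Simplified illustration only.
source = {'host': 'localhost', 'port': 8080}

# Creating an overlay copies every source value and records which keys are virtual.
overlay = dict(source)
virtual_keys = set(source)

# Overwriting a virtual key turns it into a non-virtual entry of the overlay.
overlay['port'] = 9090
virtual_keys.discard('port')

print(overlay)        # {'host': 'localhost', 'port': 9090}
print(source)         # unchanged: {'host': 'localhost', 'port': 8080}
print(virtual_keys)   # {'host'}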
def get_index(cls):
"""Gets the index for this model.
The index for this model is specified in `settings.ES_INDEXES`
which is a dict of mapping type -> index name.
By default, this uses `.get_mapping_type()` to determine the
mapping and returns the value in `settings.ES_INDEXES` for that
or ``settings.ES_INDEXES['default']``.
Override this to compute it differently.
:returns: index name to use
"""
indexes = settings.ES_INDEXES
index = indexes.get(cls.get_mapping_type_name()) or indexes['default']
if not (isinstance(index, six.string_types)):
# FIXME - not sure what to do here, but we only want one
# index and somehow this isn't one index.
index = index[0]
return index | Gets the index for this model.
The index for this model is specified in `settings.ES_INDEXES`
which is a dict of mapping type -> index name.
By default, this uses `.get_mapping_type()` to determine the
mapping and returns the value in `settings.ES_INDEXES` for that
or ``settings.ES_INDEXES['default']``.
Override this to compute it differently.
:returns: index name to use | Below is the the instruction that describes the task:
### Input:
Gets the index for this model.
The index for this model is specified in `settings.ES_INDEXES`
which is a dict of mapping type -> index name.
By default, this uses `.get_mapping_type()` to determine the
mapping and returns the value in `settings.ES_INDEXES` for that
or ``settings.ES_INDEXES['default']``.
Override this to compute it differently.
:returns: index name to use
### Response:
def get_index(cls):
"""Gets the index for this model.
The index for this model is specified in `settings.ES_INDEXES`
which is a dict of mapping type -> index name.
By default, this uses `.get_mapping_type()` to determine the
mapping and returns the value in `settings.ES_INDEXES` for that
or ``settings.ES_INDEXES['default']``.
Override this to compute it differently.
:returns: index name to use
"""
indexes = settings.ES_INDEXES
index = indexes.get(cls.get_mapping_type_name()) or indexes['default']
if not (isinstance(index, six.string_types)):
# FIXME - not sure what to do here, but we only want one
# index and somehow this isn't one index.
index = index[0]
return index |
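A toy illustration of the lookup order described above. The settings dict and mapping-type names are invented, and a plain function stands in for the classmethod.
ES_INDEXES = {'default': 'main-index', 'page': 'page-index'}   # hypothetical settings

def resolve_index(mapping_type_name):
    # Prefer a per-mapping-type entry, otherwise fall back to 'default'.
    index = ES_INDEXES.get(mapping_type_name) or ES_INDEXES['default']
    # If the entry is a list of indexes, keep only the first one.
    if not isinstance(index, str):
        index = index[0]
    return index

print(resolve_index('page'))      # page-index
print(resolve_index('comment'))   # main-index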
def cleanup_bundle():
"""Deletes files used for creating bundle.
* vendored/*
* bundle.zip
"""
paths = ['./vendored', './bundle.zip']
for path in paths:
if os.path.exists(path):
log.debug("Deleting %s..." % path)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path) | Deletes files used for creating bundle.
* vendored/*
* bundle.zip | Below is the the instruction that describes the task:
### Input:
Deletes files used for creating bundle.
* vendored/*
* bundle.zip
### Response:
def cleanup_bundle():
"""Deletes files used for creating bundle.
* vendored/*
* bundle.zip
"""
paths = ['./vendored', './bundle.zip']
for path in paths:
if os.path.exists(path):
log.debug("Deleting %s..." % path)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path) |
def get_descendants(self):
"""
:returns: A queryset of all the node's descendants as DFS, doesn't
include the node itself
"""
if self.is_leaf():
return get_result_class(self.__class__).objects.none()
return self.__class__.get_tree(self).exclude(pk=self.pk) | :returns: A queryset of all the node's descendants as DFS, doesn't
include the node itself | Below is the the instruction that describes the task:
### Input:
:returns: A queryset of all the node's descendants as DFS, doesn't
include the node itself
### Response:
def get_descendants(self):
"""
:returns: A queryset of all the node's descendants as DFS, doesn't
include the node itself
"""
if self.is_leaf():
return get_result_class(self.__class__).objects.none()
return self.__class__.get_tree(self).exclude(pk=self.pk) |
def _filter_filepaths(self, filepaths):
"""
helps iterate through all the file parsers
each filter is applied individually to the
same set of `filepaths`
"""
if self.file_filters:
plugin_filepaths = set()
for file_filter in self.file_filters:
plugin_paths = file_filter(filepaths)
plugin_filepaths.update(plugin_paths)
else:
plugin_filepaths = filepaths
return plugin_filepaths | helps iterate through all the file parsers
each filter is applied individually to the
same set of `filepaths` | Below is the the instruction that describes the task:
### Input:
helps iterate through all the file parsers
each filter is applied individually to the
same set of `filepaths`
### Response:
def _filter_filepaths(self, filepaths):
"""
helps iterate through all the file parsers
each filter is applied individually to the
same set of `filepaths`
"""
if self.file_filters:
plugin_filepaths = set()
for file_filter in self.file_filters:
plugin_paths = file_filter(filepaths)
plugin_filepaths.update(plugin_paths)
else:
plugin_filepaths = filepaths
return plugin_filepaths |
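A quick sketch of the union behaviour described above: every filter sees the same original set of paths and the results are merged. The two filter functions and the paths are invented for illustration.
def only_python(filepaths):
    return [p for p in filepaths if p.endswith('.py')]

def only_configs(filepaths):
    return [p for p in filepaths if p.endswith('.cfg')]

filepaths = {'a.py', 'b.cfg', 'c.txt'}

plugin_filepaths = set()
for file_filter in (only_python, only_configs):
    # Each filter is applied to the full, unfiltered set of paths.
    plugin_filepaths.update(file_filter(filepaths))

print(sorted(plugin_filepaths))   # ['a.py', 'b.cfg']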
def fill(self):
'''
Writes data on internal tarfile instance, which writes to current
object, using :meth:`write`.
As this method is blocking, it is used inside a thread.
This method is called automatically, on a thread, on initialization,
so there is little need to call it manually.
'''
if self.exclude:
exclude = self.exclude
ap = functools.partial(os.path.join, self.path)
self._tarfile.add(
self.path, "",
filter=lambda info: None if exclude(ap(info.name)) else info
)
else:
self._tarfile.add(self.path, "")
self._tarfile.close() # force stream flush
self._finished += 1
if not self._result.is_set():
self._result.set() | Writes data on internal tarfile instance, which writes to current
object, using :meth:`write`.
As this method is blocking, it is used inside a thread.
This method is called automatically, on a thread, on initialization,
so there is little need to call it manually. | Below is the the instruction that describes the task:
### Input:
Writes data on internal tarfile instance, which writes to current
object, using :meth:`write`.
As this method is blocking, it is used inside a thread.
This method is called automatically, on a thread, on initialization,
so there is little need to call it manually.
### Response:
def fill(self):
'''
Writes data on internal tarfile instance, which writes to current
object, using :meth:`write`.
As this method is blocking, it is used inside a thread.
This method is called automatically, on a thread, on initialization,
so there is little need to call it manually.
'''
if self.exclude:
exclude = self.exclude
ap = functools.partial(os.path.join, self.path)
self._tarfile.add(
self.path, "",
filter=lambda info: None if exclude(ap(info.name)) else info
)
else:
self._tarfile.add(self.path, "")
self._tarfile.close() # force stream flush
self._finished += 1
if not self._result.is_set():
self._result.set() |
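The exclude mechanism above relies on tarfile.TarFile.add(filter=...). The standalone sketch below shows the same pattern outside the class, with a temporary directory and an invented exclude predicate; it is an approximation of the streaming setup, not the original class.
import functools
import io
import os
import tarfile
import tempfile

def exclude(path):
    # Invented predicate: skip anything under a __pycache__ directory.
    return '__pycache__' in path

path = tempfile.mkdtemp()                       # placeholder directory to archive
open(os.path.join(path, 'keep.txt'), 'w').close()
os.makedirs(os.path.join(path, '__pycache__'))

ap = functools.partial(os.path.join, path)
buffer = io.BytesIO()
with tarfile.open(fileobj=buffer, mode='w') as tar:
    # Returning None from the filter drops the member, as in the code above.
    tar.add(path, '', filter=lambda info: None if exclude(ap(info.name)) else info)

with tarfile.open(fileobj=io.BytesIO(buffer.getvalue())) as tar:
    print(tar.getnames())                       # no __pycache__ entry in the archive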
def _make_lock_path(self, lock_name_base):
"""
Create path to lock file with given name as base.
:param str lock_name_base: Lock file name, designed to not be prefixed
with the lock file designation, but that's permitted.
:return str: Path to the lock file.
"""
# For lock prefix validation, separate file name from other path
# components, as we care about the name prefix not path prefix.
base, name = os.path.split(lock_name_base)
lock_name = self._ensure_lock_prefix(name)
if base:
lock_name = os.path.join(base, lock_name)
return pipeline_filepath(self, filename=lock_name) | Create path to lock file with given name as base.
:param str lock_name_base: Lock file name, designed to not be prefixed
with the lock file designation, but that's permitted.
:return str: Path to the lock file. | Below is the the instruction that describes the task:
### Input:
Create path to lock file with given name as base.
:param str lock_name_base: Lock file name, designed to not be prefixed
with the lock file designation, but that's permitted.
:return str: Path to the lock file.
### Response:
def _make_lock_path(self, lock_name_base):
"""
Create path to lock file with given name as base.
:param str lock_name_base: Lock file name, designed to not be prefixed
with the lock file designation, but that's permitted.
:return str: Path to the lock file.
"""
# For lock prefix validation, separate file name from other path
# components, as we care about the name prefix not path prefix.
base, name = os.path.split(lock_name_base)
lock_name = self._ensure_lock_prefix(name)
if base:
lock_name = os.path.join(base, lock_name)
return pipeline_filepath(self, filename=lock_name) |
def is_integer(value, min=None, max=None):
"""
A check that tests that a given value is an integer (int, or long)
and optionally, between bounds. A negative value is accepted, while
a float will fail.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
>>> vtor = Validator()
>>> vtor.check('integer', '-1')
-1
>>> vtor.check('integer', '0')
0
>>> vtor.check('integer', 9)
9
>>> vtor.check('integer', 'a') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('integer', '2.2') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "2.2" is of the wrong type.
>>> vtor.check('integer(10)', '20')
20
>>> vtor.check('integer(max=20)', '15')
15
>>> vtor.check('integer(10)', '9') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(10)', 9) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(max=20)', '35') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(max=20)', 35) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(0, 9)', False)
0
"""
(min_val, max_val) = _is_num_param(('min', 'max'), (min, max))
if not isinstance(value, int_or_string_types):
raise VdtTypeError(value)
if isinstance(value, string_types):
# if it's a string - does it represent an integer ?
try:
value = int(value)
except ValueError:
raise VdtTypeError(value)
if (min_val is not None) and (value < min_val):
raise VdtValueTooSmallError(value)
if (max_val is not None) and (value > max_val):
raise VdtValueTooBigError(value)
return value | A check that tests that a given value is an integer (int, or long)
and optionally, between bounds. A negative value is accepted, while
a float will fail.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
>>> vtor = Validator()
>>> vtor.check('integer', '-1')
-1
>>> vtor.check('integer', '0')
0
>>> vtor.check('integer', 9)
9
>>> vtor.check('integer', 'a') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('integer', '2.2') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "2.2" is of the wrong type.
>>> vtor.check('integer(10)', '20')
20
>>> vtor.check('integer(max=20)', '15')
15
>>> vtor.check('integer(10)', '9') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(10)', 9) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(max=20)', '35') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(max=20)', 35) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(0, 9)', False)
0 | Below is the the instruction that describes the task:
### Input:
A check that tests that a given value is an integer (int, or long)
and optionally, between bounds. A negative value is accepted, while
a float will fail.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
>>> vtor = Validator()
>>> vtor.check('integer', '-1')
-1
>>> vtor.check('integer', '0')
0
>>> vtor.check('integer', 9)
9
>>> vtor.check('integer', 'a') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('integer', '2.2') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "2.2" is of the wrong type.
>>> vtor.check('integer(10)', '20')
20
>>> vtor.check('integer(max=20)', '15')
15
>>> vtor.check('integer(10)', '9') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(10)', 9) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(max=20)', '35') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(max=20)', 35) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(0, 9)', False)
0
### Response:
def is_integer(value, min=None, max=None):
"""
A check that tests that a given value is an integer (int, or long)
and optionally, between bounds. A negative value is accepted, while
a float will fail.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
>>> vtor = Validator()
>>> vtor.check('integer', '-1')
-1
>>> vtor.check('integer', '0')
0
>>> vtor.check('integer', 9)
9
>>> vtor.check('integer', 'a') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('integer', '2.2') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "2.2" is of the wrong type.
>>> vtor.check('integer(10)', '20')
20
>>> vtor.check('integer(max=20)', '15')
15
>>> vtor.check('integer(10)', '9') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(10)', 9) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(max=20)', '35') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(max=20)', 35) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(0, 9)', False)
0
"""
(min_val, max_val) = _is_num_param(('min', 'max'), (min, max))
if not isinstance(value, int_or_string_types):
raise VdtTypeError(value)
if isinstance(value, string_types):
# if it's a string - does it represent an integer ?
try:
value = int(value)
except ValueError:
raise VdtTypeError(value)
if (min_val is not None) and (value < min_val):
raise VdtValueTooSmallError(value)
if (max_val is not None) and (value > max_val):
raise VdtValueTooBigError(value)
return value |
def from_json(cls, json_obj):
"""Build a MetricResponse from JSON.
:param json_obj: JSON data representing a Cube Metric.
:type json_obj: `String` or `json`
:throws: `InvalidMetricError` when any of {type,time,data} fields are
not present in json_obj.
"""
if isinstance(json_obj, str):
json_obj = json.loads(json_obj)
time = None
value = None
if cls.TIME_FIELD_NAME in json_obj:
time = json_obj[cls.TIME_FIELD_NAME]
else:
raise InvalidMetricError("{field} must be present!".format(
field=cls.TIME_FIELD_NAME))
if cls.VALUE_FIELD_NAME in json_obj:
value = json_obj[cls.VALUE_FIELD_NAME]
return cls(time, value) | Build a MetricResponse from JSON.
:param json_obj: JSON data representing a Cube Metric.
:type json_obj: `String` or `json`
:throws: `InvalidMetricError` when any of {type,time,data} fields are
not present in json_obj. | Below is the the instruction that describes the task:
### Input:
Build a MetricResponse from JSON.
:param json_obj: JSON data representing a Cube Metric.
:type json_obj: `String` or `json`
:throws: `InvalidMetricError` when any of {type,time,data} fields are
not present in json_obj.
### Response:
def from_json(cls, json_obj):
"""Build a MetricResponse from JSON.
:param json_obj: JSON data representing a Cube Metric.
:type json_obj: `String` or `json`
:throws: `InvalidMetricError` when any of {type,time,data} fields are
not present in json_obj.
"""
if isinstance(json_obj, str):
json_obj = json.loads(json_obj)
time = None
value = None
if cls.TIME_FIELD_NAME in json_obj:
time = json_obj[cls.TIME_FIELD_NAME]
else:
raise InvalidMetricError("{field} must be present!".format(
field=cls.TIME_FIELD_NAME))
if cls.VALUE_FIELD_NAME in json_obj:
value = json_obj[cls.VALUE_FIELD_NAME]
return cls(time, value) |
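A hedged usage sketch for the classmethod above. It builds the payload from the class's own TIME_FIELD_NAME and VALUE_FIELD_NAME constants (whose concrete values are not shown in the source), and assumes MetricResponse and InvalidMetricError are importable from the same module.
import json

# Use the class constants so no guess about the actual field names is needed.
payload = json.dumps({MetricResponse.TIME_FIELD_NAME: '2015-06-01T00:00:00.000Z',
                      MetricResponse.VALUE_FIELD_NAME: 42})
metric = MetricResponse.from_json(payload)
print(metric)

# A payload without the time field is rejected.
try:
    MetricResponse.from_json(json.dumps({MetricResponse.VALUE_FIELD_NAME: 42}))
except InvalidMetricError as exc:
    print('rejected:', exc)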
def iiif_info_handler(prefix=None, identifier=None,
config=None, klass=None, auth=None, **args):
"""Handler for IIIF Image Information requests."""
if (not auth or degraded_request(identifier) or auth.info_authz()):
# go ahead with request as made
if (auth):
logging.debug("Authorized for image %s" % identifier)
i = IIIFHandler(prefix, identifier, config, klass, auth)
try:
return i.image_information_response()
except IIIFError as e:
return i.error_response(e)
elif (auth.info_authn()):
# authn but not authz -> 401
abort(401)
else:
# redirect to degraded
response = redirect(host_port_prefix(
config.host, config.port, prefix) + '/' + identifier + '-deg/info.json')
response.headers['Access-control-allow-origin'] = '*'
return response | Handler for IIIF Image Information requests. | Below is the the instruction that describes the task:
### Input:
Handler for IIIF Image Information requests.
### Response:
def iiif_info_handler(prefix=None, identifier=None,
config=None, klass=None, auth=None, **args):
"""Handler for IIIF Image Information requests."""
if (not auth or degraded_request(identifier) or auth.info_authz()):
# go ahead with request as made
if (auth):
logging.debug("Authorized for image %s" % identifier)
i = IIIFHandler(prefix, identifier, config, klass, auth)
try:
return i.image_information_response()
except IIIFError as e:
return i.error_response(e)
elif (auth.info_authn()):
# authn but not authz -> 401
abort(401)
else:
# redirect to degraded
response = redirect(host_port_prefix(
config.host, config.port, prefix) + '/' + identifier + '-deg/info.json')
response.headers['Access-control-allow-origin'] = '*'
return response |
def get_term(self,term_id):
"""
Returns the term object for the supplied identifier
@type term_id: string
@param term_id: term identifier
"""
if term_id in self.idx:
return Cterm(self.idx[term_id],self.type)
else:
return None | Returns the term object for the supplied identifier
@type term_id: string
@param term_id: term identifier | Below is the the instruction that describes the task:
### Input:
Returns the term object for the supplied identifier
@type term_id: string
@param term_id: term identifier
### Response:
def get_term(self,term_id):
"""
Returns the term object for the supplied identifier
@type term_id: string
@param term_id: term identifier
"""
if term_id in self.idx:
return Cterm(self.idx[term_id],self.type)
else:
return None |
def _get_resource_hash(zone_name, record):
"""Returns the last ten digits of the sha256 hash of the combined arguments. Useful for generating unique
resource IDs
Args:
zone_name (`str`): The name of the DNS Zone the record belongs to
record (`dict`): A record dict to generate the hash from
Returns:
`str`
"""
record_data = defaultdict(int, record)
if type(record_data['GeoLocation']) == dict:
record_data['GeoLocation'] = ":".join(["{}={}".format(k, v) for k, v in record_data['GeoLocation'].items()])
args = [
zone_name,
record_data['Name'],
record_data['Type'],
record_data['Weight'],
record_data['Region'],
record_data['GeoLocation'],
record_data['Failover'],
record_data['HealthCheckId'],
record_data['TrafficPolicyInstanceId']
]
return get_resource_id('r53r', args) | Returns the last ten digits of the sha256 hash of the combined arguments. Useful for generating unique
resource IDs
Args:
zone_name (`str`): The name of the DNS Zone the record belongs to
record (`dict`): A record dict to generate the hash from
Returns:
`str` | Below is the the instruction that describes the task:
### Input:
Returns the last ten digits of the sha256 hash of the combined arguments. Useful for generating unique
resource IDs
Args:
zone_name (`str`): The name of the DNS Zone the record belongs to
record (`dict`): A record dict to generate the hash from
Returns:
`str`
### Response:
def _get_resource_hash(zone_name, record):
"""Returns the last ten digits of the sha256 hash of the combined arguments. Useful for generating unique
resource IDs
Args:
zone_name (`str`): The name of the DNS Zone the record belongs to
record (`dict`): A record dict to generate the hash from
Returns:
`str`
"""
record_data = defaultdict(int, record)
if type(record_data['GeoLocation']) == dict:
record_data['GeoLocation'] = ":".join(["{}={}".format(k, v) for k, v in record_data['GeoLocation'].items()])
args = [
zone_name,
record_data['Name'],
record_data['Type'],
record_data['Weight'],
record_data['Region'],
record_data['GeoLocation'],
record_data['Failover'],
record_data['HealthCheckId'],
record_data['TrafficPolicyInstanceId']
]
return get_resource_id('r53r', args) |
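To clarify the defaultdict fallback and the GeoLocation flattening above, here is a cut-down sketch. Since get_resource_id is not shown in the source, the final step uses hashlib directly and is only an approximation of that helper; the record and zone name are made up.
import hashlib
from collections import defaultdict

record = {'Name': 'www.example.com.', 'Type': 'A',
          'GeoLocation': {'CountryCode': 'US', 'SubdivisionCode': 'CA'}}

record_data = defaultdict(int, record)    # missing keys (Weight, Region, ...) become 0
if type(record_data['GeoLocation']) == dict:
    record_data['GeoLocation'] = ':'.join(
        '{}={}'.format(k, v) for k, v in record_data['GeoLocation'].items())

args = ['example.com.', record_data['Name'], record_data['Type'],
        record_data['Weight'], record_data['GeoLocation']]

# Stand-in for get_resource_id('r53r', args): hash the joined args, keep 10 hex digits.
digest = hashlib.sha256('/'.join(str(a) for a in args).encode()).hexdigest()
print('r53r-' + digest[-10:])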
def on_for(self, node): # ('target', 'iter', 'body', 'orelse')
"""For blocks."""
for val in self.run(node.iter):
self.node_assign(node.target, val)
self._interrupt = None
for tnode in node.body:
self.run(tnode)
if self._interrupt is not None:
break
if isinstance(self._interrupt, ast.Break):
break
else:
for tnode in node.orelse:
self.run(tnode)
self._interrupt = None | For blocks. | Below is the the instruction that describes the task:
### Input:
For blocks.
### Response:
def on_for(self, node): # ('target', 'iter', 'body', 'orelse')
"""For blocks."""
for val in self.run(node.iter):
self.node_assign(node.target, val)
self._interrupt = None
for tnode in node.body:
self.run(tnode)
if self._interrupt is not None:
break
if isinstance(self._interrupt, ast.Break):
break
else:
for tnode in node.orelse:
self.run(tnode)
self._interrupt = None |
def gaussian_prior_model_for_arguments(self, arguments):
"""
Parameters
----------
arguments: {Prior: float}
A dictionary of arguments
Returns
-------
prior_models: [PriorModel]
A new list of prior models with gaussian priors
"""
return CollectionPriorModel(
{
key: value.gaussian_prior_model_for_arguments(arguments)
if isinstance(value, AbstractPriorModel)
else value
for key, value in self.__dict__.items() if key not in ('component_number', 'item_number', 'id')
}
) | Parameters
----------
arguments: {Prior: float}
A dictionary of arguments
Returns
-------
prior_models: [PriorModel]
A new list of prior models with gaussian priors | Below is the the instruction that describes the task:
### Input:
Parameters
----------
arguments: {Prior: float}
A dictionary of arguments
Returns
-------
prior_models: [PriorModel]
A new list of prior models with gaussian priors
### Response:
def gaussian_prior_model_for_arguments(self, arguments):
"""
Parameters
----------
arguments: {Prior: float}
A dictionary of arguments
Returns
-------
prior_models: [PriorModel]
A new list of prior models with gaussian priors
"""
return CollectionPriorModel(
{
key: value.gaussian_prior_model_for_arguments(arguments)
if isinstance(value, AbstractPriorModel)
else value
for key, value in self.__dict__.items() if key not in ('component_number', 'item_number', 'id')
}
) |
def GET_AUTH(self, courseid): # pylint: disable=arguments-differ
""" GET request """
course, __ = self.get_course_and_check_rights(courseid)
return self.page(course) | GET request | Below is the the instruction that describes the task:
### Input:
GET request
### Response:
def GET_AUTH(self, courseid): # pylint: disable=arguments-differ
""" GET request """
course, __ = self.get_course_and_check_rights(courseid)
return self.page(course) |
def get_class_attributes(cls):
"""Return a generator for class attributes' names and value.
This method strictly relies on PEP 520 (Preserving Class Attribute
Definition Order), implemented in Python 3.6. So, if this behaviour
changes, this whole lib can lose its functionality (since the
attribute order is a strong requirement). For the same reason, this
lib will not work on python versions earlier than 3.6.
.. code-block:: python3
for name, value in self.get_class_attributes():
print("attribute name: {}".format(name))
print("attribute type: {}".format(value))
Returns:
generator: tuples with attribute name and value.
"""
#: see this method docstring for an important notice about the use of
#: cls.__dict__
for name, value in cls.__dict__.items():
# gets only our (kytos) attributes. this ignores methods, dunder
# methods and attributes, and common python type attributes.
if GenericStruct._is_pyof_attribute(value):
yield (name, value) | Return a generator for class attributes' names and value.
This method strictly relies on PEP 520 (Preserving Class Attribute
Definition Order), implemented in Python 3.6. So, if this behaviour
changes, this whole lib can lose its functionality (since the
attribute order is a strong requirement). For the same reason, this
lib will not work on python versions earlier than 3.6.
.. code-block:: python3
for name, value in self.get_class_attributes():
print("attribute name: {}".format(name))
print("attribute type: {}".format(value))
Returns:
generator: tuples with attribute name and value. | Below is the the instruction that describes the task:
### Input:
Return a generator for class attributes' names and value.
This method strictly relies on PEP 520 (Preserving Class Attribute
Definition Order), implemented in Python 3.6. So, if this behaviour
changes, this whole lib can lose its functionality (since the
attribute order is a strong requirement). For the same reason, this
lib will not work on python versions earlier than 3.6.
.. code-block:: python3
for name, value in self.get_class_attributes():
print("attribute name: {}".format(name))
print("attribute type: {}".format(value))
Returns:
generator: tuples with attribute name and value.
### Response:
def get_class_attributes(cls):
"""Return a generator for class attributes' names and value.
This method strictly relies on PEP 520 (Preserving Class Attribute
Definition Order), implemented in Python 3.6. So, if this behaviour
changes, this whole lib can lose its functionality (since the
attribute order is a strong requirement). For the same reason, this
lib will not work on python versions earlier than 3.6.
.. code-block:: python3
for name, value in self.get_class_attributes():
print("attribute name: {}".format(name))
print("attribute type: {}".format(value))
Returns:
generator: tuples with attribute name and value.
"""
#: see this method docstring for an important notice about the use of
#: cls.__dict__
for name, value in cls.__dict__.items():
# gets only our (kytos) attributes. this ignores methods, dunder
# methods and attributes, and common python type attributes.
if GenericStruct._is_pyof_attribute(value):
yield (name, value) |
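A tiny demonstration of the PEP 520 guarantee the docstring leans on: on Python 3.6+, cls.__dict__ preserves the order in which class attributes were defined. The Example class and the attribute filter below are invented stand-ins for the pyof types.
class Example:
    # Definition order below is the order iteration over __dict__ will return.
    version = 4
    header_type = 'OFPT_HELLO'
    length = 8

def class_attributes(cls):
    for name, value in cls.__dict__.items():
        # Keep only plain data attributes, skipping dunder entries and methods.
        if not name.startswith('__') and not callable(value):
            yield name, value

print(list(class_attributes(Example)))
# [('version', 4), ('header_type', 'OFPT_HELLO'), ('length', 8)]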
def set_title(self):
"""Parses title and set value."""
try:
self.title = self.soup.find('title').string
except AttributeError:
self.title = None | Parses title and set value. | Below is the the instruction that describes the task:
### Input:
Parses title and set value.
### Response:
def set_title(self):
"""Parses title and set value."""
try:
self.title = self.soup.find('title').string
except AttributeError:
self.title = None |
def getMonthlyPerformance():
'''
This function does the work of compiling monthly performance data
that can either be rendered as CSV or as JSON
'''
when_all = {
'eventregistration__dropIn': False,
'eventregistration__cancelled': False,
}
# Get objects at the Series level so that we can calculate StudentHours
series_counts = list(Series.objects.annotate(
eventregistrations=Sum(Case(When(Q(**when_all),then=1),output_field=IntegerField())),)
.values('year','month','eventregistrations','duration'))
for series in series_counts:
series['studenthours'] = (series.get('eventregistrations') or 0) * (series.get('duration') or 0)
all_years = set([x['year'] for x in series_counts])
dataseries_list = ['EventRegistrations', 'Registrations','Hours','StudentHours','AvgStudents']
yearTotals = {}
# Initialize dictionaries
for dataseries in dataseries_list:
yearTotals[dataseries] = {'MonthlyAverage': {}}
for year in all_years:
yearTotals[dataseries][year] = {}
# Fill in by year and month for a cleaner looping process
for year in all_years:
# Monthly Totals
for month in range(1,13):
# Total EventRegistrations per month is retrieved by the query above.
yearTotals['EventRegistrations'][year][month] = sum([x['eventregistrations'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])
# Total Registrations per month and hours per month require a separate query for each month
yearTotals['Registrations'][year][month] = len(Registration.objects.filter(eventregistration__dropIn=False, eventregistration__cancelled=False,eventregistration__event__year=year,eventregistration__event__month=month).distinct())
yearTotals['Hours'][year][month] = sum([x['duration'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])
yearTotals['StudentHours'][year][month] = sum([x['studenthours'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])
if yearTotals['Hours'][year][month] > 0:
yearTotals['AvgStudents'][year][month] = yearTotals['StudentHours'][year][month] / float(yearTotals['Hours'][year][month])
else:
yearTotals['AvgStudents'][year][month] = 0
# Annual Totals
for sub_series in ['EventRegistrations','Registrations','Hours','StudentHours']:
yearTotals[sub_series][year]['Total'] = sum([x for x in yearTotals[sub_series][year].values()])
# Annual (Monthly) Averages
month_count = len([x for k,x in yearTotals['Hours'][year].items() if k in range(1,13) and x > 0])
if month_count > 0:
for sub_series in ['EventRegistrations','Registrations','Hours','StudentHours']:
yearTotals[sub_series][year]['Average'] = yearTotals[sub_series][year]['Total'] / float(month_count)
yearTotals['AvgStudents'][year]['Average'] = yearTotals['StudentHours'][year]['Total'] / float(yearTotals['Hours'][year]['Total'])
# Monthly Averages
for month in range(1,13):
yearly_hours_data = [x[month] for k,x in yearTotals['Hours'].items() if k in all_years and x[month] > 0]
yearly_studenthours_data = [x[month] for k,x in yearTotals['StudentHours'].items() if k in all_years and x[month] > 0]
yearly_eventregistrations_data = [x[month] for k,x in yearTotals['EventRegistrations'].items() if k in all_years and yearTotals['Hours'][k][month] > 0]
yearly_registrations_data = [x[month] for k,x in yearTotals['Registrations'].items() if k in all_years and yearTotals['Hours'][k][month] > 0]
year_count = len(yearly_hours_data)
if year_count > 0:
yearTotals['EventRegistrations']['MonthlyAverage'][month] = sum([x for x in yearly_eventregistrations_data]) / year_count
yearTotals['Registrations']['MonthlyAverage'][month] = sum([x for x in yearly_registrations_data]) / year_count
yearTotals['Hours']['MonthlyAverage'][month] = sum([x for x in yearly_hours_data]) / year_count
yearTotals['StudentHours']['MonthlyAverage'][month] = sum([x for x in yearly_studenthours_data]) / year_count
yearTotals['AvgStudents']['MonthlyAverage'][month] = yearTotals['StudentHours']['MonthlyAverage'][month] / float(yearTotals['Hours']['MonthlyAverage'][month])
return yearTotals | This function does the work of compiling monthly performance data
that can either be rendered as CSV or as JSON | Below is the the instruction that describes the task:
### Input:
This function does the work of compiling monthly performance data
that can either be rendered as CSV or as JSON
### Response:
def getMonthlyPerformance():
'''
This function does the work of compiling monthly performance data
that can either be rendered as CSV or as JSON
'''
when_all = {
'eventregistration__dropIn': False,
'eventregistration__cancelled': False,
}
# Get objects at the Series level so that we can calculate StudentHours
series_counts = list(Series.objects.annotate(
eventregistrations=Sum(Case(When(Q(**when_all),then=1),output_field=IntegerField())),)
.values('year','month','eventregistrations','duration'))
for series in series_counts:
series['studenthours'] = (series.get('eventregistrations') or 0) * (series.get('duration') or 0)
all_years = set([x['year'] for x in series_counts])
dataseries_list = ['EventRegistrations', 'Registrations','Hours','StudentHours','AvgStudents']
yearTotals = {}
# Initialize dictionaries
for dataseries in dataseries_list:
yearTotals[dataseries] = {'MonthlyAverage': {}}
for year in all_years:
yearTotals[dataseries][year] = {}
# Fill in by year and month for a cleaner looping process
for year in all_years:
# Monthly Totals
for month in range(1,13):
# Total EventRegistrations per month is retrieved by the query above.
yearTotals['EventRegistrations'][year][month] = sum([x['eventregistrations'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])
# Total Registrations per month and hours per month require a separate query for each month
yearTotals['Registrations'][year][month] = len(Registration.objects.filter(eventregistration__dropIn=False, eventregistration__cancelled=False,eventregistration__event__year=year,eventregistration__event__month=month).distinct())
yearTotals['Hours'][year][month] = sum([x['duration'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])
yearTotals['StudentHours'][year][month] = sum([x['studenthours'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])
if yearTotals['Hours'][year][month] > 0:
yearTotals['AvgStudents'][year][month] = yearTotals['StudentHours'][year][month] / float(yearTotals['Hours'][year][month])
else:
yearTotals['AvgStudents'][year][month] = 0
# Annual Totals
for sub_series in ['EventRegistrations','Registrations','Hours','StudentHours']:
yearTotals[sub_series][year]['Total'] = sum([x for x in yearTotals[sub_series][year].values()])
# Annual (Monthly) Averages
month_count = len([x for k,x in yearTotals['Hours'][year].items() if k in range(1,13) and x > 0])
if month_count > 0:
for sub_series in ['EventRegistrations','Registrations','Hours','StudentHours']:
yearTotals[sub_series][year]['Average'] = yearTotals[sub_series][year]['Total'] / float(month_count)
yearTotals['AvgStudents'][year]['Average'] = yearTotals['StudentHours'][year]['Total'] / float(yearTotals['Hours'][year]['Total'])
# Monthly Averages
for month in range(1,13):
yearly_hours_data = [x[month] for k,x in yearTotals['Hours'].items() if k in all_years and x[month] > 0]
yearly_studenthours_data = [x[month] for k,x in yearTotals['StudentHours'].items() if k in all_years and x[month] > 0]
yearly_eventregistrations_data = [x[month] for k,x in yearTotals['EventRegistrations'].items() if k in all_years and yearTotals['Hours'][k][month] > 0]
yearly_registrations_data = [x[month] for k,x in yearTotals['Registrations'].items() if k in all_years and yearTotals['Hours'][k][month] > 0]
year_count = len(yearly_hours_data)
if year_count > 0:
yearTotals['EventRegistrations']['MonthlyAverage'][month] = sum([x for x in yearly_eventregistrations_data]) / year_count
yearTotals['Registrations']['MonthlyAverage'][month] = sum([x for x in yearly_registrations_data]) / year_count
yearTotals['Hours']['MonthlyAverage'][month] = sum([x for x in yearly_hours_data]) / year_count
yearTotals['StudentHours']['MonthlyAverage'][month] = sum([x for x in yearly_studenthours_data]) / year_count
yearTotals['AvgStudents']['MonthlyAverage'][month] = yearTotals['StudentHours']['MonthlyAverage'][month] / float(yearTotals['Hours']['MonthlyAverage'][month])
return yearTotals |
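A worked toy example of the core metrics above, with made-up numbers: student-hours are event registrations times class duration, and AvgStudents divides total student-hours by total class hours.
# Two hypothetical series taught in the same month.
series_counts = [
    {'eventregistrations': 10, 'duration': 6},    # 10 students, 6 class hours
    {'eventregistrations': 4,  'duration': 8},    # 4 students, 8 class hours
]

for series in series_counts:
    series['studenthours'] = series['eventregistrations'] * series['duration']

hours = sum(s['duration'] for s in series_counts)               # 14
student_hours = sum(s['studenthours'] for s in series_counts)   # 60 + 32 = 92
avg_students = student_hours / float(hours)                     # about 6.57

print(hours, student_hours, round(avg_students, 2))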
def _get_headers(self):
"""
assumes comments have been stripped with extract
:return:
"""
header = self.lines[0]
self.lines = self.lines[1:]
self.headers = \
[self.clean(h) for h in header.split(self.seperator)]
if self.is_strip:
self.headers = self.headers[1:-1]
return self.headers | assumes comments have been stripped with extract
:return: | Below is the the instruction that describes the task:
### Input:
assumes comments have been stripped with extract
:return:
### Response:
def _get_headers(self):
"""
assumes comments have been stripped with extract
:return:
"""
header = self.lines[0]
self.lines = self.lines[1:]
self.headers = \
[self.clean(h) for h in header.split(self.seperator)]
if self.is_strip:
self.headers = self.headers[1:-1]
return self.headers |
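A plain-function sketch of the header parsing above, with an identity-style clean() and made-up delimiter-separated input; the strip step drops the empty cells produced by a leading and trailing separator. The separator and sample lines are assumptions.
def get_headers(lines, separator=';', is_strip=True, clean=str.strip):
    header, rest = lines[0], lines[1:]
    headers = [clean(h) for h in header.split(separator)]
    if is_strip:
        # Drop the empty first/last cells created by leading/trailing separators.
        headers = headers[1:-1]
    return headers, rest

lines = ['; name ; age ;', '; ada ; 36 ;']
headers, remaining = get_headers(lines)
print(headers)     # ['name', 'age']
print(remaining)   # ['; ada ; 36 ;']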
def remove_dashboard_tag(self, id, tag_value, **kwargs): # noqa: E501
"""Remove a tag from a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_dashboard_tag(id, tag_value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str tag_value: (required)
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.remove_dashboard_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501
else:
(data) = self.remove_dashboard_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501
return data | Remove a tag from a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_dashboard_tag(id, tag_value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str tag_value: (required)
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Remove a tag from a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_dashboard_tag(id, tag_value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str tag_value: (required)
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
### Response:
def remove_dashboard_tag(self, id, tag_value, **kwargs): # noqa: E501
"""Remove a tag from a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_dashboard_tag(id, tag_value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str tag_value: (required)
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.remove_dashboard_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501
else:
(data) = self.remove_dashboard_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501
return data |
def obtain_band_edges(self):
'''
Fill up the atomic orbitals with available electrons.
Return HOMO, LUMO, and whether it's a metal.
'''
orbitals = self.aos_as_list()
electrons = Composition(self.composition).total_electrons
partial_filled = []
for orbital in orbitals:
if electrons <= 0:
break
if 's' in orbital[1]:
electrons += -2
elif 'p' in orbital[1]:
electrons += -6
elif 'd' in orbital[1]:
electrons += -10
elif 'f' in orbital[1]:
electrons += -14
partial_filled.append(orbital)
if electrons != 0:
homo = partial_filled[-1]
lumo = partial_filled[-1]
else:
homo = partial_filled[-1]
try:
lumo = orbitals[len(partial_filled)]
except:
lumo = None
if homo == lumo:
metal = True
else:
metal = False
return {'HOMO': homo, 'LUMO': lumo, 'metal': metal} | Fill up the atomic orbitals with available electrons.
Return HOMO, LUMO, and whether it's a metal. | Below is the the instruction that describes the task:
### Input:
Fill up the atomic orbitals with available electrons.
Return HOMO, LUMO, and whether it's a metal.
### Response:
def obtain_band_edges(self):
'''
Fill up the atomic orbitals with available electrons.
Return HOMO, LUMO, and whether it's a metal.
'''
orbitals = self.aos_as_list()
electrons = Composition(self.composition).total_electrons
partial_filled = []
for orbital in orbitals:
if electrons <= 0:
break
if 's' in orbital[1]:
electrons += -2
elif 'p' in orbital[1]:
electrons += -6
elif 'd' in orbital[1]:
electrons += -10
elif 'f' in orbital[1]:
electrons += -14
partial_filled.append(orbital)
if electrons != 0:
homo = partial_filled[-1]
lumo = partial_filled[-1]
else:
homo = partial_filled[-1]
try:
lumo = orbitals[len(partial_filled)]
except:
lumo = None
if homo == lumo:
metal = True
else:
metal = False
return {'HOMO': homo, 'LUMO': lumo, 'metal': metal} |
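A compact trace of the orbital-filling loop above, using an invented orbital list and electron count, to show how HOMO/LUMO and the metal flag come out; the real method derives both from the composition and atomic-orbital data.
# Each orbital is (shell_label, subshell); capacities: s=2, p=6, d=10, f=14.
orbitals = [('1', '1s'), ('2', '2s'), ('2', '2p')]
capacity = {'s': 2, 'p': 6, 'd': 10, 'f': 14}

electrons = 4    # hypothetical electron count
partial_filled = []
for orbital in orbitals:
    if electrons <= 0:
        break
    electrons -= capacity[orbital[1][-1]]
    partial_filled.append(orbital)

if electrons != 0:
    homo = lumo = partial_filled[-1]    # partially filled level -> metal
else:
    homo = partial_filled[-1]
    lumo = orbitals[len(partial_filled)] if len(partial_filled) < len(orbitals) else None

print({'HOMO': homo, 'LUMO': lumo, 'metal': homo == lumo})
# {'HOMO': ('2', '2s'), 'LUMO': ('2', '2p'), 'metal': False}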
def values_list(self, *args, **kwargs):
"""Return the primary keys as a list.
The only valid call is values_list('pk', flat=True)
"""
flat = kwargs.pop('flat', False)
assert flat is True
assert len(args) == 1
assert args[0] == self.model._meta.pk.name
return self.pks | Return the primary keys as a list.
The only valid call is values_list('pk', flat=True) | Below is the the instruction that describes the task:
### Input:
Return the primary keys as a list.
The only valid call is values_list('pk', flat=True)
### Response:
def values_list(self, *args, **kwargs):
"""Return the primary keys as a list.
The only valid call is values_list('pk', flat=True)
"""
flat = kwargs.pop('flat', False)
assert flat is True
assert len(args) == 1
assert args[0] == self.model._meta.pk.name
return self.pks |
def get_class_properties(self, dev_class, class_prop):
"""
get_class_properties(self, dev_class, class_prop) -> None
Returns the class properties
Parameters :
- dev_class : (DeviceClass) the DeviceClass object
- class_prop : [in, out] (dict<str, None>) the property names. Will be filled
with property values
Return : None"""
# initialize default values
if class_prop == {} or not Util._UseDb:
return
# call database to get properties
props = self.db.get_class_property(dev_class.get_name(), list(class_prop.keys()))
# if value defined in database, store it
for name in class_prop:
if props[name]:
type = self.get_property_type(name, class_prop)
values = self.stringArray2values(props[name], type)
self.set_property_values(name, class_prop, values)
else:
print(name + " property NOT found in database") | get_class_properties(self, dev_class, class_prop) -> None
Returns the class properties
Parameters :
- dev_class : (DeviceClass) the DeviceClass object
- class_prop : [in, out] (dict<str, None>) the property names. Will be filled
with property values
Return : None | Below is the the instruction that describes the task:
### Input:
get_class_properties(self, dev_class, class_prop) -> None
Returns the class properties
Parameters :
- dev_class : (DeviceClass) the DeviceClass object
- class_prop : [in, out] (dict<str, None>) the property names. Will be filled
with property values
Return : None
### Response:
def get_class_properties(self, dev_class, class_prop):
"""
get_class_properties(self, dev_class, class_prop) -> None
Returns the class properties
Parameters :
- dev_class : (DeviceClass) the DeviceClass object
- class_prop : [in, out] (dict<str, None>) the property names. Will be filled
with property values
Return : None"""
# initialize default values
if class_prop == {} or not Util._UseDb:
return
# call database to get properties
props = self.db.get_class_property(dev_class.get_name(), list(class_prop.keys()))
# if value defined in database, store it
for name in class_prop:
if props[name]:
type = self.get_property_type(name, class_prop)
values = self.stringArray2values(props[name], type)
self.set_property_values(name, class_prop, values)
else:
print(name + " property NOT found in database") |
def attach(self, engine, start=Events.STARTED, pause=Events.COMPLETED, resume=None, step=None):
""" Register callbacks to control the timer.
Args:
engine (Engine):
Engine that this timer will be attached to.
start (Events):
Event which should start (reset) the timer.
pause (Events):
Event which should pause the timer.
resume (Events, optional):
Event which should resume the timer.
step (Events, optional):
Event which should call the `step` method of the counter.
Returns:
self (Timer)
"""
engine.add_event_handler(start, self.reset)
engine.add_event_handler(pause, self.pause)
if resume is not None:
engine.add_event_handler(resume, self.resume)
if step is not None:
engine.add_event_handler(step, self.step)
return self | Register callbacks to control the timer.
Args:
engine (Engine):
Engine that this timer will be attached to.
start (Events):
Event which should start (reset) the timer.
pause (Events):
Event which should pause the timer.
resume (Events, optional):
Event which should resume the timer.
step (Events, optional):
Event which should call the `step` method of the counter.
Returns:
self (Timer) | Below is the the instruction that describes the task:
### Input:
Register callbacks to control the timer.
Args:
engine (Engine):
Engine that this timer will be attached to.
start (Events):
Event which should start (reset) the timer.
pause (Events):
Event which should pause the timer.
resume (Events, optional):
Event which should resume the timer.
step (Events, optional):
Event which should call the `step` method of the counter.
Returns:
self (Timer)
### Response:
def attach(self, engine, start=Events.STARTED, pause=Events.COMPLETED, resume=None, step=None):
""" Register callbacks to control the timer.
Args:
engine (Engine):
Engine that this timer will be attached to.
start (Events):
Event which should start (reset) the timer.
pause (Events):
Event which should pause the timer.
resume (Events, optional):
Event which should resume the timer.
step (Events, optional):
Event which should call the `step` method of the counter.
Returns:
self (Timer)
"""
engine.add_event_handler(start, self.reset)
engine.add_event_handler(pause, self.pause)
if resume is not None:
engine.add_event_handler(resume, self.resume)
if step is not None:
engine.add_event_handler(step, self.step)
return self |
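If this Timer is the one from pytorch-ignite's handlers module, a typical attachment looks like the hedged sketch below, which times each iteration and averages over them; the Events members and the toy engine are assumptions about the caller's setup.
# Hedged sketch: assumes pytorch-ignite is installed and this Timer matches
# ignite.handlers.Timer; otherwise treat the imports as placeholders.
from ignite.engine import Engine, Events
from ignite.handlers import Timer

def train_step(engine, batch):
    return batch * 2    # stand-in for real work

trainer = Engine(train_step)

timer = Timer(average=True)
timer.attach(trainer,
             start=Events.EPOCH_STARTED,        # reset at the start of each epoch
             resume=Events.ITERATION_STARTED,   # run only while an iteration is active
             pause=Events.ITERATION_COMPLETED,
             step=Events.ITERATION_COMPLETED)   # one measurement per iteration

trainer.run(range(10), max_epochs=1)
print('average seconds per iteration:', timer.value())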
def tiles_from_bbox(self, geometry, zoom):
"""
All metatiles intersecting with given bounding box.
- geometry: shapely geometry
- zoom: zoom level
"""
validate_zoom(zoom)
return self.tiles_from_bounds(geometry.bounds, zoom) | All metatiles intersecting with given bounding box.
- geometry: shapely geometry
- zoom: zoom level | Below is the the instruction that describes the task:
### Input:
All metatiles intersecting with given bounding box.
- geometry: shapely geometry
- zoom: zoom level
### Response:
def tiles_from_bbox(self, geometry, zoom):
"""
All metatiles intersecting with given bounding box.
- geometry: shapely geometry
- zoom: zoom level
"""
validate_zoom(zoom)
return self.tiles_from_bounds(geometry.bounds, zoom) |
def changes(self, **kwargs):
"""List the merge request changes.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: List of changes
"""
path = '%s/%s/changes' % (self.manager.path, self.get_id())
return self.manager.gitlab.http_get(path, **kwargs) | List the merge request changes.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: List of changes | Below is the the instruction that describes the task:
### Input:
List the merge request changes.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: List of changes
### Response:
def changes(self, **kwargs):
"""List the merge request changes.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: List of changes
"""
path = '%s/%s/changes' % (self.manager.path, self.get_id())
return self.manager.gitlab.http_get(path, **kwargs) |
def count(self):
'''Estimate the cardinality count based on the technique described in
`this paper <http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=365694>`_.
Returns:
int: The estimated cardinality of the set represented by this MinHash.
'''
k = len(self)
return np.float(k) / np.sum(self.hashvalues / np.float(_max_hash)) - 1.0 | Estimate the cardinality count based on the technique described in
`this paper <http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=365694>`_.
Returns:
int: The estimated cardinality of the set represented by this MinHash. | Below is the the instruction that describes the task:
### Input:
Estimate the cardinality count based on the technique described in
`this paper <http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=365694>`_.
Returns:
int: The estimated cardinality of the set represented by this MinHash.
### Response:
def count(self):
'''Estimate the cardinality count based on the technique described in
`this paper <http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=365694>`_.
Returns:
int: The estimated cardinality of the set represented by this MinHash.
'''
k = len(self)
return np.float(k) / np.sum(self.hashvalues / np.float(_max_hash)) - 1.0 |
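A hedged usage sketch, assuming this count() belongs to datasketch's MinHash; the estimator above returns a floating-point value close to the number of distinct inputs hashed.
# Assumes the datasketch package; if this is a different MinHash implementation,
# treat the import as a placeholder.
from datasketch import MinHash

m = MinHash(num_perm=128)
for word in ['red', 'green', 'blue', 'green']:   # 3 distinct values
    m.update(word.encode('utf8'))

print(m.count())   # estimate close to 3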
def drive(self):
"""Get wrapper to the drive containing this device."""
if self.is_drive:
return self
cleartext = self.luks_cleartext_slave
if cleartext:
return cleartext.drive
if self.is_block:
return self._daemon[self._P.Block.Drive]
return None | Get wrapper to the drive containing this device. | Below is the the instruction that describes the task:
### Input:
Get wrapper to the drive containing this device.
### Response:
def drive(self):
"""Get wrapper to the drive containing this device."""
if self.is_drive:
return self
cleartext = self.luks_cleartext_slave
if cleartext:
return cleartext.drive
if self.is_block:
return self._daemon[self._P.Block.Drive]
return None |
def solve_value(self, value, resource):
"""Solve a resource with a value, without coercing.
Arguments
---------
value : ?
A value to solve in combination with the given resource. The first filter of the
resource will be applied on this value (next filters on the result of the previous
filter).
resource : dataql.resources.Resource
An instance of a subclass of ``Resource`` to solve with the given value.
Returns
-------
The result of all filters applied on the value for the first filter, and result of the
previous filter for next filters.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date, allow_class=True)
>>> registry.register(str)
>>> class MySolver(Solver):
... def coerce(self, value, resource): return value
>>> solver = MySolver(registry)
>>> from dataql.resources import Filter, NamedArg, PosArg, SliceFilter
>>> field = Field(None,
... filters=[
... Filter(name='fromtimestamp', args=[PosArg(1433109600)]),
... Filter(name='replace', args=[NamedArg('year', '=', 2014)]),
... Filter(name='strftime', args=[PosArg('%F')]),
... Filter(name='replace', args=[PosArg('2014'), PosArg('2015')]),
... ]
... )
>>> solver.solve_value(date, field)
'2015-06-01'
>>> solver.solve_value(None, field)
>>> d = {'foo': {'date': date(2015, 6, 1)}, 'bar': {'date': None}, 'baz': [{'date': None}]}
>>> registry.register(dict)
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='foo'),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
'2015-06-01'
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='bar'),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='baz'),
... SliceFilter(0),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
# Example of how to raise a ``CannotSolve`` exception.
>>> from dataql.solvers.exceptions import CannotSolve
>>> raise CannotSolve(solver, Field('fromtimestamp'), date) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql...CannotSolve: Solver `<MySolver>` was not able to solve...`<Field[fromtimestamp]>`.
"""
# The given value is the starting point on which we apply the first filter.
result = value
# Apply filters one by one on the previous result.
if result is not None:
for filter_ in resource.filters:
result = self.registry.solve_filter(result, filter_)
if result is None:
break
return result | Solve a resource with a value, without coercing.
Arguments
---------
value : ?
A value to solve in combination with the given resource. The first filter of the
resource will be applied on this value (next filters on the result of the previous
filter).
resource : dataql.resources.Resource
An instance of a subclass of ``Resource`` to solve with the given value.
Returns
-------
The result of all filters applied on the value for the first filter, and result of the
previous filter for next filters.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date, allow_class=True)
>>> registry.register(str)
>>> class MySolver(Solver):
... def coerce(self, value, resource): return value
>>> solver = MySolver(registry)
>>> from dataql.resources import Filter, NamedArg, PosArg, SliceFilter
>>> field = Field(None,
... filters=[
... Filter(name='fromtimestamp', args=[PosArg(1433109600)]),
... Filter(name='replace', args=[NamedArg('year', '=', 2014)]),
... Filter(name='strftime', args=[PosArg('%F')]),
... Filter(name='replace', args=[PosArg('2014'), PosArg('2015')]),
... ]
... )
>>> solver.solve_value(date, field)
'2015-06-01'
>>> solver.solve_value(None, field)
>>> d = {'foo': {'date': date(2015, 6, 1)}, 'bar': {'date': None}, 'baz': [{'date': None}]}
>>> registry.register(dict)
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='foo'),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
'2015-06-01'
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='bar'),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='baz'),
... SliceFilter(0),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
# Example of how to raise a ``CannotSolve`` exception.
>>> from dataql.solvers.exceptions import CannotSolve
>>> raise CannotSolve(solver, Field('fromtimestamp'), date) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql...CannotSolve: Solver `<MySolver>` was not able to solve...`<Field[fromtimestamp]>`. | Below is the instruction that describes the task:
### Input:
Solve a resource with a value, without coercing.
Arguments
---------
value : ?
A value to solve in combination with the given resource. The first filter of the
resource will be applied on this value (next filters on the result of the previous
filter).
resource : dataql.resources.Resource
An instance of a subclass of ``Resource`` to solve with the given value.
Returns
-------
The result of all filters applied on the value for the first filter, and result of the
previous filter for next filters.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date, allow_class=True)
>>> registry.register(str)
>>> class MySolver(Solver):
... def coerce(self, value, resource): return value
>>> solver = MySolver(registry)
>>> from dataql.resources import Filter, NamedArg, PosArg, SliceFilter
>>> field = Field(None,
... filters=[
... Filter(name='fromtimestamp', args=[PosArg(1433109600)]),
... Filter(name='replace', args=[NamedArg('year', '=', 2014)]),
... Filter(name='strftime', args=[PosArg('%F')]),
... Filter(name='replace', args=[PosArg('2014'), PosArg('2015')]),
... ]
... )
>>> solver.solve_value(date, field)
'2015-06-01'
>>> solver.solve_value(None, field)
>>> d = {'foo': {'date': date(2015, 6, 1)}, 'bar': {'date': None}, 'baz': [{'date': None}]}
>>> registry.register(dict)
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='foo'),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
'2015-06-01'
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='bar'),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='baz'),
... SliceFilter(0),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
# Example of how to raise a ``CannotSolve`` exception.
>>> from dataql.solvers.exceptions import CannotSolve
>>> raise CannotSolve(solver, Field('fromtimestamp'), date) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql...CannotSolve: Solver `<MySolver>` was not able to solve...`<Field[fromtimestamp]>`.
### Response:
def solve_value(self, value, resource):
"""Solve a resource with a value, without coercing.
Arguments
---------
value : ?
A value to solve in combination with the given resource. The first filter of the
resource will be applied on this value (next filters on the result of the previous
filter).
resource : dataql.resources.Resource
An instance of a subclass of ``Resource`` to solve with the given value.
Returns
-------
The result of all filters applied on the value for the first filter, and result of the
previous filter for next filters.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date, allow_class=True)
>>> registry.register(str)
>>> class MySolver(Solver):
... def coerce(self, value, resource): return value
>>> solver = MySolver(registry)
>>> from dataql.resources import Filter, NamedArg, PosArg, SliceFilter
>>> field = Field(None,
... filters=[
... Filter(name='fromtimestamp', args=[PosArg(1433109600)]),
... Filter(name='replace', args=[NamedArg('year', '=', 2014)]),
... Filter(name='strftime', args=[PosArg('%F')]),
... Filter(name='replace', args=[PosArg('2014'), PosArg('2015')]),
... ]
... )
>>> solver.solve_value(date, field)
'2015-06-01'
>>> solver.solve_value(None, field)
>>> d = {'foo': {'date': date(2015, 6, 1)}, 'bar': {'date': None}, 'baz': [{'date': None}]}
>>> registry.register(dict)
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='foo'),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
'2015-06-01'
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='bar'),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='baz'),
... SliceFilter(0),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
# Example of how to raise a ``CannotSolve`` exception.
>>> from dataql.solvers.exceptions import CannotSolve
>>> raise CannotSolve(solver, Field('fromtimestamp'), date) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql...CannotSolve: Solver `<MySolver>` was not able to solve...`<Field[fromtimestamp]>`.
"""
# The given value is the starting point on which we apply the first filter.
result = value
# Apply filters one by one on the previous result.
if result is not None:
for filter_ in resource.filters:
result = self.registry.solve_filter(result, filter_)
if result is None:
break
return result |
def register_area(self, area_code, index, userdata):
"""Shares a memory area with the server. That memory block will be
visible by the clients.
"""
size = ctypes.sizeof(userdata)
logger.info("registering area %s, index %s, size %s" % (area_code,
index, size))
size = ctypes.sizeof(userdata)
return self.library.Srv_RegisterArea(self.pointer, area_code, index,
ctypes.byref(userdata), size) | Shares a memory area with the server. That memory block will be
visible by the clients. | Below is the instruction that describes the task:
### Input:
Shares a memory area with the server. That memory block will be
visible by the clients.
### Response:
def register_area(self, area_code, index, userdata):
"""Shares a memory area with the server. That memory block will be
visible by the clients.
"""
size = ctypes.sizeof(userdata)
logger.info("registering area %s, index %s, size %s" % (area_code,
index, size))
size = ctypes.sizeof(userdata)
return self.library.Srv_RegisterArea(self.pointer, area_code, index,
ctypes.byref(userdata), size) |
def make_email(from_addr: str,
date: str = None,
sender: str = "",
reply_to: Union[str, List[str]] = "",
to: Union[str, List[str]] = "",
cc: Union[str, List[str]] = "",
bcc: Union[str, List[str]] = "",
subject: str = "",
body: str = "",
content_type: str = CONTENT_TYPE_TEXT,
charset: str = "utf8",
attachment_filenames: Sequence[str] = None,
attachment_binaries: Sequence[bytes] = None,
attachment_binary_filenames: Sequence[str] = None,
verbose: bool = False) -> email.mime.multipart.MIMEMultipart:
"""
Makes an e-mail message.
Arguments that can be multiple e-mail addresses are (a) a single e-mail
address as a string, or (b) a list of strings (each a single e-mail
address), or (c) a comma-separated list of multiple e-mail addresses.
Args:
from_addr: name of the sender for the "From:" field
date: e-mail date in RFC 2822 format, or ``None`` for "now"
sender: name of the sender for the "Sender:" field
reply_to: name of the sender for the "Reply-To:" field
to: e-mail address(es) of the recipients for "To:" field
cc: e-mail address(es) of the recipients for "Cc:" field
bcc: e-mail address(es) of the recipients for "Bcc:" field
subject: e-mail subject
body: e-mail body
content_type: MIME type for body content, default ``text/plain``
charset: character set for body; default ``utf8``
attachment_filenames: filenames of attachments to add
attachment_binaries: binary objects to add as attachments
attachment_binary_filenames: filenames corresponding to
``attachment_binaries``
verbose: be verbose?
Returns:
a :class:`email.mime.multipart.MIMEMultipart`
Raises:
:exc:`AssertionError`, :exc:`ValueError`
"""
def _csv_list_to_list(x: str) -> List[str]:
stripped = [item.strip() for item in x.split(COMMA)]
return [item for item in stripped if item]
def _assert_nocomma(x: Union[str, List[str]]) -> None:
if isinstance(x, str):
x = [x]
for _addr in x:
assert COMMA not in _addr, (
"Commas not allowed in e-mail addresses: {!r}".format(_addr)
)
# -------------------------------------------------------------------------
# Arguments
# -------------------------------------------------------------------------
if not date:
date = email.utils.formatdate(localtime=True)
assert isinstance(from_addr, str), (
"'From:' can only be a single address "
"(for Python sendmail, not RFC 2822); was {!r}".format(from_addr)
)
_assert_nocomma(from_addr)
assert isinstance(sender, str), (
"'Sender:' can only be a single address; was {!r}".format(sender)
)
_assert_nocomma(sender)
if isinstance(reply_to, str):
reply_to = [reply_to] if reply_to else [] # type: List[str]
_assert_nocomma(reply_to)
if isinstance(to, str):
to = _csv_list_to_list(to)
if isinstance(cc, str):
cc = _csv_list_to_list(cc)
if isinstance(bcc, str):
bcc = _csv_list_to_list(bcc)
assert to or cc or bcc, "No recipients (must have some of: To, Cc, Bcc)"
_assert_nocomma(to)
_assert_nocomma(cc)
_assert_nocomma(bcc)
attachment_filenames = attachment_filenames or [] # type: List[str]
assert all(attachment_filenames), (
"Missing attachment filenames: {!r}".format(attachment_filenames)
)
attachment_binaries = attachment_binaries or [] # type: List[bytes]
attachment_binary_filenames = attachment_binary_filenames or [] # type: List[str] # noqa
assert len(attachment_binaries) == len(attachment_binary_filenames), (
"If you specify attachment_binaries or attachment_binary_filenames, "
"they must be iterables of the same length."
)
assert all(attachment_binary_filenames), (
"Missing filenames for attached binaries: {!r}".format(
attachment_binary_filenames)
)
# -------------------------------------------------------------------------
# Make message
# -------------------------------------------------------------------------
msg = email.mime.multipart.MIMEMultipart()
# Headers: mandatory
msg["From"] = from_addr
msg["Date"] = date
msg["Subject"] = subject
# Headers: optional
if sender:
msg["Sender"] = sender # Single only, not a list
if reply_to:
msg["Reply-To"] = COMMASPACE.join(reply_to)
if to:
msg["To"] = COMMASPACE.join(to)
if cc:
msg["Cc"] = COMMASPACE.join(cc)
if bcc:
msg["Bcc"] = COMMASPACE.join(bcc)
# Body
if content_type == CONTENT_TYPE_TEXT:
msgbody = email.mime.text.MIMEText(body, "plain", charset)
elif content_type == CONTENT_TYPE_HTML:
msgbody = email.mime.text.MIMEText(body, "html", charset)
else:
raise ValueError("unknown content_type")
msg.attach(msgbody)
# Attachments
# noinspection PyPep8,PyBroadException
try:
if attachment_filenames:
# -----------------------------------------------------------------
# Attach things by filename
# -----------------------------------------------------------------
if verbose:
log.debug("attachment_filenames: {}", attachment_filenames)
# noinspection PyTypeChecker
for f in attachment_filenames:
part = email.mime.base.MIMEBase("application", "octet-stream")
part.set_payload(open(f, "rb").read())
email.encoders.encode_base64(part)
part.add_header(
'Content-Disposition',
'attachment; filename="%s"' % os.path.basename(f)
)
msg.attach(part)
if attachment_binaries:
# -----------------------------------------------------------------
# Binary attachments, which have a notional filename
# -----------------------------------------------------------------
if verbose:
log.debug("attachment_binary_filenames: {}",
attachment_binary_filenames)
for i in range(len(attachment_binaries)):
blob = attachment_binaries[i]
filename = attachment_binary_filenames[i]
part = email.mime.base.MIMEBase("application", "octet-stream")
part.set_payload(blob)
email.encoders.encode_base64(part)
part.add_header(
'Content-Disposition',
'attachment; filename="%s"' % filename)
msg.attach(part)
except Exception as e:
raise ValueError("send_email: Failed to attach files: {}".format(e))
return msg | Makes an e-mail message.
Arguments that can be multiple e-mail addresses are (a) a single e-mail
address as a string, or (b) a list of strings (each a single e-mail
address), or (c) a comma-separated list of multiple e-mail addresses.
Args:
from_addr: name of the sender for the "From:" field
date: e-mail date in RFC 2822 format, or ``None`` for "now"
sender: name of the sender for the "Sender:" field
reply_to: name of the sender for the "Reply-To:" field
to: e-mail address(es) of the recipients for "To:" field
cc: e-mail address(es) of the recipients for "Cc:" field
bcc: e-mail address(es) of the recipients for "Bcc:" field
subject: e-mail subject
body: e-mail body
content_type: MIME type for body content, default ``text/plain``
charset: character set for body; default ``utf8``
attachment_filenames: filenames of attachments to add
attachment_binaries: binary objects to add as attachments
attachment_binary_filenames: filenames corresponding to
``attachment_binaries``
verbose: be verbose?
Returns:
a :class:`email.mime.multipart.MIMEMultipart`
Raises:
:exc:`AssertionError`, :exc:`ValueError` | Below is the instruction that describes the task:
### Input:
Makes an e-mail message.
Arguments that can be multiple e-mail addresses are (a) a single e-mail
address as a string, or (b) a list of strings (each a single e-mail
address), or (c) a comma-separated list of multiple e-mail addresses.
Args:
from_addr: name of the sender for the "From:" field
date: e-mail date in RFC 2822 format, or ``None`` for "now"
sender: name of the sender for the "Sender:" field
reply_to: name of the sender for the "Reply-To:" field
to: e-mail address(es) of the recipients for "To:" field
cc: e-mail address(es) of the recipients for "Cc:" field
bcc: e-mail address(es) of the recipients for "Bcc:" field
subject: e-mail subject
body: e-mail body
content_type: MIME type for body content, default ``text/plain``
charset: character set for body; default ``utf8``
attachment_filenames: filenames of attachments to add
attachment_binaries: binary objects to add as attachments
attachment_binary_filenames: filenames corresponding to
``attachment_binaries``
verbose: be verbose?
Returns:
a :class:`email.mime.multipart.MIMEMultipart`
Raises:
:exc:`AssertionError`, :exc:`ValueError`
### Response:
def make_email(from_addr: str,
date: str = None,
sender: str = "",
reply_to: Union[str, List[str]] = "",
to: Union[str, List[str]] = "",
cc: Union[str, List[str]] = "",
bcc: Union[str, List[str]] = "",
subject: str = "",
body: str = "",
content_type: str = CONTENT_TYPE_TEXT,
charset: str = "utf8",
attachment_filenames: Sequence[str] = None,
attachment_binaries: Sequence[bytes] = None,
attachment_binary_filenames: Sequence[str] = None,
verbose: bool = False) -> email.mime.multipart.MIMEMultipart:
"""
Makes an e-mail message.
Arguments that can be multiple e-mail addresses are (a) a single e-mail
address as a string, or (b) a list of strings (each a single e-mail
address), or (c) a comma-separated list of multiple e-mail addresses.
Args:
from_addr: name of the sender for the "From:" field
date: e-mail date in RFC 2822 format, or ``None`` for "now"
sender: name of the sender for the "Sender:" field
reply_to: name of the sender for the "Reply-To:" field
to: e-mail address(es) of the recipients for "To:" field
cc: e-mail address(es) of the recipients for "Cc:" field
bcc: e-mail address(es) of the recipients for "Bcc:" field
subject: e-mail subject
body: e-mail body
content_type: MIME type for body content, default ``text/plain``
charset: character set for body; default ``utf8``
attachment_filenames: filenames of attachments to add
attachment_binaries: binary objects to add as attachments
attachment_binary_filenames: filenames corresponding to
``attachment_binaries``
verbose: be verbose?
Returns:
a :class:`email.mime.multipart.MIMEMultipart`
Raises:
:exc:`AssertionError`, :exc:`ValueError`
"""
def _csv_list_to_list(x: str) -> List[str]:
stripped = [item.strip() for item in x.split(COMMA)]
return [item for item in stripped if item]
def _assert_nocomma(x: Union[str, List[str]]) -> None:
if isinstance(x, str):
x = [x]
for _addr in x:
assert COMMA not in _addr, (
"Commas not allowed in e-mail addresses: {!r}".format(_addr)
)
# -------------------------------------------------------------------------
# Arguments
# -------------------------------------------------------------------------
if not date:
date = email.utils.formatdate(localtime=True)
assert isinstance(from_addr, str), (
"'From:' can only be a single address "
"(for Python sendmail, not RFC 2822); was {!r}".format(from_addr)
)
_assert_nocomma(from_addr)
assert isinstance(sender, str), (
"'Sender:' can only be a single address; was {!r}".format(sender)
)
_assert_nocomma(sender)
if isinstance(reply_to, str):
reply_to = [reply_to] if reply_to else [] # type: List[str]
_assert_nocomma(reply_to)
if isinstance(to, str):
to = _csv_list_to_list(to)
if isinstance(cc, str):
cc = _csv_list_to_list(cc)
if isinstance(bcc, str):
bcc = _csv_list_to_list(bcc)
assert to or cc or bcc, "No recipients (must have some of: To, Cc, Bcc)"
_assert_nocomma(to)
_assert_nocomma(cc)
_assert_nocomma(bcc)
attachment_filenames = attachment_filenames or [] # type: List[str]
assert all(attachment_filenames), (
"Missing attachment filenames: {!r}".format(attachment_filenames)
)
attachment_binaries = attachment_binaries or [] # type: List[bytes]
attachment_binary_filenames = attachment_binary_filenames or [] # type: List[str] # noqa
assert len(attachment_binaries) == len(attachment_binary_filenames), (
"If you specify attachment_binaries or attachment_binary_filenames, "
"they must be iterables of the same length."
)
assert all(attachment_binary_filenames), (
"Missing filenames for attached binaries: {!r}".format(
attachment_binary_filenames)
)
# -------------------------------------------------------------------------
# Make message
# -------------------------------------------------------------------------
msg = email.mime.multipart.MIMEMultipart()
# Headers: mandatory
msg["From"] = from_addr
msg["Date"] = date
msg["Subject"] = subject
# Headers: optional
if sender:
msg["Sender"] = sender # Single only, not a list
if reply_to:
msg["Reply-To"] = COMMASPACE.join(reply_to)
if to:
msg["To"] = COMMASPACE.join(to)
if cc:
msg["Cc"] = COMMASPACE.join(cc)
if bcc:
msg["Bcc"] = COMMASPACE.join(bcc)
# Body
if content_type == CONTENT_TYPE_TEXT:
msgbody = email.mime.text.MIMEText(body, "plain", charset)
elif content_type == CONTENT_TYPE_HTML:
msgbody = email.mime.text.MIMEText(body, "html", charset)
else:
raise ValueError("unknown content_type")
msg.attach(msgbody)
# Attachments
# noinspection PyPep8,PyBroadException
try:
if attachment_filenames:
# -----------------------------------------------------------------
# Attach things by filename
# -----------------------------------------------------------------
if verbose:
log.debug("attachment_filenames: {}", attachment_filenames)
# noinspection PyTypeChecker
for f in attachment_filenames:
part = email.mime.base.MIMEBase("application", "octet-stream")
part.set_payload(open(f, "rb").read())
email.encoders.encode_base64(part)
part.add_header(
'Content-Disposition',
'attachment; filename="%s"' % os.path.basename(f)
)
msg.attach(part)
if attachment_binaries:
# -----------------------------------------------------------------
# Binary attachments, which have a notional filename
# -----------------------------------------------------------------
if verbose:
log.debug("attachment_binary_filenames: {}",
attachment_binary_filenames)
for i in range(len(attachment_binaries)):
blob = attachment_binaries[i]
filename = attachment_binary_filenames[i]
part = email.mime.base.MIMEBase("application", "octet-stream")
part.set_payload(blob)
email.encoders.encode_base64(part)
part.add_header(
'Content-Disposition',
'attachment; filename="%s"' % filename)
msg.attach(part)
except Exception as e:
raise ValueError("send_email: Failed to attach files: {}".format(e))
return msg |
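A minimal usage sketch for make_email, assuming it is importable together with the module-level CONTENT_TYPE_HTML constant it references; the addresses and subject are placeholders.
msg = make_email(
    from_addr="sender@example.org",
    to="alice@example.org, bob@example.org",  # comma-separated string is split internally
    subject="Weekly report",
    body="<p>All green this week.</p>",
    content_type=CONTENT_TYPE_HTML,
)
raw = msg.as_bytes()  # ready to hand to smtplib.SMTP.send_message / sendmail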
def from_file(self, filename):
"""Update running digest with content of named file."""
f = open(filename, 'rb')
while True:
data = f.read(10480)
if not data:
break
self.update(data)
f.close() | Update running digest with content of named file. | Below is the instruction that describes the task:
### Input:
Update running digest with content of named file.
### Response:
def from_file(self, filename):
"""Update running digest with content of named file."""
f = open(filename, 'rb')
while True:
data = f.read(10480)
if not data:
break
self.update(data)
f.close() |
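The method above is the standard chunked-read pattern for hashing large files without loading them into memory. A standalone, runnable illustration of the same loop using hashlib (the file name is a placeholder):
import hashlib

h = hashlib.sha256()
with open("archive.tar", "rb") as f:
    while True:
        data = f.read(10480)  # same chunk size as the method above
        if not data:
            break
        h.update(data)
print(h.hexdigest())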
def load_sgraph(filename, format='binary', delimiter='auto'):
"""
Load SGraph from text file or previously saved SGraph binary.
Parameters
----------
filename : string
Location of the file. Can be a local path or a remote URL.
format : {'binary', 'snap', 'csv', 'tsv'}, optional
Format of the file to load.
- 'binary': native graph format obtained from `SGraph.save`.
- 'snap': tab or space separated edge list format with comments, used in
the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_.
- 'csv': comma-separated edge list without header or comments.
- 'tsv': tab-separated edge list without header or comments.
delimiter : str, optional
The delimiter used in 'snap', 'csv' or 'tsv' format. Each of these
formats has a default delimiter, but sometimes it is useful to
override the default delimiter.
Returns
-------
out : SGraph
Loaded SGraph.
See Also
--------
SGraph, SGraph.save
Examples
--------
>>> g = turicreate.SGraph().add_vertices([turicreate.Vertex(i) for i in range(5)])
Save and load in binary format.
>>> g.save('mygraph')
>>> g2 = turicreate.load_sgraph('mygraph')
"""
if not format in ['binary', 'snap', 'csv', 'tsv']:
raise ValueError('Invalid format: %s' % format)
with cython_context():
g = None
if format is 'binary':
proxy = glconnect.get_unity().load_graph(_make_internal_url(filename))
g = SGraph(_proxy=proxy)
elif format is 'snap':
if delimiter == 'auto':
delimiter = '\t'
sf = SFrame.read_csv(filename, comment_char='#', delimiter=delimiter,
header=False, column_type_hints=int)
g = SGraph().add_edges(sf, 'X1', 'X2')
elif format is 'csv':
if delimiter == 'auto':
delimiter = ','
sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
g = SGraph().add_edges(sf, 'X1', 'X2')
elif format is 'tsv':
if delimiter == 'auto':
delimiter = '\t'
sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
g = SGraph().add_edges(sf, 'X1', 'X2')
g.summary() # materialize
return g | Load SGraph from text file or previously saved SGraph binary.
Parameters
----------
filename : string
Location of the file. Can be a local path or a remote URL.
format : {'binary', 'snap', 'csv', 'tsv'}, optional
Format of the file to load.
- 'binary': native graph format obtained from `SGraph.save`.
- 'snap': tab or space separated edge list format with comments, used in
the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_.
- 'csv': comma-separated edge list without header or comments.
- 'tsv': tab-separated edge list without header or comments.
delimiter : str, optional
The delimiter used in 'snap', 'csv' or 'tsv' format. Each of these
formats has a default delimiter, but sometimes it is useful to
override the default delimiter.
Returns
-------
out : SGraph
Loaded SGraph.
See Also
--------
SGraph, SGraph.save
Examples
--------
>>> g = turicreate.SGraph().add_vertices([turicreate.Vertex(i) for i in range(5)])
Save and load in binary format.
>>> g.save('mygraph')
>>> g2 = turicreate.load_sgraph('mygraph') | Below is the instruction that describes the task:
### Input:
Load SGraph from text file or previously saved SGraph binary.
Parameters
----------
filename : string
Location of the file. Can be a local path or a remote URL.
format : {'binary', 'snap', 'csv', 'tsv'}, optional
Format of the file to load.
- 'binary': native graph format obtained from `SGraph.save`.
- 'snap': tab or space separated edge list format with comments, used in
the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_.
- 'csv': comma-separated edge list without header or comments.
- 'tsv': tab-separated edge list without header or comments.
delimiter : str, optional
The delimiter used in 'snap', 'csv' or 'tsv' format. Each of these
formats has a default delimiter, but sometimes it is useful to
override the default delimiter.
Returns
-------
out : SGraph
Loaded SGraph.
See Also
--------
SGraph, SGraph.save
Examples
--------
>>> g = turicreate.SGraph().add_vertices([turicreate.Vertex(i) for i in range(5)])
Save and load in binary format.
>>> g.save('mygraph')
>>> g2 = turicreate.load_sgraph('mygraph')
### Response:
def load_sgraph(filename, format='binary', delimiter='auto'):
"""
Load SGraph from text file or previously saved SGraph binary.
Parameters
----------
filename : string
Location of the file. Can be a local path or a remote URL.
format : {'binary', 'snap', 'csv', 'tsv'}, optional
Format of the file to load.
- 'binary': native graph format obtained from `SGraph.save`.
- 'snap': tab or space separated edge list format with comments, used in
the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_.
- 'csv': comma-separated edge list without header or comments.
- 'tsv': tab-separated edge list without header or comments.
delimiter : str, optional
The delimiter used in 'snap', 'csv' or 'tsv' format. Each of these
formats has a default delimiter, but sometimes it is useful to
override the default delimiter.
Returns
-------
out : SGraph
Loaded SGraph.
See Also
--------
SGraph, SGraph.save
Examples
--------
>>> g = turicreate.SGraph().add_vertices([turicreate.Vertex(i) for i in range(5)])
Save and load in binary format.
>>> g.save('mygraph')
>>> g2 = turicreate.load_sgraph('mygraph')
"""
if not format in ['binary', 'snap', 'csv', 'tsv']:
raise ValueError('Invalid format: %s' % format)
with cython_context():
g = None
if format is 'binary':
proxy = glconnect.get_unity().load_graph(_make_internal_url(filename))
g = SGraph(_proxy=proxy)
elif format is 'snap':
if delimiter == 'auto':
delimiter = '\t'
sf = SFrame.read_csv(filename, comment_char='#', delimiter=delimiter,
header=False, column_type_hints=int)
g = SGraph().add_edges(sf, 'X1', 'X2')
elif format is 'csv':
if delimiter == 'auto':
delimiter = ','
sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
g = SGraph().add_edges(sf, 'X1', 'X2')
elif format is 'tsv':
if delimiter == 'auto':
delimiter = '\t'
sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
g = SGraph().add_edges(sf, 'X1', 'X2')
g.summary() # materialize
return g |
def _get_object_parser(self, json):
"""
Parses a json document into a pandas object.
"""
typ = self.typ
dtype = self.dtype
kwargs = {
"orient": self.orient, "dtype": self.dtype,
"convert_axes": self.convert_axes,
"convert_dates": self.convert_dates,
"keep_default_dates": self.keep_default_dates, "numpy": self.numpy,
"precise_float": self.precise_float, "date_unit": self.date_unit
}
obj = None
if typ == 'frame':
obj = FrameParser(json, **kwargs).parse()
if typ == 'series' or obj is None:
if not isinstance(dtype, bool):
kwargs['dtype'] = dtype
obj = SeriesParser(json, **kwargs).parse()
return obj | Parses a json document into a pandas object. | Below is the instruction that describes the task:
### Input:
Parses a json document into a pandas object.
### Response:
def _get_object_parser(self, json):
"""
Parses a json document into a pandas object.
"""
typ = self.typ
dtype = self.dtype
kwargs = {
"orient": self.orient, "dtype": self.dtype,
"convert_axes": self.convert_axes,
"convert_dates": self.convert_dates,
"keep_default_dates": self.keep_default_dates, "numpy": self.numpy,
"precise_float": self.precise_float, "date_unit": self.date_unit
}
obj = None
if typ == 'frame':
obj = FrameParser(json, **kwargs).parse()
if typ == 'series' or obj is None:
if not isinstance(dtype, bool):
kwargs['dtype'] = dtype
obj = SeriesParser(json, **kwargs).parse()
return obj |
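This method is the internal dispatcher behind the typ argument of pandas.read_json; a rough illustration at the public API level (exact call style varies slightly across pandas versions, hence the StringIO wrapper):
import io
import pandas as pd

df = pd.read_json(io.StringIO('{"a": {"0": 1, "1": 2}}'))        # typ='frame' -> FrameParser
s = pd.read_json(io.StringIO('{"0": 1, "1": 2}'), typ="series")  # typ='series' -> SeriesParser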
def updateColumnName(self, networkId, tableType, body, verbose=None):
"""
Renames an existing column in the table specified by the `tableType` and `networkId` parameters.
:param networkId: SUID of the network containing the table
:param tableType: Table Type
:param body: Old and new column name
:param verbose: print more
:returns: default: successful operation
"""
response=api(url=self.___url+'networks/'+str(networkId)+'/tables/'+str(tableType)+'/columns', method="PUT", body=body, verbose=verbose)
return response | Renames an existing column in the table specified by the `tableType` and `networkId` parameters.
:param networkId: SUID of the network containing the table
:param tableType: Table Type
:param body: Old and new column name
:param verbose: print more
:returns: default: successful operation | Below is the instruction that describes the task:
### Input:
Renames an existing column in the table specified by the `tableType` and `networkId` parameters.
:param networkId: SUID of the network containing the table
:param tableType: Table Type
:param body: Old and new column name
:param verbose: print more
:returns: default: successful operation
### Response:
def updateColumnName(self, networkId, tableType, body, verbose=None):
"""
Renames an existing column in the table specified by the `tableType` and `networkId` parameters.
:param networkId: SUID of the network containing the table
:param tableType: Table Type
:param body: Old and new column name
:param verbose: print more
:returns: default: successful operation
"""
response=api(url=self.___url+'networks/'+str(networkId)+'/tables/'+str(tableType)+'/columns', method="PUT", body=body, verbose=verbose)
return response |
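A hedged usage sketch, assuming an already-constructed py2cytoscape-style client object and a cyREST payload with 'oldName'/'newName' keys; check your cyREST version for the exact field names.
client.updateColumnName(               # 'client' stands for a configured API wrapper (hypothetical)
    networkId=52,
    tableType="defaultnode",
    body={"oldName": "score", "newName": "confidence"},
)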
def _mirtop(out_files, hairpin, gff3, species, out):
"""
Convert miraligner to mirtop format
"""
args = argparse.Namespace()
args.hairpin = hairpin
args.sps = species
args.gtf = gff3
args.add_extra = True
args.files = out_files
args.format = "seqbuster"
args.out_format = "gff"
args.out = out
reader(args) | Convert miraligner to mirtop format | Below is the instruction that describes the task:
### Input:
Convert miraligner to mirtop format
### Response:
def _mirtop(out_files, hairpin, gff3, species, out):
"""
Convert miraligner to mirtop format
"""
args = argparse.Namespace()
args.hairpin = hairpin
args.sps = species
args.gtf = gff3
args.add_extra = True
args.files = out_files
args.format = "seqbuster"
args.out_format = "gff"
args.out = out
reader(args) |
def _get_output(self, a, image):
""" Looks up the precomputed adversarial image for a given image.
"""
sd = np.square(self._input_images - image)
mses = np.mean(sd, axis=tuple(range(1, sd.ndim)))
index = np.argmin(mses)
# if we run into numerical problems with this approach, we might
# need to add a very tiny threshold here
if mses[index] > 0:
raise ValueError('No precomputed output image for this image')
return self._output_images[index] | Looks up the precomputed adversarial image for a given image. | Below is the instruction that describes the task:
### Input:
Looks up the precomputed adversarial image for a given image.
### Response:
def _get_output(self, a, image):
""" Looks up the precomputed adversarial image for a given image.
"""
sd = np.square(self._input_images - image)
mses = np.mean(sd, axis=tuple(range(1, sd.ndim)))
index = np.argmin(mses)
# if we run into numerical problems with this approach, we might
# need to add a very tiny threshold here
if mses[index] > 0:
raise ValueError('No precomputed output image for this image')
return self._output_images[index] |
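A standalone illustration of the lookup logic: compute the per-image mean squared difference against the stored inputs and take the index of the (exact) match.
import numpy as np

stored = np.random.rand(5, 8, 8, 3)   # stand-in for the precomputed input images
query = stored[3].copy()              # query identical to entry 3
sd = np.square(stored - query)
mses = np.mean(sd, axis=tuple(range(1, sd.ndim)))
index = int(np.argmin(mses))
assert index == 3 and mses[index] == 0.0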
def to_array(self):
"""
Serializes this File to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(File, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
if self.file_size is not None:
array['file_size'] = int(self.file_size) # type int
if self.file_path is not None:
array['file_path'] = u(self.file_path) # py2: type unicode, py3: type str
return array | Serializes this File to a dictionary.
:return: dictionary representation of this object.
:rtype: dict | Below is the instruction that describes the task:
### Input:
Serializes this File to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
### Response:
def to_array(self):
"""
Serializes this File to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(File, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
if self.file_size is not None:
array['file_size'] = int(self.file_size) # type int
if self.file_path is not None:
array['file_path'] = u(self.file_path) # py2: type unicode, py3: type str
return array |
def updateAllKeys(self):
"""Update times for all keys in the layout."""
for kf, key in zip(self.kf_list, self.sorted_key_list()):
kf.update(key, self.dct[key]) | Update times for all keys in the layout. | Below is the instruction that describes the task:
### Input:
Update times for all keys in the layout.
### Response:
def updateAllKeys(self):
"""Update times for all keys in the layout."""
for kf, key in zip(self.kf_list, self.sorted_key_list()):
kf.update(key, self.dct[key]) |
def _deprecated_config_handler(self, func, msg, warning_class):
""" this function will wrap around parameters that are deprecated
:param msg: deprecation message
:param warning_class: class of warning exception to be raised
:param func: function to be wrapped around
"""
@wraps(func)
def config_handler(*args, **kwargs):
warnings.warn(msg, warning_class)
return func(*args, **kwargs)
return config_handler | this function will wrap around parameters that are deprecated
:param msg: deprecation message
:param warning_class: class of warning exception to be raised
:param func: function to be wrapped around | Below is the instruction that describes the task:
### Input:
this function will wrap around parameters that are deprecated
:param msg: deprecation message
:param warning_class: class of warning exception to be raised
:param func: function to be wrapped around
### Response:
def _deprecated_config_handler(self, func, msg, warning_class):
""" this function will wrap around parameters that are deprecated
:param msg: deprecation message
:param warning_class: class of warning exception to be raised
:param func: function to be wrapped around
"""
@wraps(func)
def config_handler(*args, **kwargs):
warnings.warn(msg, warning_class)
return func(*args, **kwargs)
return config_handler |
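A standalone, runnable version of the same wrapping pattern, for readers who want to see the decorator in action outside the class:
import warnings
from functools import wraps

def deprecated(msg, warning_class=DeprecationWarning):
    def decorate(func):
        @wraps(func)
        def handler(*args, **kwargs):
            warnings.warn(msg, warning_class)
            return func(*args, **kwargs)
        return handler
    return decorate

@deprecated("old_api() is deprecated, use new_api() instead")
def old_api(x):
    return x * 2

old_api(3)  # returns 6 and emits a DeprecationWarning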
def QA_fetch_stock_basic_info_tushare(collections=DATABASE.stock_info_tushare):
'''
purpose:
tushare stock list database
code, stock code
name, stock name
industry, industry sector
area, region
pe, price-to-earnings ratio
outstanding, outstanding shares (in 100 millions)
totals, total shares (in 100 millions)
totalAssets, total assets (in 10 thousands)
liquidAssets, current assets
fixedAssets, fixed assets
reserved, capital reserve
reservedPerShare, capital reserve per share
esp, earnings per share
bvps, net assets per share
pb, price-to-book ratio
timeToMarket, listing date
undp, undistributed profit
perundp, undistributed profit per share
rev, revenue year-on-year (%)
profit, profit year-on-year (%)
gpr, gross profit margin (%)
npr, net profit margin (%)
holders, number of shareholders
add by tauruswang,
:param collections: the stock_info_tushare collection
:return:
'''
'Fetch basic stock information'
items = [item for item in collections.find()]
# 🛠todo convert the result into a DataFrame
return items | purpose:
tushare stock list database
code, stock code
name, stock name
industry, industry sector
area, region
pe, price-to-earnings ratio
outstanding, outstanding shares (in 100 millions)
totals, total shares (in 100 millions)
totalAssets, total assets (in 10 thousands)
liquidAssets, current assets
fixedAssets, fixed assets
reserved, capital reserve
reservedPerShare, capital reserve per share
esp, earnings per share
bvps, net assets per share
pb, price-to-book ratio
timeToMarket, listing date
undp, undistributed profit
perundp, undistributed profit per share
rev, revenue year-on-year (%)
profit, profit year-on-year (%)
gpr, gross profit margin (%)
npr, net profit margin (%)
holders, number of shareholders
add by tauruswang,
:param collections: the stock_info_tushare collection
:return: | Below is the instruction that describes the task:
### Input:
purpose:
tushare stock list database
code, stock code
name, stock name
industry, industry sector
area, region
pe, price-to-earnings ratio
outstanding, outstanding shares (in 100 millions)
totals, total shares (in 100 millions)
totalAssets, total assets (in 10 thousands)
liquidAssets, current assets
fixedAssets, fixed assets
reserved, capital reserve
reservedPerShare, capital reserve per share
esp, earnings per share
bvps, net assets per share
pb, price-to-book ratio
timeToMarket, listing date
undp, undistributed profit
perundp, undistributed profit per share
rev, revenue year-on-year (%)
profit, profit year-on-year (%)
gpr, gross profit margin (%)
npr, net profit margin (%)
holders, number of shareholders
add by tauruswang,
:param collections: the stock_info_tushare collection
:return:
### Response:
def QA_fetch_stock_basic_info_tushare(collections=DATABASE.stock_info_tushare):
'''
purpose:
tushare stock list database
code, stock code
name, stock name
industry, industry sector
area, region
pe, price-to-earnings ratio
outstanding, outstanding shares (in 100 millions)
totals, total shares (in 100 millions)
totalAssets, total assets (in 10 thousands)
liquidAssets, current assets
fixedAssets, fixed assets
reserved, capital reserve
reservedPerShare, capital reserve per share
esp, earnings per share
bvps, net assets per share
pb, price-to-book ratio
timeToMarket, listing date
undp, undistributed profit
perundp, undistributed profit per share
rev, revenue year-on-year (%)
profit, profit year-on-year (%)
gpr, gross profit margin (%)
npr, net profit margin (%)
holders, number of shareholders
add by tauruswang,
:param collections: the stock_info_tushare collection
:return:
'''
'Fetch basic stock information'
items = [item for item in collections.find()]
# 🛠todo convert the result into a DataFrame
return items |
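A hedged usage sketch: it assumes a running MongoDB instance already populated by QUANTAXIS' tushare save routines, and the DataFrame conversion is only the optional step hinted at by the todo comment.
import pandas as pd

info = QA_fetch_stock_basic_info_tushare()
df = pd.DataFrame(info)
print(df[["code", "name", "industry", "pe"]].head())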
def tagfunc(nargs=None, ndefs=None, nouts=None):
"""
decorator for a tagged function
"""
def wrapper(f):
return wraps(f)(FunctionWithTag(f, nargs=nargs, nouts=nouts, ndefs=ndefs))
return wrapper | decorator for a tagged function | Below is the instruction that describes the task:
### Input:
decorator for a tagged function
### Response:
def tagfunc(nargs=None, ndefs=None, nouts=None):
"""
decorator for a tagged function
"""
def wrapper(f):
return wraps(f)(FunctionWithTag(f, nargs=nargs, nouts=nouts, ndefs=ndefs))
return wrapper |
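The exact behaviour of FunctionWithTag is not shown above, so the sketch below only illustrates how the decorator is applied; the argument values are hypothetical.
@tagfunc(nargs=2, nouts=1)
def add(a, b):
    return a + b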
def _ensure_tuple_or_list(arg_name, tuple_or_list):
"""Ensures an input is a tuple or list.
This effectively reduces the iterable types allowed to a very short
whitelist: list and tuple.
:type arg_name: str
:param arg_name: Name of argument to use in error message.
:type tuple_or_list: sequence of str
:param tuple_or_list: Sequence to be verified.
:rtype: list of str
:returns: The ``tuple_or_list`` passed in cast to a ``list``.
:raises TypeError: if the ``tuple_or_list`` is not a tuple or list.
"""
if not isinstance(tuple_or_list, (tuple, list)):
raise TypeError(
"Expected %s to be a tuple or list. "
"Received %r" % (arg_name, tuple_or_list)
)
return list(tuple_or_list) | Ensures an input is a tuple or list.
This effectively reduces the iterable types allowed to a very short
whitelist: list and tuple.
:type arg_name: str
:param arg_name: Name of argument to use in error message.
:type tuple_or_list: sequence of str
:param tuple_or_list: Sequence to be verified.
:rtype: list of str
:returns: The ``tuple_or_list`` passed in cast to a ``list``.
:raises TypeError: if the ``tuple_or_list`` is not a tuple or list. | Below is the instruction that describes the task:
### Input:
Ensures an input is a tuple or list.
This effectively reduces the iterable types allowed to a very short
whitelist: list and tuple.
:type arg_name: str
:param arg_name: Name of argument to use in error message.
:type tuple_or_list: sequence of str
:param tuple_or_list: Sequence to be verified.
:rtype: list of str
:returns: The ``tuple_or_list`` passed in cast to a ``list``.
:raises TypeError: if the ``tuple_or_list`` is not a tuple or list.
### Response:
def _ensure_tuple_or_list(arg_name, tuple_or_list):
"""Ensures an input is a tuple or list.
This effectively reduces the iterable types allowed to a very short
whitelist: list and tuple.
:type arg_name: str
:param arg_name: Name of argument to use in error message.
:type tuple_or_list: sequence of str
:param tuple_or_list: Sequence to be verified.
:rtype: list of str
:returns: The ``tuple_or_list`` passed in cast to a ``list``.
:raises TypeError: if the ``tuple_or_list`` is not a tuple or list.
"""
if not isinstance(tuple_or_list, (tuple, list)):
raise TypeError(
"Expected %s to be a tuple or list. "
"Received %r" % (arg_name, tuple_or_list)
)
return list(tuple_or_list) |
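A short illustration of the whitelist behaviour (note that a plain string, although iterable, is rejected):
_ensure_tuple_or_list("fields", ("name", "age"))  # -> ['name', 'age']
_ensure_tuple_or_list("fields", ["name"])         # -> ['name']
_ensure_tuple_or_list("fields", "name")           # raises TypeError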
def keys(self, section=None):
"""Provide dict like keys method"""
if not section and self.section:
section = self.section
config = self.config.get(section, {}) if section else self.config
return config.keys() | Provide dict like keys method | Below is the instruction that describes the task:
### Input:
Provide dict like keys method
### Response:
def keys(self, section=None):
"""Provide dict like keys method"""
if not section and self.section:
section = self.section
config = self.config.get(section, {}) if section else self.config
return config.keys() |
def state_entry(self, args=None, **kwargs): # pylint: disable=arguments-differ
"""
Create an entry state.
:param args: List of SootArgument values (optional).
"""
state = self.state_blank(**kwargs)
# for the Java main method `public static main(String[] args)`,
# we add symbolic cmdline arguments
if not args and state.addr.method.name == 'main' and \
state.addr.method.params[0] == 'java.lang.String[]':
cmd_line_args = SimSootExpr_NewArray.new_array(state, "java.lang.String", BVS('argc', 32))
cmd_line_args.add_default_value_generator(self.generate_symbolic_cmd_line_arg)
args = [SootArgument(cmd_line_args, "java.lang.String[]")]
# for referencing the Java array, we need to know the array reference
# => saves it in the globals dict
state.globals['cmd_line_args'] = cmd_line_args
# setup arguments
SimEngineSoot.setup_arguments(state, args)
return state | Create an entry state.
:param args: List of SootArgument values (optional). | Below is the instruction that describes the task:
### Input:
Create an entry state.
:param args: List of SootArgument values (optional).
### Response:
def state_entry(self, args=None, **kwargs): # pylint: disable=arguments-differ
"""
Create an entry state.
:param args: List of SootArgument values (optional).
"""
state = self.state_blank(**kwargs)
# for the Java main method `public static main(String[] args)`,
# we add symbolic cmdline arguments
if not args and state.addr.method.name == 'main' and \
state.addr.method.params[0] == 'java.lang.String[]':
cmd_line_args = SimSootExpr_NewArray.new_array(state, "java.lang.String", BVS('argc', 32))
cmd_line_args.add_default_value_generator(self.generate_symbolic_cmd_line_arg)
args = [SootArgument(cmd_line_args, "java.lang.String[]")]
# for referencing the Java array, we need to know the array reference
# => saves it in the globals dict
state.globals['cmd_line_args'] = cmd_line_args
# setup arguments
SimEngineSoot.setup_arguments(state, args)
return state |
def nunpack(s, default=0):
"""Unpacks 1 to 4 byte integers (big endian)."""
l = len(s)
if not l:
return default
elif l == 1:
return ord(s)
elif l == 2:
return struct.unpack('>H', s)[0]
elif l == 3:
return struct.unpack('>L', b'\x00'+s)[0]
elif l == 4:
return struct.unpack('>L', s)[0]
else:
raise TypeError('invalid length: %d' % l) | Unpacks 1 to 4 byte integers (big endian). | Below is the instruction that describes the task:
### Input:
Unpacks 1 to 4 byte integers (big endian).
### Response:
def nunpack(s, default=0):
"""Unpacks 1 to 4 byte integers (big endian)."""
l = len(s)
if not l:
return default
elif l == 1:
return ord(s)
elif l == 2:
return struct.unpack('>H', s)[0]
elif l == 3:
return struct.unpack('>L', b'\x00'+s)[0]
elif l == 4:
return struct.unpack('>L', s)[0]
else:
raise TypeError('invalid length: %d' % l) |
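A few concrete big-endian unpackings, assuming Python 3 bytes input:
nunpack(b"")                  # -> 0 (the default)
nunpack(b"\x7f")              # -> 127
nunpack(b"\x01\x00")          # -> 256
nunpack(b"\x01\x00\x00")      # -> 65536
nunpack(b"\x00\x01\x00\x00")  # -> 65536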
def choropleth(self, *args, **kwargs):
"""Call the Choropleth class with the same arguments.
This method may be deleted after a year from now (Nov 2018).
"""
warnings.warn(
'The choropleth method has been deprecated. Instead use the new '
'Choropleth class, which has the same arguments. See the example '
'notebook \'GeoJSON_and_choropleth\' for how to do this.',
FutureWarning
)
from folium.features import Choropleth
self.add_child(Choropleth(*args, **kwargs)) | Call the Choropleth class with the same arguments.
This method may be deleted after a year from now (Nov 2018). | Below is the instruction that describes the task:
### Input:
Call the Choropleth class with the same arguments.
This method may be deleted after a year from now (Nov 2018).
### Response:
def choropleth(self, *args, **kwargs):
"""Call the Choropleth class with the same arguments.
This method may be deleted after a year from now (Nov 2018).
"""
warnings.warn(
'The choropleth method has been deprecated. Instead use the new '
'Choropleth class, which has the same arguments. See the example '
'notebook \'GeoJSON_and_choropleth\' for how to do this.',
FutureWarning
)
from folium.features import Choropleth
self.add_child(Choropleth(*args, **kwargs)) |
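A hedged migration sketch showing the Choropleth class the deprecation warning points to; the GeoJSON path, column names, and sample data are placeholders.
import folium
import pandas as pd

state_data = pd.DataFrame({"State": ["ND", "SD"], "Unemployment": [2.6, 3.0]})
m = folium.Map(location=[48, -102], zoom_start=3)
folium.Choropleth(
    geo_data="us_states.json",        # placeholder path to a GeoJSON file
    data=state_data,
    columns=["State", "Unemployment"],
    key_on="feature.id",
    fill_color="YlGn",
).add_to(m)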
def description(self):
"""
Get a string describing the HID descriptor.
"""
return \
"""HIDDevice:
{} | {:x}:{:x} | {} | {} | {}
release_number: {}
usage_page: {}
usage: {}
interface_number: {}\
""".format(self.path,
self.vendor_id,
self.product_id,
self.manufacturer_string,
self.product_string,
self.serial_number,
self.release_number,
self.usage_page,
self.usage,
self.interface_number
) | Get a string describing the HID descriptor. | Below is the instruction that describes the task:
### Input:
Get a string describing the HID descriptor.
### Response:
def description(self):
"""
Get a string describing the HID descriptor.
"""
return \
"""HIDDevice:
{} | {:x}:{:x} | {} | {} | {}
release_number: {}
usage_page: {}
usage: {}
interface_number: {}\
""".format(self.path,
self.vendor_id,
self.product_id,
self.manufacturer_string,
self.product_string,
self.serial_number,
self.release_number,
self.usage_page,
self.usage,
self.interface_number
) |
def __put_buttons_in_buttonframe(choices):
"""Put the buttons in the buttons frame"""
global __widgetTexts, __firstWidget, buttonsFrame
__firstWidget = None
__widgetTexts = {}
i = 0
for buttonText in choices:
tempButton = tk.Button(buttonsFrame, takefocus=1, text=buttonText)
_bindArrows(tempButton)
tempButton.pack(expand=tk.YES, side=tk.LEFT, padx='1m', pady='1m', ipadx='2m', ipady='1m')
# remember the text associated with this widget
__widgetTexts[tempButton] = buttonText
# remember the first widget, so we can put the focus there
if i == 0:
__firstWidget = tempButton
i = 1
# for the commandButton, bind activation events to the activation event handler
commandButton = tempButton
handler = __buttonEvent
for selectionEvent in STANDARD_SELECTION_EVENTS:
commandButton.bind('<%s>' % selectionEvent, handler)
if CANCEL_TEXT in choices:
commandButton.bind('<Escape>', __cancelButtonEvent) | Put the buttons in the buttons frame | Below is the instruction that describes the task:
### Input:
Put the buttons in the buttons frame
### Response:
def __put_buttons_in_buttonframe(choices):
"""Put the buttons in the buttons frame"""
global __widgetTexts, __firstWidget, buttonsFrame
__firstWidget = None
__widgetTexts = {}
i = 0
for buttonText in choices:
tempButton = tk.Button(buttonsFrame, takefocus=1, text=buttonText)
_bindArrows(tempButton)
tempButton.pack(expand=tk.YES, side=tk.LEFT, padx='1m', pady='1m', ipadx='2m', ipady='1m')
# remember the text associated with this widget
__widgetTexts[tempButton] = buttonText
# remember the first widget, so we can put the focus there
if i == 0:
__firstWidget = tempButton
i = 1
# for the commandButton, bind activation events to the activation event handler
commandButton = tempButton
handler = __buttonEvent
for selectionEvent in STANDARD_SELECTION_EVENTS:
commandButton.bind('<%s>' % selectionEvent, handler)
if CANCEL_TEXT in choices:
commandButton.bind('<Escape>', __cancelButtonEvent) |
def delete_file(self, id):
"""
Delete file.
Remove the specified file
curl -XDELETE 'https://<canvas>/api/v1/files/<file_id>' \
-H 'Authorization: Bearer <token>'
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("DELETE /api/v1/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/files/{id}".format(**path), data=data, params=params, no_data=True) | Delete file.
Remove the specified file
curl -XDELETE 'https://<canvas>/api/v1/files/<file_id>' \
-H 'Authorization: Bearer <token>' | Below is the instruction that describes the task:
### Input:
Delete file.
Remove the specified file
curl -XDELETE 'https://<canvas>/api/v1/files/<file_id>' \
-H 'Authorization: Bearer <token>'
### Response:
def delete_file(self, id):
"""
Delete file.
Remove the specified file
curl -XDELETE 'https://<canvas>/api/v1/files/<file_id>' \
-H 'Authorization: Bearer <token>'
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("DELETE /api/v1/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/files/{id}".format(**path), data=data, params=params, no_data=True) |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'entities') and self.entities is not None:
_dict['entities'] = [x._to_dict() for x in self.entities]
if hasattr(self, 'pagination') and self.pagination is not None:
_dict['pagination'] = self.pagination._to_dict()
return _dict | Return a json dictionary representing this model. | Below is the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'entities') and self.entities is not None:
_dict['entities'] = [x._to_dict() for x in self.entities]
if hasattr(self, 'pagination') and self.pagination is not None:
_dict['pagination'] = self.pagination._to_dict()
return _dict |
def post(self, path, data=None):
"""Encapsulates POST requests"""
data = data or {}
response = requests.post(self.url(path), data=to_json(data), headers=self.request_header())
return self.parse_response(response) | Encapsulates POST requests | Below is the instruction that describes the task:
### Input:
Encapsulates POST requests
### Response:
def post(self, path, data=None):
"""Encapsulates POST requests"""
data = data or {}
response = requests.post(self.url(path), data=to_json(data), headers=self.request_header())
return self.parse_response(response) |
def _core_qft(qubits: List[int], coeff: int) -> Program:
"""
Generates the core program to perform the quantum Fourier transform
:param qubits: A list of qubit indexes.
:param coeff: A modifier for the angle used in rotations (-1 for inverse QFT, 1 for QFT)
:return: A Quil program to compute the core (inverse) QFT of the qubits.
"""
q = qubits[0]
qs = qubits[1:]
if 1 == len(qubits):
return [H(q)]
else:
n = 1 + len(qs)
cR = []
for idx, i in enumerate(range(n - 1, 0, -1)):
q_idx = qs[idx]
angle = math.pi / 2 ** (n - i)
cR.append(CPHASE(coeff * angle, q, q_idx))
return _core_qft(qs, coeff) + list(reversed(cR)) + [H(q)] | Generates the core program to perform the quantum Fourier transform
:param qubits: A list of qubit indexes.
:param coeff: A modifier for the angle used in rotations (-1 for inverse QFT, 1 for QFT)
:return: A Quil program to compute the core (inverse) QFT of the qubits. | Below is the instruction that describes the task:
### Input:
Generates the core program to perform the quantum Fourier transform
:param qubits: A list of qubit indexes.
:param coeff: A modifier for the angle used in rotations (-1 for inverse QFT, 1 for QFT)
:return: A Quil program to compute the core (inverse) QFT of the qubits.
### Response:
def _core_qft(qubits: List[int], coeff: int) -> Program:
"""
Generates the core program to perform the quantum Fourier transform
:param qubits: A list of qubit indexes.
:param coeff: A modifier for the angle used in rotations (-1 for inverse QFT, 1 for QFT)
:return: A Quil program to compute the core (inverse) QFT of the qubits.
"""
q = qubits[0]
qs = qubits[1:]
if 1 == len(qubits):
return [H(q)]
else:
n = 1 + len(qs)
cR = []
for idx, i in enumerate(range(n - 1, 0, -1)):
q_idx = qs[idx]
angle = math.pi / 2 ** (n - i)
cR.append(CPHASE(coeff * angle, q, q_idx))
return _core_qft(qs, coeff) + list(reversed(cR)) + [H(q)] |
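A hedged sketch of assembling the core transform into a pyquil Program; coeff=+1 gives the forward QFT and coeff=-1 the inverse. The full QFT usually appends SWAPs to reverse qubit order, which this core routine deliberately omits.
from pyquil import Program

qft_3q = Program(_core_qft([0, 1, 2], coeff=1))
print(qft_3q)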
def _grad_one_param(self, funct, p, dl=2e-5, rts=False, nout=1, **kwargs):
"""
Gradient of `func` wrt a single parameter `p`. (see _graddoc)
"""
vals = self.get_values(p)
f0 = funct(**kwargs)
self.update(p, vals+dl)
f1 = funct(**kwargs)
if rts:
self.update(p, vals)
if nout == 1:
return (f1 - f0) / dl
else:
return [(f1[i] - f0[i]) / dl for i in range(nout)] | Gradient of `func` wrt a single parameter `p`. (see _graddoc) | Below is the the instruction that describes the task:
### Input:
Gradient of `func` wrt a single parameter `p`. (see _graddoc)
### Response:
def _grad_one_param(self, funct, p, dl=2e-5, rts=False, nout=1, **kwargs):
"""
Gradient of `func` wrt a single parameter `p`. (see _graddoc)
"""
vals = self.get_values(p)
f0 = funct(**kwargs)
self.update(p, vals+dl)
f1 = funct(**kwargs)
if rts:
self.update(p, vals)
if nout == 1:
return (f1 - f0) / dl
else:
return [(f1[i] - f0[i]) / dl for i in range(nout)] |
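The method implements a forward finite difference, grad ≈ (f(p + dl) - f(p)) / dl; a standalone numeric check of that rule:
def forward_diff(f, p, dl=2e-5):
    return (f(p + dl) - f(p)) / dl

forward_diff(lambda x: x ** 2, 3.0)  # ~6.00002, vs. the exact derivative 6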
def _execute_on_selected(self, p_cmd_str, p_execute_signal):
"""
Executes command specified by p_cmd_str on selected todo item.
p_cmd_str should be a string with one replacement field ('{}') which
will be substituted by id of the selected todo item.
p_execute_signal is the signal name passed to the main loop. It should
be one of 'execute_command' or 'execute_command_silent'.
"""
try:
todo = self.listbox.focus.todo
todo_id = str(self.view.todolist.number(todo))
urwid.emit_signal(self, p_execute_signal, p_cmd_str, todo_id)
# force screen redraw after editing
if p_cmd_str.startswith('edit'):
urwid.emit_signal(self, 'refresh')
except AttributeError:
# No todo item selected
pass | Executes command specified by p_cmd_str on selected todo item.
p_cmd_str should be a string with one replacement field ('{}') which
will be substituted by id of the selected todo item.
p_execute_signal is the signal name passed to the main loop. It should
be one of 'execute_command' or 'execute_command_silent'. | Below is the instruction that describes the task:
### Input:
Executes command specified by p_cmd_str on selected todo item.
p_cmd_str should be a string with one replacement field ('{}') which
will be substituted by id of the selected todo item.
p_execute_signal is the signal name passed to the main loop. It should
be one of 'execute_command' or 'execute_command_silent'.
### Response:
def _execute_on_selected(self, p_cmd_str, p_execute_signal):
"""
Executes command specified by p_cmd_str on selected todo item.
p_cmd_str should be a string with one replacement field ('{}') which
will be substituted by id of the selected todo item.
p_execute_signal is the signal name passed to the main loop. It should
be one of 'execute_command' or 'execute_command_silent'.
"""
try:
todo = self.listbox.focus.todo
todo_id = str(self.view.todolist.number(todo))
urwid.emit_signal(self, p_execute_signal, p_cmd_str, todo_id)
# force screen redraw after editing
if p_cmd_str.startswith('edit'):
urwid.emit_signal(self, 'refresh')
except AttributeError:
# No todo item selected
pass |
def _range2cols(areas):
"""
Convert comma separated list of column names and ranges to indices.
Parameters
----------
areas : str
A string containing a sequence of column ranges (or areas).
Returns
-------
cols : list
A list of 0-based column indices.
Examples
--------
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
"""
cols = []
for rng in areas.split(","):
if ":" in rng:
rng = rng.split(":")
cols.extend(lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1))
else:
cols.append(_excel2num(rng))
return cols | Convert comma separated list of column names and ranges to indices.
Parameters
----------
areas : str
A string containing a sequence of column ranges (or areas).
Returns
-------
cols : list
A list of 0-based column indices.
Examples
--------
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27] | Below is the instruction that describes the task:
### Input:
Convert comma separated list of column names and ranges to indices.
Parameters
----------
areas : str
A string containing a sequence of column ranges (or areas).
Returns
-------
cols : list
A list of 0-based column indices.
Examples
--------
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
### Response:
def _range2cols(areas):
"""
Convert comma separated list of column names and ranges to indices.
Parameters
----------
areas : str
A string containing a sequence of column ranges (or areas).
Returns
-------
cols : list
A list of 0-based column indices.
Examples
--------
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
"""
cols = []
for rng in areas.split(","):
if ":" in rng:
rng = rng.split(":")
cols.extend(lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1))
else:
cols.append(_excel2num(rng))
return cols |
def to_bytes(self):
'''
Create bytes from properties
'''
# Verify that properties make sense
self.sanitize()
# Start with the type
bitstream = BitArray('uint:4=%d' % self.message_type)
# Add the flags
bitstream += BitArray('bool=%d' % self.proxy_map_reply)
# Add reserved bits
bitstream += self._reserved1
# Decide on the has_xtr_site_id value
has_xtr_site_id = bool(self.xtr_id or self.site_id or self.for_rtr)
bitstream += BitArray('bool=%d, bool=%d' % (has_xtr_site_id,
self.for_rtr))
# Add reserved bits
bitstream += self._reserved2
# Add the rest of the flags
bitstream += BitArray('bool=%d' % self.want_map_notify)
# Add record count
bitstream += BitArray('uint:8=%d' % len(self.records))
# Add the nonce
bitstream += BitArray(bytes=self.nonce)
# Add the key-id and authentication data
bitstream += BitArray('uint:16=%d, uint:16=%d, hex=%s'
% (self.key_id,
len(self.authentication_data),
self.authentication_data.encode('hex')))
# Add the map-reply records
for record in self.records:
bitstream += record.to_bitstream()
# Add xTR-ID and site-ID if we said we would
if has_xtr_site_id:
bitstream += BitArray('uint:128=%d, uint:64=%d' % (self.xtr_id,
self.site_id))
return bitstream.bytes | Create bytes from properties | Below is the the instruction that describes the task:
### Input:
Create bytes from properties
### Response:
def to_bytes(self):
'''
Create bytes from properties
'''
# Verify that properties make sense
self.sanitize()
# Start with the type
bitstream = BitArray('uint:4=%d' % self.message_type)
# Add the flags
bitstream += BitArray('bool=%d' % self.proxy_map_reply)
# Add reserved bits
bitstream += self._reserved1
# Decide on the has_xtr_site_id value
has_xtr_site_id = bool(self.xtr_id or self.site_id or self.for_rtr)
bitstream += BitArray('bool=%d, bool=%d' % (has_xtr_site_id,
self.for_rtr))
# Add reserved bits
bitstream += self._reserved2
# Add the rest of the flags
bitstream += BitArray('bool=%d' % self.want_map_notify)
# Add record count
bitstream += BitArray('uint:8=%d' % len(self.records))
# Add the nonce
bitstream += BitArray(bytes=self.nonce)
# Add the key-id and authentication data
bitstream += BitArray('uint:16=%d, uint:16=%d, hex=%s'
% (self.key_id,
len(self.authentication_data),
self.authentication_data.encode('hex')))
# Add the map-reply records
for record in self.records:
bitstream += record.to_bitstream()
# Add xTR-ID and site-ID if we said we would
if has_xtr_site_id:
bitstream += BitArray('uint:128=%d, uint:64=%d' % (self.xtr_id,
self.site_id))
return bitstream.bytes |
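
The record above packs message fields bit-by-bit with the `bitstring` library. As a rough standalone illustration of the same idea (not the actual LISP Map-Register wire format), the sketch below packs a 4-bit message type and two flag bits into a single header byte using plain integer shifts; the field layout here is invented for demonstration.

def pack_header(message_type, proxy_reply, want_notify):
    """Pack a 4-bit type and two flag bits into one byte (layout is illustrative)."""
    if not 0 <= message_type < 16:
        raise ValueError("message_type must fit in 4 bits")
    header = (message_type & 0x0F) << 4        # bits 7..4: message type
    header |= (1 << 3) if proxy_reply else 0   # bit 3: proxy-reply flag
    header |= (1 << 2) if want_notify else 0   # bit 2: want-map-notify flag
    return bytes([header])                     # bits 1..0 left reserved (zero)

print(pack_header(3, True, False).hex())  # '38' -> 0011 1000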
def import_gpg_key(key):
"""Imports a GPG key"""
if not key:
raise CryptoritoError('Invalid GPG Key')
key_fd, key_filename = mkstemp("cryptorito-gpg-import")
key_handle = os.fdopen(key_fd, 'w')
key_handle.write(polite_string(key))
key_handle.close()
cmd = flatten([gnupg_bin(), gnupg_home(), "--import", key_filename])
output = stderr_output(cmd)
msg = 'gpg: Total number processed: 1'
output_bits = polite_string(output).split('\n')
return len([line for line in output_bits if line == msg]) == 1 | Imports a GPG key | Below is the the instruction that describes the task:
### Input:
Imports a GPG key
### Response:
def import_gpg_key(key):
"""Imports a GPG key"""
if not key:
raise CryptoritoError('Invalid GPG Key')
key_fd, key_filename = mkstemp("cryptorito-gpg-import")
key_handle = os.fdopen(key_fd, 'w')
key_handle.write(polite_string(key))
key_handle.close()
cmd = flatten([gnupg_bin(), gnupg_home(), "--import", key_filename])
output = stderr_output(cmd)
msg = 'gpg: Total number processed: 1'
output_bits = polite_string(output).split('\n')
return len([line for line in output_bits if line == msg]) == 1 |
def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):
"""Print the contents of a byte string as a 256 colour image.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
width
Width of image to render in pixels (default: 64)
height
Height of image to render in pixels (default: auto)
palette
List of Colours to use (default: test palette)
"""
for line in pixdump_iter( source, start, end, length, width, height, palette ):
print( line ) | Print the contents of a byte string as a 256 colour image.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
width
Width of image to render in pixels (default: 64)
height
Height of image to render in pixels (default: auto)
palette
List of Colours to use (default: test palette) | Below is the the instruction that describes the task:
### Input:
Print the contents of a byte string as a 256 colour image.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
width
Width of image to render in pixels (default: 64)
height
Height of image to render in pixels (default: auto)
palette
List of Colours to use (default: test palette)
### Response:
def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):
"""Print the contents of a byte string as a 256 colour image.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
width
Width of image to render in pixels (default: 64)
height
Height of image to render in pixels (default: auto)
palette
List of Colours to use (default: test palette)
"""
for line in pixdump_iter( source, start, end, length, width, height, palette ):
print( line ) |
def validate(self, value, model=None, context=None):
"""
Validate
Perform value validation and return result
:param value: value to check
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult
"""
regex = self.regex()
match = regex.match(value)
if not match:
return Error(self.not_email)
# success otherwise
return Error() | Validate
Perform value validation and return result
:param value: value to check
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult | Below is the the instruction that describes the task:
### Input:
Validate
Perform value validation and return result
:param value: value to check
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult
### Response:
def validate(self, value, model=None, context=None):
"""
Validate
Perform value validation and return result
:param value: value to check
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult
"""
regex = self.regex()
match = regex.match(value)
if not match:
return Error(self.not_email)
# success otherwise
return Error() |
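
In the shiftschema record above, an empty `Error()` is treated as success (it evaluates falsy). A minimal standalone sketch of the same pattern follows, using a deliberately simplified email regex that is for illustration only.

import re

# Deliberately simplified pattern -- real-world email validation is more involved.
EMAIL_RE = re.compile(r"^[^@\s]+@[^@\s]+\.[^@\s]+$")

def validate_email(value, not_email="Not a valid email address"):
    """Return an error message on failure, or None on success."""
    if EMAIL_RE.match(value):
        return None       # success: nothing to report
    return not_email      # failure: the error message

print(validate_email("user@example.com"))  # None
print(validate_email("not-an-email"))      # Not a valid email address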
def transfer(self):
"""
Returns a MappedObject containing the account's transfer pool data
"""
result = self.client.get('/account/transfer')
if not 'used' in result:
raise UnexpectedResponseError('Unexpected response when getting Transfer Pool!')
return MappedObject(**result) | Returns a MappedObject containing the account's transfer pool data | Below is the the instruction that describes the task:
### Input:
Returns a MappedObject containing the account's transfer pool data
### Response:
def transfer(self):
"""
Returns a MappedObject containing the account's transfer pool data
"""
result = self.client.get('/account/transfer')
if not 'used' in result:
raise UnexpectedResponseError('Unexpected response when getting Transfer Pool!')
return MappedObject(**result) |
def update(self, uid):
'''
Update the wiki.
'''
postinfo = MWiki.get_by_uid(uid)
if self.check_post_role()['EDIT'] or postinfo.user_name == self.get_current_user():
pass
else:
return False
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
cnt_old = tornado.escape.xhtml_unescape(postinfo.cnt_md).strip()
cnt_new = post_data['cnt_md'].strip()
if cnt_old == cnt_new:
pass
else:
MWikiHist.create_wiki_history(postinfo)
MWiki.update(uid, post_data)
# cele_gen_whoosh.delay()
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/wiki/{0}'.format(tornado.escape.url_escape(post_data['title']))) | Update the wiki. | Below is the the instruction that describes the task:
### Input:
Update the wiki.
### Response:
def update(self, uid):
'''
Update the wiki.
'''
postinfo = MWiki.get_by_uid(uid)
if self.check_post_role()['EDIT'] or postinfo.user_name == self.get_current_user():
pass
else:
return False
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
cnt_old = tornado.escape.xhtml_unescape(postinfo.cnt_md).strip()
cnt_new = post_data['cnt_md'].strip()
if cnt_old == cnt_new:
pass
else:
MWikiHist.create_wiki_history(postinfo)
MWiki.update(uid, post_data)
# cele_gen_whoosh.delay()
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/wiki/{0}'.format(tornado.escape.url_escape(post_data['title']))) |
def get_session_index(self):
"""
Gets the SessionIndex from the AuthnStatement
Could be used to be stored in the local session in order
to be used in a future Logout Request that the SP could
send to the SP, to set what specific session must be deleted
:returns: The SessionIndex value
:rtype: string|None
"""
session_index = None
authn_statement_nodes = self.__query_assertion('/saml:AuthnStatement[@SessionIndex]')
if authn_statement_nodes:
session_index = authn_statement_nodes[0].get('SessionIndex')
return session_index | Gets the SessionIndex from the AuthnStatement
Could be used to be stored in the local session in order
to be used in a future Logout Request that the SP could
send to the SP, to set what specific session must be deleted
:returns: The SessionIndex value
:rtype: string|None | Below is the the instruction that describes the task:
### Input:
Gets the SessionIndex from the AuthnStatement
Could be used to be stored in the local session in order
to be used in a future Logout Request that the SP could
send to the SP, to set what specific session must be deleted
:returns: The SessionIndex value
:rtype: string|None
### Response:
def get_session_index(self):
"""
Gets the SessionIndex from the AuthnStatement
Could be used to be stored in the local session in order
to be used in a future Logout Request that the SP could
send to the SP, to set what specific session must be deleted
:returns: The SessionIndex value
:rtype: string|None
"""
session_index = None
authn_statement_nodes = self.__query_assertion('/saml:AuthnStatement[@SessionIndex]')
if authn_statement_nodes:
session_index = authn_statement_nodes[0].get('SessionIndex')
return session_index |
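
The SessionIndex lookup in the record above runs an XPath query against the parsed SAML response. A rough standalone sketch with the standard-library `xml.etree.ElementTree` is below; the namespace URI and the toy XML are assumptions for illustration only.

import xml.etree.ElementTree as ET

# Assumed SAML 2.0 assertion namespace; verify against your actual documents.
SAML_NS = "urn:oasis:names:tc:SAML:2.0:assertion"

XML = """
<Response xmlns:saml="{ns}">
  <saml:Assertion>
    <saml:AuthnStatement SessionIndex="_abc123"/>
  </saml:Assertion>
</Response>
""".format(ns=SAML_NS)

def get_session_index(xml_text):
    """Return the SessionIndex of the first AuthnStatement carrying one, or None."""
    root = ET.fromstring(xml_text)
    nodes = root.findall(".//{%s}AuthnStatement[@SessionIndex]" % SAML_NS)
    return nodes[0].get("SessionIndex") if nodes else None

print(get_session_index(XML))  # _abc123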
def abort(self, frame):
"""
Handles ABORT command: Rolls back specified transaction.
"""
if not frame.transaction:
raise ProtocolError("Missing transaction for ABORT command.")
if not frame.transaction in self.engine.transactions:
raise ProtocolError("Invalid transaction: %s" % frame.transaction)
self.engine.queue_manager.resend_transaction_frames(
self.engine.connection, frame.transaction)
del self.engine.transactions[frame.transaction] | Handles ABORT command: Rolls back specified transaction. | Below is the the instruction that describes the task:
### Input:
Handles ABORT command: Rolls back specified transaction.
### Response:
def abort(self, frame):
"""
Handles ABORT command: Rolls back specified transaction.
"""
if not frame.transaction:
raise ProtocolError("Missing transaction for ABORT command.")
if not frame.transaction in self.engine.transactions:
raise ProtocolError("Invalid transaction: %s" % frame.transaction)
self.engine.queue_manager.resend_transaction_frames(
self.engine.connection, frame.transaction)
del self.engine.transactions[frame.transaction] |
def stripinstallbuilder(target, source, env):
""" Strips the install builder action from the source list and stores
the final installation location as the "PACKAGING_INSTALL_LOCATION" of
the source of the source file. This effectively removes the final installed
files from the source list while remembering the installation location.
It also warns about files which have no install builder attached.
"""
def has_no_install_location(file):
return not (file.has_builder() and\
hasattr(file.builder, 'name') and\
(file.builder.name=="InstallBuilder" or\
file.builder.name=="InstallAsBuilder"))
if len([src for src in source if has_no_install_location(src)]):
warn(Warning, "there are files to package which have no\
InstallBuilder attached, this might lead to irreproducible packages")
n_source=[]
for s in source:
if has_no_install_location(s):
n_source.append(s)
else:
for ss in s.sources:
n_source.append(ss)
copy_attr(s, ss)
ss.Tag('PACKAGING_INSTALL_LOCATION', s.get_path())
return (target, n_source) | Strips the install builder action from the source list and stores
the final installation location as the "PACKAGING_INSTALL_LOCATION" of
the source of the source file. This effectively removes the final installed
files from the source list while remembering the installation location.
It also warns about files which have no install builder attached. | Below is the the instruction that describes the task:
### Input:
Strips the install builder action from the source list and stores
the final installation location as the "PACKAGING_INSTALL_LOCATION" of
the source of the source file. This effectively removes the final installed
files from the source list while remembering the installation location.
It also warns about files which have no install builder attached.
### Response:
def stripinstallbuilder(target, source, env):
""" Strips the install builder action from the source list and stores
the final installation location as the "PACKAGING_INSTALL_LOCATION" of
the source of the source file. This effectively removes the final installed
files from the source list while remembering the installation location.
It also warns about files which have no install builder attached.
"""
def has_no_install_location(file):
return not (file.has_builder() and\
hasattr(file.builder, 'name') and\
(file.builder.name=="InstallBuilder" or\
file.builder.name=="InstallAsBuilder"))
if len([src for src in source if has_no_install_location(src)]):
warn(Warning, "there are files to package which have no\
InstallBuilder attached, this might lead to irreproducible packages")
n_source=[]
for s in source:
if has_no_install_location(s):
n_source.append(s)
else:
for ss in s.sources:
n_source.append(ss)
copy_attr(s, ss)
ss.Tag('PACKAGING_INSTALL_LOCATION', s.get_path())
return (target, n_source) |
def rc_channels_scaled_encode(self, time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi):
'''
The scaled values of the RC channels received. (-100%) -10000, (0%) 0,
(100%) 10000. Channels that are inactive should be set
to UINT16_MAX.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows for more than 8 servos. (uint8_t)
chan1_scaled : RC channel 1 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan2_scaled : RC channel 2 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan3_scaled : RC channel 3 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan4_scaled : RC channel 4 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan5_scaled : RC channel 5 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan6_scaled : RC channel 6 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan7_scaled : RC channel 7 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan8_scaled : RC channel 8 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
rssi : Receive signal strength indicator, 0: 0%, 100: 100%, 255: invalid/unknown. (uint8_t)
'''
return MAVLink_rc_channels_scaled_message(time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi) | The scaled values of the RC channels received. (-100%) -10000, (0%) 0,
(100%) 10000. Channels that are inactive should be set
to UINT16_MAX.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows for more than 8 servos. (uint8_t)
chan1_scaled : RC channel 1 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan2_scaled : RC channel 2 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan3_scaled : RC channel 3 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan4_scaled : RC channel 4 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan5_scaled : RC channel 5 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan6_scaled : RC channel 6 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan7_scaled : RC channel 7 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan8_scaled : RC channel 8 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
rssi : Receive signal strength indicator, 0: 0%, 100: 100%, 255: invalid/unknown. (uint8_t) | Below is the the instruction that describes the task:
### Input:
The scaled values of the RC channels received. (-100%) -10000, (0%) 0,
(100%) 10000. Channels that are inactive should be set
to UINT16_MAX.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows for more than 8 servos. (uint8_t)
chan1_scaled : RC channel 1 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan2_scaled : RC channel 2 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan3_scaled : RC channel 3 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan4_scaled : RC channel 4 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan5_scaled : RC channel 5 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan6_scaled : RC channel 6 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan7_scaled : RC channel 7 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan8_scaled : RC channel 8 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
rssi : Receive signal strength indicator, 0: 0%, 100: 100%, 255: invalid/unknown. (uint8_t)
### Response:
def rc_channels_scaled_encode(self, time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi):
'''
The scaled values of the RC channels received. (-100%) -10000, (0%) 0,
(100%) 10000. Channels that are inactive should be set
to UINT16_MAX.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows for more than 8 servos. (uint8_t)
chan1_scaled : RC channel 1 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan2_scaled : RC channel 2 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan3_scaled : RC channel 3 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan4_scaled : RC channel 4 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan5_scaled : RC channel 5 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan6_scaled : RC channel 6 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan7_scaled : RC channel 7 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
chan8_scaled : RC channel 8 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t)
rssi : Receive signal strength indicator, 0: 0%, 100: 100%, 255: invalid/unknown. (uint8_t)
'''
return MAVLink_rc_channels_scaled_message(time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi) |
def remove_templates(self):
"""Clean useless elements like templates because they are not needed anymore
:return: None
"""
self.hosts.remove_templates()
self.contacts.remove_templates()
self.services.remove_templates()
self.servicedependencies.remove_templates()
self.hostdependencies.remove_templates()
self.timeperiods.remove_templates() | Clean useless elements like templates because they are not needed anymore
:return: None | Below is the the instruction that describes the task:
### Input:
Clean useless elements like templates because they are not needed anymore
:return: None
### Response:
def remove_templates(self):
"""Clean useless elements like templates because they are not needed anymore
:return: None
"""
self.hosts.remove_templates()
self.contacts.remove_templates()
self.services.remove_templates()
self.servicedependencies.remove_templates()
self.hostdependencies.remove_templates()
self.timeperiods.remove_templates() |
def bind(self, destination='', source='', routing_key='',
arguments=None):
"""Bind an Exchange.
:param str destination: Exchange name
:param str source: Exchange to bind to
:param str routing_key: The routing key to use
:param dict arguments: Bind key/value arguments
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(destination):
raise AMQPInvalidArgument('destination should be a string')
elif not compatibility.is_string(source):
raise AMQPInvalidArgument('source should be a string')
elif not compatibility.is_string(routing_key):
raise AMQPInvalidArgument('routing_key should be a string')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
bind_frame = pamqp_exchange.Bind(destination=destination,
source=source,
routing_key=routing_key,
arguments=arguments)
return self._channel.rpc_request(bind_frame) | Bind an Exchange.
:param str destination: Exchange name
:param str source: Exchange to bind to
:param str routing_key: The routing key to use
:param dict arguments: Bind key/value arguments
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Bind an Exchange.
:param str destination: Exchange name
:param str source: Exchange to bind to
:param str routing_key: The routing key to use
:param dict arguments: Bind key/value arguments
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
### Response:
def bind(self, destination='', source='', routing_key='',
arguments=None):
"""Bind an Exchange.
:param str destination: Exchange name
:param str source: Exchange to bind to
:param str routing_key: The routing key to use
:param dict arguments: Bind key/value arguments
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(destination):
raise AMQPInvalidArgument('destination should be a string')
elif not compatibility.is_string(source):
raise AMQPInvalidArgument('source should be a string')
elif not compatibility.is_string(routing_key):
raise AMQPInvalidArgument('routing_key should be a string')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
bind_frame = pamqp_exchange.Bind(destination=destination,
source=source,
routing_key=routing_key,
arguments=arguments)
return self._channel.rpc_request(bind_frame) |
def split_somatic(items):
"""Split somatic batches, adding a germline target.
Enables separate germline calling of samples using shared alignments.
"""
items = [_clean_flat_variantcaller(x) for x in items]
somatic_groups, somatic, non_somatic = vcfutils.somatic_batches(items)
# extract germline samples to run from normals in tumor/normal pairs
germline_added = set([])
germline = []
for somatic_group in somatic_groups:
paired = vcfutils.get_paired(somatic_group)
if paired and paired.normal_data:
cur = utils.deepish_copy(paired.normal_data)
vc = dd.get_variantcaller(cur)
if isinstance(vc, dict) and "germline" in vc:
if cur["description"] not in germline_added:
germline_added.add(cur["description"])
cur["rgnames"]["sample"] = cur["description"]
cur["metadata"]["batch"] = "%s-germline" % cur["description"]
cur["metadata"]["phenotype"] = "germline"
cur = remove_align_qc_tools(cur)
cur["config"]["algorithm"]["variantcaller"] = vc["germline"]
germline.append(cur)
# Fix variantcalling specification for only somatic targets
somatic_out = []
for data in somatic:
vc = dd.get_variantcaller(data)
if isinstance(vc, dict) and "somatic" in vc:
data["config"]["algorithm"]["variantcaller"] = vc["somatic"]
somatic_out.append(data)
return non_somatic + somatic_out + germline | Split somatic batches, adding a germline target.
Enables separate germline calling of samples using shared alignments. | Below is the the instruction that describes the task:
### Input:
Split somatic batches, adding a germline target.
Enables separate germline calling of samples using shared alignments.
### Response:
def split_somatic(items):
"""Split somatic batches, adding a germline target.
Enables separate germline calling of samples using shared alignments.
"""
items = [_clean_flat_variantcaller(x) for x in items]
somatic_groups, somatic, non_somatic = vcfutils.somatic_batches(items)
# extract germline samples to run from normals in tumor/normal pairs
germline_added = set([])
germline = []
for somatic_group in somatic_groups:
paired = vcfutils.get_paired(somatic_group)
if paired and paired.normal_data:
cur = utils.deepish_copy(paired.normal_data)
vc = dd.get_variantcaller(cur)
if isinstance(vc, dict) and "germline" in vc:
if cur["description"] not in germline_added:
germline_added.add(cur["description"])
cur["rgnames"]["sample"] = cur["description"]
cur["metadata"]["batch"] = "%s-germline" % cur["description"]
cur["metadata"]["phenotype"] = "germline"
cur = remove_align_qc_tools(cur)
cur["config"]["algorithm"]["variantcaller"] = vc["germline"]
germline.append(cur)
# Fix variantcalling specification for only somatic targets
somatic_out = []
for data in somatic:
vc = dd.get_variantcaller(data)
if isinstance(vc, dict) and "somatic" in vc:
data["config"]["algorithm"]["variantcaller"] = vc["somatic"]
somatic_out.append(data)
return non_somatic + somatic_out + germline |
def execute(self, payload, *args, flavour: ModuleType, **kwargs):
"""
Synchronously run ``payload`` and provide its output
If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.
"""
if args or kwargs:
payload = functools.partial(payload, *args, **kwargs)
return self._meta_runner.run_payload(payload, flavour=flavour) | Synchronously run ``payload`` and provide its output
If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution. | Below is the the instruction that describes the task:
### Input:
Synchronously run ``payload`` and provide its output
If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.
### Response:
def execute(self, payload, *args, flavour: ModuleType, **kwargs):
"""
Synchronously run ``payload`` and provide its output
If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.
"""
if args or kwargs:
payload = functools.partial(payload, *args, **kwargs)
return self._meta_runner.run_payload(payload, flavour=flavour) |
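
The record above defers argument binding with `functools.partial` before handing the payload to a runner. A tiny standalone illustration of that binding step (the runner itself is stubbed out here):

import functools

def run_payload(payload):
    """Stand-in for a meta-runner: just call the payload and return its result."""
    return payload()

def add(a, b):
    return a + b

# Bind the arguments up front, as execute() does when *args/**kwargs are given.
bound = functools.partial(add, 2, b=3)
print(run_payload(bound))  # 5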
def set_comment(self,c):
"""
Sets the comment for the element
@type c: string
@param c: comment for the element
"""
c = ' '+c.replace('-','').strip()+' '
self.node.insert(0,etree.Comment(c)) | Sets the comment for the element
@type c: string
@param c: comment for the element | Below is the the instruction that describes the task:
### Input:
Sets the comment for the element
@type c: string
@param c: comment for the element
### Response:
def set_comment(self,c):
"""
Sets the comment for the element
@type c: string
@param c: comment for the element
"""
c = ' '+c.replace('-','').strip()+' '
self.node.insert(0,etree.Comment(c)) |
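
The comment-setting record above relies on an etree `Comment` node being insertable like any child element; hyphens are stripped because '--' is illegal inside XML comments. A small standalone version with the standard-library ElementTree (the original appears to use lxml, whose API is compatible for this case):

import xml.etree.ElementTree as ET

def set_comment(node, text):
    """Prepend an XML comment to `node`, stripping '-' which is unsafe in comments."""
    cleaned = " " + text.replace("-", "").strip() + " "
    node.insert(0, ET.Comment(cleaned))

root = ET.Element("term")
set_comment(root, "auto-generated entry")
print(ET.tostring(root).decode())  # <term><!-- autogenerated entry --></term>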
def dict_merge(set1, set2):
"""Joins two dictionaries."""
return dict(list(set1.items()) + list(set2.items())) | Joins two dictionaries. | Below is the the instruction that describes the task:
### Input:
Joins two dictionaries.
### Response:
def dict_merge(set1, set2):
"""Joins two dictionaries."""
return dict(list(set1.items()) + list(set2.items())) |
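
A quick usage note on the record above: because the second dict's items are appended last, its values win on key collisions (the same behaviour as `{**set1, **set2}` on Python 3.5+).

def dict_merge(set1, set2):
    return dict(list(set1.items()) + list(set2.items()))

print(dict_merge({"a": 1, "b": 2}, {"b": 99, "c": 3}))  # {'a': 1, 'b': 99, 'c': 3}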
def trans_history(
self, from_=None, count=None, from_id=None, end_id=None,
order=None, since=None, end=None
):
"""
Returns the history of transactions.
To use this method you need a privilege of the info key.
:param int or None from_: transaction ID, from which the display starts (default 0)
:param int or None count: number of transaction to be displayed (default 1000)
:param int or None from_id: transaction ID, from which the display starts (default 0)
:param int or None end_id: transaction ID on which the display ends (default inf.)
:param str or None order: sorting (default 'DESC')
:param int or None since: the time to start the display (default 0)
:param int or None end: the time to end the display (default inf.)
"""
return self._trade_api_call(
'TransHistory', from_=from_, count=count, from_id=from_id, end_id=end_id,
order=order, since=since, end=end
) | Returns the history of transactions.
To use this method you need a privilege of the info key.
:param int or None from_: transaction ID, from which the display starts (default 0)
:param int or None count: number of transaction to be displayed (default 1000)
:param int or None from_id: transaction ID, from which the display starts (default 0)
:param int or None end_id: transaction ID on which the display ends (default inf.)
:param str or None order: sorting (default 'DESC')
:param int or None since: the time to start the display (default 0)
:param int or None end: the time to end the display (default inf.) | Below is the the instruction that describes the task:
### Input:
Returns the history of transactions.
To use this method you need a privilege of the info key.
:param int or None from_: transaction ID, from which the display starts (default 0)
:param int or None count: number of transaction to be displayed (default 1000)
:param int or None from_id: transaction ID, from which the display starts (default 0)
:param int or None end_id: transaction ID on which the display ends (default inf.)
:param str or None order: sorting (default 'DESC')
:param int or None since: the time to start the display (default 0)
:param int or None end: the time to end the display (default inf.)
### Response:
def trans_history(
self, from_=None, count=None, from_id=None, end_id=None,
order=None, since=None, end=None
):
"""
Returns the history of transactions.
To use this method you need a privilege of the info key.
:param int or None from_: transaction ID, from which the display starts (default 0)
:param int or None count: number of transaction to be displayed (default 1000)
:param int or None from_id: transaction ID, from which the display starts (default 0)
:param int or None end_id: transaction ID on which the display ends (default inf.)
:param str or None order: sorting (default 'DESC')
:param int or None since: the time to start the display (default 0)
:param int or None end: the time to end the display (default inf.)
"""
return self._trade_api_call(
'TransHistory', from_=from_, count=count, from_id=from_id, end_id=end_id,
order=order, since=since, end=end
) |
def wait_for_instance_deletion(credentials, project, zone, instance_name,
interval_seconds=5):
"""Wait until an instance is deleted.
We require that initially, the specified instance exists.
TODO: docstring
"""
t0 = time.time()
access_token = credentials.get_access_token()
headers = {
'Authorization': 'Bearer %s' % access_token.access_token
}
r = requests.get('https://www.googleapis.com/compute/v1/'
'projects/%s/zones/%s/instances/%s'
% (project, zone, instance_name),
headers=headers)
if r.status_code == 404:
raise AssertionError('Instance "%s" does not exist!' % instance_name)
r.raise_for_status()
_LOGGER.debug('Instance "%s" exists.', instance_name)
while True:
time.sleep(interval_seconds)
access_token = credentials.get_access_token()
headers = {
'Authorization': 'Bearer %s' % access_token.access_token
}
r = requests.get('https://www.googleapis.com/compute/v1/'
'projects/%s/zones/%s/instances/%s'
% (project, zone, instance_name),
headers=headers)
if r.status_code == 404:
break
r.raise_for_status()
_LOGGER.debug('Instance "%s" still exists.', instance_name)
t1 = time.time()
t = t1-t0
t_min = t/60.0
_LOGGER.info('Instance was deleted after %.1f s (%.1f m).', t, t_min) | Wait until an instance is deleted.
We require that initially, the specified instance exists.
TODO: docstring | Below is the the instruction that describes the task:
### Input:
Wait until an instance is deleted.
We require that initially, the specified instance exists.
TODO: docstring
### Response:
def wait_for_instance_deletion(credentials, project, zone, instance_name,
interval_seconds=5):
"""Wait until an instance is deleted.
We require that initially, the specified instance exists.
TODO: docstring
"""
t0 = time.time()
access_token = credentials.get_access_token()
headers = {
'Authorization': 'Bearer %s' % access_token.access_token
}
r = requests.get('https://www.googleapis.com/compute/v1/'
'projects/%s/zones/%s/instances/%s'
% (project, zone, instance_name),
headers=headers)
if r.status_code == 404:
raise AssertionError('Instance "%s" does not exist!' % instance_name)
r.raise_for_status()
_LOGGER.debug('Instance "%s" exists.', instance_name)
while True:
time.sleep(interval_seconds)
access_token = credentials.get_access_token()
headers = {
'Authorization': 'Bearer %s' % access_token.access_token
}
r = requests.get('https://www.googleapis.com/compute/v1/'
'projects/%s/zones/%s/instances/%s'
% (project, zone, instance_name),
headers=headers)
if r.status_code == 404:
break
r.raise_for_status()
_LOGGER.debug('Instance "%s" still exists.', instance_name)
t1 = time.time()
t = t1-t0
t_min = t/60.0
_LOGGER.info('Instance was deleted after %.1f s (%.1f m).', t, t_min) |
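
The deletion wait in the record above is a poll-until-404 loop with a fixed sleep. A generic, network-free sketch of the same pattern is below; `resource_exists` is a stand-in callable, not part of the GCE API.

import time

def wait_until_gone(resource_exists, interval_seconds=5, timeout_seconds=600):
    """Poll `resource_exists()` until it returns False, or raise on timeout."""
    deadline = time.monotonic() + timeout_seconds
    while resource_exists():
        if time.monotonic() > deadline:
            raise TimeoutError("resource still exists after %ss" % timeout_seconds)
        time.sleep(interval_seconds)

# Toy demo: the "resource" disappears after three polls.
state = {"polls_left": 3}

def fake_exists():
    state["polls_left"] -= 1
    return state["polls_left"] > 0

wait_until_gone(fake_exists, interval_seconds=0)
print("gone")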
def lineWidth(self, lw=None):
"""Set/get width of mesh edges. Same as `lw()`."""
if lw is not None:
if lw == 0:
self.GetProperty().EdgeVisibilityOff()
return
self.GetProperty().EdgeVisibilityOn()
self.GetProperty().SetLineWidth(lw)
else:
return self.GetProperty().GetLineWidth()
return self | Set/get width of mesh edges. Same as `lw()`. | Below is the the instruction that describes the task:
### Input:
Set/get width of mesh edges. Same as `lw()`.
### Response:
def lineWidth(self, lw=None):
"""Set/get width of mesh edges. Same as `lw()`."""
if lw is not None:
if lw == 0:
self.GetProperty().EdgeVisibilityOff()
return
self.GetProperty().EdgeVisibilityOn()
self.GetProperty().SetLineWidth(lw)
else:
return self.GetProperty().GetLineWidth()
return self |
def autozoom(self, n=None):
"""
Auto-scales the axes to fit all the data in plot index n. If n == None,
auto-scale everyone.
"""
if n==None:
for p in self.plot_widgets: p.autoRange()
else: self.plot_widgets[n].autoRange()
return self | Auto-scales the axes to fit all the data in plot index n. If n == None,
auto-scale everyone. | Below is the the instruction that describes the task:
### Input:
Auto-scales the axes to fit all the data in plot index n. If n == None,
auto-scale everyone.
### Response:
def autozoom(self, n=None):
"""
Auto-scales the axes to fit all the data in plot index n. If n == None,
auto-scale everyone.
"""
if n==None:
for p in self.plot_widgets: p.autoRange()
else: self.plot_widgets[n].autoRange()
return self |
def ensure_treasury_data(symbol, first_date, last_date, now, environ=None):
"""
Ensure we have treasury data from treasury module associated with
`symbol`.
Parameters
----------
symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
loader_module, filename, source = INDEX_MAPPING.get(
symbol, INDEX_MAPPING['SPY'],
)
first_date = max(first_date, loader_module.earliest_possible_date())
data = _load_cached_data(filename, first_date, last_date, now, 'treasury',
environ)
if data is not None:
return data
# If no cached data was found or it was missing any dates then download the
# necessary data.
logger.info(
('Downloading treasury data for {symbol!r} '
'from {first_date} to {last_date}'),
symbol=symbol,
first_date=first_date,
last_date=last_date
)
try:
data = loader_module.get_treasury_data(first_date, last_date)
data.to_csv(get_data_filepath(filename, environ))
except (OSError, IOError, HTTPError):
logger.exception('failed to cache treasury data')
if not has_data_for_dates(data, first_date, last_date):
logger.warn(
("Still don't have expected treasury data for {symbol!r} "
"from {first_date} to {last_date} after redownload!"),
symbol=symbol,
first_date=first_date,
last_date=last_date
)
return data | Ensure we have treasury data from treasury module associated with
`symbol`.
Parameters
----------
symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path. | Below is the the instruction that describes the task:
### Input:
Ensure we have treasury data from treasury module associated with
`symbol`.
Parameters
----------
symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
### Response:
def ensure_treasury_data(symbol, first_date, last_date, now, environ=None):
"""
Ensure we have treasury data from treasury module associated with
`symbol`.
Parameters
----------
symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
loader_module, filename, source = INDEX_MAPPING.get(
symbol, INDEX_MAPPING['SPY'],
)
first_date = max(first_date, loader_module.earliest_possible_date())
data = _load_cached_data(filename, first_date, last_date, now, 'treasury',
environ)
if data is not None:
return data
# If no cached data was found or it was missing any dates then download the
# necessary data.
logger.info(
('Downloading treasury data for {symbol!r} '
'from {first_date} to {last_date}'),
symbol=symbol,
first_date=first_date,
last_date=last_date
)
try:
data = loader_module.get_treasury_data(first_date, last_date)
data.to_csv(get_data_filepath(filename, environ))
except (OSError, IOError, HTTPError):
logger.exception('failed to cache treasury data')
if not has_data_for_dates(data, first_date, last_date):
logger.warn(
("Still don't have expected treasury data for {symbol!r} "
"from {first_date} to {last_date} after redownload!"),
symbol=symbol,
first_date=first_date,
last_date=last_date
)
return data |
def rewrite_record_file(workspace, src_record_file, mutated_file_tuples):
"""Given a RECORD file and list of mutated file tuples, update the RECORD file in place.
The RECORD file should always be a member of the mutated files, due to both containing
versions, and having a version in its filename.
"""
mutated_files = set()
dst_record_file = None
for src, dst in mutated_file_tuples:
if src == src_record_file:
dst_record_file = dst
else:
mutated_files.add(dst)
if not dst_record_file:
raise Exception('Malformed whl or bad globs: `{}` was not rewritten.'.format(src_record_file))
output_records = []
file_name = os.path.join(workspace, dst_record_file)
for line in read_file(file_name).splitlines():
filename, fingerprint_str, size_str = line.rsplit(',', 3)
if filename in mutated_files:
fingerprint_str, size_str = fingerprint_file(workspace, filename)
output_line = ','.join((filename, fingerprint_str, size_str))
else:
output_line = line
output_records.append(output_line)
safe_file_dump(file_name, '\r\n'.join(output_records) + '\r\n') | Given a RECORD file and list of mutated file tuples, update the RECORD file in place.
The RECORD file should always be a member of the mutated files, due to both containing
versions, and having a version in its filename. | Below is the the instruction that describes the task:
### Input:
Given a RECORD file and list of mutated file tuples, update the RECORD file in place.
The RECORD file should always be a member of the mutated files, due to both containing
versions, and having a version in its filename.
### Response:
def rewrite_record_file(workspace, src_record_file, mutated_file_tuples):
"""Given a RECORD file and list of mutated file tuples, update the RECORD file in place.
The RECORD file should always be a member of the mutated files, due to both containing
versions, and having a version in its filename.
"""
mutated_files = set()
dst_record_file = None
for src, dst in mutated_file_tuples:
if src == src_record_file:
dst_record_file = dst
else:
mutated_files.add(dst)
if not dst_record_file:
raise Exception('Malformed whl or bad globs: `{}` was not rewritten.'.format(src_record_file))
output_records = []
file_name = os.path.join(workspace, dst_record_file)
for line in read_file(file_name).splitlines():
filename, fingerprint_str, size_str = line.rsplit(',', 3)
if filename in mutated_files:
fingerprint_str, size_str = fingerprint_file(workspace, filename)
output_line = ','.join((filename, fingerprint_str, size_str))
else:
output_line = line
output_records.append(output_line)
safe_file_dump(file_name, '\r\n'.join(output_records) + '\r\n') |
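
The RECORD rewrite above recomputes each mutated file's fingerprint via a `fingerprint_file` helper that is not shown. For wheels, a RECORD entry is conventionally `path,sha256=<urlsafe-base64 digest without padding>,size`; the sketch below computes such an entry with the standard library. Treat the exact format as an assumption to check against PEP 376/427 rather than a guarantee.

import base64
import hashlib
import os

def record_entry(path):
    """Build a wheel-RECORD style line for `path`: name,sha256=...,size."""
    with open(path, "rb") as handle:
        digest = hashlib.sha256(handle.read()).digest()
    fingerprint = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    size = os.path.getsize(path)
    return "{},sha256={},{}".format(path, fingerprint, size)

# Example (writes a scratch file so the snippet is self-contained):
with open("demo.txt", "w") as fh:
    fh.write("hello wheel\n")
print(record_entry("demo.txt"))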
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):
""" Decrypts `s' with passphrase `passphrase' """
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
return privkey.decrypt(s, mac_bytes) | Decrypts `s' with passphrase `passphrase' | Below is the the instruction that describes the task:
### Input:
Decrypts `s' with passphrase `passphrase'
### Response:
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):
""" Decrypts `s' with passphrase `passphrase' """
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
return privkey.decrypt(s, mac_bytes) |