code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k) | text (stringlengths 164–112k) |
---|---|---|
def parse_plugin_metadata(content):
"""Parse summary metadata to a Python object.
Arguments:
content: The `content` field of a `SummaryMetadata` proto
corresponding to the pr_curves plugin.
Returns:
A `PrCurvesPlugin` protobuf object.
"""
if not isinstance(content, bytes):
raise TypeError('Content type must be bytes')
result = plugin_data_pb2.PrCurvePluginData.FromString(content)
if result.version == 0:
return result
else:
logger.warning(
'Unknown metadata version: %s. The latest version known to '
'this build of TensorBoard is %s; perhaps a newer build is '
'available?', result.version, PROTO_VERSION)
return result | Parse summary metadata to a Python object.
Arguments:
content: The `content` field of a `SummaryMetadata` proto
corresponding to the pr_curves plugin.
Returns:
A `PrCurvesPlugin` protobuf object. | Below is the instruction that describes the task:
### Input:
Parse summary metadata to a Python object.
Arguments:
content: The `content` field of a `SummaryMetadata` proto
corresponding to the pr_curves plugin.
Returns:
A `PrCurvesPlugin` protobuf object.
### Response:
def parse_plugin_metadata(content):
"""Parse summary metadata to a Python object.
Arguments:
content: The `content` field of a `SummaryMetadata` proto
corresponding to the pr_curves plugin.
Returns:
A `PrCurvesPlugin` protobuf object.
"""
if not isinstance(content, bytes):
raise TypeError('Content type must be bytes')
result = plugin_data_pb2.PrCurvePluginData.FromString(content)
if result.version == 0:
return result
else:
logger.warning(
'Unknown metadata version: %s. The latest version known to '
'this build of TensorBoard is %s; perhaps a newer build is '
'available?', result.version, PROTO_VERSION)
return result |
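A minimal usage sketch follows; it assumes a TensorBoard environment where `parse_plugin_metadata` is importable and where the hypothetical variable `summary_metadata` is a `SummaryMetadata` proto read from an event file.

```python
# Hypothetical usage sketch: pull the raw plugin content bytes out of a
# SummaryMetadata proto for the pr_curves plugin and parse them.
content = summary_metadata.plugin_data.content  # bytes written by the plugin
plugin_data = parse_plugin_metadata(content)
print(plugin_data.version)  # 0 for the version this parser fully understands
```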
def init_menu():
"""Initialize menu before first request."""
# Register settings menu
item = current_menu.submenu('settings.admin')
item.register(
"admin.index",
# NOTE: Menu item text (icon replaced by a cogs icon).
_('%(icon)s Administration', icon='<i class="fa fa-cogs fa-fw"></i>'),
visible_when=_has_admin_access,
order=100) | Initialize menu before first request. | Below is the instruction that describes the task:
### Input:
Initialize menu before first request.
### Response:
def init_menu():
"""Initialize menu before first request."""
# Register settings menu
item = current_menu.submenu('settings.admin')
item.register(
"admin.index",
# NOTE: Menu item text (icon replaced by a cogs icon).
_('%(icon)s Administration', icon='<i class="fa fa-cogs fa-fw"></i>'),
visible_when=_has_admin_access,
order=100) |
def command(self, verb, args=None):
"""Call a command on the server.
If the user has not authenticated then authentication will be done
as part of calling the command on the server.
For commands that don't return a status message the status message
will default to an empty string.
Args:
verb: The verb of the command to call.
args: The arguments of the command as a string (default None).
Returns:
A tuple of status code (as an integer) and status message.
Note:
You can run raw commands by supplying the full command (including
args) in the verb.
Note: Although it is possible, you shouldn't issue more than one command
at a time by adding newlines to the verb, as it will most likely lead
to undesirable results.
"""
if self.__generating:
raise NNTPSyncError("Command issued while a generator is active")
cmd = verb
if args:
cmd += " " + args
cmd += "\r\n"
self.socket.sendall(cmd)
try:
code, message = self.status()
except NNTPTemporaryError as e:
if e.code() != 480:
raise e
code, message = self.command("AUTHINFO USER", self.username)
if code == 381:
code, message = self.command("AUTHINFO PASS", self.password)
if code != 281:
raise NNTPReplyError(code, message)
code, message = self.command(verb, args)
return code, message | Call a command on the server.
If the user has not authenticated then authentication will be done
as part of calling the command on the server.
For commands that don't return a status message the status message
will default to an empty string.
Args:
verb: The verb of the command to call.
args: The arguments of the command as a string (default None).
Returns:
A tuple of status code (as an integer) and status message.
Note:
You can run raw commands by supplying the full command (including
args) in the verb.
Note: Although it is possible, you shouldn't issue more than one command
at a time by adding newlines to the verb, as it will most likely lead
to undesirable results. | Below is the instruction that describes the task:
### Input:
Call a command on the server.
If the user has not authenticated then authentication will be done
as part of calling the command on the server.
For commands that don't return a status message the status message
will default to an empty string.
Args:
verb: The verb of the command to call.
args: The arguments of the command as a string (default None).
Returns:
A tuple of status code (as an integer) and status message.
Note:
You can run raw commands by supplying the full command (including
args) in the verb.
Note: Although it is possible, you shouldn't issue more than one command
at a time by adding newlines to the verb, as it will most likely lead
to undesirable results.
### Response:
def command(self, verb, args=None):
"""Call a command on the server.
If the user has not authenticated then authentication will be done
as part of calling the command on the server.
For commands that don't return a status message the status message
will default to an empty string.
Args:
verb: The verb of the command to call.
args: The arguments of the command as a string (default None).
Returns:
A tuple of status code (as an integer) and status message.
Note:
You can run raw commands by supplying the full command (including
args) in the verb.
Note: Although it is possible, you shouldn't issue more than one command
at a time by adding newlines to the verb, as it will most likely lead
to undesirable results.
"""
if self.__generating:
raise NNTPSyncError("Command issued while a generator is active")
cmd = verb
if args:
cmd += " " + args
cmd += "\r\n"
self.socket.sendall(cmd)
try:
code, message = self.status()
except NNTPTemporaryError as e:
if e.code() != 480:
raise e
code, message = self.command("AUTHINFO USER", self.username)
if code == 381:
code, message = self.command("AUTHINFO PASS", self.password)
if code != 281:
raise NNTPReplyError(code, message)
code, message = self.command(verb, args)
return code, message |
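A brief, hedged usage sketch follows; the `NNTPClient` class name and constructor signature are assumptions standing in for whatever client class in the surrounding library defines `command`.

```python
# Hypothetical usage sketch of the command() method.
client = NNTPClient("news.example.com", 119, username="user", password="secret")

# Verb plus separate arguments: select a newsgroup.
code, message = client.command("GROUP", "alt.test")

# Raw command: the full command (verb and args) supplied in the verb.
code, message = client.command("GROUP alt.test")
print(code, message)
```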
def from_request(cls, request):
"""Create new TransientShardState from webapp request."""
mapreduce_spec = MapreduceSpec.from_json_str(request.get("mapreduce_spec"))
mapper_spec = mapreduce_spec.mapper
input_reader_spec_dict = json.loads(request.get("input_reader_state"),
cls=json_util.JsonDecoder)
input_reader = mapper_spec.input_reader_class().from_json(
input_reader_spec_dict)
initial_input_reader_spec_dict = json.loads(
request.get("initial_input_reader_state"), cls=json_util.JsonDecoder)
initial_input_reader = mapper_spec.input_reader_class().from_json(
initial_input_reader_spec_dict)
output_writer = None
if mapper_spec.output_writer_class():
output_writer = mapper_spec.output_writer_class().from_json(
json.loads(request.get("output_writer_state", "{}"),
cls=json_util.JsonDecoder))
assert isinstance(output_writer, mapper_spec.output_writer_class()), (
"%s.from_json returned an instance of wrong class: %s" % (
mapper_spec.output_writer_class(),
output_writer.__class__))
handler = util.try_deserialize_handler(request.get("serialized_handler"))
if not handler:
handler = mapreduce_spec.mapper.handler
return cls(mapreduce_spec.params["base_path"],
mapreduce_spec,
str(request.get("shard_id")),
int(request.get("slice_id")),
input_reader,
initial_input_reader,
output_writer=output_writer,
retries=int(request.get("retries")),
handler=handler) | Create new TransientShardState from webapp request. | Below is the instruction that describes the task:
### Input:
Create new TransientShardState from webapp request.
### Response:
def from_request(cls, request):
"""Create new TransientShardState from webapp request."""
mapreduce_spec = MapreduceSpec.from_json_str(request.get("mapreduce_spec"))
mapper_spec = mapreduce_spec.mapper
input_reader_spec_dict = json.loads(request.get("input_reader_state"),
cls=json_util.JsonDecoder)
input_reader = mapper_spec.input_reader_class().from_json(
input_reader_spec_dict)
initial_input_reader_spec_dict = json.loads(
request.get("initial_input_reader_state"), cls=json_util.JsonDecoder)
initial_input_reader = mapper_spec.input_reader_class().from_json(
initial_input_reader_spec_dict)
output_writer = None
if mapper_spec.output_writer_class():
output_writer = mapper_spec.output_writer_class().from_json(
json.loads(request.get("output_writer_state", "{}"),
cls=json_util.JsonDecoder))
assert isinstance(output_writer, mapper_spec.output_writer_class()), (
"%s.from_json returned an instance of wrong class: %s" % (
mapper_spec.output_writer_class(),
output_writer.__class__))
handler = util.try_deserialize_handler(request.get("serialized_handler"))
if not handler:
handler = mapreduce_spec.mapper.handler
return cls(mapreduce_spec.params["base_path"],
mapreduce_spec,
str(request.get("shard_id")),
int(request.get("slice_id")),
input_reader,
initial_input_reader,
output_writer=output_writer,
retries=int(request.get("retries")),
handler=handler) |
def _read_conf_file(path):
'''
Read in a config file from a given path and process it into a dictionary
'''
log.debug('Reading configuration from %s', path)
with salt.utils.files.fopen(path, 'r') as conf_file:
try:
conf_opts = salt.utils.yaml.safe_load(conf_file) or {}
except salt.utils.yaml.YAMLError as err:
message = 'Error parsing configuration file: {0} - {1}'.format(path, err)
log.error(message)
raise salt.exceptions.SaltConfigurationError(message)
# only interpret documents as a valid conf, not things like strings,
# which might have been caused by invalid yaml syntax
if not isinstance(conf_opts, dict):
message = 'Error parsing configuration file: {0} - conf ' \
'should be a document, not {1}.'.format(path, type(conf_opts))
log.error(message)
raise salt.exceptions.SaltConfigurationError(message)
# allow using numeric ids: convert int to string
if 'id' in conf_opts:
if not isinstance(conf_opts['id'], six.string_types):
conf_opts['id'] = six.text_type(conf_opts['id'])
else:
conf_opts['id'] = salt.utils.data.decode(conf_opts['id'])
return conf_opts | Read in a config file from a given path and process it into a dictionary | Below is the instruction that describes the task:
### Input:
Read in a config file from a given path and process it into a dictionary
### Response:
def _read_conf_file(path):
'''
Read in a config file from a given path and process it into a dictionary
'''
log.debug('Reading configuration from %s', path)
with salt.utils.files.fopen(path, 'r') as conf_file:
try:
conf_opts = salt.utils.yaml.safe_load(conf_file) or {}
except salt.utils.yaml.YAMLError as err:
message = 'Error parsing configuration file: {0} - {1}'.format(path, err)
log.error(message)
raise salt.exceptions.SaltConfigurationError(message)
# only interpret documents as a valid conf, not things like strings,
# which might have been caused by invalid yaml syntax
if not isinstance(conf_opts, dict):
message = 'Error parsing configuration file: {0} - conf ' \
'should be a document, not {1}.'.format(path, type(conf_opts))
log.error(message)
raise salt.exceptions.SaltConfigurationError(message)
# allow using numeric ids: convert int to string
if 'id' in conf_opts:
if not isinstance(conf_opts['id'], six.string_types):
conf_opts['id'] = six.text_type(conf_opts['id'])
else:
conf_opts['id'] = salt.utils.data.decode(conf_opts['id'])
return conf_opts |
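For readers outside of Salt, the same pattern can be sketched with plain PyYAML; this is a simplified, hedged re-implementation for illustration, not Salt's actual helper.

```python
# Standalone sketch of the same idea using PyYAML directly: load a YAML
# config, reject non-mapping documents, and stringify a numeric 'id'.
import yaml

def read_conf_file(path):
    with open(path, 'r') as conf_file:
        conf_opts = yaml.safe_load(conf_file) or {}
    if not isinstance(conf_opts, dict):
        raise ValueError(
            'Error parsing configuration file: {0} - conf should be a '
            'document, not {1}.'.format(path, type(conf_opts)))
    if 'id' in conf_opts and not isinstance(conf_opts['id'], str):
        conf_opts['id'] = str(conf_opts['id'])
    return conf_opts
```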
def _persist(self) -> None:
"""
Persists the current data group
"""
if self._store:
self._store.save(self._key, self._snapshot) | Persists the current data group | Below is the instruction that describes the task:
### Input:
Persists the current data group
### Response:
def _persist(self) -> None:
"""
Persists the current data group
"""
if self._store:
self._store.save(self._key, self._snapshot) |
def _handle_history_reply(self, msg):
""" Implemented to handle history tail replies, which are only supported
by the IPython kernel.
"""
content = msg['content']
if 'history' not in content:
self.log.error("History request failed: %r"%content)
if content.get('status', '') == 'aborted' and \
not self._retrying_history_request:
# a *different* action caused this request to be aborted, so
# we should try again.
self.log.error("Retrying aborted history request")
# prevent multiple retries of aborted requests:
self._retrying_history_request = True
# wait out the kernel's queue flush, which is currently timed at 0.1s
time.sleep(0.25)
self.kernel_manager.shell_channel.history(hist_access_type='tail',n=1000)
else:
self._retrying_history_request = False
return
# reset retry flag
self._retrying_history_request = False
history_items = content['history']
self.log.debug("Received history reply with %i entries", len(history_items))
items = []
last_cell = u""
for _, _, cell in history_items:
cell = cell.rstrip()
if cell != last_cell:
items.append(cell)
last_cell = cell
self._set_history(items) | Implemented to handle history tail replies, which are only supported
by the IPython kernel. | Below is the instruction that describes the task:
### Input:
Implemented to handle history tail replies, which are only supported
by the IPython kernel.
### Response:
def _handle_history_reply(self, msg):
""" Implemented to handle history tail replies, which are only supported
by the IPython kernel.
"""
content = msg['content']
if 'history' not in content:
self.log.error("History request failed: %r"%content)
if content.get('status', '') == 'aborted' and \
not self._retrying_history_request:
# a *different* action caused this request to be aborted, so
# we should try again.
self.log.error("Retrying aborted history request")
# prevent multiple retries of aborted requests:
self._retrying_history_request = True
# wait out the kernel's queue flush, which is currently timed at 0.1s
time.sleep(0.25)
self.kernel_manager.shell_channel.history(hist_access_type='tail',n=1000)
else:
self._retrying_history_request = False
return
# reset retry flag
self._retrying_history_request = False
history_items = content['history']
self.log.debug("Received history reply with %i entries", len(history_items))
items = []
last_cell = u""
for _, _, cell in history_items:
cell = cell.rstrip()
if cell != last_cell:
items.append(cell)
last_cell = cell
self._set_history(items) |
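For orientation, the reply structure this handler consumes looks roughly like the sketch below; the field layout follows the IPython messaging protocol, and the exact shape should be treated as an assumption rather than a guarantee.

```python
# Sketch of a history tail reply as _handle_history_reply expects it.
msg = {
    'content': {
        'status': 'ok',
        # (session, line_number, input) tuples, oldest first
        'history': [
            (1, 0, 'a = 1'),
            (1, 1, 'print(a)'),
        ],
    },
}
```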
def render_mako_template_to(
template, outpath, subsd, only_update=False, cwd=None,
prev_subsd=None, create_dest_dirs=False, logger=None,
pass_warn_string=True, **kwargs):
"""
template: either string of path or file like obj.
Beware of the only_update option, it pays no attention to
an updated subsd.
pass_warn_string: default True
if True or instance of str:
an extra variable named '_warning_in_the_generated_file_not_to_edit'
is passed with a preset (True) or string warning not to
directly edit the generated file.
"""
if cwd:
template = os.path.join(cwd, template)
outpath = os.path.join(cwd, outpath)
outdir = os.path.dirname(outpath) or '.' # avoid ''
if not os.path.exists(outdir):
if create_dest_dirs:
make_dirs(outdir, logger=logger)
else:
raise FileNotFoundError(
"Dest. dir. non-existent: {}".format(outdir))
msg = None
if pass_warn_string is True:
subsd['_warning_in_the_generated_file_not_to_edit'] = (
"DO NOT EDIT THIS FILE! (Generated from template: {} using" +
" Mako python templating engine)"
).format(os.path.basename(template))
elif isinstance(pass_warn_string, str):
subsd['_warning_in_the_generated_file_not_to_edit'] =\
pass_warn_string
if only_update:
if prev_subsd == subsd and not \
missing_or_other_newer(outpath, template):
if logger:
msg = "Did not re-render {}. (destination newer + same dict)"
logger.info(msg.format(template))
return
if hasattr(template, 'read'):
# set in-file handle to provided template
ifh = template
else:
# Assume template is a string of the path to the template
ifh = open(template, 'rt')
template_str = ifh.read()
kwargs_Template = {'input_encoding': 'utf-8', 'output_encoding': 'utf-8'}
kwargs_Template.update(kwargs)
with open(outpath, 'wb') as ofh:
from mako.template import Template
from mako.exceptions import text_error_template
try:
rendered = Template(
template_str, **kwargs_Template).render(**subsd)
except:
if logger:
logger.error(text_error_template().render())
else:
print(text_error_template().render())
raise
if logger:
logger.info("Rendering '{}' to '{}'...".format(
ifh.name, outpath))
ofh.write(rendered)
return outpath | template: either string of path or file like obj.
Beware of the only_update option, it pays no attention to
an updated subsd.
pass_warn_string: default True
if True or instance of str:
an extra variable named '_warning_in_the_generated_file_not_to_edit'
is passed with a preset (True) or string warning not to
directly edit the generated file. | Below is the instruction that describes the task:
### Input:
template: either string of path or file like obj.
Beware of the only_update option, it pays no attention to
an updated subsd.
pass_warn_string: default True
if True or instance of str:
an extra variable named '_warning_in_the_generated_file_not_to_edit'
is passed with a preset (True) or string warning not to
directly edit the generated file.
### Response:
def render_mako_template_to(
template, outpath, subsd, only_update=False, cwd=None,
prev_subsd=None, create_dest_dirs=False, logger=None,
pass_warn_string=True, **kwargs):
"""
template: either string of path or file like obj.
Beware of the only_update option, it pays no attention to
an updated subsd.
pass_warn_string: default True
if True or instance of str:
an extra variable named '_warning_in_the_generated_file_not_to_edit'
is passed with a preset (True) or string warning not to
directly edit the generated file.
"""
if cwd:
template = os.path.join(cwd, template)
outpath = os.path.join(cwd, outpath)
outdir = os.path.dirname(outpath) or '.' # avoid ''
if not os.path.exists(outdir):
if create_dest_dirs:
make_dirs(outdir, logger=logger)
else:
raise FileNotFoundError(
"Dest. dir. non-existent: {}".format(outdir))
msg = None
if pass_warn_string is True:
subsd['_warning_in_the_generated_file_not_to_edit'] = (
"DO NOT EDIT THIS FILE! (Generated from template: {} using" +
" Mako python templating engine)"
).format(os.path.basename(template))
elif isinstance(pass_warn_string, str):
subsd['_warning_in_the_generated_file_not_to_edit'] =\
pass_warn_string
if only_update:
if prev_subsd == subsd and not \
missing_or_other_newer(outpath, template):
if logger:
msg = "Did not re-render {}. (destination newer + same dict)"
logger.info(msg.format(template))
return
if hasattr(template, 'read'):
# set in-file handle to provided template
ifh = template
else:
# Assume template is a string of the path to the template
ifh = open(template, 'rt')
template_str = ifh.read()
kwargs_Template = {'input_encoding': 'utf-8', 'output_encoding': 'utf-8'}
kwargs_Template.update(kwargs)
with open(outpath, 'wb') as ofh:
from mako.template import Template
from mako.exceptions import text_error_template
try:
rendered = Template(
template_str, **kwargs_Template).render(**subsd)
except:
if logger:
logger.error(text_error_template().render())
else:
print(text_error_template().render())
raise
if logger:
logger.info("Rendering '{}' to '{}'...".format(
ifh.name, outpath))
ofh.write(rendered)
return outpath |
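A short usage sketch, assuming Mako is installed and the function above is importable; the template file name and substitution dict are made up for illustration.

```python
# Hypothetical usage sketch: render a tiny Mako template to a file.
with open('hello.mako', 'wt') as fh:
    fh.write('Hello ${name}! (${_warning_in_the_generated_file_not_to_edit})\n')

out = render_mako_template_to('hello.mako', 'hello.txt', {'name': 'world'})
print(out)  # path of the rendered file, i.e. 'hello.txt'
```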
def delete(self, file_id):
"""
Remove a specific file from the File Manager.
:param file_id: The unique id for the File Manager file.
:type file_id: :py:class:`str`
"""
self.file_id = file_id
return self._mc_client._delete(url=self._build_path(file_id)) | Remove a specific file from the File Manager.
:param file_id: The unique id for the File Manager file.
:type file_id: :py:class:`str` | Below is the instruction that describes the task:
### Input:
Remove a specific file from the File Manager.
:param file_id: The unique id for the File Manager file.
:type file_id: :py:class:`str`
### Response:
def delete(self, file_id):
"""
Remove a specific file from the File Manager.
:param file_id: The unique id for the File Manager file.
:type file_id: :py:class:`str`
"""
self.file_id = file_id
return self._mc_client._delete(url=self._build_path(file_id)) |
def _edges_classify_intersection9():
"""The edges for the curved polygon intersection used below.
Helper for :func:`classify_intersection9`.
"""
edges1 = (
bezier.Curve.from_nodes(
np.asfortranarray([[32.0, 30.0], [20.0, 25.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[30.0, 25.0, 20.0], [25.0, 20.0, 20.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[20.0, 25.0, 30.0], [20.0, 20.0, 15.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[30.0, 32.0], [15.0, 20.0]])
),
)
edges2 = (
bezier.Curve.from_nodes(
np.asfortranarray([[8.0, 10.0], [20.0, 15.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[10.0, 15.0, 20.0], [15.0, 20.0, 20.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[20.0, 15.0, 10.0], [20.0, 20.0, 25.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[10.0, 8.0], [25.0, 20.0]])
),
)
return edges1, edges2 | The edges for the curved polygon intersection used below.
Helper for :func:`classify_intersection9`. | Below is the instruction that describes the task:
### Input:
The edges for the curved polygon intersection used below.
Helper for :func:`classify_intersection9`.
### Response:
def _edges_classify_intersection9():
"""The edges for the curved polygon intersection used below.
Helper for :func:`classify_intersection9`.
"""
edges1 = (
bezier.Curve.from_nodes(
np.asfortranarray([[32.0, 30.0], [20.0, 25.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[30.0, 25.0, 20.0], [25.0, 20.0, 20.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[20.0, 25.0, 30.0], [20.0, 20.0, 15.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[30.0, 32.0], [15.0, 20.0]])
),
)
edges2 = (
bezier.Curve.from_nodes(
np.asfortranarray([[8.0, 10.0], [20.0, 15.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[10.0, 15.0, 20.0], [15.0, 20.0, 20.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[20.0, 15.0, 10.0], [20.0, 20.0, 25.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[10.0, 8.0], [25.0, 20.0]])
),
)
return edges1, edges2 |
def vagrant_settings(self, name='', *args, **kwargs):
"""
Context manager that sets a vagrant VM
as the remote host.
Use this context manager inside a task to run commands
on your current Vagrant box::
from burlap.vagrant import vagrant_settings
with vagrant_settings():
run('hostname')
"""
config = self.ssh_config(name)
extra_args = self._settings_dict(config)
kwargs.update(extra_args)
return self.settings(*args, **kwargs) | Context manager that sets a vagrant VM
as the remote host.
Use this context manager inside a task to run commands
on your current Vagrant box::
from burlap.vagrant import vagrant_settings
with vagrant_settings():
run('hostname') | Below is the instruction that describes the task:
### Input:
Context manager that sets a vagrant VM
as the remote host.
Use this context manager inside a task to run commands
on your current Vagrant box::
from burlap.vagrant import vagrant_settings
with vagrant_settings():
run('hostname')
### Response:
def vagrant_settings(self, name='', *args, **kwargs):
"""
Context manager that sets a vagrant VM
as the remote host.
Use this context manager inside a task to run commands
on your current Vagrant box::
from burlap.vagrant import vagrant_settings
with vagrant_settings():
run('hostname')
"""
config = self.ssh_config(name)
extra_args = self._settings_dict(config)
kwargs.update(extra_args)
return self.settings(*args, **kwargs) |
def listDatasetParents(self, dataset=''):
"""
API to list a dataset's parents in DBS.
:param dataset: dataset (Required)
:type dataset: str
:returns: List of dictionaries containing the following keys (this_dataset, parent_dataset_id, parent_dataset)
:rtype: list of dicts
"""
try:
return self.dbsDataset.listDatasetParents(dataset)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listDatasetParents. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | API to list a dataset's parents in DBS.
:param dataset: dataset (Required)
:type dataset: str
:returns: List of dictionaries containing the following keys (this_dataset, parent_dataset_id, parent_dataset)
:rtype: list of dicts | Below is the instruction that describes the task:
### Input:
API to list a dataset's parents in DBS.
:param dataset: dataset (Required)
:type dataset: str
:returns: List of dictionaries containing the following keys (this_dataset, parent_dataset_id, parent_dataset)
:rtype: list of dicts
### Response:
def listDatasetParents(self, dataset=''):
"""
API to list a dataset's parents in DBS.
:param dataset: dataset (Required)
:type dataset: str
:returns: List of dictionaries containing the following keys (this_dataset, parent_dataset_id, parent_dataset)
:rtype: list of dicts
"""
try:
return self.dbsDataset.listDatasetParents(dataset)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listDatasetParents. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) |
def _wrap_attribute(self, attr):
"""Wrap the empty attributes of the Slice in a Const node."""
if not attr:
const = const_factory(attr)
const.parent = self
return const
return attr | Wrap the empty attributes of the Slice in a Const node. | Below is the instruction that describes the task:
### Input:
Wrap the empty attributes of the Slice in a Const node.
### Response:
def _wrap_attribute(self, attr):
"""Wrap the empty attributes of the Slice in a Const node."""
if not attr:
const = const_factory(attr)
const.parent = self
return const
return attr |
def _fusion_range_to_dsl(tokens) -> FusionRangeBase:
"""Convert a PyParsing data dictionary into a PyBEL.
:type tokens: ParseResult
"""
if FUSION_MISSING in tokens:
return missing_fusion_range()
return fusion_range(
reference=tokens[FUSION_REFERENCE],
start=tokens[FUSION_START],
stop=tokens[FUSION_STOP]
) | Convert a PyParsing data dictionary into a PyBEL fusion range object.
:type tokens: ParseResult | Below is the instruction that describes the task:
### Input:
Convert a PyParsing data dictionary into a PyBEL fusion range object.
:type tokens: ParseResult
### Response:
def _fusion_range_to_dsl(tokens) -> FusionRangeBase:
"""Convert a PyParsing data dictionary into a PyBEL.
:type tokens: ParseResult
"""
if FUSION_MISSING in tokens:
return missing_fusion_range()
return fusion_range(
reference=tokens[FUSION_REFERENCE],
start=tokens[FUSION_START],
stop=tokens[FUSION_STOP]
) |
def matcher(self):
"""
Matcher engine: ruled, unruled, unparsed.
"""
if self.args.matcher is None:
return 'ruled'
elif self.args.matcher.startswith('-'):
matcher = self.args.matcher.strip('-').replace('-', '_')
else:
matcher = self.args.matcher
if matcher not in ['ruled', 'unruled', 'unparsed']:
raise LogRaptorArgumentError('matcher', 'unknown matcher argument %r' % matcher)
return matcher | Matcher engine: ruled, unruled, unparsed. | Below is the instruction that describes the task:
### Input:
Matcher engine: ruled, unruled, unparsed.
### Response:
def matcher(self):
"""
Matcher engine: ruled, unruled, unparsed.
"""
if self.args.matcher is None:
return 'ruled'
elif self.args.matcher.startswith('-'):
matcher = self.args.matcher.strip('-').replace('-', '_')
else:
matcher = self.args.matcher
if matcher not in ['ruled', 'unruled', 'unparsed']:
raise LogRaptorArgumentError('matcher', 'unknown matcher argument %r' % matcher)
return matcher |
def gen_stack_patches(patch_list,
nr_row=None, nr_col=None, border=None,
max_width=1000, max_height=1000,
bgcolor=255, viz=False, lclick_cb=None):
"""
Similar to :func:`stack_patches` but with a generator interface.
It takes a much-longer list and yields stacked results one by one.
For example, if ``patch_list`` contains 1000 images and ``nr_row==nr_col==10``,
this generator yields 10 stacked images.
Args:
nr_row(int), nr_col(int): rows and cols of each result.
max_width(int), max_height(int): Maximum allowed size of the
stacked image. If ``nr_row/nr_col`` are None, this number
will be used to infer the rows and cols. Otherwise the option is
ignored.
patch_list, border, viz, lclick_cb: same as in :func:`stack_patches`.
Yields:
np.ndarray: the stacked image.
"""
# setup parameters
patch_list = _preprocess_patch_list(patch_list)
if lclick_cb is not None:
viz = True
ph, pw = patch_list.shape[1:3]
if border is None:
border = int(0.05 * min(ph, pw))
if nr_row is None:
nr_row = int(max_height / (ph + border))
if nr_col is None:
nr_col = int(max_width / (pw + border))
canvas = Canvas(ph, pw, nr_row, nr_col, patch_list.shape[-1], border, bgcolor)
nr_patch = nr_row * nr_col
start = 0
if lclick_cb is not None:
def lclick_callback(img, x, y):
idx = canvas.get_patchid_from_coord(x, y)
idx = idx + start
if idx < end:
lclick_cb(patch_list[idx], idx)
else:
lclick_callback = None
while True:
end = start + nr_patch
cur_list = patch_list[start:end]
if not len(cur_list):
return
canvas.draw_patches(cur_list)
if viz:
interactive_imshow(canvas.canvas, lclick_cb=lclick_callback)
yield canvas.canvas
start = end | Similar to :func:`stack_patches` but with a generator interface.
It takes a much-longer list and yields stacked results one by one.
For example, if ``patch_list`` contains 1000 images and ``nr_row==nr_col==10``,
this generator yields 10 stacked images.
Args:
nr_row(int), nr_col(int): rows and cols of each result.
max_width(int), max_height(int): Maximum allowed size of the
stacked image. If ``nr_row/nr_col`` are None, this number
will be used to infer the rows and cols. Otherwise the option is
ignored.
patch_list, border, viz, lclick_cb: same as in :func:`stack_patches`.
Yields:
np.ndarray: the stacked image. | Below is the instruction that describes the task:
### Input:
Similar to :func:`stack_patches` but with a generator interface.
It takes a much-longer list and yields stacked results one by one.
For example, if ``patch_list`` contains 1000 images and ``nr_row==nr_col==10``,
this generator yields 10 stacked images.
Args:
nr_row(int), nr_col(int): rows and cols of each result.
max_width(int), max_height(int): Maximum allowed size of the
stacked image. If ``nr_row/nr_col`` are None, this number
will be used to infer the rows and cols. Otherwise the option is
ignored.
patch_list, border, viz, lclick_cb: same as in :func:`stack_patches`.
Yields:
np.ndarray: the stacked image.
### Response:
def gen_stack_patches(patch_list,
nr_row=None, nr_col=None, border=None,
max_width=1000, max_height=1000,
bgcolor=255, viz=False, lclick_cb=None):
"""
Similar to :func:`stack_patches` but with a generator interface.
It takes a much-longer list and yields stacked results one by one.
For example, if ``patch_list`` contains 1000 images and ``nr_row==nr_col==10``,
this generator yields 10 stacked images.
Args:
nr_row(int), nr_col(int): rows and cols of each result.
max_width(int), max_height(int): Maximum allowed size of the
stacked image. If ``nr_row/nr_col`` are None, this number
will be used to infer the rows and cols. Otherwise the option is
ignored.
patch_list, border, viz, lclick_cb: same as in :func:`stack_patches`.
Yields:
np.ndarray: the stacked image.
"""
# setup parameters
patch_list = _preprocess_patch_list(patch_list)
if lclick_cb is not None:
viz = True
ph, pw = patch_list.shape[1:3]
if border is None:
border = int(0.05 * min(ph, pw))
if nr_row is None:
nr_row = int(max_height / (ph + border))
if nr_col is None:
nr_col = int(max_width / (pw + border))
canvas = Canvas(ph, pw, nr_row, nr_col, patch_list.shape[-1], border, bgcolor)
nr_patch = nr_row * nr_col
start = 0
if lclick_cb is not None:
def lclick_callback(img, x, y):
idx = canvas.get_patchid_from_coord(x, y)
idx = idx + start
if idx < end:
lclick_cb(patch_list[idx], idx)
else:
lclick_callback = None
while True:
end = start + nr_patch
cur_list = patch_list[start:end]
if not len(cur_list):
return
canvas.draw_patches(cur_list)
if viz:
interactive_imshow(canvas.canvas, lclick_cb=lclick_callback)
yield canvas.canvas
start = end |
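A hedged usage sketch; it assumes the tensorpack-style `viz` module that defines `gen_stack_patches` accepts an `(N, H, W, C)` uint8 array as the patch list.

```python
# Hypothetical usage sketch: stack 100 random 32x32 RGB patches into 4x4
# grids and iterate over the resulting canvases.
import numpy as np

patches = np.random.randint(0, 255, size=(100, 32, 32, 3), dtype='uint8')
for canvas in gen_stack_patches(patches, nr_row=4, nr_col=4):
    print(canvas.shape)  # one stacked image of up to 16 patches per iteration
```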
def create_key(policy=None, description=None, key_usage=None, region=None,
key=None, keyid=None, profile=None):
'''
Creates a master key.
CLI example::
salt myminion boto_kms.create_key '{"Statement":...}' "My master key"
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
_policy = salt.serializers.json.serialize(policy)
try:
key_metadata = conn.create_key(
_policy,
description=description,
key_usage=key_usage
)
r['key_metadata'] = key_metadata['KeyMetadata']
except boto.exception.BotoServerError as e:
r['error'] = __utils__['boto.get_error'](e)
return r | Creates a master key.
CLI example::
salt myminion boto_kms.create_key '{"Statement":...}' "My master key" | Below is the instruction that describes the task:
### Input:
Creates a master key.
CLI example::
salt myminion boto_kms.create_key '{"Statement":...}' "My master key"
### Response:
def create_key(policy=None, description=None, key_usage=None, region=None,
key=None, keyid=None, profile=None):
'''
Creates a master key.
CLI example::
salt myminion boto_kms.create_key '{"Statement":...}' "My master key"
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
_policy = salt.serializers.json.serialize(policy)
try:
key_metadata = conn.create_key(
_policy,
description=description,
key_usage=key_usage
)
r['key_metadata'] = key_metadata['KeyMetadata']
except boto.exception.BotoServerError as e:
r['error'] = __utils__['boto.get_error'](e)
return r |
def send_backspace(self, count):
"""
Sends the given number of backspace key presses.
"""
for i in range(count):
self.interface.send_key(Key.BACKSPACE) | Sends the given number of backspace key presses. | Below is the instruction that describes the task:
### Input:
Sends the given number of backspace key presses.
### Response:
def send_backspace(self, count):
"""
Sends the given number of backspace key presses.
"""
for i in range(count):
self.interface.send_key(Key.BACKSPACE) |
def index_bamfile(job, bamfile, sample_type, univ_options, samtools_options, sample_info=None,
export=True):
"""
Index `bamfile` using samtools
:param toil.fileStore.FileID bamfile: fsID for the bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:param str sample_info: Information regarding the sample that will be injected into the filename
as `sample_type`_`sample_info`.bam(.bai)
:param bool export: Should the bam and bai be exported to the output directory?
:return: Dict containing input bam and the generated index (.bam.bai)
output_files:
|- '<sample_type>(_<sample_info>).bam': fsID
+- '<sample_type>(_<sample_info>).bam.bai': fsID
:rtype: dict
"""
work_dir = os.getcwd()
in_bamfile = sample_type
if sample_info is not None:
assert isinstance(sample_info, str)
in_bamfile = '_'.join([in_bamfile, sample_info])
in_bamfile += '.bam'
input_files = {
in_bamfile: bamfile}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['index',
input_files[in_bamfile]]
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=samtools_options['version'])
out_bai = '/'.join([work_dir, in_bamfile + '.bai'])
output_files = {in_bamfile: bamfile,
in_bamfile + '.bai': job.fileStore.writeGlobalFile(out_bai)}
if export:
export_results(job, bamfile, os.path.splitext(out_bai)[0], univ_options,
subfolder='alignments')
export_results(job, output_files[in_bamfile + '.bai'], out_bai, univ_options,
subfolder='alignments')
job.fileStore.logToMaster('Ran samtools-index on %s:%s successfully'
% (univ_options['patient'], sample_type))
return output_files | Index `bamfile` using samtools
:param toil.fileStore.FileID bamfile: fsID for the bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:param str sample_info: Information regarding the sample that will be injected into the filename
as `sample_type`_`sample_info`.bam(.bai)
:param bool export: Should the bam and bai be exported to the output directory?
:return: Dict containing input bam and the generated index (.bam.bai)
output_files:
|- '<sample_type>(_<sample_info>).bam': fsID
+- '<sample_type>(_<sample_info>).bam.bai': fsID
:rtype: dict | Below is the instruction that describes the task:
### Input:
Index `bamfile` using samtools
:param toil.fileStore.FileID bamfile: fsID for the bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:param str sample_info: Information regarding the sample that will be injected into the filename
as `sample_type`_`sample_info`.bam(.bai)
:param bool export: Should the bam and bai be exported to the output directory?
:return: Dict containing input bam and the generated index (.bam.bai)
output_files:
|- '<sample_type>(_<sample_info>).bam': fsID
+- '<sample_type>(_<sample_info>).bam.bai': fsID
:rtype: dict
### Response:
def index_bamfile(job, bamfile, sample_type, univ_options, samtools_options, sample_info=None,
export=True):
"""
Index `bamfile` using samtools
:param toil.fileStore.FileID bamfile: fsID for the bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:param str sample_info: Information regarding the sample that will be injected into the filename
as `sample_type`_`sample_info`.bam(.bai)
:param bool export: Should the bam and bai be exported to the output directory?
:return: Dict containing input bam and the generated index (.bam.bai)
output_files:
|- '<sample_type>(_<sample_info>).bam': fsID
+- '<sample_type>(_<sample_info>).bam.bai': fsID
:rtype: dict
"""
work_dir = os.getcwd()
in_bamfile = sample_type
if sample_info is not None:
assert isinstance(sample_info, str)
in_bamfile = '_'.join([in_bamfile, sample_info])
in_bamfile += '.bam'
input_files = {
in_bamfile: bamfile}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['index',
input_files[in_bamfile]]
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=samtools_options['version'])
out_bai = '/'.join([work_dir, in_bamfile + '.bai'])
output_files = {in_bamfile: bamfile,
in_bamfile + '.bai': job.fileStore.writeGlobalFile(out_bai)}
if export:
export_results(job, bamfile, os.path.splitext(out_bai)[0], univ_options,
subfolder='alignments')
export_results(job, output_files[in_bamfile + '.bai'], out_bai, univ_options,
subfolder='alignments')
job.fileStore.logToMaster('Ran samtools-index on %s:%s successfully'
% (univ_options['patient'], sample_type))
return output_files |
def mboxes(self):
"""Get the mboxes managed by this mailing list.
Returns the archives sorted by date in ascending order.
:returns: a list of `.MBoxArchive` objects
"""
archives = []
for mbox in super().mboxes:
dt = self._parse_date_from_filepath(mbox.filepath)
archives.append((dt, mbox))
archives.sort(key=lambda x: x[0])
return [a[1] for a in archives] | Get the mboxes managed by this mailing list.
Returns the archives sorted by date in ascending order.
:returns: a list of `.MBoxArchive` objects | Below is the instruction that describes the task:
### Input:
Get the mboxes managed by this mailing list.
Returns the archives sorted by date in ascending order.
:returns: a list of `.MBoxArchive` objects
### Response:
def mboxes(self):
"""Get the mboxes managed by this mailing list.
Returns the archives sorted by date in ascending order.
:returns: a list of `.MBoxArchive` objects
"""
archives = []
for mbox in super().mboxes:
dt = self._parse_date_from_filepath(mbox.filepath)
archives.append((dt, mbox))
archives.sort(key=lambda x: x[0])
return [a[1] for a in archives] |
def getNextRecord(self):
"""
Get the next record to encode. Includes getting a record from the
`dataSource` and applying filters. If the filters request more data from the
`dataSource` continue to get data from the `dataSource` until all filters
are satisfied. This method is separate from :meth:`.RecordSensor.compute` so that we can
use a standalone :class:`.RecordSensor` to get filtered data.
"""
allFiltersHaveEnoughData = False
while not allFiltersHaveEnoughData:
# Get the data from the dataSource
data = self.dataSource.getNextRecordDict()
if not data:
raise StopIteration("Datasource has no more data")
# temporary check
if "_reset" not in data:
data["_reset"] = 0
if "_sequenceId" not in data:
data["_sequenceId"] = 0
if "_category" not in data:
data["_category"] = [None]
data, allFiltersHaveEnoughData = self.applyFilters(data)
self.lastRecord = data
return data | Get the next record to encode. Includes getting a record from the
`dataSource` and applying filters. If the filters request more data from the
`dataSource` continue to get data from the `dataSource` until all filters
are satisfied. This method is separate from :meth:`.RecordSensor.compute` so that we can
use a standalone :class:`.RecordSensor` to get filtered data. | Below is the instruction that describes the task:
### Input:
Get the next record to encode. Includes getting a record from the
`dataSource` and applying filters. If the filters request more data from the
`dataSource` continue to get data from the `dataSource` until all filters
are satisfied. This method is separate from :meth:`.RecordSensor.compute` so that we can
use a standalone :class:`.RecordSensor` to get filtered data.
### Response:
def getNextRecord(self):
"""
Get the next record to encode. Includes getting a record from the
`dataSource` and applying filters. If the filters request more data from the
`dataSource` continue to get data from the `dataSource` until all filters
are satisfied. This method is separate from :meth:`.RecordSensor.compute` so that we can
use a standalone :class:`.RecordSensor` to get filtered data.
"""
allFiltersHaveEnoughData = False
while not allFiltersHaveEnoughData:
# Get the data from the dataSource
data = self.dataSource.getNextRecordDict()
if not data:
raise StopIteration("Datasource has no more data")
# temporary check
if "_reset" not in data:
data["_reset"] = 0
if "_sequenceId" not in data:
data["_sequenceId"] = 0
if "_category" not in data:
data["_category"] = [None]
data, allFiltersHaveEnoughData = self.applyFilters(data)
self.lastRecord = data
return data |
def autoclean_cv(training_dataframe, testing_dataframe, drop_nans=False, copy=False,
encoder=None, encoder_kwargs=None, ignore_update_check=False):
"""Performs a series of automated data cleaning transformations on the provided training and testing data sets
Unlike `autoclean()`, this function takes cross-validation into account by learning the data transformations
from only the training set, then applying those transformations to both the training and testing set.
By doing so, this function will prevent information leak from the training set into the testing set.
Parameters
----------
training_dataframe: pandas.DataFrame
Training data set
testing_dataframe: pandas.DataFrame
Testing data set
drop_nans: bool
Drop all rows that have a NaN in any column (default: False)
copy: bool
Make a copy of the data set (default: False)
encoder: category_encoders transformer
A valid category_encoders transformer which is passed an inferred cols list. Default (None: LabelEncoder)
encoder_kwargs: category_encoders
A valid sklearn transformer to encode categorical features. Default (None)
ignore_update_check: bool
Do not check for the latest version of datacleaner
Returns
----------
output_training_dataframe: pandas.DataFrame
Cleaned training data set
output_testing_dataframe: pandas.DataFrame
Cleaned testing data set
"""
global update_checked
if ignore_update_check:
update_checked = True
if not update_checked:
update_check('datacleaner', __version__)
update_checked = True
if set(training_dataframe.columns.values) != set(testing_dataframe.columns.values):
raise ValueError('The training and testing DataFrames do not have the same columns. '
'Make sure that you are providing the same columns.')
if copy:
training_dataframe = training_dataframe.copy()
testing_dataframe = testing_dataframe.copy()
if drop_nans:
training_dataframe.dropna(inplace=True)
testing_dataframe.dropna(inplace=True)
if encoder_kwargs is None:
encoder_kwargs = {}
for column in training_dataframe.columns.values:
# Replace NaNs with the median or mode of the column depending on the column type
try:
column_median = training_dataframe[column].median()
training_dataframe[column].fillna(column_median, inplace=True)
testing_dataframe[column].fillna(column_median, inplace=True)
except TypeError:
column_mode = training_dataframe[column].mode()[0]
training_dataframe[column].fillna(column_mode, inplace=True)
testing_dataframe[column].fillna(column_mode, inplace=True)
# Encode all strings with numerical equivalents
if str(training_dataframe[column].values.dtype) == 'object':
if encoder is not None:
column_encoder = encoder(**encoder_kwargs).fit(training_dataframe[column].values)
else:
column_encoder = LabelEncoder().fit(training_dataframe[column].values)
training_dataframe[column] = column_encoder.transform(training_dataframe[column].values)
testing_dataframe[column] = column_encoder.transform(testing_dataframe[column].values)
return training_dataframe, testing_dataframe | Performs a series of automated data cleaning transformations on the provided training and testing data sets
Unlike `autoclean()`, this function takes cross-validation into account by learning the data transformations
from only the training set, then applying those transformations to both the training and testing set.
By doing so, this function will prevent information leak from the training set into the testing set.
Parameters
----------
training_dataframe: pandas.DataFrame
Training data set
testing_dataframe: pandas.DataFrame
Testing data set
drop_nans: bool
Drop all rows that have a NaN in any column (default: False)
copy: bool
Make a copy of the data set (default: False)
encoder: category_encoders transformer
A valid category_encoders transformer which is passed an inferred cols list. Default (None: LabelEncoder)
encoder_kwargs: category_encoders
A valid sklearn transformer to encode categorical features. Default (None)
ignore_update_check: bool
Do not check for the latest version of datacleaner
Returns
----------
output_training_dataframe: pandas.DataFrame
Cleaned training data set
output_testing_dataframe: pandas.DataFrame
Cleaned testing data set | Below is the instruction that describes the task:
### Input:
Performs a series of automated data cleaning transformations on the provided training and testing data sets
Unlike `autoclean()`, this function takes cross-validation into account by learning the data transformations
from only the training set, then applying those transformations to both the training and testing set.
By doing so, this function will prevent information leak from the training set into the testing set.
Parameters
----------
training_dataframe: pandas.DataFrame
Training data set
testing_dataframe: pandas.DataFrame
Testing data set
drop_nans: bool
Drop all rows that have a NaN in any column (default: False)
copy: bool
Make a copy of the data set (default: False)
encoder: category_encoders transformer
A valid category_encoders transformer which is passed an inferred cols list. Default (None: LabelEncoder)
encoder_kwargs: category_encoders
A valid sklearn transformer to encode categorical features. Default (None)
ignore_update_check: bool
Do not check for the latest version of datacleaner
Returns
----------
output_training_dataframe: pandas.DataFrame
Cleaned training data set
output_testing_dataframe: pandas.DataFrame
Cleaned testing data set
### Response:
def autoclean_cv(training_dataframe, testing_dataframe, drop_nans=False, copy=False,
encoder=None, encoder_kwargs=None, ignore_update_check=False):
"""Performs a series of automated data cleaning transformations on the provided training and testing data sets
Unlike `autoclean()`, this function takes cross-validation into account by learning the data transformations
from only the training set, then applying those transformations to both the training and testing set.
By doing so, this function will prevent information leak from the training set into the testing set.
Parameters
----------
training_dataframe: pandas.DataFrame
Training data set
testing_dataframe: pandas.DataFrame
Testing data set
drop_nans: bool
Drop all rows that have a NaN in any column (default: False)
copy: bool
Make a copy of the data set (default: False)
encoder: category_encoders transformer
A valid category_encoders transformer which is passed an inferred cols list. Default (None: LabelEncoder)
encoder_kwargs: category_encoders
A valid sklearn transformer to encode categorical features. Default (None)
ignore_update_check: bool
Do not check for the latest version of datacleaner
Returns
----------
output_training_dataframe: pandas.DataFrame
Cleaned training data set
output_testing_dataframe: pandas.DataFrame
Cleaned testing data set
"""
global update_checked
if ignore_update_check:
update_checked = True
if not update_checked:
update_check('datacleaner', __version__)
update_checked = True
if set(training_dataframe.columns.values) != set(testing_dataframe.columns.values):
raise ValueError('The training and testing DataFrames do not have the same columns. '
'Make sure that you are providing the same columns.')
if copy:
training_dataframe = training_dataframe.copy()
testing_dataframe = testing_dataframe.copy()
if drop_nans:
training_dataframe.dropna(inplace=True)
testing_dataframe.dropna(inplace=True)
if encoder_kwargs is None:
encoder_kwargs = {}
for column in training_dataframe.columns.values:
# Replace NaNs with the median or mode of the column depending on the column type
try:
column_median = training_dataframe[column].median()
training_dataframe[column].fillna(column_median, inplace=True)
testing_dataframe[column].fillna(column_median, inplace=True)
except TypeError:
column_mode = training_dataframe[column].mode()[0]
training_dataframe[column].fillna(column_mode, inplace=True)
testing_dataframe[column].fillna(column_mode, inplace=True)
# Encode all strings with numerical equivalents
if str(training_dataframe[column].values.dtype) == 'object':
if encoder is not None:
column_encoder = encoder(**encoder_kwargs).fit(training_dataframe[column].values)
else:
column_encoder = LabelEncoder().fit(training_dataframe[column].values)
training_dataframe[column] = column_encoder.transform(training_dataframe[column].values)
testing_dataframe[column] = column_encoder.transform(testing_dataframe[column].values)
return training_dataframe, testing_dataframe |
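A small usage sketch with made-up data, assuming pandas and scikit-learn are installed alongside the function above.

```python
# Hypothetical usage sketch: clean a train/test split without leaking
# statistics (medians, label encodings) from the test set into training.
import pandas as pd

train = pd.DataFrame({'age': [25, None, 40], 'city': ['NY', 'SF', 'NY']})
test = pd.DataFrame({'age': [30, None], 'city': ['SF', 'NY']})

clean_train, clean_test = autoclean_cv(train, test, copy=True,
                                       ignore_update_check=True)
print(clean_train.dtypes)  # every column is now numeric
```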
def copy_data(data_length, blocksize, infp, outfp):
# type: (int, int, BinaryIO, BinaryIO) -> None
'''
A utility function to copy data from the input file object to the output
file object. This function will use the most efficient copy method available,
which is often sendfile.
Parameters:
data_length - The amount of data to copy.
blocksize - How much data to copy per iteration.
infp - The file object to copy data from.
outfp - The file object to copy data to.
Returns:
Nothing.
'''
use_sendfile = False
if have_sendfile:
# Python 3 implements the fileno method for all file-like objects, so
# we can't just use the existence of the method to tell whether it is
# available. Instead, we try to assign it, and if we fail, then we
# assume it is not available.
try:
x_unused = infp.fileno() # NOQA
y_unused = outfp.fileno() # NOQA
use_sendfile = True
except (AttributeError, io.UnsupportedOperation):
pass
if use_sendfile:
# This is one of those instances where using the file object and the
# file descriptor causes problems. The sendfile() call actually updates
# the underlying file descriptor, but the file object does not know
# about it. To get around this, we instead get the offset, allow
# sendfile() to update the offset, then manually seek the file object
# to the right location. This ensures that the file object gets updated
# properly.
in_offset = infp.tell()
out_offset = outfp.tell()
sendfile(outfp.fileno(), infp.fileno(), in_offset, data_length)
infp.seek(in_offset + data_length)
outfp.seek(out_offset + data_length)
else:
left = data_length
readsize = blocksize
while left > 0:
if left < readsize:
readsize = left
data = infp.read(readsize)
# We have seen ISOs in the wild (Tribes Vengeance 1of4.iso) that
# lie about the size of their files, causing reads to fail (since
# we hit EOF before the supposed end of the file). If we are using
# sendfile above, sendfile just silently returns as much data as it
# can, with no additional checking. We should do the same here, so
# if we got less data than we asked for, abort the loop silently.
data_len = len(data)
if data_len != readsize:
data_len = left
outfp.write(data)
left -= data_len | A utility function to copy data from the input file object to the output
file object. This function will use the most efficient copy method available,
which is often sendfile.
Parameters:
data_length - The amount of data to copy.
blocksize - How much data to copy per iteration.
infp - The file object to copy data from.
outfp - The file object to copy data to.
Returns:
Nothing. | Below is the instruction that describes the task:
### Input:
A utility function to copy data from the input file object to the output
file object. This function will use the most efficient copy method available,
which is often sendfile.
Parameters:
data_length - The amount of data to copy.
blocksize - How much data to copy per iteration.
infp - The file object to copy data from.
outfp - The file object to copy data to.
Returns:
Nothing.
### Response:
def copy_data(data_length, blocksize, infp, outfp):
# type: (int, int, BinaryIO, BinaryIO) -> None
'''
A utility function to copy data from the input file object to the output
file object. This function will use the most efficient copy method available,
which is often sendfile.
Parameters:
data_length - The amount of data to copy.
blocksize - How much data to copy per iteration.
infp - The file object to copy data from.
outfp - The file object to copy data to.
Returns:
Nothing.
'''
use_sendfile = False
if have_sendfile:
# Python 3 implements the fileno method for all file-like objects, so
# we can't just use the existence of the method to tell whether it is
# available. Instead, we try to assign it, and if we fail, then we
# assume it is not available.
try:
x_unused = infp.fileno() # NOQA
y_unused = outfp.fileno() # NOQA
use_sendfile = True
except (AttributeError, io.UnsupportedOperation):
pass
if use_sendfile:
# This is one of those instances where using the file object and the
# file descriptor causes problems. The sendfile() call actually updates
# the underlying file descriptor, but the file object does not know
# about it. To get around this, we instead get the offset, allow
# sendfile() to update the offset, then manually seek the file object
# to the right location. This ensures that the file object gets updated
# properly.
in_offset = infp.tell()
out_offset = outfp.tell()
sendfile(outfp.fileno(), infp.fileno(), in_offset, data_length)
infp.seek(in_offset + data_length)
outfp.seek(out_offset + data_length)
else:
left = data_length
readsize = blocksize
while left > 0:
if left < readsize:
readsize = left
data = infp.read(readsize)
# We have seen ISOs in the wild (Tribes Vengeance 1of4.iso) that
# lie about the size of their files, causing reads to fail (since
# we hit EOF before the supposed end of the file). If we are using
# sendfile above, sendfile just silently returns as much data as it
# can, with no additional checking. We should do the same here, so
# if we got less data than we asked for, abort the loop silently.
data_len = len(data)
if data_len != readsize:
data_len = left
outfp.write(data)
left -= data_len |
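A minimal usage sketch for the copy_data record above; the file names and block size are invented for illustration, only copy_data itself comes from the record.

import os

blocksize = 32768
with open('input.iso', 'rb') as infp, open('copy.iso', 'wb') as outfp:
    # copy the whole input file in blocksize chunks
    data_length = os.fstat(infp.fileno()).st_size
    copy_data(data_length, blocksize, infp, outfp)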
def get_all_apps():
"""Get a list of all applications in Spinnaker.
Returns:
requests.models.Response: Response from Gate containing list of all apps.
"""
LOG.info('Retrieving list of all Spinnaker applications')
url = '{}/applications'.format(API_URL)
response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
assert response.ok, 'Could not retrieve application list'
pipelines = response.json()
LOG.debug('All Applications:\n%s', pipelines)
return pipelines | Get a list of all applications in Spinnaker.
Returns:
requests.models.Response: Response from Gate containing list of all apps. | Below is the the instruction that describes the task:
### Input:
Get a list of all applications in Spinnaker.
Returns:
requests.models.Response: Response from Gate containing list of all apps.
### Response:
def get_all_apps():
"""Get a list of all applications in Spinnaker.
Returns:
requests.models.Response: Response from Gate containing list of all apps.
"""
LOG.info('Retrieving list of all Spinnaker applications')
url = '{}/applications'.format(API_URL)
response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
assert response.ok, 'Could not retrieve application list'
pipelines = response.json()
LOG.debug('All Applications:\n%s', pipelines)
return pipelines |
def get_devices_by_parent(self, hid_filter=None):
"""Group devices returned from filter query in order \
by device parent id.
"""
all_devs = self.get_devices(hid_filter)
dev_group = dict()
for hid_device in all_devs:
#keep a list of known devices matching parent device Ids
parent_id = hid_device.get_parent_instance_id()
device_set = dev_group.get(parent_id, [])
device_set.append(hid_device)
if parent_id not in dev_group:
#add new
dev_group[parent_id] = device_set
return dev_group | Group devices returned from filter query in order \
by device parent id. | Below is the the instruction that describes the task:
### Input:
Group devices returned from filter query in order \
by device parent id.
### Response:
def get_devices_by_parent(self, hid_filter=None):
"""Group devices returned from filter query in order \
by device parent id.
"""
all_devs = self.get_devices(hid_filter)
dev_group = dict()
for hid_device in all_devs:
#keep a list of known devices matching parent device Ids
parent_id = hid_device.get_parent_instance_id()
device_set = dev_group.get(parent_id, [])
device_set.append(hid_device)
if parent_id not in dev_group:
#add new
dev_group[parent_id] = device_set
return dev_group |
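The grouping step in get_devices_by_parent is the usual bucket-by-key pattern; a self-contained sketch of the same idea on invented (name, parent_id) pairs:

devices = [('dev1', 'parentA'), ('dev2', 'parentA'), ('dev3', 'parentB')]

dev_group = {}
for name, parent_id in devices:
    # setdefault creates the bucket the first time a parent id is seen
    dev_group.setdefault(parent_id, []).append(name)

print(dev_group)  # {'parentA': ['dev1', 'dev2'], 'parentB': ['dev3']}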
def _calc(cls, **kwargs):
"""
Calculate sunrise or sunset based on:
Parameters:
jd: Julian Day
lat: latitude
lon: longitude
stage: sunrise or sunset
"""
zenith = 90.833333 # official value
jd = kwargs.get("jd", None)
lat = kwargs.get("lat", None)
lon = kwargs.get("lon", None)
stage = kwargs.get("stage", None)
if jd is None or stage is None or lat is None or lon is None:
raise ValueError("Must supply an 'jd', 'lat, 'lon', and 'stage' parameter")
if stage != SunCycles.RISING and stage != SunCycles.SETTING:
raise ValueError("'stage' parameter must be %s or %s" % (SunCycles.RISING, SunCycles.SETTING))
longhr = lon / 15.
if stage == SunCycles.RISING:
apx = jd + ( (6 - longhr) / 24 )
elif stage == SunCycles.SETTING:
apx = jd + ( (18 - longhr) / 24 )
sun_mean_anom = ( 0.9856 * apx ) - 3.289 # sun's mean anomaly
#sun's longitude
sun_lon = sun_mean_anom + (1.916 * np.sin( np.radians(sun_mean_anom) )) \
+ (0.02 * np.sin( np.radians(2 * sun_mean_anom) )) + 282.634
if sun_lon > 360:
sun_lon = sun_lon - 360
elif sun_lon < 0:
sun_lon = sun_lon + 360
right_ascension = np.degrees(np.arctan( 0.91764 * np.tan( np.radians(sun_lon) ) )) # sun's right ascension
if right_ascension > 360:
right_ascension = right_ascension - 360
elif right_ascension < 0:
right_ascension = right_ascension + 360
# put sun's right ascension value in the same quadrant as the sun's
# true longitude
lQuad = 90. * np.floor(sun_lon / 90.)
raQuad = 90. * np.floor(right_ascension / 90.)
right_ascension = right_ascension + ( lQuad - raQuad)
right_ascension = right_ascension / 15. # Convert to hours
# Sun's declination
sinDecl = 0.39782 * np.sin( np.radians(sun_lon) )
cosDecl = np.cos( np.arcsin( sinDecl ) )
# Sun's local hour angle
cosHr = (np.cos( np.radians(zenith) ) - ( sinDecl * np.sin(np.radians(lat)) )) \
/ ( cosDecl * np.cos( np.radians(lat) ) )
if cosHr > 1: # Sun doesn't rise at this location on this date
return -1, -1
elif cosHr < -1: # Sun doesn't set at this location on this date
return -1, -1
elif stage == SunCycles.RISING: # Sunrise
hr = 360 - np.degrees(np.arccos(cosHr))
elif stage == SunCycles.SETTING: # Sunset
hr = np.degrees(np.arccos(cosHr))
hr = hr / 15. # Convert angle to hours
localTime = hr + right_ascension - ( 0.06571 * apx ) - 6.622# local meantime of rise/set
UTtime = localTime - longhr # adjust to UTC
if UTtime < 0:
UTtime = UTtime + 24
elif UTtime > 24:
UTtime = UTtime - 24
hour = np.floor(UTtime)
minute = (UTtime - hour) * 60
if minute == 60:
hour = hour + 1
minute = 0
return hour, minute | Calculate sunrise or sunset based on:
Parameters:
jd: Julian Day
lat: latitude
lon: longitude
stage: sunrise or sunset | Below is the the instruction that describes the task:
### Input:
Calculate sunrise or sunset based on:
Parameters:
jd: Julian Day
lat: latitude
lon: longitude
stage: sunrise or sunset
### Response:
def _calc(cls, **kwargs):
"""
Calculate sunrise or sunset based on:
Parameters:
jd: Julian Day
lat: latitude
lon: longitude
stage: sunrise or sunset
"""
zenith = 90.833333 # official value
jd = kwargs.get("jd", None)
lat = kwargs.get("lat", None)
lon = kwargs.get("lon", None)
stage = kwargs.get("stage", None)
if jd is None or stage is None or lat is None or lon is None:
raise ValueError("Must supply an 'jd', 'lat, 'lon', and 'stage' parameter")
if stage != SunCycles.RISING and stage != SunCycles.SETTING:
raise ValueError("'stage' parameter must be %s or %s" % (SunCycles.RISING, SunCycles.SETTING))
longhr = lon / 15.
if stage == SunCycles.RISING:
apx = jd + ( (6 - longhr) / 24 )
elif stage == SunCycles.SETTING:
apx = jd + ( (18 - longhr) / 24 )
sun_mean_anom = ( 0.9856 * apx ) - 3.289 # sun's mean anomaly
#sun's longitude
sun_lon = sun_mean_anom + (1.916 * np.sin( np.radians(sun_mean_anom) )) \
+ (0.02 * np.sin( np.radians(2 * sun_mean_anom) )) + 282.634
if sun_lon > 360:
sun_lon = sun_lon - 360
elif sun_lon < 0:
sun_lon = sun_lon + 360
right_ascension = np.degrees(np.arctan( 0.91764 * np.tan( np.radians(sun_lon) ) )) # sun's right ascension
if right_ascension > 360:
right_ascension = right_ascension - 360
elif right_ascension < 0:
right_ascension = right_ascension + 360
# put sun's right ascension value in the same quadrant as the sun's
# true longitude
lQuad = 90. * np.floor(sun_lon / 90.)
raQuad = 90. * np.floor(right_ascension / 90.)
right_ascension = right_ascension + ( lQuad - raQuad)
right_ascension = right_ascension / 15. # Convert to hours
# Sun's declination
sinDecl = 0.39782 * np.sin( np.radians(sun_lon) )
cosDecl = np.cos( np.arcsin( sinDecl ) )
# Sun's local hour angle
cosHr = (np.cos( np.radians(zenith) ) - ( sinDecl * np.sin(np.radians(lat)) )) \
/ ( cosDecl * np.cos( np.radians(lat) ) )
if cosHr > 1: # Sun doesn't rise at this location on this date
return -1, -1
elif cosHr < -1: # Sun doesn't set at this location on this date
return -1, -1
elif stage == SunCycles.RISING: # Sunrise
hr = 360 - np.degrees(np.arccos(cosHr))
elif stage == SunCycles.SETTING: # Sunset
hr = np.degrees(np.arccos(cosHr))
hr = hr / 15. # Convert angle to hours
localTime = hr + right_ascension - ( 0.06571 * apx ) - 6.622# local meantime of rise/set
UTtime = localTime - longhr # adjust to UTC
if UTtime < 0:
UTtime = UTtime + 24
elif UTtime > 24:
UTtime = UTtime - 24
hour = np.floor(UTtime)
minute = (UTtime - hour) * 60
if minute == 60:
hour = hour + 1
minute = 0
return hour, minute |
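A hypothetical call to the routine above, assuming _calc is exposed on a SunCycles class with RISING/SETTING constants as the record suggests; the day number and coordinates are invented:

# day number, latitude and longitude are made up for illustration
hour, minute = SunCycles._calc(jd=172, lat=40.7, lon=-74.0,
                               stage=SunCycles.RISING)
if (hour, minute) == (-1, -1):
    print('no sunrise at this location on this date')
else:
    print('UTC sunrise at %02d:%02d' % (hour, minute))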
def dust(args):
"""
%prog dust assembly.fasta
Remove low-complexity contigs within assembly.
"""
p = OptionParser(dust.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
dustfastafile = fastafile.rsplit(".", 1)[0] + ".dust.fasta"
if need_update(fastafile, dustfastafile):
cmd = "dustmasker -in {0}".format(fastafile)
cmd += " -out {0} -outfmt fasta".format(dustfastafile)
sh(cmd)
for name, seq in parse_fasta(dustfastafile):
nlow = sum(1 for x in seq if x in "acgtnN")
pctlow = nlow * 100. / len(seq)
if pctlow < 98:
continue
#print "{0}\t{1:.1f}".format(name, pctlow)
print(name) | %prog dust assembly.fasta
Remove low-complexity contigs within assembly. | Below is the the instruction that describes the task:
### Input:
%prog dust assembly.fasta
Remove low-complexity contigs within assembly.
### Response:
def dust(args):
"""
%prog dust assembly.fasta
Remove low-complexity contigs within assembly.
"""
p = OptionParser(dust.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
dustfastafile = fastafile.rsplit(".", 1)[0] + ".dust.fasta"
if need_update(fastafile, dustfastafile):
cmd = "dustmasker -in {0}".format(fastafile)
cmd += " -out {0} -outfmt fasta".format(dustfastafile)
sh(cmd)
for name, seq in parse_fasta(dustfastafile):
nlow = sum(1 for x in seq if x in "acgtnN")
pctlow = nlow * 100. / len(seq)
if pctlow < 98:
continue
#print "{0}\t{1:.1f}".format(name, pctlow)
print(name) |
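The filter applied by dust() is simply the fraction of soft-masked (lowercase) or N bases in dustmasker's output; the same check in isolation on an invented sequence:

seq = "ACGTacgtacgtNNNNacgtacgtacgtACGT"   # invented example sequence

nlow = sum(1 for x in seq if x in "acgtnN")   # soft-masked or ambiguous bases
pctlow = nlow * 100. / len(seq)
if pctlow >= 98:
    print("contig is almost entirely low-complexity; drop it")
else:
    print("keep contig (%.1f%% low-complexity)" % pctlow)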
def biomaRtTOkegg(df):
"""
Transforms a pandas dataframe with the columns 'ensembl_gene_id','kegg_enzyme'
to dataframe ready for use in ...
:param df: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
:returns: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
"""
df=df.dropna()
ECcols=df.columns.tolist()
df.reset_index(inplace=True,drop=True)
# field = ECsb[['kegg_enzyme']]
field = pd.DataFrame(df['kegg_enzyme'].str.split('+',1).tolist())[1]
field = pd.DataFrame(field)
df=pd.concat([df[['ensembl_gene_id']],field],axis=1)
df.columns=ECcols
df.drop_duplicates(inplace=True)
df.reset_index(inplace=True,drop=True)
plus=df['kegg_enzyme'].tolist()
plus=[ s for s in plus if "+" in s ]
noPlus=df[~df['kegg_enzyme'].isin(plus)]
plus=df[df['kegg_enzyme'].isin(plus)]
noPlus.reset_index(inplace=True, drop=True)
plus.reset_index(inplace=True, drop=True)
for p in range(0,len(plus)):
enz=plus.ix[p]['kegg_enzyme']
enz=enz.split("+")
enz=pd.DataFrame(enz)
enz.columns=['kegg_enzyme']
enz['ensembl_gene_id']=plus.ix[p]['kegg_enzyme']
noPlus=pd.concat([noPlus,enz])
noPlus=noPlus.drop_duplicates()
noPlus=noPlus[['ensembl_gene_id','kegg_enzyme']]
noPlus['fake']='ec:'
noPlus['kegg_enzyme']=noPlus['fake']+noPlus['kegg_enzyme']
noPlus=noPlus[['ensembl_gene_id','kegg_enzyme']]
return noPlus | Transforms a pandas dataframe with the columns 'ensembl_gene_id','kegg_enzyme'
to dataframe ready for use in ...
:param df: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
:returns: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme' | Below is the the instruction that describes the task:
### Input:
Transforms a pandas dataframe with the columns 'ensembl_gene_id','kegg_enzyme'
to dataframe ready for use in ...
:param df: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
:returns: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
### Response:
def biomaRtTOkegg(df):
"""
Transforms a pandas dataframe with the columns 'ensembl_gene_id','kegg_enzyme'
to dataframe ready for use in ...
:param df: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
:returns: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
"""
df=df.dropna()
ECcols=df.columns.tolist()
df.reset_index(inplace=True,drop=True)
# field = ECsb[['kegg_enzyme']]
field = pd.DataFrame(df['kegg_enzyme'].str.split('+',1).tolist())[1]
field = pd.DataFrame(field)
df=pd.concat([df[['ensembl_gene_id']],field],axis=1)
df.columns=ECcols
df.drop_duplicates(inplace=True)
df.reset_index(inplace=True,drop=True)
plus=df['kegg_enzyme'].tolist()
plus=[ s for s in plus if "+" in s ]
noPlus=df[~df['kegg_enzyme'].isin(plus)]
plus=df[df['kegg_enzyme'].isin(plus)]
noPlus.reset_index(inplace=True, drop=True)
plus.reset_index(inplace=True, drop=True)
for p in range(0,len(plus)):
enz=plus.ix[p]['kegg_enzyme']
enz=enz.split("+")
enz=pd.DataFrame(enz)
enz.columns=['kegg_enzyme']
enz['ensembl_gene_id']=plus.ix[p]['kegg_enzyme']
noPlus=pd.concat([noPlus,enz])
noPlus=noPlus.drop_duplicates()
noPlus=noPlus[['ensembl_gene_id','kegg_enzyme']]
noPlus['fake']='ec:'
noPlus['kegg_enzyme']=noPlus['fake']+noPlus['kegg_enzyme']
noPlus=noPlus[['ensembl_gene_id','kegg_enzyme']]
return noPlus |
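The heart of biomaRtTOkegg is splitting the kegg_enzyme column on '+' and prefixing 'ec:'; a small sketch of those two steps on invented data (the column values are made up):

import pandas as pd

df = pd.DataFrame({'ensembl_gene_id': ['ENSG1', 'ENSG2'],
                   'kegg_enzyme': ['hsa00010+1.1.1.1', 'hsa00020+2.3.3.8']})

# keep only the enzyme part after the first '+', then add the 'ec:' prefix
df['kegg_enzyme'] = 'ec:' + df['kegg_enzyme'].str.split('+', n=1).str[1]
print(df)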
def snapshot (self):
"""Take a snapshot of the experiment.
Returns `self`."""
nextSnapshotNum = self.nextSnapshotNum
nextSnapshotPath = self.getFullPathToSnapshot(nextSnapshotNum)
if os.path.lexists(nextSnapshotPath):
self.rmR(nextSnapshotPath)
self.mkdirp(os.path.join(nextSnapshotPath, ".experiment"))
return self.dump(nextSnapshotPath).__markLatest(nextSnapshotNum) | Take a snapshot of the experiment.
Returns `self`. | Below is the the instruction that describes the task:
### Input:
Take a snapshot of the experiment.
Returns `self`.
### Response:
def snapshot (self):
"""Take a snapshot of the experiment.
Returns `self`."""
nextSnapshotNum = self.nextSnapshotNum
nextSnapshotPath = self.getFullPathToSnapshot(nextSnapshotNum)
if os.path.lexists(nextSnapshotPath):
self.rmR(nextSnapshotPath)
self.mkdirp(os.path.join(nextSnapshotPath, ".experiment"))
return self.dump(nextSnapshotPath).__markLatest(nextSnapshotNum) |
def create(vm_=None, call=None):
'''
Create a single VM from a data dict
'''
if call:
raise SaltCloudSystemExit(
'You cannot create an instance with -a or -f.'
)
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'ec2',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
# Check for private_key and keyfile name for bootstrapping new instances
deploy = config.get_cloud_config_value(
'deploy', vm_, __opts__, default=True
)
win_password = config.get_cloud_config_value(
'win_password', vm_, __opts__, default=''
)
key_filename = config.get_cloud_config_value(
'private_key', vm_, __opts__, search_global=False, default=None
)
if deploy:
# The private_key and keyname settings are only needed for bootstrapping
# new instances when deploy is True
_validate_key_path_and_mode(key_filename)
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'ec2', vm_['driver']
)
vm_['key_filename'] = key_filename
# wait_for_instance requires private_key
vm_['private_key'] = key_filename
# Get SSH Gateway config early to verify the private_key,
# if used, exists or not. We don't want to deploy an instance
# and not be able to access it via the gateway.
vm_['gateway'] = get_ssh_gateway_config(vm_)
location = get_location(vm_)
vm_['location'] = location
log.info('Creating Cloud VM %s in %s', vm_['name'], location)
vm_['usernames'] = salt.utils.cloud.ssh_usernames(
vm_,
__opts__,
default_users=(
'ec2-user', # Amazon Linux, Fedora, RHEL; FreeBSD
'centos', # CentOS AMIs from AWS Marketplace
'ubuntu', # Ubuntu
'admin', # Debian GNU/Linux
'bitnami', # BitNami AMIs
'root' # Last resort, default user on RHEL 5, SUSE
)
)
if 'instance_id' in vm_:
# This was probably created via another process, and doesn't have
# things like salt keys created yet, so let's create them now.
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'%s\'', vm_['name'])
vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
__opts__
)
)
else:
# Put together all of the information required to request the instance,
# and then fire off the request for it
if keyname(vm_) is None:
raise SaltCloudSystemExit(
'The required \'keyname\' configuration setting is missing from the '
'\'ec2\' driver.'
)
data, vm_ = request_instance(vm_, location)
# If data is a str, it's an error
if isinstance(data, six.string_types):
log.error('Error requesting instance: %s', data)
return {}
# Pull the instance ID, valid for both spot and normal instances
# Multiple instances may have been spun up, get all their IDs
vm_['instance_id_list'] = []
for instance in data:
vm_['instance_id_list'].append(instance['instanceId'])
vm_['instance_id'] = vm_['instance_id_list'].pop()
if vm_['instance_id_list']:
# Multiple instances were spun up, get one now, and queue the rest
queue_instances(vm_['instance_id_list'])
# Wait for vital information, such as IP addresses, to be available
# for the new instance
data = query_instance(vm_)
# Now that the instance is available, tag it appropriately. Should
# mitigate race conditions with tags
tags = config.get_cloud_config_value('tag',
vm_,
__opts__,
{},
search_global=False)
if not isinstance(tags, dict):
raise SaltCloudConfigError(
'\'tag\' should be a dict.'
)
for value in six.itervalues(tags):
if not isinstance(value, six.string_types):
raise SaltCloudConfigError(
'\'tag\' values must be strings. Try quoting the values. '
'e.g. "2013-09-19T20:09:46Z".'
)
tags['Name'] = vm_['name']
__utils__['cloud.fire_event'](
'event',
'setting tags',
'salt/cloud/{0}/tagging'.format(vm_['name']),
args={'tags': tags},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
salt.utils.cloud.wait_for_fun(
set_tags,
timeout=30,
name=vm_['name'],
tags=tags,
instance_id=vm_['instance_id'],
call='action',
location=location
)
# Once instance tags are set, tag the spot request if configured
if 'spot_config' in vm_ and 'tag' in vm_['spot_config']:
if not isinstance(vm_['spot_config']['tag'], dict):
raise SaltCloudConfigError(
'\'tag\' should be a dict.'
)
for value in six.itervalues(vm_['spot_config']['tag']):
if not isinstance(value, str):
raise SaltCloudConfigError(
'\'tag\' values must be strings. Try quoting the values. '
'e.g. "2013-09-19T20:09:46Z".'
)
spot_request_tags = {}
if 'spotRequestId' not in vm_:
raise SaltCloudConfigError('Failed to find spotRequestId')
sir_id = vm_['spotRequestId']
spot_request_tags['Name'] = vm_['name']
for k, v in six.iteritems(vm_['spot_config']['tag']):
spot_request_tags[k] = v
__utils__['cloud.fire_event'](
'event',
'setting tags',
'salt/cloud/spot_request_{0}/tagging'.format(sir_id),
args={'tags': spot_request_tags},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
salt.utils.cloud.wait_for_fun(
set_tags,
timeout=30,
name=vm_['name'],
tags=spot_request_tags,
instance_id=sir_id,
call='action',
location=location
)
network_interfaces = config.get_cloud_config_value(
'network_interfaces',
vm_,
__opts__,
search_global=False
)
if network_interfaces:
_update_enis(network_interfaces, data, vm_)
# At this point, the node is created and tagged, and now needs to be
# bootstrapped, once the necessary port is available.
log.info('Created node %s', vm_['name'])
instance = data[0]['instancesSet']['item']
# Wait for the necessary port to become available to bootstrap
if ssh_interface(vm_) == 'private_ips':
ip_address = instance['privateIpAddress']
log.info('Salt node data. Private_ip: %s', ip_address)
else:
ip_address = instance['ipAddress']
log.info('Salt node data. Public_ip: %s', ip_address)
vm_['ssh_host'] = ip_address
if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
salt_ip_address = instance['privateIpAddress']
log.info('Salt interface set to: %s', salt_ip_address)
else:
salt_ip_address = instance['ipAddress']
log.debug('Salt interface set to: %s', salt_ip_address)
vm_['salt_host'] = salt_ip_address
if deploy:
display_ssh_output = config.get_cloud_config_value(
'display_ssh_output', vm_, __opts__, default=True
)
vm_ = wait_for_instance(
vm_, data, ip_address, display_ssh_output
)
# The instance is booted and accessible, let's Salt it!
ret = instance.copy()
# Get ANY defined volumes settings, merging data, in the following order
# 1. VM config
# 2. Profile config
# 3. Global configuration
volumes = config.get_cloud_config_value(
'volumes', vm_, __opts__, search_global=True
)
if volumes:
__utils__['cloud.fire_event'](
'event',
'attaching volumes',
'salt/cloud/{0}/attaching_volumes'.format(vm_['name']),
args={'volumes': volumes},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Create and attach volumes to node %s', vm_['name'])
created = create_attach_volumes(
vm_['name'],
{
'volumes': volumes,
'zone': ret['placement']['availabilityZone'],
'instance_id': ret['instanceId'],
'del_all_vols_on_destroy': vm_.get('del_all_vols_on_destroy', False)
},
call='action'
)
ret['Attached Volumes'] = created
# Associate instance with a ssm document, if present
ssm_document = config.get_cloud_config_value(
'ssm_document', vm_, __opts__, None, search_global=False
)
if ssm_document:
log.debug('Associating with ssm document: %s', ssm_document)
assoc = ssm_create_association(
vm_['name'],
{'ssm_document': ssm_document},
instance_id=vm_['instance_id'],
call='action'
)
if isinstance(assoc, dict) and assoc.get('error', None):
log.error(
'Failed to associate instance %s with ssm document %s',
vm_['instance_id'], ssm_document
)
return {}
for key, value in six.iteritems(__utils__['cloud.bootstrap'](vm_, __opts__)):
ret.setdefault(key, value)
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(instance)
)
event_data = {
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['driver'],
'instance_id': vm_['instance_id'],
}
if volumes:
event_data['volumes'] = volumes
if ssm_document:
event_data['ssm_document'] = ssm_document
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', event_data, list(event_data)),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# Ensure that the latest node data is returned
node = _get_node(instance_id=vm_['instance_id'])
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
ret.update(node)
# Add any block device tags specified
ex_blockdevicetags = {}
blockdevicemappings_holder = block_device_mappings(vm_)
if blockdevicemappings_holder:
for _bd in blockdevicemappings_holder:
if 'tag' in _bd:
ex_blockdevicetags[_bd['DeviceName']] = _bd['tag']
block_device_volume_id_map = {}
if ex_blockdevicetags:
for _device, _map in six.iteritems(ret['blockDeviceMapping']):
bd_items = []
if isinstance(_map, dict):
bd_items.append(_map)
else:
for mapitem in _map:
bd_items.append(mapitem)
for blockitem in bd_items:
if blockitem['deviceName'] in ex_blockdevicetags and 'Name' not in ex_blockdevicetags[blockitem['deviceName']]:
ex_blockdevicetags[blockitem['deviceName']]['Name'] = vm_['name']
if blockitem['deviceName'] in ex_blockdevicetags:
block_device_volume_id_map[blockitem[ret['rootDeviceType']]['volumeId']] = ex_blockdevicetags[blockitem['deviceName']]
if block_device_volume_id_map:
for volid, tags in six.iteritems(block_device_volume_id_map):
__utils__['cloud.fire_event'](
'event',
'setting tags',
'salt/cloud/block_volume_{0}/tagging'.format(str(volid)),
args={'tags': tags},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
__utils__['cloud.wait_for_fun'](
set_tags,
timeout=30,
name=vm_['name'],
tags=tags,
resource_id=volid,
call='action',
location=location
)
return ret | Create a single VM from a data dict | Below is the the instruction that describes the task:
### Input:
Create a single VM from a data dict
### Response:
def create(vm_=None, call=None):
'''
Create a single VM from a data dict
'''
if call:
raise SaltCloudSystemExit(
'You cannot create an instance with -a or -f.'
)
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'ec2',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
# Check for private_key and keyfile name for bootstrapping new instances
deploy = config.get_cloud_config_value(
'deploy', vm_, __opts__, default=True
)
win_password = config.get_cloud_config_value(
'win_password', vm_, __opts__, default=''
)
key_filename = config.get_cloud_config_value(
'private_key', vm_, __opts__, search_global=False, default=None
)
if deploy:
# The private_key and keyname settings are only needed for bootstrapping
# new instances when deploy is True
_validate_key_path_and_mode(key_filename)
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'ec2', vm_['driver']
)
vm_['key_filename'] = key_filename
# wait_for_instance requires private_key
vm_['private_key'] = key_filename
# Get SSH Gateway config early to verify the private_key,
# if used, exists or not. We don't want to deploy an instance
# and not be able to access it via the gateway.
vm_['gateway'] = get_ssh_gateway_config(vm_)
location = get_location(vm_)
vm_['location'] = location
log.info('Creating Cloud VM %s in %s', vm_['name'], location)
vm_['usernames'] = salt.utils.cloud.ssh_usernames(
vm_,
__opts__,
default_users=(
'ec2-user', # Amazon Linux, Fedora, RHEL; FreeBSD
'centos', # CentOS AMIs from AWS Marketplace
'ubuntu', # Ubuntu
'admin', # Debian GNU/Linux
'bitnami', # BitNami AMIs
'root' # Last resort, default user on RHEL 5, SUSE
)
)
if 'instance_id' in vm_:
# This was probably created via another process, and doesn't have
# things like salt keys created yet, so let's create them now.
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'%s\'', vm_['name'])
vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
__opts__
)
)
else:
# Put together all of the information required to request the instance,
# and then fire off the request for it
if keyname(vm_) is None:
raise SaltCloudSystemExit(
'The required \'keyname\' configuration setting is missing from the '
'\'ec2\' driver.'
)
data, vm_ = request_instance(vm_, location)
# If data is a str, it's an error
if isinstance(data, six.string_types):
log.error('Error requesting instance: %s', data)
return {}
# Pull the instance ID, valid for both spot and normal instances
# Multiple instances may have been spun up, get all their IDs
vm_['instance_id_list'] = []
for instance in data:
vm_['instance_id_list'].append(instance['instanceId'])
vm_['instance_id'] = vm_['instance_id_list'].pop()
if vm_['instance_id_list']:
# Multiple instances were spun up, get one now, and queue the rest
queue_instances(vm_['instance_id_list'])
# Wait for vital information, such as IP addresses, to be available
# for the new instance
data = query_instance(vm_)
# Now that the instance is available, tag it appropriately. Should
# mitigate race conditions with tags
tags = config.get_cloud_config_value('tag',
vm_,
__opts__,
{},
search_global=False)
if not isinstance(tags, dict):
raise SaltCloudConfigError(
'\'tag\' should be a dict.'
)
for value in six.itervalues(tags):
if not isinstance(value, six.string_types):
raise SaltCloudConfigError(
'\'tag\' values must be strings. Try quoting the values. '
'e.g. "2013-09-19T20:09:46Z".'
)
tags['Name'] = vm_['name']
__utils__['cloud.fire_event'](
'event',
'setting tags',
'salt/cloud/{0}/tagging'.format(vm_['name']),
args={'tags': tags},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
salt.utils.cloud.wait_for_fun(
set_tags,
timeout=30,
name=vm_['name'],
tags=tags,
instance_id=vm_['instance_id'],
call='action',
location=location
)
# Once instance tags are set, tag the spot request if configured
if 'spot_config' in vm_ and 'tag' in vm_['spot_config']:
if not isinstance(vm_['spot_config']['tag'], dict):
raise SaltCloudConfigError(
'\'tag\' should be a dict.'
)
for value in six.itervalues(vm_['spot_config']['tag']):
if not isinstance(value, str):
raise SaltCloudConfigError(
'\'tag\' values must be strings. Try quoting the values. '
'e.g. "2013-09-19T20:09:46Z".'
)
spot_request_tags = {}
if 'spotRequestId' not in vm_:
raise SaltCloudConfigError('Failed to find spotRequestId')
sir_id = vm_['spotRequestId']
spot_request_tags['Name'] = vm_['name']
for k, v in six.iteritems(vm_['spot_config']['tag']):
spot_request_tags[k] = v
__utils__['cloud.fire_event'](
'event',
'setting tags',
'salt/cloud/spot_request_{0}/tagging'.format(sir_id),
args={'tags': spot_request_tags},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
salt.utils.cloud.wait_for_fun(
set_tags,
timeout=30,
name=vm_['name'],
tags=spot_request_tags,
instance_id=sir_id,
call='action',
location=location
)
network_interfaces = config.get_cloud_config_value(
'network_interfaces',
vm_,
__opts__,
search_global=False
)
if network_interfaces:
_update_enis(network_interfaces, data, vm_)
# At this point, the node is created and tagged, and now needs to be
# bootstrapped, once the necessary port is available.
log.info('Created node %s', vm_['name'])
instance = data[0]['instancesSet']['item']
# Wait for the necessary port to become available to bootstrap
if ssh_interface(vm_) == 'private_ips':
ip_address = instance['privateIpAddress']
log.info('Salt node data. Private_ip: %s', ip_address)
else:
ip_address = instance['ipAddress']
log.info('Salt node data. Public_ip: %s', ip_address)
vm_['ssh_host'] = ip_address
if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
salt_ip_address = instance['privateIpAddress']
log.info('Salt interface set to: %s', salt_ip_address)
else:
salt_ip_address = instance['ipAddress']
log.debug('Salt interface set to: %s', salt_ip_address)
vm_['salt_host'] = salt_ip_address
if deploy:
display_ssh_output = config.get_cloud_config_value(
'display_ssh_output', vm_, __opts__, default=True
)
vm_ = wait_for_instance(
vm_, data, ip_address, display_ssh_output
)
# The instance is booted and accessible, let's Salt it!
ret = instance.copy()
# Get ANY defined volumes settings, merging data, in the following order
# 1. VM config
# 2. Profile config
# 3. Global configuration
volumes = config.get_cloud_config_value(
'volumes', vm_, __opts__, search_global=True
)
if volumes:
__utils__['cloud.fire_event'](
'event',
'attaching volumes',
'salt/cloud/{0}/attaching_volumes'.format(vm_['name']),
args={'volumes': volumes},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Create and attach volumes to node %s', vm_['name'])
created = create_attach_volumes(
vm_['name'],
{
'volumes': volumes,
'zone': ret['placement']['availabilityZone'],
'instance_id': ret['instanceId'],
'del_all_vols_on_destroy': vm_.get('del_all_vols_on_destroy', False)
},
call='action'
)
ret['Attached Volumes'] = created
# Associate instance with a ssm document, if present
ssm_document = config.get_cloud_config_value(
'ssm_document', vm_, __opts__, None, search_global=False
)
if ssm_document:
log.debug('Associating with ssm document: %s', ssm_document)
assoc = ssm_create_association(
vm_['name'],
{'ssm_document': ssm_document},
instance_id=vm_['instance_id'],
call='action'
)
if isinstance(assoc, dict) and assoc.get('error', None):
log.error(
'Failed to associate instance %s with ssm document %s',
vm_['instance_id'], ssm_document
)
return {}
for key, value in six.iteritems(__utils__['cloud.bootstrap'](vm_, __opts__)):
ret.setdefault(key, value)
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(instance)
)
event_data = {
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['driver'],
'instance_id': vm_['instance_id'],
}
if volumes:
event_data['volumes'] = volumes
if ssm_document:
event_data['ssm_document'] = ssm_document
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', event_data, list(event_data)),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# Ensure that the latest node data is returned
node = _get_node(instance_id=vm_['instance_id'])
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
ret.update(node)
# Add any block device tags specified
ex_blockdevicetags = {}
blockdevicemappings_holder = block_device_mappings(vm_)
if blockdevicemappings_holder:
for _bd in blockdevicemappings_holder:
if 'tag' in _bd:
ex_blockdevicetags[_bd['DeviceName']] = _bd['tag']
block_device_volume_id_map = {}
if ex_blockdevicetags:
for _device, _map in six.iteritems(ret['blockDeviceMapping']):
bd_items = []
if isinstance(_map, dict):
bd_items.append(_map)
else:
for mapitem in _map:
bd_items.append(mapitem)
for blockitem in bd_items:
if blockitem['deviceName'] in ex_blockdevicetags and 'Name' not in ex_blockdevicetags[blockitem['deviceName']]:
ex_blockdevicetags[blockitem['deviceName']]['Name'] = vm_['name']
if blockitem['deviceName'] in ex_blockdevicetags:
block_device_volume_id_map[blockitem[ret['rootDeviceType']]['volumeId']] = ex_blockdevicetags[blockitem['deviceName']]
if block_device_volume_id_map:
for volid, tags in six.iteritems(block_device_volume_id_map):
__utils__['cloud.fire_event'](
'event',
'setting tags',
'salt/cloud/block_volume_{0}/tagging'.format(str(volid)),
args={'tags': tags},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
__utils__['cloud.wait_for_fun'](
set_tags,
timeout=30,
name=vm_['name'],
tags=tags,
resource_id=volid,
call='action',
location=location
)
return ret |
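One step that recurs in the create() flow above is checking that user-supplied tags form a dict of plain strings before tagging the instance; the same check in isolation (example tags are invented, and a plain ValueError stands in for SaltCloudConfigError):

tags = {'Name': 'web-01', 'CreatedAt': '2013-09-19T20:09:46Z'}

if not isinstance(tags, dict):
    raise ValueError("'tag' should be a dict.")
for value in tags.values():
    if not isinstance(value, str):
        raise ValueError("'tag' values must be strings. Try quoting the "
                         'values, e.g. "2013-09-19T20:09:46Z".')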
def rename_tables(db, table_mapping, reverse=False):
"""
renames tables from source to destination name, if the source exists and the destination does
not exist yet.
"""
from django.db import connection
if reverse:
table_mapping = [(dst, src) for src, dst in table_mapping]
table_names = connection.introspection.table_names()
for source, destination in table_mapping:
if source in table_names and destination in table_names:
print(u" WARNING: not renaming {0} to {1}, because both tables already exist.".format(source, destination))
elif source in table_names and destination not in table_names:
print(u" - renaming {0} to {1}".format(source, destination))
db.rename_table(source, destination) | renames tables from source to destination name, if the source exists and the destination does
not exist yet. | Below is the the instruction that describes the task:
### Input:
renames tables from source to destination name, if the source exists and the destination does
not exist yet.
### Response:
def rename_tables(db, table_mapping, reverse=False):
"""
renames tables from source to destination name, if the source exists and the destination does
not exist yet.
"""
from django.db import connection
if reverse:
table_mapping = [(dst, src) for src, dst in table_mapping]
table_names = connection.introspection.table_names()
for source, destination in table_mapping:
if source in table_names and destination in table_names:
print(u" WARNING: not renaming {0} to {1}, because both tables already exist.".format(source, destination))
elif source in table_names and destination not in table_names:
print(u" - renaming {0} to {1}".format(source, destination))
db.rename_table(source, destination) |
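A hypothetical call to the helper above from a South-style migration; the table names, and the assumption that South's db module is in use, are illustrations only:

from south.db import db   # assumed environment; South migrations expose this

TABLE_MAPPING = [
    ('cmsplugin_oldname', 'myapp_newname'),
    ('cmsplugin_other', 'myapp_other'),
]

rename_tables(db, TABLE_MAPPING)                 # forwards
rename_tables(db, TABLE_MAPPING, reverse=True)   # backwards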
def create_x_axis(self, name, label=None, format=None, date=False, custom_format=False):
"""Create X-axis"""
axis = {}
if custom_format and format:
axis['tickFormat'] = format
elif format:
if format == 'AM_PM':
axis['tickFormat'] = "function(d) { return get_am_pm(parseInt(d)); }"
else:
axis['tickFormat'] = "d3.format(',%s')" % format
if label:
axis['axisLabel'] = "'" + label + "'"
# date format : see https://github.com/mbostock/d3/wiki/Time-Formatting
if date:
self.dateformat = format
axis['tickFormat'] = ("function(d) { return d3.time.format('%s')"
"(new Date(parseInt(d))) }\n"
"" % self.dateformat)
# flag if the x Axis is a date
if name[0] == 'x':
self.x_axis_date = True
# Add new axis to list of axis
self.axislist[name] = axis
# Create x2Axis if focus_enable
if name == "xAxis" and self.focus_enable:
self.axislist['x2Axis'] = axis | Create X-axis | Below is the the instruction that describes the task:
### Input:
Create X-axis
### Response:
def create_x_axis(self, name, label=None, format=None, date=False, custom_format=False):
"""Create X-axis"""
axis = {}
if custom_format and format:
axis['tickFormat'] = format
elif format:
if format == 'AM_PM':
axis['tickFormat'] = "function(d) { return get_am_pm(parseInt(d)); }"
else:
axis['tickFormat'] = "d3.format(',%s')" % format
if label:
axis['axisLabel'] = "'" + label + "'"
# date format : see https://github.com/mbostock/d3/wiki/Time-Formatting
if date:
self.dateformat = format
axis['tickFormat'] = ("function(d) { return d3.time.format('%s')"
"(new Date(parseInt(d))) }\n"
"" % self.dateformat)
# flag if the x Axis is a date
if name[0] == 'x':
self.x_axis_date = True
# Add new axis to list of axis
self.axislist[name] = axis
# Create x2Axis if focus_enable
if name == "xAxis" and self.focus_enable:
self.axislist['x2Axis'] = axis |
def thub(data, n):
"""
Tee or "T" hub auto-copier to help working with Stream instances as well as
with numbers.
Parameters
----------
data :
Input to be copied. Can be anything.
n :
Number of copies.
Returns
-------
A StreamTeeHub instance, if input data is iterable.
The data itself, otherwise.
Examples
--------
>>> def sub_sum(x, y):
... x = thub(x, 2) # Casts to StreamTeeHub, when needed
... y = thub(y, 2)
... return (x - y) / (x + y) # Return type might be number or Stream
With numbers:
>>> sub_sum(1, 1.)
0.0
Combining number with iterable:
>>> sub_sum(3., [1, 2, 3])
<audiolazy.lazy_stream.Stream object at 0x...>
>>> list(sub_sum(3., [1, 2, 3]))
[0.5, 0.2, 0.0]
Both iterables (the Stream input behaves like an endless [6, 1, 6, 1, ...]):
>>> list(sub_sum([4., 3., 2., 1.], [1, 2, 3]))
[0.6, 0.2, -0.2]
>>> list(sub_sum([4., 3., 2., 1.], Stream(6, 1)))
[-0.2, 0.5, -0.5, 0.0]
This function can also be used as an alternative to the Stream
constructor when your function has only one parameter, to avoid casting
when that's not needed:
>>> func = lambda x: 250 * thub(x, 1)
>>> func(1)
250
>>> func([2] * 10)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> func([2] * 10).take(5)
[500, 500, 500, 500, 500]
"""
return StreamTeeHub(data, n) if isinstance(data, Iterable) else data | Tee or "T" hub auto-copier to help working with Stream instances as well as
with numbers.
Parameters
----------
data :
Input to be copied. Can be anything.
n :
Number of copies.
Returns
-------
A StreamTeeHub instance, if input data is iterable.
The data itself, otherwise.
Examples
--------
>>> def sub_sum(x, y):
... x = thub(x, 2) # Casts to StreamTeeHub, when needed
... y = thub(y, 2)
... return (x - y) / (x + y) # Return type might be number or Stream
With numbers:
>>> sub_sum(1, 1.)
0.0
Combining number with iterable:
>>> sub_sum(3., [1, 2, 3])
<audiolazy.lazy_stream.Stream object at 0x...>
>>> list(sub_sum(3., [1, 2, 3]))
[0.5, 0.2, 0.0]
Both iterables (the Stream input behaves like an endless [6, 1, 6, 1, ...]):
>>> list(sub_sum([4., 3., 2., 1.], [1, 2, 3]))
[0.6, 0.2, -0.2]
>>> list(sub_sum([4., 3., 2., 1.], Stream(6, 1)))
[-0.2, 0.5, -0.5, 0.0]
This function can also be used as an alternative to the Stream
constructor when your function has only one parameter, to avoid casting
when that's not needed:
>>> func = lambda x: 250 * thub(x, 1)
>>> func(1)
250
>>> func([2] * 10)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> func([2] * 10).take(5)
[500, 500, 500, 500, 500] | Below is the the instruction that describes the task:
### Input:
Tee or "T" hub auto-copier to help working with Stream instances as well as
with numbers.
Parameters
----------
data :
Input to be copied. Can be anything.
n :
Number of copies.
Returns
-------
A StreamTeeHub instance, if input data is iterable.
The data itself, otherwise.
Examples
--------
>>> def sub_sum(x, y):
... x = thub(x, 2) # Casts to StreamTeeHub, when needed
... y = thub(y, 2)
... return (x - y) / (x + y) # Return type might be number or Stream
With numbers:
>>> sub_sum(1, 1.)
0.0
Combining number with iterable:
>>> sub_sum(3., [1, 2, 3])
<audiolazy.lazy_stream.Stream object at 0x...>
>>> list(sub_sum(3., [1, 2, 3]))
[0.5, 0.2, 0.0]
Both iterables (the Stream input behaves like an endless [6, 1, 6, 1, ...]):
>>> list(sub_sum([4., 3., 2., 1.], [1, 2, 3]))
[0.6, 0.2, -0.2]
>>> list(sub_sum([4., 3., 2., 1.], Stream(6, 1)))
[-0.2, 0.5, -0.5, 0.0]
This function can also be used as an alternative to the Stream
constructor when your function has only one parameter, to avoid casting
when that's not needed:
>>> func = lambda x: 250 * thub(x, 1)
>>> func(1)
250
>>> func([2] * 10)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> func([2] * 10).take(5)
[500, 500, 500, 500, 500]
### Response:
def thub(data, n):
"""
Tee or "T" hub auto-copier to help working with Stream instances as well as
with numbers.
Parameters
----------
data :
Input to be copied. Can be anything.
n :
Number of copies.
Returns
-------
A StreamTeeHub instance, if input data is iterable.
The data itself, otherwise.
Examples
--------
>>> def sub_sum(x, y):
... x = thub(x, 2) # Casts to StreamTeeHub, when needed
... y = thub(y, 2)
... return (x - y) / (x + y) # Return type might be number or Stream
With numbers:
>>> sub_sum(1, 1.)
0.0
Combining number with iterable:
>>> sub_sum(3., [1, 2, 3])
<audiolazy.lazy_stream.Stream object at 0x...>
>>> list(sub_sum(3., [1, 2, 3]))
[0.5, 0.2, 0.0]
Both iterables (the Stream input behaves like an endless [6, 1, 6, 1, ...]):
>>> list(sub_sum([4., 3., 2., 1.], [1, 2, 3]))
[0.6, 0.2, -0.2]
>>> list(sub_sum([4., 3., 2., 1.], Stream(6, 1)))
[-0.2, 0.5, -0.5, 0.0]
This function can also be used as an alternative to the Stream
constructor when your function has only one parameter, to avoid casting
when that's not needed:
>>> func = lambda x: 250 * thub(x, 1)
>>> func(1)
250
>>> func([2] * 10)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> func([2] * 10).take(5)
[500, 500, 500, 500, 500]
"""
return StreamTeeHub(data, n) if isinstance(data, Iterable) else data |
def add_resources(self, resources):
"""
Adds new resources to the event.
*resources* can be a list of email addresses or :class:`ExchangeEventAttendee` objects.
"""
new_resources = self._build_resource_dictionary(resources)
for key in new_resources:
self._resources[key] = new_resources[key]
self._dirty_attributes.add(u'resources') | Adds new resources to the event.
*resources* can be a list of email addresses or :class:`ExchangeEventAttendee` objects. | Below is the the instruction that describes the task:
### Input:
Adds new resources to the event.
*resources* can be a list of email addresses or :class:`ExchangeEventAttendee` objects.
### Response:
def add_resources(self, resources):
"""
Adds new resources to the event.
*resources* can be a list of email addresses or :class:`ExchangeEventAttendee` objects.
"""
new_resources = self._build_resource_dictionary(resources)
for key in new_resources:
self._resources[key] = new_resources[key]
self._dirty_attributes.add(u'resources') |
def edit(self, resource):
"""Edit a job.
:param resource: :class:`jobs.Job <jobs.Job>` object
:return: :class:`jobs.Job <jobs.Job>` object
:rtype: jobs.Job
"""
schema = JobSchema(exclude=('id', 'status', 'options', 'package_name', 'config_name', 'device_name', 'result_id', 'user_id', 'created', 'updated', 'automatic', 'run_at'))
json = self.service.encode(schema, resource)
schema = JobSchema()
resp = self.service.edit(self.base, resource.name, json)
return self.service.decode(schema, resp) | Edit a job.
:param resource: :class:`jobs.Job <jobs.Job>` object
:return: :class:`jobs.Job <jobs.Job>` object
:rtype: jobs.Job | Below is the the instruction that describes the task:
### Input:
Edit a job.
:param resource: :class:`jobs.Job <jobs.Job>` object
:return: :class:`jobs.Job <jobs.Job>` object
:rtype: jobs.Job
### Response:
def edit(self, resource):
"""Edit a job.
:param resource: :class:`jobs.Job <jobs.Job>` object
:return: :class:`jobs.Job <jobs.Job>` object
:rtype: jobs.Job
"""
schema = JobSchema(exclude=('id', 'status', 'options', 'package_name', 'config_name', 'device_name', 'result_id', 'user_id', 'created', 'updated', 'automatic', 'run_at'))
json = self.service.encode(schema, resource)
schema = JobSchema()
resp = self.service.edit(self.base, resource.name, json)
return self.service.decode(schema, resp) |
def get_embedded_items(result_collection):
"""
Given a result_collection (returned by a previous API call that
returns a collection, like get_bundle_list() or search()), return a
list of embedded items with each item in the returned list
considered a result object.
'result_collection' is a JSON object returned by a previous API
call. The parameter 'embed_items' must have been True when the
result_collection was originally requested. May not be None.
Returns a list, which may be empty if no embedded items were found.
"""
# Argument error checking.
assert result_collection is not None
result = []
embedded_objects = result_collection.get('_embedded')
if embedded_objects is not None:
# Handle being passed a non-collection gracefully.
result = embedded_objects.get('items', result)
return result | Given a result_collection (returned by a previous API call that
returns a collection, like get_bundle_list() or search()), return a
list of embedded items with each item in the returned list
considered a result object.
'result_collection' is a JSON object returned by a previous API
call. The parameter 'embed_items' must have been True when the
result_collection was originally requested. May not be None.
Returns a list, which may be empty if no embedded items were found. | Below is the the instruction that describes the task:
### Input:
Given a result_collection (returned by a previous API call that
returns a collection, like get_bundle_list() or search()), return a
list of embedded items with each item in the returned list
considered a result object.
'result_collection' is a JSON object returned by a previous API
call. The parameter 'embed_items' must have been True when the
result_collection was originally requested. May not be None.
Returns a list, which may be empty if no embedded items were found.
### Response:
def get_embedded_items(result_collection):
"""
Given a result_collection (returned by a previous API call that
returns a collection, like get_bundle_list() or search()), return a
list of embedded items with each item in the returned list
considered a result object.
'result_collection' is a JSON object returned by a previous API
call. The parameter 'embed_items' must have been True when the
result_collection was originally requested. May not be None.
Returns a list, which may be empty if no embedded items were found.
"""
# Argument error checking.
assert result_collection is not None
result = []
embedded_objects = result_collection.get('_embedded')
if embedded_objects is not None:
# Handle being passed a non-collection gracefully.
result = embedded_objects.get('items', result)
return result |
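A self-contained sketch of the shape get_embedded_items expects, i.e. a collection carrying an _embedded.items list; the payload below is invented:

result_collection = {
    '_embedded': {
        'items': [
            {'uuid': 'bundle-1', 'description': 'first bundle'},
            {'uuid': 'bundle-2', 'description': 'second bundle'},
        ]
    }
}

for item in get_embedded_items(result_collection):
    print(item['uuid'])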
def _item_to_metric(iterator, log_metric_pb):
"""Convert a metric protobuf to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type log_metric_pb:
:class:`.logging_metrics_pb2.LogMetric`
:param log_metric_pb: Metric protobuf returned from the API.
:rtype: :class:`~google.cloud.logging.metric.Metric`
:returns: The next metric in the page.
"""
# NOTE: LogMetric message type does not have an ``Any`` field
# so ``MessageToDict`` can safely be used.
resource = MessageToDict(log_metric_pb)
return Metric.from_api_repr(resource, iterator.client) | Convert a metric protobuf to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type log_metric_pb:
:class:`.logging_metrics_pb2.LogMetric`
:param log_metric_pb: Metric protobuf returned from the API.
:rtype: :class:`~google.cloud.logging.metric.Metric`
:returns: The next metric in the page. | Below is the the instruction that describes the task:
### Input:
Convert a metric protobuf to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type log_metric_pb:
:class:`.logging_metrics_pb2.LogMetric`
:param log_metric_pb: Metric protobuf returned from the API.
:rtype: :class:`~google.cloud.logging.metric.Metric`
:returns: The next metric in the page.
### Response:
def _item_to_metric(iterator, log_metric_pb):
"""Convert a metric protobuf to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type log_metric_pb:
:class:`.logging_metrics_pb2.LogMetric`
:param log_metric_pb: Metric protobuf returned from the API.
:rtype: :class:`~google.cloud.logging.metric.Metric`
:returns: The next metric in the page.
"""
# NOTE: LogMetric message type does not have an ``Any`` field
# so ``MessageToDict`` can safely be used.
resource = MessageToDict(log_metric_pb)
return Metric.from_api_repr(resource, iterator.client) |
def switch_to_window(self, window_name=None, title=None, url=None):
"""
WebDriver implements switching to another window only by its name. With
the wrapper there is also the option to switch by the window title or URL. The URL
can also be a relative path.
"""
if window_name:
self.switch_to.window(window_name)
return
if url:
url = self.get_url(path=url)
for window_handle in self.window_handles:
self.switch_to.window(window_handle)
if title and self.title == title:
return
if url and self.current_url == url:
return
raise selenium_exc.NoSuchWindowException('Window (title=%s, url=%s) not found.' % (title, url)) | WebDriver implements switching to another window only by its name. With
the wrapper there is also the option to switch by the window title or URL. The URL
can also be a relative path. | Below is the the instruction that describes the task:
### Input:
WebDriver implements switching to another window only by its name. With
the wrapper there is also the option to switch by the window title or URL. The URL
can also be a relative path.
### Response:
def switch_to_window(self, window_name=None, title=None, url=None):
"""
WebDriver implements switching to another window only by its name. With
the wrapper there is also the option to switch by the window title or URL. The URL
can also be a relative path.
"""
if window_name:
self.switch_to.window(window_name)
return
if url:
url = self.get_url(path=url)
for window_handle in self.window_handles:
self.switch_to.window(window_handle)
if title and self.title == title:
return
if url and self.current_url == url:
return
raise selenium_exc.NoSuchWindowException('Window (title=%s, url=%s) not found.' % (title, url)) |
def _complete_execution(self, g):
"""Forward any raised exceptions across a channel."""
# Triggered via completion callback.
#
# Runs in its own greenlet, so take care to forward the
# exception, if any, to fail the entire transfer in event of
# trouble.
assert g.ready()
self.greenlets.remove(g)
placed = UserCritical(msg='placeholder bogus exception',
hint='report a bug')
if g.successful():
try:
segment = g.get()
if not segment.explicit:
segment.mark_done()
except BaseException as e:
# Absorb and forward exceptions across the channel.
placed = e
else:
placed = None
else:
placed = g.exception
self.wait_change.put(placed) | Forward any raised exceptions across a channel. | Below is the the instruction that describes the task:
### Input:
Forward any raised exceptions across a channel.
### Response:
def _complete_execution(self, g):
"""Forward any raised exceptions across a channel."""
# Triggered via completion callback.
#
# Runs in its own greenlet, so take care to forward the
# exception, if any, to fail the entire transfer in event of
# trouble.
assert g.ready()
self.greenlets.remove(g)
placed = UserCritical(msg='placeholder bogus exception',
hint='report a bug')
if g.successful():
try:
segment = g.get()
if not segment.explicit:
segment.mark_done()
except BaseException as e:
# Absorb and forward exceptions across the channel.
placed = e
else:
placed = None
else:
placed = g.exception
self.wait_change.put(placed) |
def normalize_example(self, example, hparams):
"""Assumes that example contains both inputs and targets."""
length = self.max_length(hparams)
def _to_constant_shape(tensor):
tensor = tensor[:length]
tensor = tf.pad(tensor, [(0, length - tf.shape(tensor)[0])])
return tf.reshape(tensor, [length])
if self.has_inputs:
example['inputs'] = _to_constant_shape(example['inputs'])
example['targets'] = _to_constant_shape(example['targets'])
elif 'inputs' in example:
if self.packed_length:
raise ValueError('cannot concatenate packed examples on the fly.')
inputs = example.pop('inputs')[:-1] # Remove EOS token.
targets = tf.concat([inputs, example['targets']], 0)
example['targets'] = _to_constant_shape(targets)
else:
example['targets'] = _to_constant_shape(example['targets'])
if self.packed_length:
if self.has_inputs:
if 'inputs_segmentation' in example:
example['inputs_segmentation'] = _to_constant_shape(
example['inputs_segmentation'])
example['inputs_position'] = _to_constant_shape(
example['inputs_position'])
else:
example['inputs_segmentation'] = tf.to_int64(
tf.not_equal(example['inputs'], 0))
example['inputs_position'] = (
example['inputs_segmentation'] * tf.range(length, dtype=tf.int64))
if 'targets_segmentation' in example:
example['targets_segmentation'] = _to_constant_shape(
example['targets_segmentation'])
example['targets_position'] = _to_constant_shape(
example['targets_position'])
else:
example['targets_segmentation'] = tf.to_int64(
tf.not_equal(example['targets'], 0))
example['targets_position'] = (
example['targets_segmentation'] * tf.range(length, dtype=tf.int64))
return example | Assumes that example contains both inputs and targets. | Below is the the instruction that describes the task:
### Input:
Assumes that example contains both inputs and targets.
### Response:
def normalize_example(self, example, hparams):
"""Assumes that example contains both inputs and targets."""
length = self.max_length(hparams)
def _to_constant_shape(tensor):
tensor = tensor[:length]
tensor = tf.pad(tensor, [(0, length - tf.shape(tensor)[0])])
return tf.reshape(tensor, [length])
if self.has_inputs:
example['inputs'] = _to_constant_shape(example['inputs'])
example['targets'] = _to_constant_shape(example['targets'])
elif 'inputs' in example:
if self.packed_length:
raise ValueError('cannot concatenate packed examples on the fly.')
inputs = example.pop('inputs')[:-1] # Remove EOS token.
targets = tf.concat([inputs, example['targets']], 0)
example['targets'] = _to_constant_shape(targets)
else:
example['targets'] = _to_constant_shape(example['targets'])
if self.packed_length:
if self.has_inputs:
if 'inputs_segmentation' in example:
example['inputs_segmentation'] = _to_constant_shape(
example['inputs_segmentation'])
example['inputs_position'] = _to_constant_shape(
example['inputs_position'])
else:
example['inputs_segmentation'] = tf.to_int64(
tf.not_equal(example['inputs'], 0))
example['inputs_position'] = (
example['inputs_segmentation'] * tf.range(length, dtype=tf.int64))
if 'targets_segmentation' in example:
example['targets_segmentation'] = _to_constant_shape(
example['targets_segmentation'])
example['targets_position'] = _to_constant_shape(
example['targets_position'])
else:
example['targets_segmentation'] = tf.to_int64(
tf.not_equal(example['targets'], 0))
example['targets_position'] = (
example['targets_segmentation'] * tf.range(length, dtype=tf.int64))
return example |
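The inner _to_constant_shape helper is just truncate-then-right-pad-with-zeros; the same idea in plain numpy (the length and example ids are invented):

import numpy as np

def to_constant_shape(ids, length):
    ids = np.asarray(ids[:length])              # truncate to at most `length`
    return np.pad(ids, (0, length - ids.shape[0]), mode='constant')

print(to_constant_shape([5, 8, 13, 2], 8))       # [ 5  8 13  2  0  0  0  0]
print(to_constant_shape(list(range(12)), 8))     # truncated to the first 8 ids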
def clear_messages(self):
"""
Clears all messages.
"""
while len(self._messages):
msg = self._messages.pop(0)
usd = msg.block.userData()
if usd and hasattr(usd, 'messages'):
usd.messages[:] = []
if msg.decoration:
self.editor.decorations.remove(msg.decoration) | Clears all messages. | Below is the the instruction that describes the task:
### Input:
Clears all messages.
### Response:
def clear_messages(self):
"""
Clears all messages.
"""
while len(self._messages):
msg = self._messages.pop(0)
usd = msg.block.userData()
if usd and hasattr(usd, 'messages'):
usd.messages[:] = []
if msg.decoration:
self.editor.decorations.remove(msg.decoration) |
def create_optparser(progname=None):
"""
an OptionParser instance filled with options and groups
appropriate for use with the distdiff command
"""
from . import report
parser = ArgumentParser(prog=progname)
parser.add_argument("dist", nargs=2,
help="distributions to compare")
add_general_optgroup(parser)
add_distdiff_optgroup(parser)
add_jardiff_optgroup(parser)
add_classdiff_optgroup(parser)
report.add_general_report_optgroup(parser)
report.add_json_report_optgroup(parser)
report.add_html_report_optgroup(parser)
return parser | an OptionParser instance filled with options and groups
appropriate for use with the distdiff command | Below is the the instruction that describes the task:
### Input:
an OptionParser instance filled with options and groups
appropriate for use with the distdiff command
### Response:
def create_optparser(progname=None):
"""
an OptionParser instance filled with options and groups
appropriate for use with the distdiff command
"""
from . import report
parser = ArgumentParser(prog=progname)
parser.add_argument("dist", nargs=2,
help="distributions to compare")
add_general_optgroup(parser)
add_distdiff_optgroup(parser)
add_jardiff_optgroup(parser)
add_classdiff_optgroup(parser)
report.add_general_report_optgroup(parser)
report.add_json_report_optgroup(parser)
report.add_html_report_optgroup(parser)
return parser |
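The add_*_optgroup helpers referenced above are project-specific and not shown here; as a hedged sketch of the same argparse pattern, a stripped-down parser with one hypothetical option group might look like this (the option name and file names are illustrative only).

from argparse import ArgumentParser

def make_parser(progname=None):
    # Same shape as create_optparser: a positional pair plus grouped options.
    parser = ArgumentParser(prog=progname)
    parser.add_argument("dist", nargs=2, help="distributions to compare")
    report = parser.add_argument_group("report options")
    report.add_argument("--report-dir", default=None)  # hypothetical option
    return parser

args = make_parser("distdiff").parse_args(["old-1.0.zip", "new-1.1.zip"])
print(args.dist)  # ['old-1.0.zip', 'new-1.1.zip']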
def level_grouper(text, getreffs, level=None, groupby=20):
""" Alternative to level_chunker: groups levels together at the latest level
:param text: Text object
:param getreffs: GetValidReff query callback
:param level: Level of citation to retrieve
:param groupby: Number of level to groupby
:return: Automatically curated references
"""
if level is None or level > len(text.citation):
level = len(text.citation)
references = [ref.split(":")[-1] for ref in getreffs(level=level)]
_refs = OrderedDict()
for key in references:
k = ".".join(key.split(".")[:level-1])
if k not in _refs:
_refs[k] = []
_refs[k].append(key)
del k
return [
(
join_or_single(ref[0], ref[-1]),
join_or_single(ref[0], ref[-1])
)
for sublist in _refs.values()
for ref in [
sublist[i:i+groupby]
for i in range(0, len(sublist), groupby)
]
] | Alternative to level_chunker: groups levels together at the latest level
:param text: Text object
:param getreffs: GetValidReff query callback
:param level: Level of citation to retrieve
:param groupby: Number of level to groupby
:return: Automatically curated references | Below is the the instruction that describes the task:
### Input:
Alternative to level_chunker: groups levels together at the latest level
:param text: Text object
:param getreffs: GetValidReff query callback
:param level: Level of citation to retrieve
:param groupby: Number of level to groupby
:return: Automatically curated references
### Response:
def level_grouper(text, getreffs, level=None, groupby=20):
""" Alternative to level_chunker: groups levels together at the latest level
:param text: Text object
:param getreffs: GetValidReff query callback
:param level: Level of citation to retrieve
:param groupby: Number of level to groupby
:return: Automatically curated references
"""
if level is None or level > len(text.citation):
level = len(text.citation)
references = [ref.split(":")[-1] for ref in getreffs(level=level)]
_refs = OrderedDict()
for key in references:
k = ".".join(key.split(".")[:level-1])
if k not in _refs:
_refs[k] = []
_refs[k].append(key)
del k
return [
(
join_or_single(ref[0], ref[-1]),
join_or_single(ref[0], ref[-1])
)
for sublist in _refs.values()
for ref in [
sublist[i:i+groupby]
for i in range(0, len(sublist), groupby)
]
] |
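The grouping logic above can be exercised on its own; the sketch below inlines a trivial stand-in for the project-specific join_or_single helper and uses made-up reference strings, so it only approximates the real function.

from collections import OrderedDict

def group_refs(references, level=2, groupby=3):
    # Bucket references by their parent citation, then emit ranges of at
    # most `groupby` members, mirroring the loop in level_grouper.
    join_or_single = lambda a, b: a if a == b else "%s-%s" % (a, b)  # stand-in
    buckets = OrderedDict()
    for ref in references:
        parent = ".".join(ref.split(".")[:level - 1])
        buckets.setdefault(parent, []).append(ref)
    return [
        join_or_single(chunk[0], chunk[-1])
        for refs in buckets.values()
        for chunk in (refs[i:i + groupby] for i in range(0, len(refs), groupby))
    ]

print(group_refs(["1.1", "1.2", "1.3", "1.4", "2.1", "2.2"]))
# ['1.1-1.3', '1.4', '2.1-2.2']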
def _put_pages(self):
""" First, the Document object does the heavy-lifting for the
individual page objects and content.
Then, the overall "Pages" object is generated.
"""
self.document._get_orientation_changes()
self.document._output_pages()
# Pages Object, provides reference to page objects (Kids list).
self.session._add_object(1)
self.session._out('<</Type /Pages')
kids = '/Kids ['
for i in xrange(0, len(self.document.pages)):
kids += str(3 + 2 * i) + ' 0 R '
self.session._out(kids + ']')
self.session._out('/Count %s' % len(self.document.pages))
# Overall size of the default PDF page
self.session._out('/MediaBox [0 0 %.2f %.2f]' %
(self.document.page.width,
self.document.page.height))
self.session._out('>>')
self.session._out('endobj') | First, the Document object does the heavy-lifting for the
individual page objects and content.
Then, the overall "Pages" object is generated. | Below is the the instruction that describes the task:
### Input:
First, the Document object does the heavy-lifting for the
individual page objects and content.
Then, the overall "Pages" object is generated.
### Response:
def _put_pages(self):
""" First, the Document object does the heavy-lifting for the
individual page objects and content.
Then, the overall "Pages" object is generated.
"""
self.document._get_orientation_changes()
self.document._output_pages()
# Pages Object, provides reference to page objects (Kids list).
self.session._add_object(1)
self.session._out('<</Type /Pages')
kids = '/Kids ['
for i in xrange(0, len(self.document.pages)):
kids += str(3 + 2 * i) + ' 0 R '
self.session._out(kids + ']')
self.session._out('/Count %s' % len(self.document.pages))
# Overall size of the default PDF page
self.session._out('/MediaBox [0 0 %.2f %.2f]' %
(self.document.page.width,
self.document.page.height))
self.session._out('>>')
self.session._out('endobj') |
def on_person_new(self, people):
"""
Add new people
All people supported need to be added simultaneously,
        since on every call an unjoin() followed by a join() is issued

:param people: People to add
:type people: list[paps.people.People]
:rtype: None
:raises Exception: On error (for now just an exception)
"""
try:
self.on_person_leave([])
except:
# Already caught and logged
pass
try:
self.sensor_client.join(people)
except:
self.exception("Failed to join audience")
raise Exception("Joining audience failed") | Add new people
All people supported need to be added simultaneously,
        since on every call an unjoin() followed by a join() is issued
:param people: People to add
:type people: list[paps.people.People]
:rtype: None
:raises Exception: On error (for now just an exception) | Below is the the instruction that describes the task:
### Input:
Add new people
All people supported need to be added simultaneously,
        since on every call an unjoin() followed by a join() is issued
:param people: People to add
:type people: list[paps.people.People]
:rtype: None
:raises Exception: On error (for now just an exception)
### Response:
def on_person_new(self, people):
"""
Add new people
All people supported need to be added simultaneously,
        since on every call an unjoin() followed by a join() is issued
:param people: People to add
:type people: list[paps.people.People]
:rtype: None
:raises Exception: On error (for now just an exception)
"""
try:
self.on_person_leave([])
except:
# Already caught and logged
pass
try:
self.sensor_client.join(people)
except:
self.exception("Failed to join audience")
raise Exception("Joining audience failed") |
def sign( self, hash, random_k ):
"""Return a signature for the provided hash, using the provided
random nonce. It is absolutely vital that random_k be an unpredictable
number in the range [1, self.public_key.point.order()-1]. If
an attacker can guess random_k, he can compute our private key from a
single signature. Also, if an attacker knows a few high-order
bits (or a few low-order bits) of random_k, he can compute our private
key from many signatures. The generation of nonces with adequate
cryptographic strength is very difficult and far beyond the scope
of this comment.
May raise RuntimeError, in which case retrying with a new
random value k is in order.
"""
G = self.public_key.generator
n = G.order()
k = random_k % n
p1 = k * G
r = p1.x()
if r == 0: raise RuntimeError("amazingly unlucky random number r")
s = ( numbertheory.inverse_mod( k, n ) * \
( hash + ( self.secret_multiplier * r ) % n ) ) % n
if s == 0: raise RuntimeError("amazingly unlucky random number s")
return Signature( r, s ) | Return a signature for the provided hash, using the provided
random nonce. It is absolutely vital that random_k be an unpredictable
number in the range [1, self.public_key.point.order()-1]. If
an attacker can guess random_k, he can compute our private key from a
single signature. Also, if an attacker knows a few high-order
bits (or a few low-order bits) of random_k, he can compute our private
key from many signatures. The generation of nonces with adequate
cryptographic strength is very difficult and far beyond the scope
of this comment.
May raise RuntimeError, in which case retrying with a new
random value k is in order. | Below is the the instruction that describes the task:
### Input:
Return a signature for the provided hash, using the provided
random nonce. It is absolutely vital that random_k be an unpredictable
number in the range [1, self.public_key.point.order()-1]. If
an attacker can guess random_k, he can compute our private key from a
single signature. Also, if an attacker knows a few high-order
bits (or a few low-order bits) of random_k, he can compute our private
key from many signatures. The generation of nonces with adequate
cryptographic strength is very difficult and far beyond the scope
of this comment.
May raise RuntimeError, in which case retrying with a new
random value k is in order.
### Response:
def sign( self, hash, random_k ):
"""Return a signature for the provided hash, using the provided
random nonce. It is absolutely vital that random_k be an unpredictable
number in the range [1, self.public_key.point.order()-1]. If
an attacker can guess random_k, he can compute our private key from a
single signature. Also, if an attacker knows a few high-order
bits (or a few low-order bits) of random_k, he can compute our private
key from many signatures. The generation of nonces with adequate
cryptographic strength is very difficult and far beyond the scope
of this comment.
May raise RuntimeError, in which case retrying with a new
random value k is in order.
"""
G = self.public_key.generator
n = G.order()
k = random_k % n
p1 = k * G
r = p1.x()
if r == 0: raise RuntimeError("amazingly unlucky random number r")
s = ( numbertheory.inverse_mod( k, n ) * \
( hash + ( self.secret_multiplier * r ) % n ) ) % n
if s == 0: raise RuntimeError("amazingly unlucky random number s")
return Signature( r, s ) |
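For reference, the two assignments in sign correspond to the textbook ECDSA signing equations (general cryptographic background, not taken from this library's documentation):

    r = (k * G).x mod n
    s = k^(-1) * (H(m) + d * r) mod n

where G is the curve generator of order n, d is the private key (secret_multiplier above), H(m) is the message hash, and k is the one-time nonce; reusing or leaking k lets an attacker solve these equations for d, which is exactly the warning in the docstring.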
def _iterative_matches(self, nodes):
"""Helper to iteratively yield the matches."""
nodelen = len(nodes)
if 0 >= self.min:
yield 0, {}
results = []
# generate matches that use just one alt from self.content
for alt in self.content:
for c, r in generate_matches(alt, nodes):
yield c, r
results.append((c, r))
# for each match, iterate down the nodes
while results:
new_results = []
for c0, r0 in results:
# stop if the entire set of nodes has been matched
if c0 < nodelen and c0 <= self.max:
for alt in self.content:
for c1, r1 in generate_matches(alt, nodes[c0:]):
if c1 > 0:
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
new_results.append((c0 + c1, r))
results = new_results | Helper to iteratively yield the matches. | Below is the the instruction that describes the task:
### Input:
Helper to iteratively yield the matches.
### Response:
def _iterative_matches(self, nodes):
"""Helper to iteratively yield the matches."""
nodelen = len(nodes)
if 0 >= self.min:
yield 0, {}
results = []
# generate matches that use just one alt from self.content
for alt in self.content:
for c, r in generate_matches(alt, nodes):
yield c, r
results.append((c, r))
# for each match, iterate down the nodes
while results:
new_results = []
for c0, r0 in results:
# stop if the entire set of nodes has been matched
if c0 < nodelen and c0 <= self.max:
for alt in self.content:
for c1, r1 in generate_matches(alt, nodes[c0:]):
if c1 > 0:
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
new_results.append((c0 + c1, r))
results = new_results |
def letter_set(self):
"""
Return the letter set of this node.
"""
end_str = ctypes.create_string_buffer(MAX_CHARS)
cgaddag.gdg_letter_set(self.gdg, self.node, end_str)
return [char for char in end_str.value.decode("ascii")] | Return the letter set of this node. | Below is the the instruction that describes the task:
### Input:
Return the letter set of this node.
### Response:
def letter_set(self):
"""
Return the letter set of this node.
"""
end_str = ctypes.create_string_buffer(MAX_CHARS)
cgaddag.gdg_letter_set(self.gdg, self.node, end_str)
return [char for char in end_str.value.decode("ascii")] |
def get_posts(self, count=10, offset=0, recent=True, tag=None,
user_id=None, include_draft=False):
"""
Get posts given by filter criteria
:param count: The number of posts to retrieve (default 10)
:type count: int
:param offset: The number of posts to offset (default 0)
:type offset: int
:param recent: Order by recent posts or not
:type recent: bool
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: A list of posts, with each element a dict containing values
for the following keys: (title, text, draft, post_date,
last_modified_date). If count is ``None``, then all the posts are
returned.
"""
user_id = str(user_id) if user_id else user_id
with self._engine.begin() as conn:
try:
# post_statement ensures the correct posts are selected
# in the correct order
post_statement = sqla.select([self._post_table])
post_filter = self._get_filter(
tag, user_id, include_draft, conn
)
if post_filter is not None:
post_statement = post_statement.where(post_filter)
if count:
post_statement = post_statement.limit(count)
if offset:
post_statement = post_statement.offset(offset)
post_ordering = \
sqla.desc(self._post_table.c.post_date) if recent \
else self._post_table.c.post_date
post_statement = post_statement.order_by(post_ordering)
post_statement = post_statement.alias('post')
# joined_statement ensures other data is retrieved
joined_statement = post_statement.join(self._tag_posts_table) \
.join(self._tag_table) \
.join(self._user_posts_table) \
.alias('join')
joined_ordering = \
sqla.desc(joined_statement.c.post_post_date) if recent \
else joined_statement.c.post_post_date
joined_statement = sqla.select([joined_statement]) \
.order_by(joined_ordering)
all_rows = conn.execute(joined_statement).fetchall()
result = \
self._serialise_posts_and_tags_from_joined_rows(all_rows)
except Exception as e:
self._logger.exception(str(e))
result = []
return result | Get posts given by filter criteria
:param count: The number of posts to retrieve (default 10)
:type count: int
:param offset: The number of posts to offset (default 0)
:type offset: int
:param recent: Order by recent posts or not
:type recent: bool
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: A list of posts, with each element a dict containing values
for the following keys: (title, text, draft, post_date,
last_modified_date). If count is ``None``, then all the posts are
returned. | Below is the the instruction that describes the task:
### Input:
Get posts given by filter criteria
:param count: The number of posts to retrieve (default 10)
:type count: int
:param offset: The number of posts to offset (default 0)
:type offset: int
:param recent: Order by recent posts or not
:type recent: bool
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: A list of posts, with each element a dict containing values
for the following keys: (title, text, draft, post_date,
last_modified_date). If count is ``None``, then all the posts are
returned.
### Response:
def get_posts(self, count=10, offset=0, recent=True, tag=None,
user_id=None, include_draft=False):
"""
Get posts given by filter criteria
:param count: The number of posts to retrieve (default 10)
:type count: int
:param offset: The number of posts to offset (default 0)
:type offset: int
:param recent: Order by recent posts or not
:type recent: bool
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: A list of posts, with each element a dict containing values
for the following keys: (title, text, draft, post_date,
last_modified_date). If count is ``None``, then all the posts are
returned.
"""
user_id = str(user_id) if user_id else user_id
with self._engine.begin() as conn:
try:
# post_statement ensures the correct posts are selected
# in the correct order
post_statement = sqla.select([self._post_table])
post_filter = self._get_filter(
tag, user_id, include_draft, conn
)
if post_filter is not None:
post_statement = post_statement.where(post_filter)
if count:
post_statement = post_statement.limit(count)
if offset:
post_statement = post_statement.offset(offset)
post_ordering = \
sqla.desc(self._post_table.c.post_date) if recent \
else self._post_table.c.post_date
post_statement = post_statement.order_by(post_ordering)
post_statement = post_statement.alias('post')
# joined_statement ensures other data is retrieved
joined_statement = post_statement.join(self._tag_posts_table) \
.join(self._tag_table) \
.join(self._user_posts_table) \
.alias('join')
joined_ordering = \
sqla.desc(joined_statement.c.post_post_date) if recent \
else joined_statement.c.post_post_date
joined_statement = sqla.select([joined_statement]) \
.order_by(joined_ordering)
all_rows = conn.execute(joined_statement).fetchall()
result = \
self._serialise_posts_and_tags_from_joined_rows(all_rows)
except Exception as e:
self._logger.exception(str(e))
result = []
return result |
def Burr(c, k, tag=None):
"""
A Burr random variate
Parameters
----------
c : scalar
The first shape parameter
k : scalar
The second shape parameter
"""
assert c > 0 and k > 0, 'Burr "c" and "k" parameters must be greater than zero'
return uv(ss.burr(c, k), tag=tag) | A Burr random variate
Parameters
----------
c : scalar
The first shape parameter
k : scalar
The second shape parameter | Below is the the instruction that describes the task:
### Input:
A Burr random variate
Parameters
----------
c : scalar
The first shape parameter
k : scalar
The second shape parameter
### Response:
def Burr(c, k, tag=None):
"""
A Burr random variate
Parameters
----------
c : scalar
The first shape parameter
k : scalar
The second shape parameter
"""
assert c > 0 and k > 0, 'Burr "c" and "k" parameters must be greater than zero'
return uv(ss.burr(c, k), tag=tag) |
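Since uv() belongs to the surrounding uncertainty library and is not reproduced here, a hedged way to see which distribution the Burr constructor wraps is to call scipy.stats directly; the shape values below are arbitrary.

import scipy.stats as ss

rv = ss.burr(3.0, 2.0)                 # scipy names the two shape parameters c and d
print(rv.mean())                       # first moment of the frozen distribution
print(rv.rvs(size=3, random_state=0))  # a few random draws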
def get_trading_days(self, market, start=None, end=None):
"""获取交易日
:param market: 市场类型,Market_
:param start: 起始日期。例如'2018-01-01'。
:param end: 结束日期。例如'2018-01-01'。
start和end的组合如下:
========== ========== ========================================
start类型 end类型 说明
========== ========== ========================================
str str start和end分别为指定的日期
None str start为end往前365天
str None end为start往后365天
None None end为当前日期,start为end往前365天
========== ========== ========================================
:return: 成功时返回(RET_OK, data),data是字符串数组;失败时返回(RET_ERROR, data),其中data是错误描述字符串
"""
if market is None or is_str(market) is False:
error_str = ERROR_STR_PREFIX + "the type of market param is wrong"
return RET_ERROR, error_str
ret, msg, start, end = normalize_start_end_date(start, end, 365)
if ret != RET_OK:
return ret, msg
query_processor = self._get_sync_query_processor(
TradeDayQuery.pack_req, TradeDayQuery.unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {
'market': market,
'start_date': start,
'end_date': end,
'conn_id': self.get_sync_conn_id()
}
ret_code, msg, trade_day_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
        return RET_OK, trade_day_list | Get trading days
    :param market: market type, Market_
    :param start: start date, e.g. '2018-01-01'.
    :param end: end date, e.g. '2018-01-01'.
     The combinations of start and end are as follows:
     ========== ========== ========================================
     start type  end type  description
     ========== ========== ========================================
     str         str       start and end are the given dates
     None        str       start is 365 days before end
     str         None      end is 365 days after start
     None        None      end is today, start is 365 days before end
     ========== ========== ========================================
    :return: (RET_OK, data) on success, where data is a list of date strings; (RET_ERROR, data) on failure, where data is an error message string | Below is the the instruction that describes the task:
### Input:
Get trading days
:param market: market type, Market_
:param start: start date, e.g. '2018-01-01'.
:param end: end date, e.g. '2018-01-01'.
 The combinations of start and end are as follows:
 ========== ========== ========================================
 start type  end type  description
 ========== ========== ========================================
 str         str       start and end are the given dates
 None        str       start is 365 days before end
 str         None      end is 365 days after start
 None        None      end is today, start is 365 days before end
 ========== ========== ========================================
:return: (RET_OK, data) on success, where data is a list of date strings; (RET_ERROR, data) on failure, where data is an error message string
### Response:
def get_trading_days(self, market, start=None, end=None):
"""获取交易日
:param market: 市场类型,Market_
:param start: 起始日期。例如'2018-01-01'。
:param end: 结束日期。例如'2018-01-01'。
start和end的组合如下:
========== ========== ========================================
start类型 end类型 说明
========== ========== ========================================
str str start和end分别为指定的日期
None str start为end往前365天
str None end为start往后365天
None None end为当前日期,start为end往前365天
========== ========== ========================================
:return: 成功时返回(RET_OK, data),data是字符串数组;失败时返回(RET_ERROR, data),其中data是错误描述字符串
"""
if market is None or is_str(market) is False:
error_str = ERROR_STR_PREFIX + "the type of market param is wrong"
return RET_ERROR, error_str
ret, msg, start, end = normalize_start_end_date(start, end, 365)
if ret != RET_OK:
return ret, msg
query_processor = self._get_sync_query_processor(
TradeDayQuery.pack_req, TradeDayQuery.unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {
'market': market,
'start_date': start,
'end_date': end,
'conn_id': self.get_sync_conn_id()
}
ret_code, msg, trade_day_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
return RET_OK, trade_day_list |
def show_edge(self, edge_id):
"""Displays edge with ce_ratio.
:param edge_id: Edge ID for which to show the ce_ratio.
:type edge_id: int
"""
# pylint: disable=unused-variable,relative-import
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
if "faces" not in self.cells:
self.create_cell_face_relationships()
if "edges" not in self.faces:
self.create_face_edge_relationships()
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
plt.axis("equal")
# find all faces with this edge
adj_face_ids = numpy.where((self.faces["edges"] == edge_id).any(axis=1))[0]
# find all cells with the faces
# https://stackoverflow.com/a/38481969/353337
adj_cell_ids = numpy.where(
numpy.in1d(self.cells["faces"], adj_face_ids)
.reshape(self.cells["faces"].shape)
.any(axis=1)
)[0]
# plot all those adjacent cells; first collect all edges
adj_edge_ids = numpy.unique(
[
adj_edge_id
for adj_cell_id in adj_cell_ids
for face_id in self.cells["faces"][adj_cell_id]
for adj_edge_id in self.faces["edges"][face_id]
]
)
col = "k"
for adj_edge_id in adj_edge_ids:
x = self.node_coords[self.edges["nodes"][adj_edge_id]]
ax.plot(x[:, 0], x[:, 1], x[:, 2], col)
# make clear which is edge_id
x = self.node_coords[self.edges["nodes"][edge_id]]
ax.plot(x[:, 0], x[:, 1], x[:, 2], color=col, linewidth=3.0)
# connect the face circumcenters with the corresponding cell
# circumcenters
X = self.node_coords
for cell_id in adj_cell_ids:
cc = self._circumcenters[cell_id]
#
x = X[self.node_face_cells[..., [cell_id]]]
face_ccs = compute_triangle_circumcenters(x, self.ei_dot_ei, self.ei_dot_ej)
# draw the face circumcenters
ax.plot(
face_ccs[..., 0].flatten(),
face_ccs[..., 1].flatten(),
face_ccs[..., 2].flatten(),
"go",
)
# draw the connections
# tet circumcenter---face circumcenter
for face_cc in face_ccs:
ax.plot(
[cc[..., 0], face_cc[..., 0]],
[cc[..., 1], face_cc[..., 1]],
[cc[..., 2], face_cc[..., 2]],
"b-",
)
# draw the cell circumcenters
cc = self._circumcenters[adj_cell_ids]
ax.plot(cc[:, 0], cc[:, 1], cc[:, 2], "ro")
return | Displays edge with ce_ratio.
:param edge_id: Edge ID for which to show the ce_ratio.
:type edge_id: int | Below is the the instruction that describes the task:
### Input:
Displays edge with ce_ratio.
:param edge_id: Edge ID for which to show the ce_ratio.
:type edge_id: int
### Response:
def show_edge(self, edge_id):
"""Displays edge with ce_ratio.
:param edge_id: Edge ID for which to show the ce_ratio.
:type edge_id: int
"""
# pylint: disable=unused-variable,relative-import
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
if "faces" not in self.cells:
self.create_cell_face_relationships()
if "edges" not in self.faces:
self.create_face_edge_relationships()
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
plt.axis("equal")
# find all faces with this edge
adj_face_ids = numpy.where((self.faces["edges"] == edge_id).any(axis=1))[0]
# find all cells with the faces
# https://stackoverflow.com/a/38481969/353337
adj_cell_ids = numpy.where(
numpy.in1d(self.cells["faces"], adj_face_ids)
.reshape(self.cells["faces"].shape)
.any(axis=1)
)[0]
# plot all those adjacent cells; first collect all edges
adj_edge_ids = numpy.unique(
[
adj_edge_id
for adj_cell_id in adj_cell_ids
for face_id in self.cells["faces"][adj_cell_id]
for adj_edge_id in self.faces["edges"][face_id]
]
)
col = "k"
for adj_edge_id in adj_edge_ids:
x = self.node_coords[self.edges["nodes"][adj_edge_id]]
ax.plot(x[:, 0], x[:, 1], x[:, 2], col)
# make clear which is edge_id
x = self.node_coords[self.edges["nodes"][edge_id]]
ax.plot(x[:, 0], x[:, 1], x[:, 2], color=col, linewidth=3.0)
# connect the face circumcenters with the corresponding cell
# circumcenters
X = self.node_coords
for cell_id in adj_cell_ids:
cc = self._circumcenters[cell_id]
#
x = X[self.node_face_cells[..., [cell_id]]]
face_ccs = compute_triangle_circumcenters(x, self.ei_dot_ei, self.ei_dot_ej)
# draw the face circumcenters
ax.plot(
face_ccs[..., 0].flatten(),
face_ccs[..., 1].flatten(),
face_ccs[..., 2].flatten(),
"go",
)
# draw the connections
# tet circumcenter---face circumcenter
for face_cc in face_ccs:
ax.plot(
[cc[..., 0], face_cc[..., 0]],
[cc[..., 1], face_cc[..., 1]],
[cc[..., 2], face_cc[..., 2]],
"b-",
)
# draw the cell circumcenters
cc = self._circumcenters[adj_cell_ids]
ax.plot(cc[:, 0], cc[:, 1], cc[:, 2], "ro")
return |
def setMovie(self, movie):
"""
Sets the movie that will play for the given column.
:param movie | <QtGui.QMovie> || None
"""
mov = self.movie()
if mov is not None:
mov.frameChanged.disconnect(self._updateFrame)
if movie is not None:
self._movie = movie
self.setIcon(QtGui.QIcon(movie.currentPixmap()))
movie.frameChanged.connect(self._updateFrame)
widget = self.listWidget()
widget.destroyed.connect(self.cleanup)
else:
self._movie = None | Sets the movie that will play for the given column.
:param movie | <QtGui.QMovie> || None | Below is the the instruction that describes the task:
### Input:
Sets the movie that will play for the given column.
:param movie | <QtGui.QMovie> || None
### Response:
def setMovie(self, movie):
"""
Sets the movie that will play for the given column.
:param movie | <QtGui.QMovie> || None
"""
mov = self.movie()
if mov is not None:
mov.frameChanged.disconnect(self._updateFrame)
if movie is not None:
self._movie = movie
self.setIcon(QtGui.QIcon(movie.currentPixmap()))
movie.frameChanged.connect(self._updateFrame)
widget = self.listWidget()
widget.destroyed.connect(self.cleanup)
else:
self._movie = None |
def _write_min_gradient(self)->None:
"Writes the minimum of the gradients to Tensorboard."
min_gradient = min(x.data.min() for x in self.gradients)
self._add_gradient_scalar('min_gradient', scalar_value=min_gradient) | Writes the minimum of the gradients to Tensorboard. | Below is the the instruction that describes the task:
### Input:
Writes the minimum of the gradients to Tensorboard.
### Response:
def _write_min_gradient(self)->None:
"Writes the minimum of the gradients to Tensorboard."
min_gradient = min(x.data.min() for x in self.gradients)
self._add_gradient_scalar('min_gradient', scalar_value=min_gradient) |
def _text_attr(self, attr):
"""
Given a text attribute, set the current cursor appropriately.
"""
attr = text[attr]
if attr == "reset":
self.cursor_attributes = self.default_attributes
elif attr == "underline-off":
self.cursor_attributes = self._remove_text_attr("underline")
elif attr == "blink-off":
self.cursor_attributes = self._remove_text_attr("blink")
elif attr == "reverse-off":
self.cursor_attributes = self._remove_text_attr("reverse")
else:
self.cursor_attributes = self._add_text_attr(attr) | Given a text attribute, set the current cursor appropriately. | Below is the the instruction that describes the task:
### Input:
Given a text attribute, set the current cursor appropriately.
### Response:
def _text_attr(self, attr):
"""
Given a text attribute, set the current cursor appropriately.
"""
attr = text[attr]
if attr == "reset":
self.cursor_attributes = self.default_attributes
elif attr == "underline-off":
self.cursor_attributes = self._remove_text_attr("underline")
elif attr == "blink-off":
self.cursor_attributes = self._remove_text_attr("blink")
elif attr == "reverse-off":
self.cursor_attributes = self._remove_text_attr("reverse")
else:
self.cursor_attributes = self._add_text_attr(attr) |
def find_pingback_href(self, content):
"""
Try to find LINK markups to pingback URL.
"""
soup = BeautifulSoup(content, 'html.parser')
for link in soup.find_all('link'):
dict_attr = dict(link.attrs)
if 'rel' in dict_attr and 'href' in dict_attr:
for rel_type in dict_attr['rel']:
if rel_type.lower() == PINGBACK:
return dict_attr.get('href') | Try to find LINK markups to pingback URL. | Below is the the instruction that describes the task:
### Input:
Try to find LINK markups to pingback URL.
### Response:
def find_pingback_href(self, content):
"""
Try to find LINK markups to pingback URL.
"""
soup = BeautifulSoup(content, 'html.parser')
for link in soup.find_all('link'):
dict_attr = dict(link.attrs)
if 'rel' in dict_attr and 'href' in dict_attr:
for rel_type in dict_attr['rel']:
if rel_type.lower() == PINGBACK:
return dict_attr.get('href') |
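A small, self-contained usage sketch (assuming the PINGBACK constant is simply the string 'pingback'; the URL below is made up):

from bs4 import BeautifulSoup

html = """
<html><head>
  <link rel="stylesheet" href="/style.css">
  <link rel="pingback" href="https://example.com/xmlrpc.php">
</head></html>
"""

# Same scan as find_pingback_href, inlined: look for <link rel="pingback">.
for link in BeautifulSoup(html, 'html.parser').find_all('link'):
    attrs = dict(link.attrs)
    if 'pingback' in [rel.lower() for rel in attrs.get('rel', [])]:
        print(attrs['href'])  # https://example.com/xmlrpc.php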
def build_pyfile_path_from_docname(self, docfile):
"""Build the expected Python file name based on the given documentation file name.
:param str docfile: The documentation file name from which to build the Python file name.
:rtype: str
"""
name, ext = os.path.splitext(docfile)
expected_py_name = name.replace('.', '/') + '.py'
return expected_py_name | Build the expected Python file name based on the given documentation file name.
:param str docfile: The documentation file name from which to build the Python file name.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Build the expected Python file name based on the given documentation file name.
:param str docfile: The documentation file name from which to build the Python file name.
:rtype: str
### Response:
def build_pyfile_path_from_docname(self, docfile):
"""Build the expected Python file name based on the given documentation file name.
:param str docfile: The documentation file name from which to build the Python file name.
:rtype: str
"""
name, ext = os.path.splitext(docfile)
expected_py_name = name.replace('.', '/') + '.py'
return expected_py_name |
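The transformation is easy to check in isolation; the documentation filename below is made up for the example.

import os

def doc_to_py(docfile):
    # Same mapping as build_pyfile_path_from_docname, shown standalone.
    name, _ext = os.path.splitext(docfile)
    return name.replace('.', '/') + '.py'

print(doc_to_py('mypkg.utils.strings.rst'))  # mypkg/utils/strings.py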
def parse_options(files, env_prefix='CONFPY', strict=True):
"""Parse configuration options and return a configuration object.
Args:
files (iter of str): File paths which identify configuration files.
These files are processed in order with values in later files
overwriting values in earlier files.
env_prefix (str): The static prefix prepended to all options when set
as environment variables. The default is CONFPY.
strict (bool): Whether or not to parse the files in strict mode.
Returns:
confpy.core.config.Configuration: The loaded configuration object.
Raises:
MissingRequiredOption: If a required option is not defined in any file.
NamespaceNotRegistered: If a file contains a namespace which is not
defined.
OptionNotRegistered: If a file contains an option which is not defined
but resides under a valid namespace.
UnrecognizedFileExtension: If there is no loader for a path.
"""
return check_for_missing_options(
config=set_cli_options(
config=set_environment_var_options(
config=configuration_from_paths(
paths=files,
strict=strict,
),
prefix=env_prefix,
),
)
) | Parse configuration options and return a configuration object.
Args:
files (iter of str): File paths which identify configuration files.
These files are processed in order with values in later files
overwriting values in earlier files.
env_prefix (str): The static prefix prepended to all options when set
as environment variables. The default is CONFPY.
strict (bool): Whether or not to parse the files in strict mode.
Returns:
confpy.core.config.Configuration: The loaded configuration object.
Raises:
MissingRequiredOption: If a required option is not defined in any file.
NamespaceNotRegistered: If a file contains a namespace which is not
defined.
OptionNotRegistered: If a file contains an option which is not defined
but resides under a valid namespace.
UnrecognizedFileExtension: If there is no loader for a path. | Below is the the instruction that describes the task:
### Input:
Parse configuration options and return a configuration object.
Args:
files (iter of str): File paths which identify configuration files.
These files are processed in order with values in later files
overwriting values in earlier files.
env_prefix (str): The static prefix prepended to all options when set
as environment variables. The default is CONFPY.
strict (bool): Whether or not to parse the files in strict mode.
Returns:
confpy.core.config.Configuration: The loaded configuration object.
Raises:
MissingRequiredOption: If a required option is not defined in any file.
NamespaceNotRegistered: If a file contains a namespace which is not
defined.
OptionNotRegistered: If a file contains an option which is not defined
but resides under a valid namespace.
UnrecognizedFileExtension: If there is no loader for a path.
### Response:
def parse_options(files, env_prefix='CONFPY', strict=True):
"""Parse configuration options and return a configuration object.
Args:
files (iter of str): File paths which identify configuration files.
These files are processed in order with values in later files
overwriting values in earlier files.
env_prefix (str): The static prefix prepended to all options when set
as environment variables. The default is CONFPY.
strict (bool): Whether or not to parse the files in strict mode.
Returns:
confpy.core.config.Configuration: The loaded configuration object.
Raises:
MissingRequiredOption: If a required option is not defined in any file.
NamespaceNotRegistered: If a file contains a namespace which is not
defined.
OptionNotRegistered: If a file contains an option which is not defined
but resides under a valid namespace.
UnrecognizedFileExtension: If there is no loader for a path.
"""
return check_for_missing_options(
config=set_cli_options(
config=set_environment_var_options(
config=configuration_from_paths(
paths=files,
strict=strict,
),
prefix=env_prefix,
),
)
) |
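The confpy helpers chained above are library-specific; purely as an illustration of the same layering idea (defaults, then files in order, then prefixed environment variables), a generic standard-library sketch might look like the following. The function name, the JSON file format and the key handling are assumptions for the example, not confpy's actual behaviour.

import json
import os

def load_layered_config(paths, env_prefix="CONFPY", defaults=None):
    # Later files override earlier ones; environment variables override files.
    config = dict(defaults or {})
    for path in paths:
        with open(path) as handle:
            config.update(json.load(handle))  # assumes JSON config files
    for key in list(config):
        env_name = "%s_%s" % (env_prefix, key.upper())
        if env_name in os.environ:
            config[key] = os.environ[env_name]
    return config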
def _config(self, args, config):
""" Get configuration for the current used listing.
"""
listings = dict((x.args, x) for x in config.subsections('listing'))
listing = listings.get(args.listing)
if listing is None:
if args.listing == u'default':
return {'pattern': self._profile.list_default_pattern,
'order': self._profile.list_default_order}
else:
raise KolektoRuntimeError('Unknown listing %r' % args.listing)
else:
return {'pattern': listing.get('pattern'),
                    'order': listing.get('order')} | Get configuration for the currently used listing. | Below is the the instruction that describes the task:
### Input:
Get configuration for the currently used listing.
### Response:
def _config(self, args, config):
""" Get configuration for the current used listing.
"""
listings = dict((x.args, x) for x in config.subsections('listing'))
listing = listings.get(args.listing)
if listing is None:
if args.listing == u'default':
return {'pattern': self._profile.list_default_pattern,
'order': self._profile.list_default_order}
else:
raise KolektoRuntimeError('Unknown listing %r' % args.listing)
else:
return {'pattern': listing.get('pattern'),
'order': listing.get('order')} |
def _get_group_randomstate(rs, seed, group):
"""Return a RandomState, equal to the input unless rs is None.
When rs is None, try to get the random state from the
'last_random_state' attribute in `group`. When not available,
use `seed` to generate a random state. When seed is None the returned
random state will have a random seed.
"""
if rs is None:
rs = np.random.RandomState(seed=seed)
# Try to set the random state from the last session to preserve
# a single random stream when simulating timestamps multiple times
if 'last_random_state' in group._v_attrs:
rs.set_state(group._v_attrs['last_random_state'])
print("INFO: Random state set to last saved state in '%s'." %
group._v_name)
else:
print("INFO: Random state initialized from seed (%d)." % seed)
return rs | Return a RandomState, equal to the input unless rs is None.
When rs is None, try to get the random state from the
'last_random_state' attribute in `group`. When not available,
use `seed` to generate a random state. When seed is None the returned
random state will have a random seed. | Below is the the instruction that describes the task:
### Input:
Return a RandomState, equal to the input unless rs is None.
When rs is None, try to get the random state from the
'last_random_state' attribute in `group`. When not available,
use `seed` to generate a random state. When seed is None the returned
random state will have a random seed.
### Response:
def _get_group_randomstate(rs, seed, group):
"""Return a RandomState, equal to the input unless rs is None.
When rs is None, try to get the random state from the
'last_random_state' attribute in `group`. When not available,
use `seed` to generate a random state. When seed is None the returned
random state will have a random seed.
"""
if rs is None:
rs = np.random.RandomState(seed=seed)
# Try to set the random state from the last session to preserve
# a single random stream when simulating timestamps multiple times
if 'last_random_state' in group._v_attrs:
rs.set_state(group._v_attrs['last_random_state'])
print("INFO: Random state set to last saved state in '%s'." %
group._v_name)
else:
print("INFO: Random state initialized from seed (%d)." % seed)
return rs |
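The save/restore mechanism this relies on is NumPy's get_state/set_state pair; a minimal demonstration, independent of the HDF5 group handling:

import numpy as np

rs = np.random.RandomState(seed=1)
rs.rand(3)                        # advance the stream a little
saved_state = rs.get_state()      # what the code keeps in 'last_random_state'

resumed = np.random.RandomState()
resumed.set_state(saved_state)    # a new object continues the same stream
assert np.allclose(resumed.rand(3), rs.rand(3))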
def preprocess_image(image_buffer, output_height, output_width,
num_channels, is_training=False):
"""Preprocesses the given image.
Preprocessing includes decoding, cropping, and resizing for both training
and eval images. Training preprocessing, however, introduces some random
distortion of the image to improve accuracy.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
num_channels: Integer depth of the image buffer for decoding.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
"""
if is_training:
# For training, we want to randomize some of the distortions.
image = _decode_crop_and_flip(image_buffer, num_channels)
mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE,
value=[output_height, output_width])
image = _resize_image(image, output_height, output_width)
else:
# For validation, we want to decode, resize, then just crop the middle.
image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
image = _aspect_preserving_resize(image, _RESIZE_MIN)
mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE,
value=[output_height, output_width])
image = _central_crop(image, output_height, output_width)
image.set_shape([output_height, output_width, num_channels])
return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels) | Preprocesses the given image.
Preprocessing includes decoding, cropping, and resizing for both training
and eval images. Training preprocessing, however, introduces some random
distortion of the image to improve accuracy.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
num_channels: Integer depth of the image buffer for decoding.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image. | Below is the the instruction that describes the task:
### Input:
Preprocesses the given image.
Preprocessing includes decoding, cropping, and resizing for both training
and eval images. Training preprocessing, however, introduces some random
distortion of the image to improve accuracy.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
num_channels: Integer depth of the image buffer for decoding.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
### Response:
def preprocess_image(image_buffer, output_height, output_width,
num_channels, is_training=False):
"""Preprocesses the given image.
Preprocessing includes decoding, cropping, and resizing for both training
and eval images. Training preprocessing, however, introduces some random
distortion of the image to improve accuracy.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
num_channels: Integer depth of the image buffer for decoding.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
"""
if is_training:
# For training, we want to randomize some of the distortions.
image = _decode_crop_and_flip(image_buffer, num_channels)
mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE,
value=[output_height, output_width])
image = _resize_image(image, output_height, output_width)
else:
# For validation, we want to decode, resize, then just crop the middle.
image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
image = _aspect_preserving_resize(image, _RESIZE_MIN)
mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE,
value=[output_height, output_width])
image = _central_crop(image, output_height, output_width)
image.set_shape([output_height, output_width, num_channels])
return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels) |
def lines(self) -> str:
"""Return the source code lines for this error."""
if self.definition is None:
return ''
source = ''
lines = self.definition.source
offset = self.definition.start # type: ignore
lines_stripped = list(reversed(list(dropwhile(is_blank,
reversed(lines)))))
numbers_width = len(str(offset + len(lines_stripped)))
line_format = '{{:{}}}:{{}}'.format(numbers_width)
for n, line in enumerate(lines_stripped):
if line:
line = ' ' + line
source += line_format.format(n + offset, line)
if n > 5:
source += ' ...\n'
break
return source | Return the source code lines for this error. | Below is the the instruction that describes the task:
### Input:
Return the source code lines for this error.
### Response:
def lines(self) -> str:
"""Return the source code lines for this error."""
if self.definition is None:
return ''
source = ''
lines = self.definition.source
offset = self.definition.start # type: ignore
lines_stripped = list(reversed(list(dropwhile(is_blank,
reversed(lines)))))
numbers_width = len(str(offset + len(lines_stripped)))
line_format = '{{:{}}}:{{}}'.format(numbers_width)
for n, line in enumerate(lines_stripped):
if line:
line = ' ' + line
source += line_format.format(n + offset, line)
if n > 5:
source += ' ...\n'
break
return source |
def get_dataset(self, datasetid):
"""The method is getting information about dataset byt it's id"""
path = '/api/1.0/meta/dataset/{}'
        return self._api_get(definition.Dataset, path.format(datasetid)) | The method gets information about a dataset by its id | Below is the the instruction that describes the task:
### Input:
The method gets information about a dataset by its id
### Response:
def get_dataset(self, datasetid):
"""The method is getting information about dataset byt it's id"""
path = '/api/1.0/meta/dataset/{}'
return self._api_get(definition.Dataset, path.format(datasetid)) |
def update(self, validate=False):
"""
Update the data associated with this snapshot by querying EC2.
:type validate: bool
:param validate: By default, if EC2 returns no data about the
snapshot the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from EC2.
"""
rs = self.connection.get_all_snapshots([self.id])
if len(rs) > 0:
self._update(rs[0])
elif validate:
raise ValueError('%s is not a valid Snapshot ID' % self.id)
return self.progress | Update the data associated with this snapshot by querying EC2.
:type validate: bool
:param validate: By default, if EC2 returns no data about the
snapshot the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from EC2. | Below is the the instruction that describes the task:
### Input:
Update the data associated with this snapshot by querying EC2.
:type validate: bool
:param validate: By default, if EC2 returns no data about the
snapshot the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from EC2.
### Response:
def update(self, validate=False):
"""
Update the data associated with this snapshot by querying EC2.
:type validate: bool
:param validate: By default, if EC2 returns no data about the
snapshot the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from EC2.
"""
rs = self.connection.get_all_snapshots([self.id])
if len(rs) > 0:
self._update(rs[0])
elif validate:
raise ValueError('%s is not a valid Snapshot ID' % self.id)
return self.progress |
def FetchResponses(self, session_id):
"""Retrieves responses for a well known flow.
Args:
session_id: The session_id to get the requests/responses for.
Yields:
The retrieved responses.
"""
timestamp = (0, self.frozen_timestamp or rdfvalue.RDFDatetime.Now())
for response in self.data_store.FetchResponsesForWellKnownFlow(
session_id, self.response_limit, timestamp=timestamp):
yield response | Retrieves responses for a well known flow.
Args:
session_id: The session_id to get the requests/responses for.
Yields:
The retrieved responses. | Below is the the instruction that describes the task:
### Input:
Retrieves responses for a well known flow.
Args:
session_id: The session_id to get the requests/responses for.
Yields:
The retrieved responses.
### Response:
def FetchResponses(self, session_id):
"""Retrieves responses for a well known flow.
Args:
session_id: The session_id to get the requests/responses for.
Yields:
The retrieved responses.
"""
timestamp = (0, self.frozen_timestamp or rdfvalue.RDFDatetime.Now())
for response in self.data_store.FetchResponsesForWellKnownFlow(
session_id, self.response_limit, timestamp=timestamp):
yield response |
def adsPortOpenEx():
# type: () -> int
"""Connect to the TwinCAT message router.
:rtype: int
:return: port number
"""
port_open_ex = _adsDLL.AdsPortOpenEx
port_open_ex.restype = ctypes.c_long
port = port_open_ex()
if port == 0:
raise RuntimeError("Failed to open port on AMS router.")
return port | Connect to the TwinCAT message router.
:rtype: int
:return: port number | Below is the the instruction that describes the task:
### Input:
Connect to the TwinCAT message router.
:rtype: int
:return: port number
### Response:
def adsPortOpenEx():
# type: () -> int
"""Connect to the TwinCAT message router.
:rtype: int
:return: port number
"""
port_open_ex = _adsDLL.AdsPortOpenEx
port_open_ex.restype = ctypes.c_long
port = port_open_ex()
if port == 0:
raise RuntimeError("Failed to open port on AMS router.")
return port |
def convert_date_columns(df, date_format='epoch'):
"""Convert dates/datetimes to preferred string format if specified
i.e. '%Y-%m-%d', 'epoch', 'iso'
"""
if date_format not in ['epoch', 'iso']:
if '%' in date_format:
try:
datetime.datetime.now().strftime(date_format)
except:
raise DateConversionError('Error serializing dates in DataFrame using format {}.'.format(date_format))
finally:
for column, data_type in df.dtypes.to_dict().items():
if 'date' in str(data_type):
df[column] = df[column].dt.strftime(date_format)
else:
raise DateConversionError('Error serializing dates in DataFrame using format {}.'.format(date_format))
return df | Convert dates/datetimes to preferred string format if specified
i.e. '%Y-%m-%d', 'epoch', 'iso' | Below is the the instruction that describes the task:
### Input:
Convert dates/datetimes to preferred string format if specified
i.e. '%Y-%m-%d', 'epoch', 'iso'
### Response:
def convert_date_columns(df, date_format='epoch'):
"""Convert dates/datetimes to preferred string format if specified
i.e. '%Y-%m-%d', 'epoch', 'iso'
"""
if date_format not in ['epoch', 'iso']:
if '%' in date_format:
try:
datetime.datetime.now().strftime(date_format)
except:
raise DateConversionError('Error serializing dates in DataFrame using format {}.'.format(date_format))
finally:
for column, data_type in df.dtypes.to_dict().items():
if 'date' in str(data_type):
df[column] = df[column].dt.strftime(date_format)
else:
raise DateConversionError('Error serializing dates in DataFrame using format {}.'.format(date_format))
return df |
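The core per-column conversion can be seen with a tiny DataFrame (column names and dates are made up; the error handling from the function above is omitted):

import pandas as pd

df = pd.DataFrame({"created": pd.to_datetime(["2021-01-05", "2021-02-10"]),
                   "value": [1, 2]})

# Datetime columns become formatted strings, as in convert_date_columns.
for column, dtype in df.dtypes.to_dict().items():
    if "date" in str(dtype):
        df[column] = df[column].dt.strftime("%Y-%m-%d")

print(df["created"].tolist())  # ['2021-01-05', '2021-02-10']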
def print_stats(self):
"""
Print a series of relevant stats about a full execution. This function
is meant to be called at the end of the program.
"""
stats = self.calculate()
total_time = '%d:%02d:%02d' % (stats['total_time'] / 3600,
(stats['total_time'] / 3600) / 60,
(stats['total_time'] % 3600) % 60)
output = """\
Total runtime: {total_time}
Lyrics found: {found}
Lyrics not found:{notfound}
Most useful source:\
{best} ({best_count} lyrics found) ({best_rate:.2f}% success rate)
Least useful source:\
{worst} ({worst_count} lyrics found) ({worst_rate:.2f}% success rate)
Fastest website to scrape: {fastest} (Avg: {fastest_time:.2f}s per search)
Slowest website to scrape: {slowest} (Avg: {slowest_time:.2f}s per search)
Average time per website: {avg_time:.2f}s
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxx PER WEBSITE STATS: xxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
"""
output = output.format(total_time=total_time,
found=stats['found'],
notfound=stats['notfound'],
best=stats['best'][0].capitalize(),
best_count=stats['best'][1],
best_rate=stats['best'][2],
worst=stats['worst'][0].capitalize(),
worst_count=stats['worst'][1],
worst_rate=stats['worst'][2],
fastest=stats['fastest'][0].capitalize(),
fastest_time=stats['fastest'][1],
slowest=stats['slowest'][0].capitalize(),
slowest_time=stats['slowest'][1],
avg_time=self.avg_time())
for source in sources:
stat = str(self.source_stats[source.__name__])
output += f'\n{source.__name__.upper()}\n{stat}\n'
print(output) | Print a series of relevant stats about a full execution. This function
is meant to be called at the end of the program. | Below is the the instruction that describes the task:
### Input:
Print a series of relevant stats about a full execution. This function
is meant to be called at the end of the program.
### Response:
def print_stats(self):
"""
Print a series of relevant stats about a full execution. This function
is meant to be called at the end of the program.
"""
stats = self.calculate()
total_time = '%d:%02d:%02d' % (stats['total_time'] / 3600,
(stats['total_time'] / 3600) / 60,
(stats['total_time'] % 3600) % 60)
output = """\
Total runtime: {total_time}
Lyrics found: {found}
Lyrics not found:{notfound}
Most useful source:\
{best} ({best_count} lyrics found) ({best_rate:.2f}% success rate)
Least useful source:\
{worst} ({worst_count} lyrics found) ({worst_rate:.2f}% success rate)
Fastest website to scrape: {fastest} (Avg: {fastest_time:.2f}s per search)
Slowest website to scrape: {slowest} (Avg: {slowest_time:.2f}s per search)
Average time per website: {avg_time:.2f}s
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxx PER WEBSITE STATS: xxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
"""
output = output.format(total_time=total_time,
found=stats['found'],
notfound=stats['notfound'],
best=stats['best'][0].capitalize(),
best_count=stats['best'][1],
best_rate=stats['best'][2],
worst=stats['worst'][0].capitalize(),
worst_count=stats['worst'][1],
worst_rate=stats['worst'][2],
fastest=stats['fastest'][0].capitalize(),
fastest_time=stats['fastest'][1],
slowest=stats['slowest'][0].capitalize(),
slowest_time=stats['slowest'][1],
avg_time=self.avg_time())
for source in sources:
stat = str(self.source_stats[source.__name__])
output += f'\n{source.__name__.upper()}\n{stat}\n'
print(output) |
def load(self, code, setup='', teardown=''):
"""Prepares a set of setup, test, and teardown code to be
run in the console.
PARAMETERS:
code -- list; processed lines of code. Elements in the list are
either strings (input) or CodeAnswer objects (output)
setup -- str; raw setup code
teardown -- str; raw teardown code
"""
self._setup = textwrap.dedent(setup).splitlines()
self._code = code
self._teardown = textwrap.dedent(teardown).splitlines() | Prepares a set of setup, test, and teardown code to be
run in the console.
PARAMETERS:
code -- list; processed lines of code. Elements in the list are
either strings (input) or CodeAnswer objects (output)
setup -- str; raw setup code
teardown -- str; raw teardown code | Below is the the instruction that describes the task:
### Input:
Prepares a set of setup, test, and teardown code to be
run in the console.
PARAMETERS:
code -- list; processed lines of code. Elements in the list are
either strings (input) or CodeAnswer objects (output)
setup -- str; raw setup code
teardown -- str; raw teardown code
### Response:
def load(self, code, setup='', teardown=''):
"""Prepares a set of setup, test, and teardown code to be
run in the console.
PARAMETERS:
code -- list; processed lines of code. Elements in the list are
either strings (input) or CodeAnswer objects (output)
setup -- str; raw setup code
teardown -- str; raw teardown code
"""
self._setup = textwrap.dedent(setup).splitlines()
self._code = code
self._teardown = textwrap.dedent(teardown).splitlines() |
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
    return r.response | Get the user's playlists, including favourited playlists
    :param uid: the user's ID; it can be obtained via login or other interfaces
    :param offset: (optional) start position of the slice, default 0
    :param limit: (optional) maximum number of rows to return, default 1000 | Below is the the instruction that describes the task:
### Input:
Get the user's playlists, including favourited playlists
:param uid: the user's ID; it can be obtained via login or other interfaces
:param offset: (optional) start position of the slice, default 0
:param limit: (optional) maximum number of rows to return, default 1000
### Response:
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response |
def add(self, urls):
"""
Add the provided urls to this purge request
The urls argument can be a single string, a list of strings, a queryset
or model instance. Models must implement `get_absolute_url()`.
"""
if isinstance(urls, (list, tuple)):
self.urls.extend(urls)
elif isinstance(urls, basestring):
self.urls.append(urls)
elif isinstance(urls, QuerySet):
for obj in urls:
self.urls.append(obj.get_absolute_url())
elif hasattr(urls, 'get_absolute_url'):
self.urls.append(urls.get_absolute_url())
else:
raise TypeError("Don't know how to handle %r" % urls) | Add the provided urls to this purge request
The urls argument can be a single string, a list of strings, a queryset
or model instance. Models must implement `get_absolute_url()`. | Below is the the instruction that describes the task:
### Input:
Add the provided urls to this purge request
The urls argument can be a single string, a list of strings, a queryset
or model instance. Models must implement `get_absolute_url()`.
### Response:
def add(self, urls):
"""
Add the provided urls to this purge request
The urls argument can be a single string, a list of strings, a queryset
or model instance. Models must implement `get_absolute_url()`.
"""
if isinstance(urls, (list, tuple)):
self.urls.extend(urls)
elif isinstance(urls, basestring):
self.urls.append(urls)
elif isinstance(urls, QuerySet):
for obj in urls:
self.urls.append(obj.get_absolute_url())
elif hasattr(urls, 'get_absolute_url'):
self.urls.append(urls.get_absolute_url())
else:
raise TypeError("Don't know how to handle %r" % urls) |
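A short sketch of the accepted argument types, assuming `req` is an instance of the purge-request class that defines this method and `Article` is a hypothetical model implementing get_absolute_url():
req.add('/blog/')                      # single string
req.add(['/about/', '/contact/'])      # list of strings
req.add(Article.objects.all())         # queryset: get_absolute_url() is called per object
req.add(article)                       # single model instance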
def delete_report(self, report):
"""
Deletes a generated report instance.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.destroy
"""
url = ACCOUNTS_API.format(report.account_id) + "/reports/{}/{}".format(
report.type, report.report_id)
response = self._delete_resource(url)
return True | Deletes a generated report instance.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.destroy | Below is the the instruction that describes the task:
### Input:
Deletes a generated report instance.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.destroy
### Response:
def delete_report(self, report):
"""
Deletes a generated report instance.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.destroy
"""
url = ACCOUNTS_API.format(report.account_id) + "/reports/{}/{}".format(
report.type, report.report_id)
response = self._delete_resource(url)
return True |
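A hedged usage sketch, assuming `canvas` is an instance of the client class that provides this method and `report` was fetched through the same API:
if canvas.delete_report(report):
    print('deleted report {}'.format(report.report_id))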
def disconnect_handler(remote, *args, **kwargs):
"""Handle unlinking of remote account."""
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
account = RemoteAccount.get(user_id=current_user.get_id(),
client_id=remote.consumer_key)
external_id = account.extra_data.get('external_id')
if external_id:
oauth_unlink_external_id(dict(id=external_id, method='cern'))
if account:
with db.session.begin_nested():
account.delete()
disconnect_identity(g.identity)
return redirect(url_for('invenio_oauthclient_settings.index')) | Handle unlinking of remote account. | Below is the the instruction that describes the task:
### Input:
Handle unlinking of remote account.
### Response:
def disconnect_handler(remote, *args, **kwargs):
"""Handle unlinking of remote account."""
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
account = RemoteAccount.get(user_id=current_user.get_id(),
client_id=remote.consumer_key)
external_id = account.extra_data.get('external_id')
if external_id:
oauth_unlink_external_id(dict(id=external_id, method='cern'))
if account:
with db.session.begin_nested():
account.delete()
disconnect_identity(g.identity)
return redirect(url_for('invenio_oauthclient_settings.index')) |
def _UpdateChildIndex(self, urn, mutation_pool):
"""Update the child indexes.
This function maintains the index for direct child relations. When we set
an AFF4 path, we always add an attribute like
index:dir/%(childname)s to its parent. This is written
asynchronously to its parent.
In order to query for all direct children of an AFF4 object, we then simply
get the attributes which match the regex index:dir/.+ which are the
direct children.
Args:
urn: The AFF4 object for which we update the index.
mutation_pool: A MutationPool object to write to.
"""
try:
# Create navigation aids by touching intermediate subject names.
while urn.Path() != "/":
basename = urn.Basename()
dirname = rdfvalue.RDFURN(urn.Dirname())
try:
self.intermediate_cache.Get(urn)
return
except KeyError:
extra_attributes = None
# This is a performance optimization. On the root there is no point
# setting the last access time since it gets accessed all the time.
# TODO(amoser): Can we get rid of the index in the root node entirely?
# It's too big to query anyways...
if dirname != u"/":
extra_attributes = {
AFF4Object.SchemaCls.LAST: [
rdfvalue.RDFDatetime.Now().SerializeToDataStore()
]
}
mutation_pool.AFF4AddChild(
dirname, basename, extra_attributes=extra_attributes)
self.intermediate_cache.Put(urn, 1)
urn = dirname
except access_control.UnauthorizedAccess:
pass | Update the child indexes.
This function maintains the index for direct child relations. When we set
an AFF4 path, we always add an attribute like
index:dir/%(childname)s to its parent. This is written
asynchronously to its parent.
In order to query for all direct children of an AFF4 object, we then simply
get the attributes which match the regex index:dir/.+ which are the
direct children.
Args:
urn: The AFF4 object for which we update the index.
mutation_pool: A MutationPool object to write to. | Below is the the instruction that describes the task:
### Input:
Update the child indexes.
This function maintains the index for direct child relations. When we set
an AFF4 path, we always add an attribute like
index:dir/%(childname)s to its parent. This is written
asynchronously to its parent.
In order to query for all direct children of an AFF4 object, we then simply
get the attributes which match the regex index:dir/.+ which are the
direct children.
Args:
urn: The AFF4 object for which we update the index.
mutation_pool: A MutationPool object to write to.
### Response:
def _UpdateChildIndex(self, urn, mutation_pool):
"""Update the child indexes.
This function maintains the index for direct child relations. When we set
an AFF4 path, we always add an attribute like
index:dir/%(childname)s to its parent. This is written
asynchronously to its parent.
In order to query for all direct children of an AFF4 object, we then simply
get the attributes which match the regex index:dir/.+ which are the
direct children.
Args:
urn: The AFF4 object for which we update the index.
mutation_pool: A MutationPool object to write to.
"""
try:
# Create navigation aids by touching intermediate subject names.
while urn.Path() != "/":
basename = urn.Basename()
dirname = rdfvalue.RDFURN(urn.Dirname())
try:
self.intermediate_cache.Get(urn)
return
except KeyError:
extra_attributes = None
# This is a performance optimization. On the root there is no point
# setting the last access time since it gets accessed all the time.
# TODO(amoser): Can we get rid of the index in the root node entirely?
# It's too big to query anyways...
if dirname != u"/":
extra_attributes = {
AFF4Object.SchemaCls.LAST: [
rdfvalue.RDFDatetime.Now().SerializeToDataStore()
]
}
mutation_pool.AFF4AddChild(
dirname, basename, extra_attributes=extra_attributes)
self.intermediate_cache.Put(urn, 1)
urn = dirname
except access_control.UnauthorizedAccess:
pass |
async def notify(self, *args):
"""
Notify handlers
:param args:
:return:
"""
from .filters import check_filters, FilterNotPassed
results = []
data = {}
ctx_data.set(data)
if self.middleware_key:
try:
await self.dispatcher.middleware.trigger(f"pre_process_{self.middleware_key}", args + (data,))
except CancelHandler: # Allow to cancel current event
return results
try:
for handler_obj in self.handlers:
try:
data.update(await check_filters(handler_obj.filters, args))
except FilterNotPassed:
continue
else:
ctx_token = current_handler.set(handler_obj.handler)
try:
if self.middleware_key:
await self.dispatcher.middleware.trigger(f"process_{self.middleware_key}", args + (data,))
partial_data = _check_spec(handler_obj.spec, data)
response = await handler_obj.handler(*args, **partial_data)
if response is not None:
results.append(response)
if self.once:
break
except SkipHandler:
continue
except CancelHandler:
break
finally:
current_handler.reset(ctx_token)
finally:
if self.middleware_key:
await self.dispatcher.middleware.trigger(f"post_process_{self.middleware_key}",
args + (results, data,))
return results | Notify handlers
:param args:
:return: | Below is the the instruction that describes the task:
### Input:
Notify handlers
:param args:
:return:
### Response:
async def notify(self, *args):
"""
Notify handlers
:param args:
:return:
"""
from .filters import check_filters, FilterNotPassed
results = []
data = {}
ctx_data.set(data)
if self.middleware_key:
try:
await self.dispatcher.middleware.trigger(f"pre_process_{self.middleware_key}", args + (data,))
except CancelHandler: # Allow to cancel current event
return results
try:
for handler_obj in self.handlers:
try:
data.update(await check_filters(handler_obj.filters, args))
except FilterNotPassed:
continue
else:
ctx_token = current_handler.set(handler_obj.handler)
try:
if self.middleware_key:
await self.dispatcher.middleware.trigger(f"process_{self.middleware_key}", args + (data,))
partial_data = _check_spec(handler_obj.spec, data)
response = await handler_obj.handler(*args, **partial_data)
if response is not None:
results.append(response)
if self.once:
break
except SkipHandler:
continue
except CancelHandler:
break
finally:
current_handler.reset(ctx_token)
finally:
if self.middleware_key:
await self.dispatcher.middleware.trigger(f"post_process_{self.middleware_key}",
args + (results, data,))
return results |
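A minimal sketch of dispatching an event through a handler object of this class; `handler` and `message` are assumed to come from the surrounding dispatcher setup:
results = await handler.notify(message)
# each entry in results is a non-None return value from a handler whose filters passed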
def _create_dir_path(self, file_hash, path=None, hash_list=None):
"""
Create proper filesystem paths for given `file_hash`.
Args:
file_hash (str): Hash of the file for which the path should be
created.
path (str, default None): Recursion argument, don't set this.
hash_list (list, default None): Recursion argument, don't set this.
Returns:
str: Created path.
"""
# first, non-recursive call - parse `file_hash`
if hash_list is None:
hash_list = list(file_hash)
if not hash_list:
raise IOError("Directory structure is too full!")
# first, non-recursive call - look for subpath of `self.path`
if not path:
path = os.path.join(
self.path,
hash_list.pop(0)
)
# if the path not yet exists, create it and work on it
if not os.path.exists(path):
os.mkdir(path)
return self._create_dir_path(
file_hash=file_hash,
path=path,
hash_list=hash_list
)
files = os.listdir(path)
# file is already in storage
if file_hash in files:
return path
# if the directory is not yet full, use it
if len(files) < self.dir_limit:
return path
# in full directories create new sub-directories
return self._create_dir_path(
file_hash=file_hash,
path=os.path.join(path, hash_list.pop(0)),
hash_list=hash_list
) | Create proper filesystem paths for given `file_hash`.
Args:
file_hash (str): Hash of the file for which the path should be
created.
path (str, default None): Recursion argument, don't set this.
hash_list (list, default None): Recursion argument, don't set this.
Returns:
str: Created path. | Below is the the instruction that describes the task:
### Input:
Create proper filesystem paths for given `file_hash`.
Args:
file_hash (str): Hash of the file for which the path should be
created.
path (str, default None): Recursion argument, don't set this.
hash_list (list, default None): Recursion argument, don't set this.
Returns:
str: Created path.
### Response:
def _create_dir_path(self, file_hash, path=None, hash_list=None):
"""
Create proper filesystem paths for given `file_hash`.
Args:
file_hash (str): Hash of the file for which the path should be
created.
path (str, default None): Recursion argument, don't set this.
hash_list (list, default None): Recursion argument, don't set this.
Returns:
str: Created path.
"""
# first, non-recursive call - parse `file_hash`
if hash_list is None:
hash_list = list(file_hash)
if not hash_list:
raise IOError("Directory structure is too full!")
# first, non-recursive call - look for subpath of `self.path`
if not path:
path = os.path.join(
self.path,
hash_list.pop(0)
)
# if the path not yet exists, create it and work on it
if not os.path.exists(path):
os.mkdir(path)
return self._create_dir_path(
file_hash=file_hash,
path=path,
hash_list=hash_list
)
files = os.listdir(path)
# file is already in storage
if file_hash in files:
return path
# if the directory is not yet full, use it
if len(files) < self.dir_limit:
return path
# in full directories create new sub-directories
return self._create_dir_path(
file_hash=file_hash,
path=os.path.join(path, hash_list.pop(0)),
hash_list=hash_list
) |
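A hedged sketch of the directory fan-out, assuming `storage` is an instance of the storage class that owns this method and the hash is fabricated:
path = storage._create_dir_path('3a7bd3e2360a3d29eea436fcfb7e44c735d117c4')
# returns a directory such as <storage.path>/3, descending to <storage.path>/3/a
# only once the shallower level already holds dir_limit entries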
def gen_cisco_vdp_oui(self, oui_id, oui_data):
"""Cisco specific handler for constructing OUI arguments. """
oui_list = []
vm_name = oui_data.get('vm_name')
if vm_name is not None:
oui_str = "oui=%s," % oui_id
oui_name_str = oui_str + "vm_name=" + vm_name
oui_list.append(oui_name_str)
ip_addr = oui_data.get('ip_addr')
if ip_addr is not None:
oui_str = "oui=%s," % oui_id
ip_addr_str = oui_str + "ipv4_addr=" + ip_addr
oui_list.append(ip_addr_str)
vm_uuid = oui_data.get('vm_uuid')
if vm_uuid is not None:
oui_str = "oui=%s," % oui_id
vm_uuid_str = oui_str + "vm_uuid=" + vm_uuid
oui_list.append(vm_uuid_str)
return oui_list | Cisco specific handler for constructing OUI arguments. | Below is the the instruction that describes the task:
### Input:
Cisco specific handler for constructing OUI arguments.
### Response:
def gen_cisco_vdp_oui(self, oui_id, oui_data):
"""Cisco specific handler for constructing OUI arguments. """
oui_list = []
vm_name = oui_data.get('vm_name')
if vm_name is not None:
oui_str = "oui=%s," % oui_id
oui_name_str = oui_str + "vm_name=" + vm_name
oui_list.append(oui_name_str)
ip_addr = oui_data.get('ip_addr')
if ip_addr is not None:
oui_str = "oui=%s," % oui_id
ip_addr_str = oui_str + "ipv4_addr=" + ip_addr
oui_list.append(ip_addr_str)
vm_uuid = oui_data.get('vm_uuid')
if vm_uuid is not None:
oui_str = "oui=%s," % oui_id
vm_uuid_str = oui_str + "vm_uuid=" + vm_uuid
oui_list.append(vm_uuid_str)
return oui_list |
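A small worked example; `vdp` stands for an instance of the class that defines this method, and the OUI id and VM details are illustrative only:
vdp.gen_cisco_vdp_oui('cisco', {'vm_name': 'web1', 'ip_addr': '10.0.0.5', 'vm_uuid': 'ab12'})
# -> ['oui=cisco,vm_name=web1', 'oui=cisco,ipv4_addr=10.0.0.5', 'oui=cisco,vm_uuid=ab12']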
def get(self, sid):
"""
Constructs a IpAccessControlListContext
:param sid: A string that identifies the resource to fetch
:returns: twilio.rest.api.v2010.account.sip.ip_access_control_list.IpAccessControlListContext
:rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.IpAccessControlListContext
"""
return IpAccessControlListContext(self._version, account_sid=self._solution['account_sid'], sid=sid, ) | Constructs a IpAccessControlListContext
:param sid: A string that identifies the resource to fetch
:returns: twilio.rest.api.v2010.account.sip.ip_access_control_list.IpAccessControlListContext
:rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.IpAccessControlListContext | Below is the the instruction that describes the task:
### Input:
Constructs a IpAccessControlListContext
:param sid: A string that identifies the resource to fetch
:returns: twilio.rest.api.v2010.account.sip.ip_access_control_list.IpAccessControlListContext
:rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.IpAccessControlListContext
### Response:
def get(self, sid):
"""
Constructs a IpAccessControlListContext
:param sid: A string that identifies the resource to fetch
:returns: twilio.rest.api.v2010.account.sip.ip_access_control_list.IpAccessControlListContext
:rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.IpAccessControlListContext
"""
return IpAccessControlListContext(self._version, account_sid=self._solution['account_sid'], sid=sid, ) |
async def on_raw_410(self, message):
""" Unknown CAP subcommand or CAP error. Force-end negotiations. """
self.logger.error('Server sent "Unknown CAP subcommand: %s". Aborting capability negotiation.', message.params[0])
self._capabilities_requested = set()
self._capabilities_negotiating = set()
await self.rawmsg('CAP', 'END') | Unknown CAP subcommand or CAP error. Force-end negotiations. | Below is the the instruction that describes the task:
### Input:
Unknown CAP subcommand or CAP error. Force-end negotiations.
### Response:
async def on_raw_410(self, message):
""" Unknown CAP subcommand or CAP error. Force-end negotiations. """
self.logger.error('Server sent "Unknown CAP subcommand: %s". Aborting capability negotiation.', message.params[0])
self._capabilities_requested = set()
self._capabilities_negotiating = set()
await self.rawmsg('CAP', 'END') |
def docopt(doc, argv=None, help=True, version=None, options_first=False):
"""Parse `argv` based on command-line interface described in `doc`.
`docopt` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv[1:] is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object
If passed, the object will be printed if --version is in
`argv`.
options_first : bool (default: False)
Set to True to require options precede positional arguments,
i.e. to forbid options and positional arguments intermix.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docopt import docopt
>>> doc = '''
Usage:
my_program tcp <host> <port> [--timeout=<seconds>]
my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
my_program (-h | --help | --version)
Options:
-h, --help Show this screen and exit.
--baud=<n> Baudrate [default: 9600]
'''
>>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docopt(doc, argv)
{'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* For video introduction see http://docopt.org
* Full documentation is available in README.rst as well as online
at https://github.com/docopt/docopt#readme
"""
if argv is None:
argv = sys.argv[1:]
DocoptExit.usage = printable_usage(doc)
options = parse_defaults(doc)
pattern = parse_pattern(formal_usage(DocoptExit.usage), options)
# [default] syntax for argument is disabled
#for a in pattern.flat(Argument):
# same_name = [d for d in arguments if d.name == a.name]
# if same_name:
# a.value = same_name[0].value
argv = parse_argv(TokenStream(argv, DocoptExit), list(options),
options_first)
pattern_options = set(pattern.flat(Option))
for ao in pattern.flat(AnyOptions):
doc_options = parse_defaults(doc)
ao.children = list(set(doc_options) - pattern_options)
#if any_options:
# ao.children += [Option(o.short, o.long, o.argcount)
# for o in argv if type(o) is Option]
extras(help, version, argv, doc)
matched, left, collected = pattern.fix().match(argv)
if matched:
return Dict((a.name, a.value) for a in (pattern.flat() + collected))
raise DocoptExit() | Parse `argv` based on command-line interface described in `doc`.
`docopt` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv[1:] is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object
If passed, the object will be printed if --version is in
`argv`.
options_first : bool (default: False)
Set to True to require options precede positional arguments,
i.e. to forbid options and positional arguments intermix.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docopt import docopt
>>> doc = '''
Usage:
my_program tcp <host> <port> [--timeout=<seconds>]
my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
my_program (-h | --help | --version)
Options:
-h, --help Show this screen and exit.
--baud=<n> Baudrate [default: 9600]
'''
>>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docopt(doc, argv)
{'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* For video introduction see http://docopt.org
* Full documentation is available in README.rst as well as online
at https://github.com/docopt/docopt#readme | Below is the the instruction that describes the task:
### Input:
Parse `argv` based on command-line interface described in `doc`.
`docopt` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv[1:] is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object
If passed, the object will be printed if --version is in
`argv`.
options_first : bool (default: False)
Set to True to require options precede positional arguments,
i.e. to forbid options and positional arguments intermix.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docopt import docopt
>>> doc = '''
Usage:
my_program tcp <host> <port> [--timeout=<seconds>]
my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
my_program (-h | --help | --version)
Options:
-h, --help Show this screen and exit.
--baud=<n> Baudrate [default: 9600]
'''
>>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docopt(doc, argv)
{'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* For video introduction see http://docopt.org
* Full documentation is available in README.rst as well as online
at https://github.com/docopt/docopt#readme
### Response:
def docopt(doc, argv=None, help=True, version=None, options_first=False):
"""Parse `argv` based on command-line interface described in `doc`.
`docopt` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv[1:] is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object
If passed, the object will be printed if --version is in
`argv`.
options_first : bool (default: False)
Set to True to require options precede positional arguments,
i.e. to forbid options and positional arguments intermix.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docopt import docopt
>>> doc = '''
Usage:
my_program tcp <host> <port> [--timeout=<seconds>]
my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
my_program (-h | --help | --version)
Options:
-h, --help Show this screen and exit.
--baud=<n> Baudrate [default: 9600]
'''
>>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docopt(doc, argv)
{'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* For video introduction see http://docopt.org
* Full documentation is available in README.rst as well as online
at https://github.com/docopt/docopt#readme
"""
if argv is None:
argv = sys.argv[1:]
DocoptExit.usage = printable_usage(doc)
options = parse_defaults(doc)
pattern = parse_pattern(formal_usage(DocoptExit.usage), options)
# [default] syntax for argument is disabled
#for a in pattern.flat(Argument):
# same_name = [d for d in arguments if d.name == a.name]
# if same_name:
# a.value = same_name[0].value
argv = parse_argv(TokenStream(argv, DocoptExit), list(options),
options_first)
pattern_options = set(pattern.flat(Option))
for ao in pattern.flat(AnyOptions):
doc_options = parse_defaults(doc)
ao.children = list(set(doc_options) - pattern_options)
#if any_options:
# ao.children += [Option(o.short, o.long, o.argcount)
# for o in argv if type(o) is Option]
extras(help, version, argv, doc)
matched, left, collected = pattern.fix().match(argv)
if matched:
return Dict((a.name, a.value) for a in (pattern.flat() + collected))
raise DocoptExit() |
def suppress_exceptions(callables, *exceptions):
"""
Call each callable in callables, suppressing any exceptions supplied. If
no exception classes are supplied, all Exceptions will be suppressed.
>>> import functools
>>> c1 = functools.partial(int, 'a')
>>> c2 = functools.partial(int, '10')
>>> list(suppress_exceptions((c1, c2)))
[10]
>>> list(suppress_exceptions((c1, c2), KeyError))
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'a'
"""
if not exceptions:
exceptions = Exception,
for callable in callables:
try:
yield callable()
except exceptions:
pass | Call each callable in callables, suppressing any exceptions supplied. If
no exception classes are supplied, all Exceptions will be suppressed.
>>> import functools
>>> c1 = functools.partial(int, 'a')
>>> c2 = functools.partial(int, '10')
>>> list(suppress_exceptions((c1, c2)))
[10]
>>> list(suppress_exceptions((c1, c2), KeyError))
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'a' | Below is the the instruction that describes the task:
### Input:
Call each callable in callables, suppressing any exceptions supplied. If
no exception classes are supplied, all Exceptions will be suppressed.
>>> import functools
>>> c1 = functools.partial(int, 'a')
>>> c2 = functools.partial(int, '10')
>>> list(suppress_exceptions((c1, c2)))
[10]
>>> list(suppress_exceptions((c1, c2), KeyError))
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'a'
### Response:
def suppress_exceptions(callables, *exceptions):
"""
Call each callable in callables, suppressing any exceptions supplied. If
no exception classes are supplied, all Exceptions will be suppressed.
>>> import functools
>>> c1 = functools.partial(int, 'a')
>>> c2 = functools.partial(int, '10')
>>> list(suppress_exceptions((c1, c2)))
[10]
>>> list(suppress_exceptions((c1, c2), KeyError))
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'a'
"""
if not exceptions:
exceptions = Exception,
for callable in callables:
try:
yield callable()
except exceptions:
pass |
def pad_event_roll(event_roll, length):
"""Pad event roll's length to given length
Parameters
----------
event_roll: np.ndarray, shape=(m,k)
Event roll
length : int
Length to be padded
Returns
-------
event_roll: np.ndarray, shape=(m,k)
Padded event roll
"""
if length > event_roll.shape[0]:
padding = numpy.zeros((length-event_roll.shape[0], event_roll.shape[1]))
event_roll = numpy.vstack((event_roll, padding))
return event_roll | Pad event roll's length to given length
Parameters
----------
event_roll: np.ndarray, shape=(m,k)
Event roll
length : int
Length to be padded
Returns
-------
event_roll: np.ndarray, shape=(m,k)
Padded event roll | Below is the the instruction that describes the task:
### Input:
Pad event roll's length to given length
Parameters
----------
event_roll: np.ndarray, shape=(m,k)
Event roll
length : int
Length to be padded
Returns
-------
event_roll: np.ndarray, shape=(m,k)
Padded event roll
### Response:
def pad_event_roll(event_roll, length):
"""Pad event roll's length to given length
Parameters
----------
event_roll: np.ndarray, shape=(m,k)
Event roll
length : int
Length to be padded
Returns
-------
event_roll: np.ndarray, shape=(m,k)
Padded event roll
"""
if length > event_roll.shape[0]:
padding = numpy.zeros((length-event_roll.shape[0], event_roll.shape[1]))
event_roll = numpy.vstack((event_roll, padding))
return event_roll |
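A quick worked example with numpy:
import numpy
roll = numpy.ones((100, 5))
padded = pad_event_roll(roll, 120)
# padded.shape == (120, 5); rows 100..119 are zero padding
# a target length not greater than the roll leaves it unchanged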
def update_visual_baseline(self):
"""Configure baseline directory after driver is created"""
# Update baseline with real platformVersion value
if '{PlatformVersion}' in self.baseline_name:
try:
platform_version = self.driver.desired_capabilities['platformVersion']
except KeyError:
platform_version = None
self.baseline_name = self.baseline_name.replace('{PlatformVersion}', str(platform_version))
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
self.baseline_name)
# Update baseline with real version value
if '{Version}' in self.baseline_name:
try:
splitted_version = self.driver.desired_capabilities['version'].split('.')
version = '.'.join(splitted_version[:2])
except KeyError:
version = None
self.baseline_name = self.baseline_name.replace('{Version}', str(version))
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
self.baseline_name)
# Update baseline with remote node value
if '{RemoteNode}' in self.baseline_name:
self.baseline_name = self.baseline_name.replace('{RemoteNode}', str(self.remote_node))
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
self.baseline_name) | Configure baseline directory after driver is created | Below is the the instruction that describes the task:
### Input:
Configure baseline directory after driver is created
### Response:
def update_visual_baseline(self):
"""Configure baseline directory after driver is created"""
# Update baseline with real platformVersion value
if '{PlatformVersion}' in self.baseline_name:
try:
platform_version = self.driver.desired_capabilities['platformVersion']
except KeyError:
platform_version = None
self.baseline_name = self.baseline_name.replace('{PlatformVersion}', str(platform_version))
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
self.baseline_name)
# Update baseline with real version value
if '{Version}' in self.baseline_name:
try:
splitted_version = self.driver.desired_capabilities['version'].split('.')
version = '.'.join(splitted_version[:2])
except KeyError:
version = None
self.baseline_name = self.baseline_name.replace('{Version}', str(version))
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
self.baseline_name)
# Update baseline with remote node value
if '{RemoteNode}' in self.baseline_name:
self.baseline_name = self.baseline_name.replace('{RemoteNode}', str(self.remote_node))
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
self.baseline_name) |
def json_obj_to_cursor(self, json):
"""(Deprecated) Converts a JSON object to a mongo db cursor
:param str json: A json string
:returns: dictionary with ObjectId type
:rtype: dict
"""
cursor = json_util.loads(json)
if "id" in json:
cursor["_id"] = ObjectId(cursor["id"])
del cursor["id"]
return cursor | (Deprecated) Converts a JSON object to a mongo db cursor
:param str json: A json string
:returns: dictionary with ObjectId type
:rtype: dict | Below is the the instruction that describes the task:
### Input:
(Deprecated) Converts a JSON object to a mongo db cursor
:param str json: A json string
:returns: dictionary with ObjectId type
:rtype: dict
### Response:
def json_obj_to_cursor(self, json):
"""(Deprecated) Converts a JSON object to a mongo db cursor
:param str json: A json string
:returns: dictionary with ObjectId type
:rtype: dict
"""
cursor = json_util.loads(json)
if "id" in json:
cursor["_id"] = ObjectId(cursor["id"])
del cursor["id"]
return cursor |
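A hedged illustration, assuming `dao` is an instance of the class that exposes this method; the id value is fabricated but hex-valid:
doc = dao.json_obj_to_cursor('{"id": "507f1f77bcf86cd799439011", "title": "demo"}')
# doc['_id'] is an ObjectId built from the 'id' field, which is removed from the result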
def start(self):
"""Start the consumer. This starts a listen loop on a zmq.PULL socket,
calling ``self.handle`` on each incoming request and pushing the response
on a zmq.PUSH socket back to the producer."""
if not self.initialized:
raise Exception("Consumer not initialized (no Producer).")
producer = self.producer
context = zmq._Context()
self.pull = context.socket(zmq.PULL)
self.push = context.socket(zmq.PUSH)
self.pull.connect('tcp://%s:%s' % (producer.host, producer.push_port))
self.push.connect('tcp://%s:%s' % (producer.host, producer.pull_port))
# TODO: notify the producer that this consumer's ready for work?
self.listen() | Start the consumer. This starts a listen loop on a zmq.PULL socket,
calling ``self.handle`` on each incoming request and pushing the response
on a zmq.PUSH socket back to the producer. | Below is the the instruction that describes the task:
### Input:
Start the consumer. This starts a listen loop on a zmq.PULL socket,
calling ``self.handle`` on each incoming request and pushing the response
on a zmq.PUSH socket back to the producer.
### Response:
def start(self):
"""Start the consumer. This starts a listen loop on a zmq.PULL socket,
calling ``self.handle`` on each incoming request and pushing the response
on a zmq.PUSH socket back to the producer."""
if not self.initialized:
raise Exception("Consumer not initialized (no Producer).")
producer = self.producer
context = zmq._Context()
self.pull = context.socket(zmq.PULL)
self.push = context.socket(zmq.PUSH)
self.pull.connect('tcp://%s:%s' % (producer.host, producer.push_port))
self.push.connect('tcp://%s:%s' % (producer.host, producer.pull_port))
# TODO: notify the producer that this consumer's ready for work?
self.listen() |
def put(self, instance, errors):
"""
Update a model instance.
:param instance: The model instance.
:param errors: Any errors.
:return: The updated model instance, or a dictionary of errors.
"""
if errors:
return self.errors(errors)
return self.updated(instance) | Update a model instance.
:param instance: The model instance.
:param errors: Any errors.
:return: The updated model instance, or a dictionary of errors. | Below is the the instruction that describes the task:
### Input:
Update a model instance.
:param instance: The model instance.
:param errors: Any errors.
:return: The updated model instance, or a dictionary of errors.
### Response:
def put(self, instance, errors):
"""
Update a model instance.
:param instance: The model instance.
:param errors: Any errors.
:return: The updated model instance, or a dictionary of errors.
"""
if errors:
return self.errors(errors)
return self.updated(instance) |
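A brief sketch of the two branches, assuming `resource` is an instance of the resource class that defines this method:
resource.put(instance, errors=None)                     # -> self.updated(instance)
resource.put(instance, errors={'name': ['required']})   # -> self.errors({'name': ['required']})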
def is_alias_command(subcommands, args):
"""
Check if the user is invoking one of the commands in 'subcommands' in the form 'az alias'.
Args:
subcommands: The list of subcommands to check through.
args: The CLI arguments to process.
Returns:
True if the user is invoking 'az alias {command}'.
"""
if not args:
return False
for subcommand in subcommands:
if args[:2] == ['alias', subcommand]:
return True
return False | Check if the user is invoking one of the commands in 'subcommands' in the form 'az alias'.
Args:
subcommands: The list of subcommands to check through.
args: The CLI arguments to process.
Returns:
True if the user is invoking 'az alias {command}'. | Below is the the instruction that describes the task:
### Input:
Check if the user is invoking one of the commands in 'subcommands' in the form 'az alias'.
Args:
subcommands: The list of subcommands to check through.
args: The CLI arguments to process.
Returns:
True if the user is invoking 'az alias {command}'.
### Response:
def is_alias_command(subcommands, args):
"""
Check if the user is invoking one of the commands in 'subcommands' in the form 'az alias'.
Args:
subcommands: The list of subcommands to check through.
args: The CLI arguments to process.
Returns:
True if the user is invoking 'az alias {command}'.
"""
if not args:
return False
for subcommand in subcommands:
if args[:2] == ['alias', subcommand]:
return True
return False |
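Two worked calls that follow directly from the args[:2] comparison:
is_alias_command(['create', 'remove'], ['alias', 'create', '--name', 'c'])  # True
is_alias_command(['create', 'remove'], ['account', 'show'])                 # False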
def importProfile(self, filename=''):
"""
Imports a profile from a file.
:param filename | <str>
"""
if not (filename and isinstance(filename, basestring)):
filename = QtGui.QFileDialog.getOpenFileName(self,
'Import Layout from...',
QtCore.QDir.currentPath(),
'XView (*.xview)')
if type(filename) == tuple:
filename = nativestring(filename[0])
filename = nativestring(filename)
if not filename:
return
if not filename.endswith('.xview'):
filename += '.xview'
profile = XViewProfile.load(filename)
if not profile:
return
profile.restore(self) | Imports a profile from a file.
:param filename | <str> | Below is the the instruction that describes the task:
### Input:
Imports a profile from a file.
:param filename | <str>
### Response:
def importProfile(self, filename=''):
"""
Imports a profile from a file.
:param filename | <str>
"""
if not (filename and isinstance(filename, basestring)):
filename = QtGui.QFileDialog.getOpenFileName(self,
'Import Layout from...',
QtCore.QDir.currentPath(),
'XView (*.xview)')
if type(filename) == tuple:
filename = nativestring(filename[0])
filename = nativestring(filename)
if not filename:
return
if not filename.endswith('.xview'):
filename += '.xview'
profile = XViewProfile.load(filename)
if not profile:
return
profile.restore(self) |
def copyto_at(self, n, src, m):
"""Copy to at a given location.
Parameters
----------
n: int
index where to copy
src: `ThetaParticles` object
source
m: int
index of the element to be copied
Note
----
Basically, does self[n] <- src[m]
"""
for k in self.containers:
self.__dict__[k][n] = src.__dict__[k][m] | Copy to at a given location.
Parameters
----------
n: int
index where to copy
src: `ThetaParticles` object
source
m: int
index of the element to be copied
Note
----
Basically, does self[n] <- src[m] | Below is the the instruction that describes the task:
### Input:
Copy to at a given location.
Parameters
----------
n: int
index where to copy
src: `ThetaParticles` object
source
m: int
index of the element to be copied
Note
----
Basically, does self[n] <- src[m]
### Response:
def copyto_at(self, n, src, m):
"""Copy to at a given location.
Parameters
----------
n: int
index where to copy
src: `ThetaParticles` object
source
m: int
index of the element to be copied
Note
----
Basically, does self[n] <- src[m]
"""
for k in self.containers:
self.__dict__[k][n] = src.__dict__[k][m] |
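A hedged sketch, assuming `dest` and `src` are two particle objects of this class with the same container names:
dest.copyto_at(0, src, 3)
# for every container name k, dest's array k at index 0 is overwritten with src's array k at index 3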
def get_template_attribute(template_name, attribute):
"""Loads a macro (or variable) a template exports. This can be used to
invoke a macro from within Python code. If you for example have a
template named `_cider.html` with the following contents:
.. sourcecode:: html+jinja
{% macro hello(name) %}Hello {{ name }}!{% endmacro %}
You can access this from Python code like this::
hello = get_template_attribute('_cider.html', 'hello')
return hello('World')
.. versionadded:: 0.2
:param template_name: the name of the template
:param attribute: the name of the variable or macro to access
"""
return getattr(current_app.jinja_env.get_template(template_name).module,
attribute) | Loads a macro (or variable) a template exports. This can be used to
invoke a macro from within Python code. If you for example have a
template named `_cider.html` with the following contents:
.. sourcecode:: html+jinja
{% macro hello(name) %}Hello {{ name }}!{% endmacro %}
You can access this from Python code like this::
hello = get_template_attribute('_cider.html', 'hello')
return hello('World')
.. versionadded:: 0.2
:param template_name: the name of the template
:param attribute: the name of the variable or macro to access
### Input:
Loads a macro (or variable) a template exports. This can be used to
invoke a macro from within Python code. If you for example have a
template named `_cider.html` with the following contents:
.. sourcecode:: html+jinja
{% macro hello(name) %}Hello {{ name }}!{% endmacro %}
You can access this from Python code like this::
hello = get_template_attribute('_cider.html', 'hello')
return hello('World')
.. versionadded:: 0.2
:param template_name: the name of the template
:param attribute: the name of the variable or macro to access
### Response:
def get_template_attribute(template_name, attribute):
"""Loads a macro (or variable) a template exports. This can be used to
invoke a macro from within Python code. If you for example have a
template named `_cider.html` with the following contents:
.. sourcecode:: html+jinja
{% macro hello(name) %}Hello {{ name }}!{% endmacro %}
You can access this from Python code like this::
hello = get_template_attribute('_cider.html', 'hello')
return hello('World')
.. versionadded:: 0.2
:param template_name: the name of the template
:param attribute: the name of the variable or macro to access
"""
return getattr(current_app.jinja_env.get_template(template_name).module,
attribute) |
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given
expression. Useful to restore the parsed fields of an HTML start
tag into the raw tag text itself, or to revert separate tokens with
intervening whitespace back to the original matching input text. By
default, returns a string containing the original parsed text.
If the optional ``asString`` argument is passed as
``False``, then the return value is
a :class:`ParseResults` containing any results names that
were originally matched, and a single token containing the original
matched text from the input string. So if the expression passed to
:class:`originalTextFor` contains expressions with defined
results names, you must set ``asString`` to ``False`` if you
want to preserve those results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr | Helper to return the original, untokenized text for a given
expression. Useful to restore the parsed fields of an HTML start
tag into the raw tag text itself, or to revert separate tokens with
intervening whitespace back to the original matching input text. By
default, returns a string containing the original parsed text.
If the optional ``asString`` argument is passed as
``False``, then the return value is
a :class:`ParseResults` containing any results names that
were originally matched, and a single token containing the original
matched text from the input string. So if the expression passed to
:class:`originalTextFor` contains expressions with defined
results names, you must set ``asString`` to ``False`` if you
want to preserve those results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>'] | Below is the the instruction that describes the task:
### Input:
Helper to return the original, untokenized text for a given
expression. Useful to restore the parsed fields of an HTML start
tag into the raw tag text itself, or to revert separate tokens with
intervening whitespace back to the original matching input text. By
default, returns a string containing the original parsed text.
If the optional ``asString`` argument is passed as
``False``, then the return value is
a :class:`ParseResults` containing any results names that
were originally matched, and a single token containing the original
matched text from the input string. So if the expression passed to
:class:`originalTextFor` contains expressions with defined
results names, you must set ``asString`` to ``False`` if you
want to preserve those results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
### Response:
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given
expression. Useful to restore the parsed fields of an HTML start
tag into the raw tag text itself, or to revert separate tokens with
intervening whitespace back to the original matching input text. By
default, returns a string containing the original parsed text.
If the optional ``asString`` argument is passed as
``False``, then the return value is
a :class:`ParseResults` containing any results names that
were originally matched, and a single token containing the original
matched text from the input string. So if the expression passed to
:class:`originalTextFor` contains expressions with defined
results names, you must set ``asString`` to ``False`` if you
want to preserve those results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr |
def normalizeName(self,in_name):
"""
Takes in an object and normalizes it to its name/string.
Currently, only integers and strings may be passed in, else a :py:exc:`TypeError`
will be thrown.
"""
if isinstance(in_name,str):
assert in_name in self._data["reg"].inv
return in_name
elif isinstance(in_name,int):
assert in_name in self._data["reg"]
return self._data["reg"][in_name]
else:
raise TypeError("Only int and str can be converted to names") | Takes in an object and normalizes it to its name/string.
Currently, only integers and strings may be passed in, else a :py:exc:`TypeError`
will be thrown. | Below is the the instruction that describes the task:
### Input:
Takes in an object and normalizes it to its name/string.
Currently, only integers and strings may be passed in, else a :py:exc:`TypeError`
will be thrown.
### Response:
def normalizeName(self,in_name):
"""
Takes in an object and normalizes it to its name/string.
Currently, only integers and strings may be passed in, else a :py:exc:`TypeError`
will be thrown.
"""
if isinstance(in_name,str):
assert in_name in self._data["reg"].inv
return in_name
elif isinstance(in_name,int):
assert in_name in self._data["reg"]
return self._data["reg"][in_name]
else:
raise TypeError("Only int and str can be converted to names") |
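A hedged illustration; the register contents are invented and only show the int/str round trip:
reg.normalizeName('eax')   # returns 'eax' if that name is in the registry (the assert fails otherwise)
reg.normalizeName(0)       # returns whatever name the registry maps index 0 to
reg.normalizeName(1.5)     # raises TypeError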
def get_dns_servers(interface='Local Area Connection'):
'''
Return a list of the configured DNS servers of the specified interface
CLI Example:
.. code-block:: bash
salt '*' win_dns_client.get_dns_servers 'Local Area Connection'
'''
# remove any escape characters
interface = interface.split('\\')
interface = ''.join(interface)
with salt.utils.winapi.Com():
c = wmi.WMI()
for iface in c.Win32_NetworkAdapter(NetEnabled=True):
if interface == iface.NetConnectionID:
iface_config = c.Win32_NetworkAdapterConfiguration(Index=iface.Index).pop()
try:
return list(iface_config.DNSServerSearchOrder)
except TypeError:
return []
log.debug('Interface "%s" not found', interface)
return False | Return a list of the configured DNS servers of the specified interface
CLI Example:
.. code-block:: bash
salt '*' win_dns_client.get_dns_servers 'Local Area Connection' | Below is the the instruction that describes the task:
### Input:
Return a list of the configured DNS servers of the specified interface
CLI Example:
.. code-block:: bash
salt '*' win_dns_client.get_dns_servers 'Local Area Connection'
### Response:
def get_dns_servers(interface='Local Area Connection'):
'''
Return a list of the configured DNS servers of the specified interface
CLI Example:
.. code-block:: bash
salt '*' win_dns_client.get_dns_servers 'Local Area Connection'
'''
# remove any escape characters
interface = interface.split('\\')
interface = ''.join(interface)
with salt.utils.winapi.Com():
c = wmi.WMI()
for iface in c.Win32_NetworkAdapter(NetEnabled=True):
if interface == iface.NetConnectionID:
iface_config = c.Win32_NetworkAdapterConfiguration(Index=iface.Index).pop()
try:
return list(iface_config.DNSServerSearchOrder)
except TypeError:
return []
log.debug('Interface "%s" not found', interface)
return False |
def filter_host_by_tag(tpl):
"""Filter for host
Filter on tag
:param tpl: tag to filter
:type tpl: str
:return: Filter
:rtype: bool
"""
def inner_filter(items):
"""Inner filter for host. Accept if tag in host.tags"""
host = items["host"]
if host is None:
return False
return tpl in [t.strip() for t in host.tags]
return inner_filter | Filter for host
Filter on tag
:param tpl: tag to filter
:type tpl: str
:return: Filter
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Filter for host
Filter on tag
:param tpl: tag to filter
:type tpl: str
:return: Filter
:rtype: bool
### Response:
def filter_host_by_tag(tpl):
"""Filter for host
Filter on tag
:param tpl: tag to filter
:type tpl: str
:return: Filter
:rtype: bool
"""
def inner_filter(items):
"""Inner filter for host. Accept if tag in host.tags"""
host = items["host"]
if host is None:
return False
return tpl in [t.strip() for t in host.tags]
return inner_filter |
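A short sketch of using the returned closure, assuming `host` is an object exposing a `tags` list as the filter expects:
accept_linux = filter_host_by_tag('linux')
accept_linux({'host': host})   # True only when 'linux' is among host.tags
accept_linux({'host': None})   # False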
def geo_length(arg, use_spheroid=None):
"""
Compute length of a geo spatial data
Parameters
----------
arg : geometry or geography
use_spheroid : default None
Returns
-------
length : double scalar
"""
op = ops.GeoLength(arg, use_spheroid)
return op.to_expr() | Compute length of a geo spatial data
Parameters
----------
arg : geometry or geography
use_spheroid : default None
Returns
-------
length : double scalar | Below is the the instruction that describes the task:
### Input:
Compute length of a geo spatial data
Parameters
----------
arg : geometry or geography
use_spheroid : default None
Returns
-------
length : double scalar
### Response:
def geo_length(arg, use_spheroid=None):
"""
Compute length of a geo spatial data
Parameters
----------
arg : geometry or geography
use_spheroid : default None
Returns
-------
length : double scalar
"""
op = ops.GeoLength(arg, use_spheroid)
return op.to_expr() |
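A hedged usage sketch in expression style; the `roads` table and its `geom` column are made up for illustration:
length_expr = geo_length(roads.geom, use_spheroid=True)
# builds an ops.GeoLength node and returns it wrapped as a double-valued expression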
def _wait_for_disk_threads(self, terminate):
# type: (Uploader, bool) -> None
"""Wait for disk threads
:param Uploader self: this
:param bool terminate: terminate threads
"""
if terminate:
self._upload_terminate = terminate
for thr in self._disk_threads:
thr.join() | Wait for disk threads
:param Uploader self: this
:param bool terminate: terminate threads | Below is the the instruction that describes the task:
### Input:
Wait for disk threads
:param Uploader self: this
:param bool terminate: terminate threads
### Response:
def _wait_for_disk_threads(self, terminate):
# type: (Uploader, bool) -> None
"""Wait for disk threads
:param Uploader self: this
:param bool terminate: terminate threads
"""
if terminate:
self._upload_terminate = terminate
for thr in self._disk_threads:
thr.join() |
def image_get(fingerprint,
remote_addr=None,
cert=None,
key=None,
verify_cert=True,
_raw=False):
''' Get an image by its fingerprint
fingerprint :
The fingerprint of the image to retrieve
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert, this is by default True
but in most cases you will want to turn it off, as LXD
normally uses self-signed certificates.
_raw : False
Return the raw pylxd object or a dict of it?
CLI Examples:
..code-block:: bash
$ salt '*' lxd.image_get <fingerprint>
'''
client = pylxd_client_get(remote_addr, cert, key, verify_cert)
image = None
try:
image = client.images.get(fingerprint)
except pylxd.exceptions.LXDAPIException:
raise SaltInvocationError(
'Image with fingerprint \'{0}\' not found'.format(fingerprint)
)
if _raw:
return image
return _pylxd_model_to_dict(image) | Get an image by its fingerprint
fingerprint :
The fingerprint of the image to retrieve
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
_raw : False
Return the raw pylxd object or a dict of it?
CLI Examples:
..code-block:: bash
$ salt '*' lxd.image_get <fingerprint> | Below is the the instruction that describes the task:
### Input:
Get an image by its fingerprint
fingerprint :
The fingerprint of the image to retrieve
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert, this is by default True
but in most cases you will want to turn it off, as LXD
normally uses self-signed certificates.
_raw : False
Return the raw pylxd object or a dict of it?
CLI Examples:
..code-block:: bash
$ salt '*' lxd.image_get <fingerprint>
### Response:
def image_get(fingerprint,
remote_addr=None,
cert=None,
key=None,
verify_cert=True,
_raw=False):
''' Get an image by its fingerprint
fingerprint :
The fingerprint of the image to retrieve
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert, this is by default True
but in most cases you will want to turn it off, as LXD
normally uses self-signed certificates.
_raw : False
Return the raw pylxd object or a dict of it?
CLI Examples:
..code-block:: bash
$ salt '*' lxd.image_get <fingerprint>
'''
client = pylxd_client_get(remote_addr, cert, key, verify_cert)
image = None
try:
image = client.images.get(fingerprint)
except pylxd.exceptions.LXDAPIException:
raise SaltInvocationError(
'Image with fingerprint \'{0}\' not found'.format(fingerprint)
)
if _raw:
return image
return _pylxd_model_to_dict(image) |
def email_action_view(self, id, action):
""" Perform action 'action' on UserEmail object 'id'
"""
# Retrieve UserEmail by id
user_email = self.db_manager.get_user_email_by_id(id=id)
# Users may only change their own UserEmails
if not user_email or user_email.user_id != current_user.id:
return self.unauthorized_view()
# Delete UserEmail
if action == 'delete':
# Primary UserEmail can not be deleted
if user_email.is_primary:
return self.unauthorized_view()
# Delete UserEmail
self.db_manager.delete_object(user_email)
self.db_manager.commit()
# Set UserEmail.is_primary
elif action == 'make-primary':
# Disable previously primary emails
user_emails = self.db_manager.find_user_emails(current_user)
for other_user_email in user_emails:
if other_user_email.is_primary:
other_user_email.is_primary=False
self.db_manager.save_object(other_user_email)
# Enable current primary email
user_email.is_primary=True
self.db_manager.save_object(user_email)
self.db_manager.commit()
# Send confirm email
elif action == 'confirm':
self._send_confirm_email_email(user_email.user, user_email)
else:
return self.unauthorized_view()
return redirect(url_for('user.manage_emails')) | Perform action 'action' on UserEmail object 'id' | Below is the the instruction that describes the task:
### Input:
Perform action 'action' on UserEmail object 'id'
### Response:
def email_action_view(self, id, action):
""" Perform action 'action' on UserEmail object 'id'
"""
# Retrieve UserEmail by id
user_email = self.db_manager.get_user_email_by_id(id=id)
# Users may only change their own UserEmails
if not user_email or user_email.user_id != current_user.id:
return self.unauthorized_view()
# Delete UserEmail
if action == 'delete':
# Primary UserEmail can not be deleted
if user_email.is_primary:
return self.unauthorized_view()
# Delete UserEmail
self.db_manager.delete_object(user_email)
self.db_manager.commit()
# Set UserEmail.is_primary
elif action == 'make-primary':
# Disable previously primary emails
user_emails = self.db_manager.find_user_emails(current_user)
for other_user_email in user_emails:
if other_user_email.is_primary:
other_user_email.is_primary=False
self.db_manager.save_object(other_user_email)
# Enable current primary email
user_email.is_primary=True
self.db_manager.save_object(user_email)
self.db_manager.commit()
# Send confirm email
elif action == 'confirm':
self._send_confirm_email_email(user_email.user, user_email)
else:
return self.unauthorized_view()
return redirect(url_for('user.manage_emails')) |
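As a sketch of how a view method like this could be wired into a Flask application; the URL rule, endpoint name, and the user_manager object are assumptions for illustration, not the library's actual registration code.
# Hypothetical route registration; `user_manager` stands in for whatever object
# exposes email_action_view(id, action).
from flask import Flask

def register_email_action_route(app: Flask, user_manager):
    app.add_url_rule(
        '/user/emails/<int:id>/<action>',    # e.g. /user/emails/7/make-primary
        endpoint='user.email_action',
        view_func=lambda id, action: user_manager.email_action_view(id, action),
        methods=['GET'],
    )
Registering it behind a login-required wrapper would match the current_user checks the method already performs.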
def refresh(self):
"""
Refresh this object's attributes to the newest values.
Attributes that weren't added to the object before, due to lazy
loading, will be added by calling refresh.
"""
resp = self._imgur._send_request(self._INFO_URL)
self._populate(resp)
self._has_fetched = True | Refresh this object's attributes to the newest values.
Attributes that weren't added to the object before, due to lazy
loading, will be added by calling refresh. | Below is the the instruction that describes the task:
### Input:
Refresh this object's attributes to the newest values.
Attributes that weren't added to the object before, due to lazy
loading, will be added by calling refresh.
### Response:
def refresh(self):
"""
Refresh this object's attributes to the newest values.
Attributes that weren't added to the object before, due to lazy
loading, will be added by calling refresh.
"""
resp = self._imgur._send_request(self._INFO_URL)
self._populate(resp)
self._has_fetched = True |
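A brief usage sketch assuming the pyimgur client library; the client ID and image ID below are placeholders.
# Usage sketch: attributes skipped by lazy loading become available after refresh().
import pyimgur

client = pyimgur.Imgur('YOUR_CLIENT_ID')    # placeholder client id
image = client.get_image('S1jmapR')         # placeholder image id
image.refresh()                             # re-fetches and populates all attributes
print(image.title, image.views)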
def cache_json(filename):
"""Caches the JSON-serializable output of the function to a given file
Args:
filename (str) The filename (sans directory) to store the output
Returns: decorator, applicable to a function that produces JSON-serializable output
"""
def cache_decorator(cacheable_function):
@wraps(cacheable_function)
def cache_wrapper(*args, **kwargs):
path = CACHE_DIRECTORY + filename
check_create_folder(path)
if os.path.exists(path):
with open(path) as infile:
return json.load(infile)
else:
function_output = cacheable_function(*args, **kwargs)
with open(path, 'w') as outfile:
json.dump(function_output, outfile)
return function_output
return cache_wrapper
return cache_decorator | Caches the JSON-serializable output of the function to a given file
Args:
filename (str) The filename (sans directory) to store the output
Returns: decorator, applicable to a function that produces JSON-serializable output | Below is the the instruction that describes the task:
### Input:
Caches the JSON-serializable output of the function to a given file
Args:
filename (str) The filename (sans directory) to store the output
Returns: decorator, applicable to a function that produces JSON-serializable output
### Response:
def cache_json(filename):
"""Caches the JSON-serializable output of the function to a given file
Args:
filename (str) The filename (sans directory) to store the output
Returns: decorator, applicable to a function that produces JSON-serializable output
"""
def cache_decorator(cacheable_function):
@wraps(cacheable_function)
def cache_wrapper(*args, **kwargs):
path = CACHE_DIRECTORY + filename
check_create_folder(path)
if os.path.exists(path):
with open(path) as infile:
return json.load(infile)
else:
function_output = cacheable_function(*args, **kwargs)
with open(path, 'w') as outfile:
json.dump(function_output, outfile)
return function_output
return cache_wrapper
return cache_decorator |
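A small usage sketch for the decorator, assuming it is importable from its defining module; the decorated function and filename are made up.
# Usage sketch: the first call computes the result and writes it under
# CACHE_DIRECTORY + 'expensive_report.json'; later calls read that file instead.
@cache_json('expensive_report.json')
def build_report(year):
    return {'year': year, 'total': 12345}   # stand-in for an expensive computation

report = build_report(2024)   # computed, then cached on disk
report = build_report(2024)   # served from the cached JSON file
Note that the cache key is the filename alone, so once the file exists, calls with different arguments return the same cached result.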