code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def _send_request(self):
"""Send a message containing the RPC method call
"""
msg = Message()
msg.subject = "An RPC call!"
msg.address = self._to
msg.reply_to = self._reply_to
msg.body = self._method
msg.correlation_id = 5 # whatever...
print("sending RPC call request: %s" % str(self._method))
# @todo send timeout self._sender.send(msg, self, None, time.time() +
# 10)
self._sender.send(msg, self) | Send a message containing the RPC method call | Below is the instruction that describes the task:
### Input:
Send a message containing the RPC method call
### Response:
def _send_request(self):
"""Send a message containing the RPC method call
"""
msg = Message()
msg.subject = "An RPC call!"
msg.address = self._to
msg.reply_to = self._reply_to
msg.body = self._method
msg.correlation_id = 5 # whatever...
print("sending RPC call request: %s" % str(self._method))
# @todo send timeout self._sender.send(msg, self, None, time.time() +
# 10)
self._sender.send(msg, self) |
def dense_convolutional_network(units: tf.Tensor,
n_hidden_list: List,
filter_width=3,
use_dilation=False,
use_batch_norm=False,
training_ph=None):
""" Densely connected convolutional layers. Based on the paper:
[Gao 17] https://arxiv.org/abs/1608.06993
Args:
units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]
n_hidden_list: list with number of hidden units at the output of each layer
filter_width: width of the kernel in tokens
use_batch_norm: whether to use batch normalization between layers
use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ...
training_ph: boolean placeholder determining whether it is the training phase now or not.
It is used only for batch normalization to determine whether to use
current batch average (std) or memory stored average (std)
Returns:
units: tensor at the output of the last convolutional layer
with dimensionality [None, n_tokens, n_hidden_list[-1]]
"""
units_list = [units]
for n_layer, n_filters in enumerate(n_hidden_list):
total_units = tf.concat(units_list, axis=-1)
if use_dilation:
dilation_rate = 2 ** n_layer
else:
dilation_rate = 1
units = tf.layers.conv1d(total_units,
n_filters,
filter_width,
dilation_rate=dilation_rate,
padding='same',
kernel_initializer=INITIALIZER())
if use_batch_norm:
units = tf.layers.batch_normalization(units, training=training_ph)
units = tf.nn.relu(units)
units_list.append(units)
return units | Densely connected convolutional layers. Based on the paper:
[Gao 17] https://arxiv.org/abs/1608.06993
Args:
units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]
n_hidden_list: list with number of hidden units at the output of each layer
filter_width: width of the kernel in tokens
use_batch_norm: whether to use batch normalization between layers
use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ...
training_ph: boolean placeholder determining whether it is the training phase now or not.
It is used only for batch normalization to determine whether to use
current batch average (std) or memory stored average (std)
Returns:
units: tensor at the output of the last convolutional layer
with dimensionality [None, n_tokens, n_hidden_list[-1]] | Below is the instruction that describes the task:
### Input:
Densely connected convolutional layers. Based on the paper:
[Gao 17] https://arxiv.org/abs/1608.06993
Args:
units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]
n_hidden_list: list with number of hidden units at the output of each layer
filter_width: width of the kernel in tokens
use_batch_norm: whether to use batch normalization between layers
use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ...
training_ph: boolean placeholder determining whether it is the training phase now or not.
It is used only for batch normalization to determine whether to use
current batch average (std) or memory stored average (std)
Returns:
units: tensor at the output of the last convolutional layer
with dimensionality [None, n_tokens, n_hidden_list[-1]]
### Response:
def dense_convolutional_network(units: tf.Tensor,
n_hidden_list: List,
filter_width=3,
use_dilation=False,
use_batch_norm=False,
training_ph=None):
""" Densely connected convolutional layers. Based on the paper:
[Gao 17] https://arxiv.org/abs/1608.06993
Args:
units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]
n_hidden_list: list with number of hidden units at the output of each layer
filter_width: width of the kernel in tokens
use_batch_norm: whether to use batch normalization between layers
use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ...
training_ph: boolean placeholder determining whether it is the training phase now or not.
It is used only for batch normalization to determine whether to use
current batch average (std) or memory stored average (std)
Returns:
units: tensor at the output of the last convolutional layer
with dimensionality [None, n_tokens, n_hidden_list[-1]]
"""
units_list = [units]
for n_layer, n_filters in enumerate(n_hidden_list):
total_units = tf.concat(units_list, axis=-1)
if use_dilation:
dilation_rate = 2 ** n_layer
else:
dilation_rate = 1
units = tf.layers.conv1d(total_units,
n_filters,
filter_width,
dilation_rate=dilation_rate,
padding='same',
kernel_initializer=INITIALIZER())
if use_batch_norm:
units = tf.layers.batch_normalization(units, training=training_ph)
units = tf.nn.relu(units)
units_list.append(units)
return units |
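A minimal usage sketch for the block above, assuming the TF 1.x `tf.layers`/`tf.placeholder` API it is written against and taking `INITIALIZER` to be any callable that returns a weight initializer (the snippet itself does not define it); shapes in the comments are illustrative only.

```python
import tensorflow as tf  # TF 1.x assumed, to match tf.layers / tf.placeholder above

INITIALIZER = tf.glorot_uniform_initializer  # assumption: any initializer factory works here

# [batch, n_tokens, n_features] token representations
units_ph = tf.placeholder(tf.float32, shape=[None, 50, 128])
training_ph = tf.placeholder(tf.bool, shape=[])

# Three densely connected conv layers with dilation rates 1, 2, 4
out = dense_convolutional_network(units_ph,
                                  n_hidden_list=[64, 64, 64],
                                  filter_width=3,
                                  use_dilation=True,
                                  use_batch_norm=True,
                                  training_ph=training_ph)
# out: [None, 50, 64], i.e. n_hidden_list[-1] output channels
```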
def calc_std_mod_reduc(mod_reduc):
"""Calculate the standard deviation as a function of G/G_max.
Equation 7.29 from Darendeli (2001).
Parameters
----------
mod_reduc : array_like
Modulus reduction values.
Returns
-------
std : :class:`numpy.ndarray`
Standard deviation.
"""
mod_reduc = np.asarray(mod_reduc).astype(float)
std = (np.exp(-4.23) + np.sqrt(0.25 / np.exp(3.62) - (mod_reduc - 0.5)
** 2 / np.exp(3.62)))
return std | Calculate the standard deviation as a function of G/G_max.
Equation 7.29 from Darendeli (2001).
Parameters
----------
mod_reduc : array_like
Modulus reduction values.
Returns
-------
std : :class:`numpy.ndarray`
Standard deviation. | Below is the instruction that describes the task:
### Input:
Calculate the standard deviation as a function of G/G_max.
Equation 7.29 from Darendeli (2001).
Parameters
----------
mod_reduc : array_like
Modulus reduction values.
Returns
-------
std : :class:`numpy.ndarray`
Standard deviation.
### Response:
def calc_std_mod_reduc(mod_reduc):
"""Calculate the standard deviation as a function of G/G_max.
Equation 7.29 from Darendeli (2001).
Parameters
----------
mod_reduc : array_like
Modulus reduction values.
Returns
-------
std : :class:`numpy.ndarray`
Standard deviation.
"""
mod_reduc = np.asarray(mod_reduc).astype(float)
std = (np.exp(-4.23) + np.sqrt(0.25 / np.exp(3.62) - (mod_reduc - 0.5)
** 2 / np.exp(3.62)))
return std |
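Written out, the expression implemented above (a direct transcription of the code, not of the original thesis notation) is:

$$\sigma = e^{-4.23} + \sqrt{\frac{0.25}{e^{3.62}} - \frac{\left(G/G_{\max} - 0.5\right)^{2}}{e^{3.62}}}$$

so the standard deviation peaks at $G/G_{\max} = 0.5$ and shrinks toward the ends of the modulus-reduction curve.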
def create(self, ignore=None):
"""Yield tuple with created index name and responses from a client."""
ignore = ignore or []
def _create(tree_or_filename, alias=None):
"""Create indices and aliases by walking DFS."""
# Iterate over aliases:
for name, value in tree_or_filename.items():
if isinstance(value, dict):
for result in _create(value, alias=name):
yield result
else:
with open(value, 'r') as body:
yield name, self.client.indices.create(
index=name,
body=json.load(body),
ignore=ignore,
)
if alias:
yield alias, self.client.indices.put_alias(
index=list(_get_indices(tree_or_filename)),
name=alias,
ignore=ignore,
)
for result in _create(self.active_aliases):
yield result | Yield tuple with created index name and responses from a client. | Below is the instruction that describes the task:
### Input:
Yield tuple with created index name and responses from a client.
### Response:
def create(self, ignore=None):
"""Yield tuple with created index name and responses from a client."""
ignore = ignore or []
def _create(tree_or_filename, alias=None):
"""Create indices and aliases by walking DFS."""
# Iterate over aliases:
for name, value in tree_or_filename.items():
if isinstance(value, dict):
for result in _create(value, alias=name):
yield result
else:
with open(value, 'r') as body:
yield name, self.client.indices.create(
index=name,
body=json.load(body),
ignore=ignore,
)
if alias:
yield alias, self.client.indices.put_alias(
index=list(_get_indices(tree_or_filename)),
name=alias,
ignore=ignore,
)
for result in _create(self.active_aliases):
yield result |
def _restore_clipboard_text(self, backup: str):
"""Restore the clipboard content."""
# Pasting takes some time, so wait a bit before restoring the content. Otherwise the restore is done before
# the pasting happens, causing the backup to be pasted instead of the desired clipboard content.
time.sleep(0.2)
self.clipboard.text = backup if backup is not None else "" | Restore the clipboard content. | Below is the instruction that describes the task:
### Input:
Restore the clipboard content.
### Response:
def _restore_clipboard_text(self, backup: str):
"""Restore the clipboard content."""
# Pasting takes some time, so wait a bit before restoring the content. Otherwise the restore is done before
# the pasting happens, causing the backup to be pasted instead of the desired clipboard content.
time.sleep(0.2)
self.clipboard.text = backup if backup is not None else "" |
def get_grade_entry_admin_session(self):
"""Gets the ``OsidSession`` associated with the grade entry administration service.
return: (osid.grading.GradeEntryAdminSession) - a
``GradeEntryAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_entry_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_admin()`` is ``true``.*
"""
if not self.supports_grade_entry_admin():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.GradeEntryAdminSession(runtime=self._runtime) | Gets the ``OsidSession`` associated with the grade entry administration service.
return: (osid.grading.GradeEntryAdminSession) - a
``GradeEntryAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_entry_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_admin()`` is ``true``.* | Below is the instruction that describes the task:
### Input:
Gets the ``OsidSession`` associated with the grade entry administration service.
return: (osid.grading.GradeEntryAdminSession) - a
``GradeEntryAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_entry_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_admin()`` is ``true``.*
### Response:
def get_grade_entry_admin_session(self):
"""Gets the ``OsidSession`` associated with the grade entry administration service.
return: (osid.grading.GradeEntryAdminSession) - a
``GradeEntryAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_entry_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_admin()`` is ``true``.*
"""
if not self.supports_grade_entry_admin():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.GradeEntryAdminSession(runtime=self._runtime) |
def jdn_to_gdate(jdn):
"""
Convert from the Julian day to the Gregorian day.
Algorithm from 'Julian and Gregorian Day Numbers' by Peter Meyer.
Return: day, month, year
"""
# pylint: disable=invalid-name
# The algorithm is a verbatim copy from Peter Meyer's article
# No explanation in the article is given for the variables
# Hence the exceptions for pylint and for flake8 (E741)
l = jdn + 68569 # noqa: E741
n = (4 * l) // 146097
l = l - (146097 * n + 3) // 4 # noqa: E741
i = (4000 * (l + 1)) // 1461001 # that's 1,461,001
l = l - (1461 * i) // 4 + 31 # noqa: E741
j = (80 * l) // 2447
day = l - (2447 * j) // 80
l = j // 11 # noqa: E741
month = j + 2 - (12 * l)
year = 100 * (n - 49) + i + l # that's a lower-case L
return datetime.date(year, month, day) | Convert from the Julian day to the Gregorian day.
Algorithm from 'Julian and Gregorian Day Numbers' by Peter Meyer.
Return: day, month, year | Below is the instruction that describes the task:
### Input:
Convert from the Julian day to the Gregorian day.
Algorithm from 'Julian and Gregorian Day Numbers' by Peter Meyer.
Return: day, month, year
### Response:
def jdn_to_gdate(jdn):
"""
Convert from the Julian day to the Gregorian day.
Algorithm from 'Julian and Gregorian Day Numbers' by Peter Meyer.
Return: day, month, year
"""
# pylint: disable=invalid-name
# The algorithm is a verbatim copy from Peter Meyer's article
# No explanation in the article is given for the variables
# Hence the exceptions for pylint and for flake8 (E741)
l = jdn + 68569 # noqa: E741
n = (4 * l) // 146097
l = l - (146097 * n + 3) // 4 # noqa: E741
i = (4000 * (l + 1)) // 1461001 # that's 1,461,001
l = l - (1461 * i) // 4 + 31 # noqa: E741
j = (80 * l) // 2447
day = l - (2447 * j) // 80
l = j // 11 # noqa: E741
month = j + 2 - (12 * l)
year = 100 * (n - 49) + i + l # that's a lower-case L
return datetime.date(year, month, day) |
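As a quick sanity check on the integer arithmetic above (note the function actually returns a `datetime.date`, not a `(day, month, year)` tuple as the docstring suggests), Julian day number 2451545 maps to 1 January 2000:

```python
import datetime

# Hand-tracing jdn_to_gdate(2451545):
#   l = 2520114, n = 68, l -> 36465, i = 99, l -> 337,
#   j = 11, day = 1, l -> 1, month = 1, year = 2000
assert jdn_to_gdate(2451545) == datetime.date(2000, 1, 1)
```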
def merge(args):
"""
%prog merge protein-quartets registry LOST
Merge protein quartets table with dna quartets registry. This is specific
to the napus project.
"""
from jcvi.formats.base import DictFile
p = OptionParser(merge.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
quartets, registry, lost = args
qq = DictFile(registry, keypos=1, valuepos=3)
lost = DictFile(lost, keypos=1, valuepos=0, delimiter='|')
qq.update(lost)
fp = open(quartets)
cases = {
"AN,CN": 4,
"BO,AN,CN": 8,
"BO,CN": 2,
"BR,AN": 1,
"BR,AN,CN": 6,
"BR,BO": 3,
"BR,BO,AN": 5,
"BR,BO,AN,CN": 9,
"BR,BO,CN": 7,
}
ip = {
"syntenic_model": "Syntenic_model_excluded_by_OMG",
"complete": "Predictable",
"partial": "Truncated",
"pseudogene": "Pseudogene",
"random": "Match_random",
"real_ns": "Transposed",
"gmap_fail": "GMAP_fail",
"AN LOST": "AN_LOST",
"CN LOST": "CN_LOST",
"BR LOST": "BR_LOST",
"BO LOST": "BO_LOST",
"outside": "Outside_synteny_blocks",
"[NF]": "Not_found",
}
for row in fp:
atoms = row.strip().split("\t")
genes = atoms[:4]
tag = atoms[4]
a, b, c, d = [qq.get(x, ".").rsplit("-", 1)[-1] for x in genes]
qqs = [c, d, a, b]
for i, q in enumerate(qqs):
if atoms[i] != '.':
qqs[i] = "syntenic_model"
# Make comment
comment = "Case{0}".format(cases[tag])
dots = sum([1 for x in genes if x == '.'])
if dots == 1:
idx = genes.index(".")
status = qqs[idx]
status = ip[status]
comment += "-" + status
print(row.strip() + "\t" + "\t".join(qqs + [comment])) | %prog merge protein-quartets registry LOST
Merge protein quartets table with dna quartets registry. This is specific
to the napus project. | Below is the instruction that describes the task:
### Input:
%prog merge protein-quartets registry LOST
Merge protein quartets table with dna quartets registry. This is specific
to the napus project.
### Response:
def merge(args):
"""
%prog merge protein-quartets registry LOST
Merge protein quartets table with dna quartets registry. This is specific
to the napus project.
"""
from jcvi.formats.base import DictFile
p = OptionParser(merge.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
quartets, registry, lost = args
qq = DictFile(registry, keypos=1, valuepos=3)
lost = DictFile(lost, keypos=1, valuepos=0, delimiter='|')
qq.update(lost)
fp = open(quartets)
cases = {
"AN,CN": 4,
"BO,AN,CN": 8,
"BO,CN": 2,
"BR,AN": 1,
"BR,AN,CN": 6,
"BR,BO": 3,
"BR,BO,AN": 5,
"BR,BO,AN,CN": 9,
"BR,BO,CN": 7,
}
ip = {
"syntenic_model": "Syntenic_model_excluded_by_OMG",
"complete": "Predictable",
"partial": "Truncated",
"pseudogene": "Pseudogene",
"random": "Match_random",
"real_ns": "Transposed",
"gmap_fail": "GMAP_fail",
"AN LOST": "AN_LOST",
"CN LOST": "CN_LOST",
"BR LOST": "BR_LOST",
"BO LOST": "BO_LOST",
"outside": "Outside_synteny_blocks",
"[NF]": "Not_found",
}
for row in fp:
atoms = row.strip().split("\t")
genes = atoms[:4]
tag = atoms[4]
a, b, c, d = [qq.get(x, ".").rsplit("-", 1)[-1] for x in genes]
qqs = [c, d, a, b]
for i, q in enumerate(qqs):
if atoms[i] != '.':
qqs[i] = "syntenic_model"
# Make comment
comment = "Case{0}".format(cases[tag])
dots = sum([1 for x in genes if x == '.'])
if dots == 1:
idx = genes.index(".")
status = qqs[idx]
status = ip[status]
comment += "-" + status
print(row.strip() + "\t" + "\t".join(qqs + [comment])) |
def shell(self, term='xterm'):
""" Start an interactive shell session
This method invokes a shell on the remote SSH server and proxies
traffic to/from both peers.
You must connect to a SSH server using ssh_connect()
prior to starting the session.
"""
channel = self._ssh.invoke_shell(term)
self._bridge(channel)
channel.close() | Start an interactive shell session
This method invokes a shell on the remote SSH server and proxies
traffic to/from both peers.
You must connect to a SSH server using ssh_connect()
prior to starting the session. | Below is the instruction that describes the task:
### Input:
Start an interactive shell session
This method invokes a shell on the remote SSH server and proxies
traffic to/from both peers.
You must connect to a SSH server using ssh_connect()
prior to starting the session.
### Response:
def shell(self, term='xterm'):
""" Start an interactive shell session
This method invokes a shell on the remote SSH server and proxies
traffic to/from both peers.
You must connect to a SSH server using ssh_connect()
prior to starting the session.
"""
channel = self._ssh.invoke_shell(term)
self._bridge(channel)
channel.close() |
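The `self._ssh` object is not shown in the snippet; it is presumably a paramiko `SSHClient`, and `_bridge` presumably pumps bytes between the local terminal and the channel. A minimal standalone sketch of the same call pattern, with the bridging step reduced to a single read (host and credentials are placeholders):

```python
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())  # demo only; verify host keys in real use
client.connect('example.com', username='user', password='secret')  # hypothetical credentials

channel = client.invoke_shell('xterm')              # the call that shell() wraps
channel.send(b'echo hello\n')
print(channel.recv(4096).decode(errors='replace'))  # a real bridge loops over both directions
channel.close()
client.close()
```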
def lookup_controller(obj, remainder, request=None):
'''
Traverses the requested url path and returns the appropriate controller
object, including default routes.
Handles common errors gracefully.
'''
if request is None:
warnings.warn(
(
"The function signature for %s.lookup_controller is changing "
"in the next version of pecan.\nPlease update to: "
"`lookup_controller(self, obj, remainder, request)`." % (
__name__,
)
),
DeprecationWarning
)
notfound_handlers = []
while True:
try:
obj, remainder = find_object(obj, remainder, notfound_handlers,
request)
handle_security(obj)
return obj, remainder
except (exc.HTTPNotFound, exc.HTTPMethodNotAllowed,
PecanNotFound) as e:
if isinstance(e, PecanNotFound):
e = exc.HTTPNotFound()
while notfound_handlers:
name, obj, remainder = notfound_handlers.pop()
if name == '_default':
# Notfound handler is, in fact, a controller, so stop
# traversal
return obj, remainder
else:
# Notfound handler is an internal redirect, so continue
# traversal
result = handle_lookup_traversal(obj, remainder)
if result:
# If no arguments are passed to the _lookup, yet the
# argspec requires at least one, raise a 404
if (
remainder == [''] and
len(obj._pecan['argspec'].args) > 1
):
raise e
obj_, remainder_ = result
return lookup_controller(obj_, remainder_, request)
else:
raise e | Traverses the requested url path and returns the appropriate controller
object, including default routes.
Handles common errors gracefully. | Below is the instruction that describes the task:
### Input:
Traverses the requested url path and returns the appropriate controller
object, including default routes.
Handles common errors gracefully.
### Response:
def lookup_controller(obj, remainder, request=None):
'''
Traverses the requested url path and returns the appropriate controller
object, including default routes.
Handles common errors gracefully.
'''
if request is None:
warnings.warn(
(
"The function signature for %s.lookup_controller is changing "
"in the next version of pecan.\nPlease update to: "
"`lookup_controller(self, obj, remainder, request)`." % (
__name__,
)
),
DeprecationWarning
)
notfound_handlers = []
while True:
try:
obj, remainder = find_object(obj, remainder, notfound_handlers,
request)
handle_security(obj)
return obj, remainder
except (exc.HTTPNotFound, exc.HTTPMethodNotAllowed,
PecanNotFound) as e:
if isinstance(e, PecanNotFound):
e = exc.HTTPNotFound()
while notfound_handlers:
name, obj, remainder = notfound_handlers.pop()
if name == '_default':
# Notfound handler is, in fact, a controller, so stop
# traversal
return obj, remainder
else:
# Notfound handler is an internal redirect, so continue
# traversal
result = handle_lookup_traversal(obj, remainder)
if result:
# If no arguments are passed to the _lookup, yet the
# argspec requires at least one, raise a 404
if (
remainder == [''] and
len(obj._pecan['argspec'].args) > 1
):
raise e
obj_, remainder_ = result
return lookup_controller(obj_, remainder_, request)
else:
raise e |
def add_event(self, event):
"""
Add a new event and notify subscribers.
event -- the event that occurred
"""
self.events.append(event)
self.event_notify(event) | Add a new event and notify subscribers.
event -- the event that occurred | Below is the instruction that describes the task:
### Input:
Add a new event and notify subscribers.
event -- the event that occurred
### Response:
def add_event(self, event):
"""
Add a new event and notify subscribers.
event -- the event that occurred
"""
self.events.append(event)
self.event_notify(event) |
def __xinclude_lxml(target, source, env):
"""
Resolving XIncludes, using the lxml module.
"""
from lxml import etree
doc = etree.parse(str(source[0]))
doc.xinclude()
try:
doc.write(str(target[0]), xml_declaration=True,
encoding="UTF-8", pretty_print=True)
except:
pass
return None | Resolving XIncludes, using the lxml module. | Below is the instruction that describes the task:
### Input:
Resolving XIncludes, using the lxml module.
### Response:
def __xinclude_lxml(target, source, env):
"""
Resolving XIncludes, using the lxml module.
"""
from lxml import etree
doc = etree.parse(str(source[0]))
doc.xinclude()
try:
doc.write(str(target[0]), xml_declaration=True,
encoding="UTF-8", pretty_print=True)
except:
pass
return None |
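A self-contained illustration of the same lxml XInclude step, using two throwaway files written on the spot (the bare `except` in the original silently swallows write errors, which is worth keeping in mind when reusing it):

```python
from lxml import etree

with open('part.xml', 'w') as f:
    f.write('<note>hello from the included file</note>')

with open('main.xml', 'w') as f:
    f.write('<doc xmlns:xi="http://www.w3.org/2001/XInclude">'
            '<xi:include href="part.xml"/></doc>')

doc = etree.parse('main.xml')
doc.xinclude()  # resolves the xi:include element in place
doc.write('resolved.xml', xml_declaration=True, encoding='UTF-8', pretty_print=True)
print(etree.tostring(doc, pretty_print=True).decode())
```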
def _get_adj_list_directional(self, umis, counts):
''' identify all umis within the hamming distance threshold
and where the counts of the first umi is > (2 * second umi counts)-1'''
adj_list = {umi: [] for umi in umis}
if self.fuzzy_match:
for umi1 in umis:
# we need a second regex for some insertions,
# e.g UMI1 = "ATCG", UMI2 = "ATTC"
comp_regex_err = regex.compile("(%s){e<=1}" % str(umi1))
comp_regex_del = regex.compile("(%s){i<=1}" % str(umi1)[::-1])
for umi2 in umis:
if umi1 == umi2:
continue
if counts[umi1] >= (counts[umi2]*self.dir_threshold):
if (max(len(umi1), len(umi2)) -
min(len(umi1), len(umi2))) > 1:
continue
if (comp_regex_err.match(str(umi2)) or
comp_regex_del.match(str(umi2))):
adj_list[umi1].append(umi2)
else:
for umi1, umi2 in itertools.combinations(umis, 2):
if edit_distance(umi1, umi2) <= 1:
if counts[umi1] >= (counts[umi2]*2)-1:
adj_list[umi1].append(umi2)
if counts[umi2] >= (counts[umi1]*2)-1:
adj_list[umi2].append(umi1)
return adj_list | identify all umis within the hamming distance threshold
and where the counts of the first umi is > (2 * second umi counts)-1 | Below is the instruction that describes the task:
### Input:
identify all umis within the hamming distance threshold
and where the counts of the first umi is > (2 * second umi counts)-1
### Response:
def _get_adj_list_directional(self, umis, counts):
''' identify all umis within the hamming distance threshold
and where the counts of the first umi is > (2 * second umi counts)-1'''
adj_list = {umi: [] for umi in umis}
if self.fuzzy_match:
for umi1 in umis:
# we need a second regex for some insertions,
# e.g UMI1 = "ATCG", UMI2 = "ATTC"
comp_regex_err = regex.compile("(%s){e<=1}" % str(umi1))
comp_regex_del = regex.compile("(%s){i<=1}" % str(umi1)[::-1])
for umi2 in umis:
if umi1 == umi2:
continue
if counts[umi1] >= (counts[umi2]*self.dir_threshold):
if (max(len(umi1), len(umi2)) -
min(len(umi1), len(umi2))) > 1:
continue
if (comp_regex_err.match(str(umi2)) or
comp_regex_del.match(str(umi2))):
adj_list[umi1].append(umi2)
else:
for umi1, umi2 in itertools.combinations(umis, 2):
if edit_distance(umi1, umi2) <= 1:
if counts[umi1] >= (counts[umi2]*2)-1:
adj_list[umi1].append(umi2)
if counts[umi2] >= (counts[umi1]*2)-1:
adj_list[umi2].append(umi1)
return adj_list |
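To make the directional rule concrete: in the non-fuzzy branch, an edge runs from one UMI to a neighbour at edit distance <= 1 only if its count is at least twice the neighbour's count minus one. A tiny standalone sketch (plain functions instead of the class method, with a naive Hamming helper standing in for the real `edit_distance`):

```python
import itertools

def edit_distance(a, b):
    # naive Hamming distance for equal-length UMIs; a stand-in for the real helper
    return sum(x != y for x, y in zip(a, b))

def directional_adj_list(umis, counts):
    adj_list = {umi: [] for umi in umis}
    for umi1, umi2 in itertools.combinations(umis, 2):
        if edit_distance(umi1, umi2) <= 1:
            if counts[umi1] >= (counts[umi2] * 2) - 1:
                adj_list[umi1].append(umi2)
            if counts[umi2] >= (counts[umi1] * 2) - 1:
                adj_list[umi2].append(umi1)
    return adj_list

counts = {"ATCG": 10, "ATCA": 4, "ATCC": 2}
print(directional_adj_list(list(counts), counts))
# {'ATCG': ['ATCA', 'ATCC'], 'ATCA': ['ATCC'], 'ATCC': []}
```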
def __generate_location(self):
"""
Reset the location of the cloud once it has left the viewable area of the screen.
"""
screen_width = world.get_backbuffer_size().X
self.movement_speed = random.randrange(10, 25)
# This line of code places the cloud to the right of the viewable screen, so it appears to
# gradually move in from the right instead of randomly appearing on some portion of the viewable
# window.
self.coords = R.Vector2(screen_width + self.image.get_width(), random.randrange(0, 100)) | Reset the location of the cloud once it has left the viewable area of the screen. | Below is the instruction that describes the task:
### Input:
Reset the location of the cloud once it has left the viewable area of the screen.
### Response:
def __generate_location(self):
"""
Reset the location of the cloud once it has left the viewable area of the screen.
"""
screen_width = world.get_backbuffer_size().X
self.movement_speed = random.randrange(10, 25)
# This line of code places the cloud to the right of the viewable screen, so it appears to
# gradually move in from the right instead of randomly appearing on some portion of the viewable
# window.
self.coords = R.Vector2(screen_width + self.image.get_width(), random.randrange(0, 100)) |
def write_rtt(jlink):
"""Writes kayboard input to JLink RTT buffer #0.
This method is a loop that blocks waiting on stdin. When enter is pressed,
LF and NUL bytes are added to the input and transmitted as a byte list.
If the JLink is disconnected, it will exit gracefully. If any other
exceptions are raised, they will be caught and re-raised after interrupting
the main thread.
Args:
jlink (pylink.JLink): The JLink to write to.
Raises:
Exception on error.
"""
try:
while jlink.connected():
bytes = list(bytearray(input(), "utf-8") + b"\x0A\x00")
bytes_written = jlink.rtt_write(0, bytes)
except Exception:
print("IO write thread exception, exiting...")
thread.interrupt_main()
raise | Writes keyboard input to JLink RTT buffer #0.
This method is a loop that blocks waiting on stdin. When enter is pressed,
LF and NUL bytes are added to the input and transmitted as a byte list.
If the JLink is disconnected, it will exit gracefully. If any other
exceptions are raised, they will be caught and re-raised after interrupting
the main thread.
Args:
jlink (pylink.JLink): The JLink to write to.
Raises:
Exception on error. | Below is the instruction that describes the task:
### Input:
Writes keyboard input to JLink RTT buffer #0.
This method is a loop that blocks waiting on stdin. When enter is pressed,
LF and NUL bytes are added to the input and transmitted as a byte list.
If the JLink is disconnected, it will exit gracefully. If any other
exceptions are raised, they will be caught and re-raised after interrupting
the main thread.
Args:
jlink (pylink.JLink): The JLink to write to.
Raises:
Exception on error.
### Response:
def write_rtt(jlink):
"""Writes kayboard input to JLink RTT buffer #0.
This method is a loop that blocks waiting on stdin. When enter is pressed,
LF and NUL bytes are added to the input and transmitted as a byte list.
If the JLink is disconnected, it will exit gracefully. If any other
exceptions are raised, they will be caught and re-raised after interrupting
the main thread.
Args:
jlink (pylink.JLink): The JLink to write to.
Raises:
Exception on error.
"""
try:
while jlink.connected():
bytes = list(bytearray(input(), "utf-8") + b"\x0A\x00")
bytes_written = jlink.rtt_write(0, bytes)
except Exception:
print("IO write thread exception, exiting...")
thread.interrupt_main()
raise |
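A hedged sketch of how `write_rtt` might be wired up with pylink, paired with a matching reader thread; the device name is a placeholder and the RTT calls (`rtt_start`, `rtt_read`, `rtt_write`) are assumed to be available as in recent pylink releases.

```python
import threading
import pylink

try:
    import thread             # Python 2 name used by the snippet above
except ImportError:
    import _thread as thread  # Python 3

def read_rtt(jlink):
    """Mirror of write_rtt: print whatever the target pushes to RTT buffer #0."""
    try:
        while jlink.connected():
            data = jlink.rtt_read(0, 1024)
            if data:
                print("".join(map(chr, data)), end="")
    except Exception:
        print("IO read thread exception, exiting...")
        thread.interrupt_main()
        raise

jlink = pylink.JLink()
jlink.open()
jlink.connect("STM32F407VE")  # hypothetical target device name
jlink.rtt_start()

threading.Thread(target=read_rtt, args=(jlink,), daemon=True).start()
write_rtt(jlink)              # blocks on stdin, as documented above
```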
def __is_valid_value_for_arg(self, arg, value, check_extension=True):
"""Check if value is allowed for arg
Some commands only allow a limited set of values. The method
always returns True for methods that do not provide such a
set.
:param arg: the argument's name
:param value: the value to check
:param check_extension: check if value requires an extension
:return: True on success, False otherwise
"""
if "values" not in arg and "extension_values" not in arg:
return True
if "values" in arg and value.lower() in arg["values"]:
return True
if "extension_values" in arg:
extension = arg["extension_values"].get(value.lower())
if extension:
condition = (
check_extension and
extension not in RequireCommand.loaded_extensions
)
if condition:
raise ExtensionNotLoaded(extension)
return True
return False | Check if value is allowed for arg
Some commands only allow a limited set of values. The method
always returns True for methods that do not provide such a
set.
:param arg: the argument's name
:param value: the value to check
:param check_extension: check if value requires an extension
:return: True on success, False otherwise | Below is the instruction that describes the task:
### Input:
Check if value is allowed for arg
Some commands only allow a limited set of values. The method
always returns True for methods that do not provide such a
set.
:param arg: the argument's name
:param value: the value to check
:param check_extension: check if value requires an extension
:return: True on success, False otherwise
### Response:
def __is_valid_value_for_arg(self, arg, value, check_extension=True):
"""Check if value is allowed for arg
Some commands only allow a limited set of values. The method
always returns True for methods that do not provide such a
set.
:param arg: the argument's name
:param value: the value to check
:param check_extension: check if value requires an extension
:return: True on success, False otherwise
"""
if "values" not in arg and "extension_values" not in arg:
return True
if "values" in arg and value.lower() in arg["values"]:
return True
if "extension_values" in arg:
extension = arg["extension_values"].get(value.lower())
if extension:
condition = (
check_extension and
extension not in RequireCommand.loaded_extensions
)
if condition:
raise ExtensionNotLoaded(extension)
return True
return False |
def calc_point_distance_vary(self, chi_coords, point_fupper, mus):
"""
Calculate distance between point and the bank allowing the metric to
vary based on varying upper frequency cutoff. Slower than
calc_point_distance, but more reliable when upper frequency cutoff can
change a lot.
Parameters
-----------
chi_coords : numpy.array
The position of the point in the chi coordinates.
point_fupper : float
The upper frequency cutoff to use for this point. This value must
be one of the ones already calculated in the metric.
mus : numpy.array
A 2D array where idx 0 holds the upper frequency cutoff and idx 1
holds the coordinates in the [not covaried] mu parameter space for
each value of the upper frequency cutoff.
Returns
--------
min_dist : float
The smallest **SQUARED** metric distance between the test point and
the bank.
indexes : The chi1_bin, chi2_bin and position within that bin at which
the closest matching point lies.
"""
chi1_bin, chi2_bin = self.find_point_bin(chi_coords)
min_dist = 1000000000
indexes = None
for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order:
curr_chi1_bin = chi1_bin + chi1_bin_offset
curr_chi2_bin = chi2_bin + chi2_bin_offset
# No points = Next iteration
curr_bank = self.massbank[curr_chi1_bin][curr_chi2_bin]
if not curr_bank['mass1s'].size:
continue
# *NOT* the same as .min and .max
f_upper = numpy.minimum(point_fupper, curr_bank['freqcuts'])
f_other = numpy.maximum(point_fupper, curr_bank['freqcuts'])
# NOTE: freq_idxes is a vector!
freq_idxes = numpy.array([self.frequency_map[f] for f in f_upper])
# vecs1 gives a 2x2 vector: idx0 = stored index, idx1 = mu index
vecs1 = mus[freq_idxes, :]
# vecs2 gives a 2x2 vector: idx0 = stored index, idx1 = mu index
range_idxes = numpy.arange(len(freq_idxes))
vecs2 = curr_bank['mus'][range_idxes, freq_idxes, :]
# Now do the sums
dists = (vecs1 - vecs2)*(vecs1 - vecs2)
# This reduces to 1D: idx = stored index
dists = numpy.sum(dists, axis=1)
norm_upper = numpy.array([self.normalization_map[f] \
for f in f_upper])
norm_other = numpy.array([self.normalization_map[f] \
for f in f_other])
norm_fac = norm_upper / norm_other
renormed_dists = 1 - (1 - dists)*norm_fac
curr_min_dist = renormed_dists.min()
if curr_min_dist < min_dist:
min_dist = curr_min_dist
indexes = curr_chi1_bin, curr_chi2_bin, renormed_dists.argmin()
return min_dist, indexes | Calculate distance between point and the bank allowing the metric to
vary based on varying upper frequency cutoff. Slower than
calc_point_distance, but more reliable when upper frequency cutoff can
change a lot.
Parameters
-----------
chi_coords : numpy.array
The position of the point in the chi coordinates.
point_fupper : float
The upper frequency cutoff to use for this point. This value must
be one of the ones already calculated in the metric.
mus : numpy.array
A 2D array where idx 0 holds the upper frequency cutoff and idx 1
holds the coordinates in the [not covaried] mu parameter space for
each value of the upper frequency cutoff.
Returns
--------
min_dist : float
The smallest **SQUARED** metric distance between the test point and
the bank.
indexes : The chi1_bin, chi2_bin and position within that bin at which
the closest matching point lies. | Below is the instruction that describes the task:
### Input:
Calculate distance between point and the bank allowing the metric to
vary based on varying upper frequency cutoff. Slower than
calc_point_distance, but more reliable when upper frequency cutoff can
change a lot.
Parameters
-----------
chi_coords : numpy.array
The position of the point in the chi coordinates.
point_fupper : float
The upper frequency cutoff to use for this point. This value must
be one of the ones already calculated in the metric.
mus : numpy.array
A 2D array where idx 0 holds the upper frequency cutoff and idx 1
holds the coordinates in the [not covaried] mu parameter space for
each value of the upper frequency cutoff.
Returns
--------
min_dist : float
The smallest **SQUARED** metric distance between the test point and
the bank.
indexes : The chi1_bin, chi2_bin and position within that bin at which
the closest matching point lies.
### Response:
def calc_point_distance_vary(self, chi_coords, point_fupper, mus):
"""
Calculate distance between point and the bank allowing the metric to
vary based on varying upper frequency cutoff. Slower than
calc_point_distance, but more reliable when upper frequency cutoff can
change a lot.
Parameters
-----------
chi_coords : numpy.array
The position of the point in the chi coordinates.
point_fupper : float
The upper frequency cutoff to use for this point. This value must
be one of the ones already calculated in the metric.
mus : numpy.array
A 2D array where idx 0 holds the upper frequency cutoff and idx 1
holds the coordinates in the [not covaried] mu parameter space for
each value of the upper frequency cutoff.
Returns
--------
min_dist : float
The smallest **SQUARED** metric distance between the test point and
the bank.
indexes : The chi1_bin, chi2_bin and position within that bin at which
the closest matching point lies.
"""
chi1_bin, chi2_bin = self.find_point_bin(chi_coords)
min_dist = 1000000000
indexes = None
for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order:
curr_chi1_bin = chi1_bin + chi1_bin_offset
curr_chi2_bin = chi2_bin + chi2_bin_offset
# No points = Next iteration
curr_bank = self.massbank[curr_chi1_bin][curr_chi2_bin]
if not curr_bank['mass1s'].size:
continue
# *NOT* the same as .min and .max
f_upper = numpy.minimum(point_fupper, curr_bank['freqcuts'])
f_other = numpy.maximum(point_fupper, curr_bank['freqcuts'])
# NOTE: freq_idxes is a vector!
freq_idxes = numpy.array([self.frequency_map[f] for f in f_upper])
# vecs1 gives a 2x2 vector: idx0 = stored index, idx1 = mu index
vecs1 = mus[freq_idxes, :]
# vecs2 gives a 2x2 vector: idx0 = stored index, idx1 = mu index
range_idxes = numpy.arange(len(freq_idxes))
vecs2 = curr_bank['mus'][range_idxes, freq_idxes, :]
# Now do the sums
dists = (vecs1 - vecs2)*(vecs1 - vecs2)
# This reduces to 1D: idx = stored index
dists = numpy.sum(dists, axis=1)
norm_upper = numpy.array([self.normalization_map[f] \
for f in f_upper])
norm_other = numpy.array([self.normalization_map[f] \
for f in f_other])
norm_fac = norm_upper / norm_other
renormed_dists = 1 - (1 - dists)*norm_fac
curr_min_dist = renormed_dists.min()
if curr_min_dist < min_dist:
min_dist = curr_min_dist
indexes = curr_chi1_bin, curr_chi2_bin, renormed_dists.argmin()
return min_dist, indexes |
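The renormalisation near the end is the only non-obvious step; transcribed from the code, with $N(\cdot)$ the stored `normalization_map` value and $f_{\min}$, $f_{\max}$ the smaller and larger of the point's and the template's upper frequency cutoffs, it reads:

$$d_{\mathrm{renorm}} = 1 - \bigl(1 - d\bigr)\,\frac{N(f_{\min})}{N(f_{\max})}$$

i.e. the match $1 - d$ is rescaled by the ratio of the two normalisation constants before the minimum over the bank is taken.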
def initializable(self):
"""True if the Slot is initializable."""
return bool(lib.EnvSlotInitableP(self._env, self._cls, self._name)) | True if the Slot is initializable. | Below is the instruction that describes the task:
### Input:
True if the Slot is initializable.
### Response:
def initializable(self):
"""True if the Slot is initializable."""
return bool(lib.EnvSlotInitableP(self._env, self._cls, self._name)) |
def transform_op(self, op, value):
"""For comparisons, if the value is None (null), the '=' operator must be replaced with ' is '
and the '!=' operator must be replaced with ' is not '. This function handles that conversion.
It's up to the caller to call this function only on comparisons and not on assignments.
"""
if value is None:
if _EQ_RE.match(op):
return "is"
elif _NEQ_RE.match(op):
return "is not"
return op | For comparisons, if the value is None (null), the '=' operator must be replaced with ' is '
and the '!=' operator must be replaced with ' is not '. This function handles that conversion.
It's up to the caller to call this function only on comparisons and not on assignments. | Below is the instruction that describes the task:
### Input:
For comparisons, if the value is None (null), the '=' operator must be replaced with ' is '
and the '!=' operator must be replaced with ' is not '. This function handles that conversion.
It's up to the caller to call this function only on comparisons and not on assignments.
### Response:
def transform_op(self, op, value):
"""For comparisons, if the value is None (null), the '=' operator must be replaced with ' is '
and the '!=' operator must be replaced with ' is not '. This function handles that conversion.
It's up to the caller to call this function only on comparisons and not on assignments.
"""
if value is None:
if _EQ_RE.match(op):
return "is"
elif _NEQ_RE.match(op):
return "is not"
return op |
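`_EQ_RE` and `_NEQ_RE` are not shown here; a minimal standalone version with assumed patterns for the two operators behaves like this:

```python
import re

_EQ_RE = re.compile(r"^=$")    # assumption: the real pattern may also accept variants such as '=='
_NEQ_RE = re.compile(r"^!=$")  # assumption: the real pattern may also accept '<>'

def transform_op(op, value):
    if value is None:
        if _EQ_RE.match(op):
            return "is"
        elif _NEQ_RE.match(op):
            return "is not"
    return op

print(transform_op("=", None))   # 'is'      -> "col is NULL"
print(transform_op("!=", None))  # 'is not'  -> "col is not NULL"
print(transform_op("=", 42))     # '='       -> unchanged for non-null comparisons
```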
def get_unset_cache(self):
"""return : returns a tuple (num_of_not_None_caches, [list of unset caches endpoint])
"""
caches = []
if self._cached_api_global_response is None:
caches.append('global')
if self._cached_api_ticker_response is None:
caches.append('ticker')
return (len(caches), caches) | return : returns a tuple (num_of_not_None_caches, [list of unset caches endpoint]) | Below is the instruction that describes the task:
### Input:
return : returns a tuple (num_of_not_None_caches, [list of unset caches endpoint])
### Response:
def get_unset_cache(self):
"""return : returns a tuple (num_of_not_None_caches, [list of unset caches endpoint])
"""
caches = []
if self._cached_api_global_response is None:
caches.append('global')
if self._cached_api_ticker_response is None:
caches.append('ticker')
return (len(caches), caches) |
def remove_out_of_image(self, fully=True, partly=False):
"""
Remove all polygons that are fully or partially outside of the image.
Parameters
----------
fully : bool, optional
Whether to remove polygons that are fully outside of the image.
partly : bool, optional
Whether to remove polygons that are partially outside of the image.
Returns
-------
imgaug.PolygonsOnImage
Reduced set of polygons, with those that were fully/partially
outside of the image removed.
"""
polys_clean = [
poly for poly in self.polygons
if not poly.is_out_of_image(self.shape, fully=fully, partly=partly)
]
# TODO use deepcopy() here
return PolygonsOnImage(polys_clean, shape=self.shape) | Remove all polygons that are fully or partially outside of the image.
Parameters
----------
fully : bool, optional
Whether to remove polygons that are fully outside of the image.
partly : bool, optional
Whether to remove polygons that are partially outside of the image.
Returns
-------
imgaug.PolygonsOnImage
Reduced set of polygons, with those that were fully/partially
outside of the image removed. | Below is the instruction that describes the task:
### Input:
Remove all polygons that are fully or partially outside of the image.
Parameters
----------
fully : bool, optional
Whether to remove polygons that are fully outside of the image.
partly : bool, optional
Whether to remove polygons that are partially outside of the image.
Returns
-------
imgaug.PolygonsOnImage
Reduced set of polygons, with those that were fully/partially
outside of the image removed.
### Response:
def remove_out_of_image(self, fully=True, partly=False):
"""
Remove all polygons that are fully or partially outside of the image.
Parameters
----------
fully : bool, optional
Whether to remove polygons that are fully outside of the image.
partly : bool, optional
Whether to remove polygons that are partially outside of the image.
Returns
-------
imgaug.PolygonsOnImage
Reduced set of polygons, with those that were fully/partially
outside of the image removed.
"""
polys_clean = [
poly for poly in self.polygons
if not poly.is_out_of_image(self.shape, fully=fully, partly=partly)
]
# TODO use deepcopy() here
return PolygonsOnImage(polys_clean, shape=self.shape) |
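A small usage sketch against the public imgaug API (import path and keyword names as in recent imgaug releases; treat them as assumptions if you are pinned to an older version):

```python
import numpy as np
from imgaug.augmentables.polys import Polygon, PolygonsOnImage

image = np.zeros((100, 100, 3), dtype=np.uint8)
psoi = PolygonsOnImage(
    [
        Polygon([(10, 10), (40, 10), (40, 40)]),        # fully inside
        Polygon([(90, 90), (150, 90), (150, 150)]),     # partially outside
        Polygon([(200, 200), (250, 200), (250, 250)]),  # fully outside
    ],
    shape=image.shape,
)

kept = psoi.remove_out_of_image(fully=True, partly=False)
print(len(kept.polygons))  # 2 -- only the fully out-of-image polygon is dropped
```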
def reduce(coro, iterable, initializer=None, limit=1, right=False, loop=None):
"""
Apply function of two arguments cumulatively to the items of sequence,
from left to right, so as to reduce the sequence to a single value.
Reduction will be executed sequentially without concurrency,
so passed values would be in order.
This function is the asynchronous coroutine equivalent to Python standard
`functools.reduce()` function.
This function is a coroutine.
This function can be composed in a pipeline chain with ``|`` operator.
Arguments:
coro (coroutine function): reducer coroutine binary function.
iterable (iterable|asynchronousiterable): an iterable collection
yielding coroutines functions.
initializer (mixed): initial accumulator value used in
the first reduction call.
limit (int): max iteration concurrency limit. Use ``0`` for no limit.
right (bool): reduce iterable from right to left.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if input arguments are not valid.
Returns:
mixed: accumulated final reduced value.
Usage::
async def reducer(acc, num):
return acc + num
await paco.reduce(reducer, [1, 2, 3, 4, 5], initializer=0)
# => 15
"""
assert_corofunction(coro=coro)
assert_iter(iterable=iterable)
# Reduced accumulator value
acc = initializer
# If iterable is empty, just return the initializer value
if len(iterable) == 0:
return initializer
# Create concurrent executor
pool = ConcurrentExecutor(limit=limit, loop=loop)
# Reducer partial function for deferred coroutine execution
def reducer(element):
@asyncio.coroutine
def wrapper():
nonlocal acc
acc = yield from coro(acc, element)
return wrapper
# Support right reduction
if right:
iterable.reverse()
# Iterate and attach coroutine for defer scheduling
for element in iterable:
pool.add(reducer(element))
# Wait until all coroutines finish
yield from pool.run(ignore_empty=True)
# Returns final reduced value
return acc | Apply function of two arguments cumulatively to the items of sequence,
from left to right, so as to reduce the sequence to a single value.
Reduction will be executed sequentially without concurrency,
so passed values would be in order.
This function is the asynchronous coroutine equivalent to Python standard
`functools.reduce()` function.
This function is a coroutine.
This function can be composed in a pipeline chain with ``|`` operator.
Arguments:
coro (coroutine function): reducer coroutine binary function.
iterable (iterable|asynchronousiterable): an iterable collection
yielding coroutines functions.
initializer (mixed): initial accumulator value used in
the first reduction call.
limit (int): max iteration concurrency limit. Use ``0`` for no limit.
right (bool): reduce iterable from right to left.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if input arguments are not valid.
Returns:
mixed: accumulated final reduced value.
Usage::
async def reducer(acc, num):
return acc + num
await paco.reduce(reducer, [1, 2, 3, 4, 5], initializer=0)
# => 15 | Below is the instruction that describes the task:
### Input:
Apply function of two arguments cumulatively to the items of sequence,
from left to right, so as to reduce the sequence to a single value.
Reduction will be executed sequentially without concurrency,
so passed values would be in order.
This function is the asynchronous coroutine equivalent to Python standard
`functools.reduce()` function.
This function is a coroutine.
This function can be composed in a pipeline chain with ``|`` operator.
Arguments:
coro (coroutine function): reducer coroutine binary function.
iterable (iterable|asynchronousiterable): an iterable collection
yielding coroutines functions.
initializer (mixed): initial accumulator value used in
the first reduction call.
limit (int): max iteration concurrency limit. Use ``0`` for no limit.
right (bool): reduce iterable from right to left.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if input arguments are not valid.
Returns:
mixed: accumulated final reduced value.
Usage::
async def reducer(acc, num):
return acc + num
await paco.reduce(reducer, [1, 2, 3, 4, 5], initializer=0)
# => 15
### Response:
def reduce(coro, iterable, initializer=None, limit=1, right=False, loop=None):
"""
Apply function of two arguments cumulatively to the items of sequence,
from left to right, so as to reduce the sequence to a single value.
Reduction will be executed sequentially without concurrency,
so passed values would be in order.
This function is the asynchronous coroutine equivalent to Python standard
`functools.reduce()` function.
This function is a coroutine.
This function can be composed in a pipeline chain with ``|`` operator.
Arguments:
coro (coroutine function): reducer coroutine binary function.
iterable (iterable|asynchronousiterable): an iterable collection
yielding coroutines functions.
initializer (mixed): initial accumulator value used in
the first reduction call.
limit (int): max iteration concurrency limit. Use ``0`` for no limit.
right (bool): reduce iterable from right to left.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if input arguments are not valid.
Returns:
mixed: accumulated final reduced value.
Usage::
async def reducer(acc, num):
return acc + num
await paco.reduce(reducer, [1, 2, 3, 4, 5], initializer=0)
# => 15
"""
assert_corofunction(coro=coro)
assert_iter(iterable=iterable)
# Reduced accumulator value
acc = initializer
# If iterable is empty, just return the initializer value
if len(iterable) == 0:
return initializer
# Create concurrent executor
pool = ConcurrentExecutor(limit=limit, loop=loop)
# Reducer partial function for deferred coroutine execution
def reducer(element):
@asyncio.coroutine
def wrapper():
nonlocal acc
acc = yield from coro(acc, element)
return wrapper
# Support right reduction
if right:
iterable.reverse()
# Iterate and attach coroutine for defer scheduling
for element in iterable:
pool.add(reducer(element))
# Wait until all coroutines finish
yield from pool.run(ignore_empty=True)
# Returns final reduced value
return acc |
def add_cli_summarize(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``summarize`` command to main :mod:`click` function."""
@main.command()
@click.pass_obj
def summarize(manager: AbstractManager):
"""Summarize the contents of the database."""
if not manager.is_populated():
click.secho(f'{manager.module_name} has not been populated', fg='red')
sys.exit(1)
for name, count in sorted(manager.summarize().items()):
click.echo(f'{name.capitalize()}: {count}')
return main | Add a ``summarize`` command to main :mod:`click` function. | Below is the instruction that describes the task:
### Input:
Add a ``summarize`` command to main :mod:`click` function.
### Response:
def add_cli_summarize(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``summarize`` command to main :mod:`click` function."""
@main.command()
@click.pass_obj
def summarize(manager: AbstractManager):
"""Summarize the contents of the database."""
if not manager.is_populated():
click.secho(f'{manager.module_name} has not been populated', fg='red')
sys.exit(1)
for name, count in sorted(manager.summarize().items()):
click.echo(f'{name.capitalize()}: {count}')
return main |
def get_elements(self, tag_name, attribute, with_namespace=True):
"""
Deprecated: use `get_all_attribute_value()` instead
Return elements in xml files which match with the tag name and the specific attribute
:param tag_name: a string which specify the tag name
:param attribute: a string which specify the attribute
"""
for i in self.xml:
if self.xml[i] is None:
continue
for item in self.xml[i].findall('.//' + tag_name):
if with_namespace:
value = item.get(self._ns(attribute))
else:
value = item.get(attribute)
# There might be an attribute without the namespace
if value:
yield self._format_value(value) | Deprecated: use `get_all_attribute_value()` instead
Return elements in xml files which match with the tag name and the specific attribute
:param tag_name: a string which specify the tag name
:param attribute: a string which specify the attribute | Below is the instruction that describes the task:
### Input:
Deprecated: use `get_all_attribute_value()` instead
Return elements in xml files which match with the tag name and the specific attribute
:param tag_name: a string which specify the tag name
:param attribute: a string which specify the attribute
### Response:
def get_elements(self, tag_name, attribute, with_namespace=True):
"""
Deprecated: use `get_all_attribute_value()` instead
Return elements in xml files which match with the tag name and the specific attribute
:param tag_name: a string which specify the tag name
:param attribute: a string which specify the attribute
"""
for i in self.xml:
if self.xml[i] is None:
continue
for item in self.xml[i].findall('.//' + tag_name):
if with_namespace:
value = item.get(self._ns(attribute))
else:
value = item.get(attribute)
# There might be an attribute without the namespace
if value:
yield self._format_value(value) |
def ModuleHelp(self, module):
"""Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module.
"""
helplist = []
self.__RenderOurModuleKeyFlags(module, helplist)
return '\n'.join(helplist) | Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module. | Below is the instruction that describes the task:
### Input:
Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module.
### Response:
def ModuleHelp(self, module):
"""Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module.
"""
helplist = []
self.__RenderOurModuleKeyFlags(module, helplist)
return '\n'.join(helplist) |
def spin(compound, theta, around):
"""Rotate a compound in place around an arbitrary vector.
Parameters
----------
compound : mb.Compound
The compound being rotated.
theta : float
The angle by which to rotate the compound, in radians.
around : np.ndarray, shape=(3,), dtype=float
The axis about which to spin the compound.
"""
around = np.asarray(around).reshape(3)
if np.array_equal(around, np.zeros(3)):
raise ValueError('Cannot spin around a zero vector')
center_pos = compound.center
translate(compound, -center_pos)
rotate(compound, theta, around)
translate(compound, center_pos) | Rotate a compound in place around an arbitrary vector.
Parameters
----------
compound : mb.Compound
The compound being rotated.
theta : float
The angle by which to rotate the compound, in radians.
around : np.ndarray, shape=(3,), dtype=float
The axis about which to spin the compound. | Below is the instruction that describes the task:
### Input:
Rotate a compound in place around an arbitrary vector.
Parameters
----------
compound : mb.Compound
The compound being rotated.
theta : float
The angle by which to rotate the compound, in radians.
around : np.ndarray, shape=(3,), dtype=float
The axis about which to spin the compound.
### Response:
def spin(compound, theta, around):
"""Rotate a compound in place around an arbitrary vector.
Parameters
----------
compound : mb.Compound
The compound being rotated.
theta : float
The angle by which to rotate the compound, in radians.
around : np.ndarray, shape=(3,), dtype=float
The axis about which to spin the compound.
"""
around = np.asarray(around).reshape(3)
if np.array_equal(around, np.zeros(3)):
raise ValueError('Cannot spin around a zero vector')
center_pos = compound.center
translate(compound, -center_pos)
rotate(compound, theta, around)
translate(compound, center_pos) |
def format_metadata_to_key(key_metadata):
"""
<Purpose>
Construct a key dictionary (e.g., securesystemslib.formats.RSAKEY_SCHEMA)
according to the keytype of 'key_metadata'. The dict returned by this
function has the exact format as the dict returned by one of the key
generation functions, like generate_ed25519_key(). The dict returned
has the form:
{'keytype': keytype,
'scheme': scheme,
'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'keyval': {'public': '...',
'private': '...'}}
For example, RSA key dictionaries in RSAKEY_SCHEMA format should be used by
modules storing a collection of keys, such as with keydb.py. RSA keys as
stored in metadata files use a different format, so this function should be
called if an RSA key is extracted from one of these metadata files and needs
converting. The key generation functions create an entirely new key and
return it in the format appropriate for 'keydb.py'.
>>> ed25519_key = generate_ed25519_key()
>>> key_val = ed25519_key['keyval']
>>> keytype = ed25519_key['keytype']
>>> scheme = ed25519_key['scheme']
>>> ed25519_metadata = \
format_keyval_to_metadata(keytype, scheme, key_val, private=True)
>>> ed25519_key_2, junk = format_metadata_to_key(ed25519_metadata)
>>> securesystemslib.formats.ED25519KEY_SCHEMA.matches(ed25519_key_2)
True
>>> ed25519_key == ed25519_key_2
True
<Arguments>
key_metadata:
The key dictionary as stored in Metadata files, conforming to
'securesystemslib.formats.KEY_SCHEMA'. It has the form:
{'keytype': '...',
'scheme': scheme,
'keyval': {'public': '...',
'private': '...'}}
<Exceptions>
securesystemslib.exceptions.FormatError, if 'key_metadata' does not conform
to 'securesystemslib.formats.KEY_SCHEMA'.
<Side Effects>
None.
<Returns>
In the case of an RSA key, a dictionary conformant to
'securesystemslib.formats.RSAKEY_SCHEMA'.
"""
# Does 'key_metadata' have the correct format?
# This check will ensure 'key_metadata' has the appropriate number
# of objects and object types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.KEY_SCHEMA.check_match(key_metadata)
# Construct the dictionary to be returned.
key_dict = {}
keytype = key_metadata['keytype']
scheme = key_metadata['scheme']
key_value = key_metadata['keyval']
# Convert 'key_value' to 'securesystemslib.formats.KEY_SCHEMA' and generate
# its hash. The hash is in hexdigest form.
default_keyid = _get_keyid(keytype, scheme, key_value)
keyids = set()
keyids.add(default_keyid)
for hash_algorithm in securesystemslib.settings.HASH_ALGORITHMS:
keyid = _get_keyid(keytype, scheme, key_value, hash_algorithm)
keyids.add(keyid)
# All the required key values gathered. Build 'key_dict'.
# 'keyid_hash_algorithms'
key_dict['keytype'] = keytype
key_dict['scheme'] = scheme
key_dict['keyid'] = default_keyid
key_dict['keyid_hash_algorithms'] = securesystemslib.settings.HASH_ALGORITHMS
key_dict['keyval'] = key_value
return key_dict, keyids | <Purpose>
Construct a key dictionary (e.g., securesystemslib.formats.RSAKEY_SCHEMA)
according to the keytype of 'key_metadata'. The dict returned by this
function has the exact format as the dict returned by one of the key
generation functions, like generate_ed25519_key(). The dict returned
has the form:
{'keytype': keytype,
'scheme': scheme,
'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'keyval': {'public': '...',
'private': '...'}}
For example, RSA key dictionaries in RSAKEY_SCHEMA format should be used by
modules storing a collection of keys, such as with keydb.py. RSA keys as
stored in metadata files use a different format, so this function should be
called if an RSA key is extracted from one of these metadata files and needs
converting. The key generation functions create an entirely new key and
return it in the format appropriate for 'keydb.py'.
>>> ed25519_key = generate_ed25519_key()
>>> key_val = ed25519_key['keyval']
>>> keytype = ed25519_key['keytype']
>>> scheme = ed25519_key['scheme']
>>> ed25519_metadata = \
format_keyval_to_metadata(keytype, scheme, key_val, private=True)
>>> ed25519_key_2, junk = format_metadata_to_key(ed25519_metadata)
>>> securesystemslib.formats.ED25519KEY_SCHEMA.matches(ed25519_key_2)
True
>>> ed25519_key == ed25519_key_2
True
<Arguments>
key_metadata:
The key dictionary as stored in Metadata files, conforming to
'securesystemslib.formats.KEY_SCHEMA'. It has the form:
{'keytype': '...',
'scheme': scheme,
'keyval': {'public': '...',
'private': '...'}}
<Exceptions>
securesystemslib.exceptions.FormatError, if 'key_metadata' does not conform
to 'securesystemslib.formats.KEY_SCHEMA'.
<Side Effects>
None.
<Returns>
In the case of an RSA key, a dictionary conformant to
'securesystemslib.formats.RSAKEY_SCHEMA'. | Below is the the instruction that describes the task:
### Input:
<Purpose>
Construct a key dictionary (e.g., securesystemslib.formats.RSAKEY_SCHEMA)
according to the keytype of 'key_metadata'. The dict returned by this
function has the exact format as the dict returned by one of the key
generation functions, like generate_ed25519_key(). The dict returned
has the form:
{'keytype': keytype,
'scheme': scheme,
'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'keyval': {'public': '...',
'private': '...'}}
For example, RSA key dictionaries in RSAKEY_SCHEMA format should be used by
modules storing a collection of keys, such as with keydb.py. RSA keys as
stored in metadata files use a different format, so this function should be
called if an RSA key is extracted from one of these metadata files and need
converting. The key generation functions create an entirely new key and
return it in the format appropriate for 'keydb.py'.
>>> ed25519_key = generate_ed25519_key()
>>> key_val = ed25519_key['keyval']
>>> keytype = ed25519_key['keytype']
>>> scheme = ed25519_key['scheme']
>>> ed25519_metadata = \
format_keyval_to_metadata(keytype, scheme, key_val, private=True)
>>> ed25519_key_2, junk = format_metadata_to_key(ed25519_metadata)
>>> securesystemslib.formats.ED25519KEY_SCHEMA.matches(ed25519_key_2)
True
>>> ed25519_key == ed25519_key_2
True
<Arguments>
key_metadata:
The key dictionary as stored in Metadata files, conforming to
'securesystemslib.formats.KEY_SCHEMA'. It has the form:
{'keytype': '...',
'scheme': scheme,
'keyval': {'public': '...',
'private': '...'}}
<Exceptions>
securesystemslib.exceptions.FormatError, if 'key_metadata' does not conform
to 'securesystemslib.formats.KEY_SCHEMA'.
<Side Effects>
None.
<Returns>
In the case of an RSA key, a dictionary conformant to
'securesystemslib.formats.RSAKEY_SCHEMA'.
### Response:
def format_metadata_to_key(key_metadata):
"""
<Purpose>
Construct a key dictionary (e.g., securesystemslib.formats.RSAKEY_SCHEMA)
according to the keytype of 'key_metadata'. The dict returned by this
function has the exact format as the dict returned by one of the key
generation functions, like generate_ed25519_key(). The dict returned
has the form:
{'keytype': keytype,
'scheme': scheme,
'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'keyval': {'public': '...',
'private': '...'}}
For example, RSA key dictionaries in RSAKEY_SCHEMA format should be used by
modules storing a collection of keys, such as with keydb.py. RSA keys as
stored in metadata files use a different format, so this function should be
called if an RSA key is extracted from one of these metadata files and needs
converting. The key generation functions create an entirely new key and
return it in the format appropriate for 'keydb.py'.
>>> ed25519_key = generate_ed25519_key()
>>> key_val = ed25519_key['keyval']
>>> keytype = ed25519_key['keytype']
>>> scheme = ed25519_key['scheme']
>>> ed25519_metadata = \
format_keyval_to_metadata(keytype, scheme, key_val, private=True)
>>> ed25519_key_2, junk = format_metadata_to_key(ed25519_metadata)
>>> securesystemslib.formats.ED25519KEY_SCHEMA.matches(ed25519_key_2)
True
>>> ed25519_key == ed25519_key_2
True
<Arguments>
key_metadata:
The key dictionary as stored in Metadata files, conforming to
'securesystemslib.formats.KEY_SCHEMA'. It has the form:
{'keytype': '...',
'scheme': scheme,
'keyval': {'public': '...',
'private': '...'}}
<Exceptions>
securesystemslib.exceptions.FormatError, if 'key_metadata' does not conform
to 'securesystemslib.formats.KEY_SCHEMA'.
<Side Effects>
None.
<Returns>
In the case of an RSA key, a dictionary conformant to
'securesystemslib.formats.RSAKEY_SCHEMA'.
"""
# Does 'key_metadata' have the correct format?
# This check will ensure 'key_metadata' has the appropriate number
# of objects and object types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.KEY_SCHEMA.check_match(key_metadata)
# Construct the dictionary to be returned.
key_dict = {}
keytype = key_metadata['keytype']
scheme = key_metadata['scheme']
key_value = key_metadata['keyval']
# Convert 'key_value' to 'securesystemslib.formats.KEY_SCHEMA' and generate
# its hash. The hash is in hexdigest form.
default_keyid = _get_keyid(keytype, scheme, key_value)
keyids = set()
keyids.add(default_keyid)
for hash_algorithm in securesystemslib.settings.HASH_ALGORITHMS:
keyid = _get_keyid(keytype, scheme, key_value, hash_algorithm)
keyids.add(keyid)
# All the required key values gathered. Build 'key_dict'.
# 'keyid_hash_algorithms'
key_dict['keytype'] = keytype
key_dict['scheme'] = scheme
key_dict['keyid'] = default_keyid
key_dict['keyid_hash_algorithms'] = securesystemslib.settings.HASH_ALGORITHMS
key_dict['keyval'] = key_value
return key_dict, keyids |
def _request(request, request_fallback=None):
''' Extract request fields wherever they may come from: GET, POST, forms, fallback '''
# Use lambdas to avoid evaluating bottle.request.* which may throw an Error
all_dicts = [
lambda: request.json,
lambda: request.forms,
lambda: request.query,
lambda: request.files,
#lambda: request.POST,
lambda: request_fallback
]
request_dict = dict()
for req_dict_ in all_dicts:
try:
req_dict = req_dict_()
except KeyError:
continue
if req_dict is not None and hasattr(req_dict, 'items'):
for req_key, req_val in req_dict.items():
request_dict[req_key] = req_val
return request_dict | Extract request fields wherever they may come from: GET, POST, forms, fallback | Below is the the instruction that describes the task:
### Input:
Extract request fields wherever they may come from: GET, POST, forms, fallback
### Response:
def _request(request, request_fallback=None):
''' Extract request fields wherever they may come from: GET, POST, forms, fallback '''
# Use lambdas to avoid evaluating bottle.request.* which may throw an Error
all_dicts = [
lambda: request.json,
lambda: request.forms,
lambda: request.query,
lambda: request.files,
#lambda: request.POST,
lambda: request_fallback
]
request_dict = dict()
for req_dict_ in all_dicts:
try:
req_dict = req_dict_()
except KeyError:
continue
if req_dict is not None and hasattr(req_dict, 'items'):
for req_key, req_val in req_dict.items():
request_dict[req_key] = req_val
return request_dict |
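To see how the lambdas tolerate missing attributes, a stand-in request object is enough; the class below is a hypothetical stub, not bottle's real request API:

# Hypothetical stub standing in for bottle.request; only the attributes _request() reads exist.
class FakeRequest:
    json = {'a': 1}        # parsed JSON body
    forms = {'b': '2'}     # form fields
    query = {'c': '3'}     # query-string parameters
    files = {}             # uploaded files

merged = _request(FakeRequest(), request_fallback={'d': 'fallback'})
print(merged)              # {'a': 1, 'b': '2', 'c': '3', 'd': 'fallback'}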
def resizeEvent(self, event):
"""Schedules an item layout if resize mode is \"adjust\". Somehow this is
needed for correctly scaling down items.
The reason this was reimplemented was the CommentDelegate.
:param event: the resize event
:type event: QtCore.QEvent
:returns: None
:rtype: None
:raises: None
"""
if self.resizeMode() == self.Adjust:
self.scheduleDelayedItemsLayout()
return super(ListLevel, self).resizeEvent(event) | Schedules an item layout if resize mode is \"adjust\". Somehow this is
needed for correctly scaling down items.
The reason this was reimplemented was the CommentDelegate.
:param event: the resize event
:type event: QtCore.QEvent
:returns: None
:rtype: None
:raises: None | Below is the the instruction that describes the task:
### Input:
Schedules an item layout if resize mode is \"adjust\". Somehow this is
needed for correctly scaling down items.
The reason this was reimplemented was the CommentDelegate.
:param event: the resize event
:type event: QtCore.QEvent
:returns: None
:rtype: None
:raises: None
### Response:
def resizeEvent(self, event):
"""Schedules an item layout if resize mode is \"adjust\". Somehow this is
needed for correctly scaling down items.
The reason this was reimplemented was the CommentDelegate.
:param event: the resize event
:type event: QtCore.QEvent
:returns: None
:rtype: None
:raises: None
"""
if self.resizeMode() == self.Adjust:
self.scheduleDelayedItemsLayout()
return super(ListLevel, self).resizeEvent(event) |
def _getFromTime(self, atDate=None):
"""
Time that the event starts (in the local time zone).
"""
return getLocalTime(self.date_from, self.time_from, self.tz) | Time that the event starts (in the local time zone). | Below is the the instruction that describes the task:
### Input:
Time that the event starts (in the local time zone).
### Response:
def _getFromTime(self, atDate=None):
"""
Time that the event starts (in the local time zone).
"""
return getLocalTime(self.date_from, self.time_from, self.tz) |
def linkify_with_escalations(self, escalations):
"""
Link with escalations
:param escalations: all escalations object
:type escalations: alignak.objects.escalation.Escalations
:return: None
"""
for i in self:
if not hasattr(i, 'escalations'):
continue
links_list = strip_and_uniq(i.escalations)
new = []
for name in [e for e in links_list if e]:
escalation = escalations.find_by_name(name)
if escalation is not None and escalation.uuid not in new:
new.append(escalation.uuid)
else:
i.add_error("the escalation '%s' defined for '%s' is unknown"
% (name, i.get_name()))
i.escalations = new | Link with escalations
:param escalations: all escalations object
:type escalations: alignak.objects.escalation.Escalations
:return: None | Below is the the instruction that describes the task:
### Input:
Link with escalations
:param escalations: all escalations object
:type escalations: alignak.objects.escalation.Escalations
:return: None
### Response:
def linkify_with_escalations(self, escalations):
"""
Link with escalations
:param escalations: all escalations object
:type escalations: alignak.objects.escalation.Escalations
:return: None
"""
for i in self:
if not hasattr(i, 'escalations'):
continue
links_list = strip_and_uniq(i.escalations)
new = []
for name in [e for e in links_list if e]:
escalation = escalations.find_by_name(name)
if escalation is not None and escalation.uuid not in new:
new.append(escalation.uuid)
else:
i.add_error("the escalation '%s' defined for '%s' is unknown"
% (name, i.get_name()))
i.escalations = new |
def get_tagged_albums(self, tag, limit=None, cacheable=True):
"""Returns the albums tagged by a user."""
params = self._get_params()
params["tag"] = tag
params["taggingtype"] = "album"
if limit:
params["limit"] = limit
doc = self._request(self.ws_prefix + ".getpersonaltags", cacheable, params)
return _extract_albums(doc, self.network) | Returns the albums tagged by a user. | Below is the the instruction that describes the task:
### Input:
Returns the albums tagged by a user.
### Response:
def get_tagged_albums(self, tag, limit=None, cacheable=True):
"""Returns the albums tagged by a user."""
params = self._get_params()
params["tag"] = tag
params["taggingtype"] = "album"
if limit:
params["limit"] = limit
doc = self._request(self.ws_prefix + ".getpersonaltags", cacheable, params)
return _extract_albums(doc, self.network) |
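A hedged usage sketch, assuming this method sits on a pylast-style User object reached through a LastFMNetwork; the API credentials are placeholders:

# Sketch only: assumes the pylast-style objects this method is written against.
import pylast

network = pylast.LastFMNetwork(api_key="YOUR_KEY", api_secret="YOUR_SECRET")
user = network.get_user("some_listener")

# Albums this user tagged as "jazz", capped at five results.
for album in user.get_tagged_albums("jazz", limit=5):
    print(album)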
def _find_best_in_population(population, values):
"""Finds the population member with the lowest value."""
best_value = tf.math.reduce_min(input_tensor=values)
best_index = tf.where(tf.math.equal(values, best_value))[0, 0]
return ([population_part[best_index] for population_part in population],
best_value) | Finds the population member with the lowest value. | Below is the the instruction that describes the task:
### Input:
Finds the population member with the lowest value.
### Response:
def _find_best_in_population(population, values):
"""Finds the population member with the lowest value."""
best_value = tf.math.reduce_min(input_tensor=values)
best_index = tf.where(tf.math.equal(values, best_value))[0, 0]
return ([population_part[best_index] for population_part in population],
best_value) |
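The function is pure tensor arithmetic, so a toy population makes the behaviour concrete (the shapes below are illustrative only):

# Toy example: one population part holding four candidate 2-vectors, plus their objective values.
import tensorflow as tf

population = [tf.constant([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])]
values = tf.constant([4.0, 3.0, 1.0, 2.0])   # member 2 has the lowest value

best_member, best_value = _find_best_in_population(population, values)
print(best_member[0].numpy(), best_value.numpy())   # [2. 2.] 1.0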
def working_cycletime(start, end, workday_start=datetime.timedelta(hours=0), workday_end=datetime.timedelta(hours=24)):
"""
Get the working time between a beginning and an end point subtracting out non-office time
"""
def clamp(t, start, end):
"Return 't' clamped to the range ['start', 'end']"
return max(start, min(end, t))
def day_part(t):
"Return timedelta between midnight and 't'."
return t - t.replace(hour=0, minute=0, second=0)
if not start:
return None
if not end:
end = datetime.datetime.now()
zero = datetime.timedelta(0)
# Make sure that the work day is valid
assert(zero <= workday_start <= workday_end <= datetime.timedelta(1))
# Get the workday delta
workday = workday_end - workday_start
# Get the number of days it took
days = (end - start).days + 1
# Number of weeks
weeks = days // 7
# Get the number of days in addition to weeks
extra = (max(0, 5 - start.weekday()) + min(5, 1 + end.weekday())) % 5
# Get the number of working days
weekdays = weeks * 5 + extra
# Get the total time spent accounting for the workday
total = workday * weekdays
if start.weekday() < 5:
# Figuring out how much time it wasn't being worked on and subtracting
total -= clamp(day_part(start) - workday_start, zero, workday)
if end.weekday() < 5:
# Figuring out how much time it wasn't being worked on and subtracting
total -= clamp(workday_end - day_part(end), zero, workday)
cycle_time = timedelta_total_seconds(total) / timedelta_total_seconds(workday)
return cycle_time | Get the working time between a beginning and an end point subtracting out non-office time | Below is the the instruction that describes the task:
### Input:
Get the working time between a beginning and an end point subtracting out non-office time
### Response:
def working_cycletime(start, end, workday_start=datetime.timedelta(hours=0), workday_end=datetime.timedelta(hours=24)):
"""
Get the working time between a beginning and an end point subtracting out non-office time
"""
def clamp(t, start, end):
"Return 't' clamped to the range ['start', 'end']"
return max(start, min(end, t))
def day_part(t):
"Return timedelta between midnight and 't'."
return t - t.replace(hour=0, minute=0, second=0)
if not start:
return None
if not end:
end = datetime.datetime.now()
zero = datetime.timedelta(0)
# Make sure that the work day is valid
assert(zero <= workday_start <= workday_end <= datetime.timedelta(1))
# Get the workday delta
workday = workday_end - workday_start
# Get the number of days it took
days = (end - start).days + 1
# Number of weeks
weeks = days // 7
# Get the number of days in addition to weeks
extra = (max(0, 5 - start.weekday()) + min(5, 1 + end.weekday())) % 5
# Get the number of working days
weekdays = weeks * 5 + extra
# Get the total time spent accounting for the workday
total = workday * weekdays
if start.weekday() < 5:
# Figuring out how much time it wasn't being worked on and subtracting
total -= clamp(day_part(start) - workday_start, zero, workday)
if end.weekday() < 5:
# Figuring out how much time it wasn't being worked on and subtracting
total -= clamp(workday_end - day_part(end), zero, workday)
cycle_time = timedelta_total_seconds(total) / timedelta_total_seconds(workday)
return cycle_time |
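A quick illustration of the clamping; the 9:00-17:00 workday is an assumption for the example, and timedelta_total_seconds is assumed to be the usual total-seconds helper:

# Work started Monday 10:00 and finished Tuesday 12:00 against a 9:00-17:00 workday.
import datetime

start = datetime.datetime(2021, 3, 1, 10, 0)   # a Monday
end = datetime.datetime(2021, 3, 2, 12, 0)     # the following Tuesday

cycle = working_cycletime(start, end,
                          workday_start=datetime.timedelta(hours=9),
                          workday_end=datetime.timedelta(hours=17))
print(cycle)   # 1.25 -> 7h worked Monday + 3h Tuesday, over an 8h workday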
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Generate a matplotlib plot of Andrews curves, for visualising clusters of
multivariate data.
Andrews curves have the functional form:
f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
x_4 sin(2t) + x_5 cos(2t) + ...
Where x coefficients correspond to the values of each dimension and t is
linearly spaced between -pi and +pi. Each row of frame then corresponds to
a single curve.
Parameters
----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color : list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
class:`matplotlib.axes.Axes`
"""
from math import sqrt, pi
import matplotlib.pyplot as plt
def function(amplitudes):
def f(t):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
# Take the rest of the coefficients and resize them
# appropriately. Take a copy of amplitudes as otherwise numpy
# deletes the element from amplitudes itself.
coeffs = np.delete(np.copy(amplitudes), 0)
coeffs.resize(int((coeffs.size + 1) / 2), 2)
# Generate the harmonics and arguments for the sin and cos
# functions.
harmonics = np.arange(0, coeffs.shape[0]) + 1
trig_args = np.outer(harmonics, t)
result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) +
coeffs[:, 1, np.newaxis] * np.cos(trig_args),
axis=0)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
t = np.linspace(-pi, pi, samples)
used_legends = set()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = f(t)
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(t, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(t, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax | Generate a matplotlib plot of Andrews curves, for visualising clusters of
multivariate data.
Andrews curves have the functional form:
f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
x_4 sin(2t) + x_5 cos(2t) + ...
Where x coefficients correspond to the values of each dimension and t is
linearly spaced between -pi and +pi. Each row of frame then corresponds to
a single curve.
Parameters
----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color : list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
class:`matplotlib.axes.Axes` | Below is the the instruction that describes the task:
### Input:
Generate a matplotlib plot of Andrews curves, for visualising clusters of
multivariate data.
Andrews curves have the functional form:
f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
x_4 sin(2t) + x_5 cos(2t) + ...
Where x coefficients correspond to the values of each dimension and t is
linearly spaced between -pi and +pi. Each row of frame then corresponds to
a single curve.
Parameters
----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color : list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
class:`matplotlib.axes.Axes`
### Response:
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Generate a matplotlib plot of Andrews curves, for visualising clusters of
multivariate data.
Andrews curves have the functional form:
f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
x_4 sin(2t) + x_5 cos(2t) + ...
Where x coefficients correspond to the values of each dimension and t is
linearly spaced between -pi and +pi. Each row of frame then corresponds to
a single curve.
Parameters
----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color : list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
class:`matplotlib.axes.Axes`
"""
from math import sqrt, pi
import matplotlib.pyplot as plt
def function(amplitudes):
def f(t):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
# Take the rest of the coefficients and resize them
# appropriately. Take a copy of amplitudes as otherwise numpy
# deletes the element from amplitudes itself.
coeffs = np.delete(np.copy(amplitudes), 0)
coeffs.resize(int((coeffs.size + 1) / 2), 2)
# Generate the harmonics and arguments for the sin and cos
# functions.
harmonics = np.arange(0, coeffs.shape[0]) + 1
trig_args = np.outer(harmonics, t)
result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) +
coeffs[:, 1, np.newaxis] * np.cos(trig_args),
axis=0)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
t = np.linspace(-pi, pi, samples)
used_legends = set()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = f(t)
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(t, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(t, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax |
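A short usage sketch with a synthetic, roughly normalized three-class frame standing in for real data:

# Illustrative call on a random DataFrame with a 'species' class column.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
frame = pd.DataFrame(rng.rand(30, 4), columns=list('wxyz'))
frame['species'] = np.repeat(['a', 'b', 'c'], 10)

ax = andrews_curves(frame, class_column='species', samples=100, colormap='viridis')
plt.show()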
def check_psd(matrix, tolerance=1e-6):
""" A square matrix is PSD if all eigenvalues of its Hermitian part are
non- negative. The Hermitian part is given by (self + M*)/2, where M* is
the complex conjugate transpose of M """
hermitian = (matrix + matrix.T.conjugate()) / 2
eigenvalues = np.linalg.eigh(hermitian)[0]
return (eigenvalues > -tolerance).all() | A square matrix is PSD if all eigenvalues of its Hermitian part are
non-negative. The Hermitian part is given by (M + M*)/2, where M* is
the complex conjugate transpose of M | Below is the the instruction that describes the task:
### Input:
A square matrix is PSD if all eigenvalues of its Hermitian part are
non-negative. The Hermitian part is given by (M + M*)/2, where M* is
the complex conjugate transpose of M
### Response:
def check_psd(matrix, tolerance=1e-6):
""" A square matrix is PSD if all eigenvalues of its Hermitian part are
non-negative. The Hermitian part is given by (M + M*)/2, where M* is
the complex conjugate transpose of M """
hermitian = (matrix + matrix.T.conjugate()) / 2
eigenvalues = np.linalg.eigh(hermitian)[0]
return (eigenvalues > -tolerance).all() |
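Two quick checks: a matrix that is PSD by construction, and its negation, which is not:

# A @ A.T is symmetric positive semi-definite by construction; -(A @ A.T) is not.
import numpy as np

rng = np.random.RandomState(0)
A = rng.rand(4, 4)
psd = A @ A.T

print(check_psd(psd))    # True
print(check_psd(-psd))   # False (all eigenvalues flip sign)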
def summarize(self, text, topics=4, length=5, binary_matrix=True, topic_sigma_threshold=0.5):
"""
Implements the method of latent semantic analysis described by Steinberger and Jezek in the paper:
J. Steinberger and K. Jezek (2004). Using latent semantic analysis in text summarization and summary evaluation.
Proc. ISIM ’04, pp. 93–100.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param topics: the number of topics/concepts covered in the input text (defines the degree of
dimensionality reduction in the SVD step)
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param binary_matrix: boolean value indicating whether the matrix of word counts should be binary
(True by default)
:param topic_sigma_threshold: filters out topics/concepts with a singular value less than this
percentage of the largest singular value (must be between 0 and 1, 0.5 by default)
:return: list of sentences for the summary
"""
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
return unprocessed_sentences
topics = self._validate_num_topics(topics, sentences)
# Generate a matrix of terms that appear in each sentence
weighting = 'binary' if binary_matrix else 'frequency'
sentence_matrix = self._compute_matrix(sentences, weighting=weighting)
sentence_matrix = sentence_matrix.transpose()
# Filter out negatives in the sparse matrix (need to do this on Vt for LSA method):
sentence_matrix = sentence_matrix.multiply(sentence_matrix > 0)
s, u, v = self._svd(sentence_matrix, num_concepts=topics)
# Only consider topics/concepts whose singular values are half of the largest singular value
if topic_sigma_threshold < 0 or topic_sigma_threshold > 1:
raise ValueError('Parameter topic_sigma_threshold must take a value between 0 and 1')
sigma_threshold = max(u) * topic_sigma_threshold
u[u < sigma_threshold] = 0 # Set all other singular values to zero
# Build a "length vector" containing the length (i.e. saliency) of each sentence
saliency_vec = np.dot(np.square(u), np.square(v))
top_sentences = saliency_vec.argsort()[-length:][::-1]
# Return the sentences in the order in which they appear in the document
top_sentences.sort()
return [unprocessed_sentences[i] for i in top_sentences] | Implements the method of latent semantic analysis described by Steinberger and Jezek in the paper:
J. Steinberger and K. Jezek (2004). Using latent semantic analysis in text summarization and summary evaluation.
Proc. ISIM ’04, pp. 93–100.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param topics: the number of topics/concepts covered in the input text (defines the degree of
dimensionality reduction in the SVD step)
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param binary_matrix: boolean value indicating whether the matrix of word counts should be binary
(True by default)
:param topic_sigma_threshold: filters out topics/concepts with a singular value less than this
percentage of the largest singular value (must be between 0 and 1, 0.5 by default)
:return: list of sentences for the summary | Below is the the instruction that describes the task:
### Input:
Implements the method of latent semantic analysis described by Steinberger and Jezek in the paper:
J. Steinberger and K. Jezek (2004). Using latent semantic analysis in text summarization and summary evaluation.
Proc. ISIM ’04, pp. 93–100.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param topics: the number of topics/concepts covered in the input text (defines the degree of
dimensionality reduction in the SVD step)
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param binary_matrix: boolean value indicating whether the matrix of word counts should be binary
(True by default)
:param topic_sigma_threshold: filters out topics/concepts with a singular value less than this
percentage of the largest singular value (must be between 0 and 1, 0.5 by default)
:return: list of sentences for the summary
### Response:
def summarize(self, text, topics=4, length=5, binary_matrix=True, topic_sigma_threshold=0.5):
"""
Implements the method of latent semantic analysis described by Steinberger and Jezek in the paper:
J. Steinberger and K. Jezek (2004). Using latent semantic analysis in text summarization and summary evaluation.
Proc. ISIM ’04, pp. 93–100.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param topics: the number of topics/concepts covered in the input text (defines the degree of
dimensionality reduction in the SVD step)
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param binary_matrix: boolean value indicating whether the matrix of word counts should be binary
(True by default)
:param topic_sigma_threshold: filters out topics/concepts with a singular value less than this
percentage of the largest singular value (must be between 0 and 1, 0.5 by default)
:return: list of sentences for the summary
"""
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
return unprocessed_sentences
topics = self._validate_num_topics(topics, sentences)
# Generate a matrix of terms that appear in each sentence
weighting = 'binary' if binary_matrix else 'frequency'
sentence_matrix = self._compute_matrix(sentences, weighting=weighting)
sentence_matrix = sentence_matrix.transpose()
# Filter out negatives in the sparse matrix (need to do this on Vt for LSA method):
sentence_matrix = sentence_matrix.multiply(sentence_matrix > 0)
s, u, v = self._svd(sentence_matrix, num_concepts=topics)
# Only consider topics/concepts whose singular values are half of the largest singular value
if topic_sigma_threshold < 0 or topic_sigma_threshold > 1:
raise ValueError('Parameter topic_sigma_threshold must take a value between 0 and 1')
sigma_threshold = max(u) * topic_sigma_threshold
u[u < sigma_threshold] = 0 # Set all other singular values to zero
# Build a "length vector" containing the length (i.e. saliency) of each sentence
saliency_vec = np.dot(np.square(u), np.square(v))
top_sentences = saliency_vec.argsort()[-length:][::-1]
# Return the sentences in the order in which they appear in the document
top_sentences.sort()
return [unprocessed_sentences[i] for i in top_sentences] |
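Because the class helpers _compute_matrix() and _svd() are not shown in this row, here is a stand-alone numpy sketch of just the Steinberger-Jezek saliency step on an assumed toy term-sentence matrix:

# Stand-alone sketch of the saliency ("sentence length") computation from the paper.
import numpy as np

# Binary term-by-sentence matrix: 5 terms x 4 sentences (toy data).
M = np.array([[1, 0, 1, 0],
              [1, 1, 0, 0],
              [0, 1, 1, 1],
              [0, 0, 1, 1],
              [1, 0, 0, 1]], dtype=float)

U, s, Vt = np.linalg.svd(M, full_matrices=False)
topics = 2
s, Vt = s[:topics], Vt[:topics]            # keep the strongest concepts
s[s < 0.5 * s.max()] = 0                   # sigma threshold of 0.5, as in the method above

saliency = np.dot(np.square(s), np.square(Vt))    # one score per sentence
summary_idx = np.sort(saliency.argsort()[-2:])    # best 2 sentences, in document order
print(summary_idx, saliency.round(3))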
def grab_bulbs(host, token=None):
"""Grab XML, then add all bulbs to a dict. Removes room functionality"""
xml = grab_xml(host, token)
bulbs = {}
for room in xml:
for device in room['device']:
bulbs[int(device['did'])] = device
return bulbs | Grab XML, then add all bulbs to a dict. Removes room functionality | Below is the the instruction that describes the task:
### Input:
Grab XML, then add all bulbs to a dict. Removes room functionality
### Response:
def grab_bulbs(host, token=None):
"""Grab XML, then add all bulbs to a dict. Removes room functionality"""
xml = grab_xml(host, token)
bulbs = {}
for room in xml:
for device in room['device']:
bulbs[int(device['did'])] = device
return bulbs |
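The flattening is easiest to see on a hand-written room list; the structure below mimics what grab_xml() is assumed to return:

# Hypothetical grab_xml() output: two rooms, three bulbs in total.
rooms = [
    {'device': [{'did': '1', 'name': 'lamp'}, {'did': '2', 'name': 'strip'}]},
    {'device': [{'did': '7', 'name': 'desk'}]},
]

# The same did -> device flattening grab_bulbs() performs after fetching the XML.
bulbs = {int(device['did']): device for room in rooms for device in room['device']}
print(sorted(bulbs))   # [1, 2, 7]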
def main(args):
"""
%prog newicktree
Plot Newick formatted tree. The gene structure can be plotted along if
--gffdir is given. The gff file needs to be `genename.gff`. If --sizes is
on, also show the number of amino acids.
With --barcode a mapping file can be provided to convert seq names to
eg. species names, useful in unified tree display. This file should have
distinctive barcodes in column1 and new names in column2, tab delimited.
"""
p = OptionParser(main.__doc__)
p.add_option("--outgroup", help="Outgroup for rerooting the tree. " +
"Use comma to separate multiple taxa.")
p.add_option("--noreroot", default=False, action="store_true",
help="Don't reroot the input tree [default: %default]")
p.add_option("--rmargin", default=.3, type="float",
help="Set blank rmargin to the right [default: %default]")
p.add_option("--gffdir", default=None,
help="The directory that contain GFF files [default: %default]")
p.add_option("--sizes", default=None,
help="The FASTA file or the sizes file [default: %default]")
p.add_option("--SH", default=None, type="string",
help="SH test p-value [default: %default]")
p.add_option("--scutoff", default=0, type="int",
help="cutoff for displaying node support, 0-100 [default: %default]")
p.add_option("--barcode", default=None,
help="path to seq names barcode mapping file: "
"barcode<tab>new_name [default: %default]")
p.add_option("--leafcolor", default="k",
help="Font color for the OTUs, or path to a file "
"containing color mappings: leafname<tab>color [default: %default]")
p.add_option("--leaffont", default=12, help="Font size for the OTUs")
p.add_option("--geoscale", default=False, action="store_true",
help="Plot geological scale")
opts, args, iopts = p.set_image_options(args, figsize="8x6")
if len(args) != 1:
sys.exit(not p.print_help())
datafile, = args
outgroup = None
reroot = not opts.noreroot
if opts.outgroup:
outgroup = opts.outgroup.split(",")
if datafile == "demo":
tx = """(((Os02g0681100:0.1151,Sb04g031800:0.11220)1.0:0.0537,
(Os04g0578800:0.04318,Sb06g026210:0.04798)-1.0:0.08870)1.0:0.06985,
((Os03g0124100:0.08845,Sb01g048930:0.09055)1.0:0.05332,
(Os10g0534700:0.06592,Sb01g030630:0.04824)-1.0:0.07886):0.09389);"""
else:
logging.debug("Load tree file `{0}`.".format(datafile))
tx = open(datafile).read()
pf = datafile.rsplit(".", 1)[0]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
if opts.geoscale:
draw_geoscale(root)
else:
if op.isfile(opts.leafcolor):
leafcolor = "k"
leafcolorfile = opts.leafcolor
else:
leafcolor = opts.leafcolor
leafcolorfile = None
draw_tree(root, tx, rmargin=opts.rmargin, leafcolor=leafcolor,
outgroup=outgroup, reroot=reroot, gffdir=opts.gffdir,
sizes=opts.sizes, SH=opts.SH, scutoff=opts.scutoff,
barcodefile=opts.barcode, leafcolorfile=leafcolorfile,
leaffont=opts.leaffont)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | %prog newicktree
Plot Newick formatted tree. The gene structure can be plotted along if
--gffdir is given. The gff file needs to be `genename.gff`. If --sizes is
on, also show the number of amino acids.
With --barcode a mapping file can be provided to convert seq names to
eg. species names, useful in unified tree display. This file should have
distinctive barcodes in column1 and new names in column2, tab delimited. | Below is the the instruction that describes the task:
### Input:
%prog newicktree
Plot Newick formatted tree. The gene structure can be plotted along if
--gffdir is given. The gff file needs to be `genename.gff`. If --sizes is
on, also show the number of amino acids.
With --barcode a mapping file can be provided to convert seq names to
eg. species names, useful in unified tree display. This file should have
distinctive barcodes in column1 and new names in column2, tab delimited.
### Response:
def main(args):
"""
%prog newicktree
Plot Newick formatted tree. The gene structure can be plotted along if
--gffdir is given. The gff file needs to be `genename.gff`. If --sizes is
on, also show the number of amino acids.
With --barcode a mapping file can be provided to convert seq names to
eg. species names, useful in unified tree display. This file should have
distinctive barcodes in column1 and new names in column2, tab delimited.
"""
p = OptionParser(main.__doc__)
p.add_option("--outgroup", help="Outgroup for rerooting the tree. " +
"Use comma to separate multiple taxa.")
p.add_option("--noreroot", default=False, action="store_true",
help="Don't reroot the input tree [default: %default]")
p.add_option("--rmargin", default=.3, type="float",
help="Set blank rmargin to the right [default: %default]")
p.add_option("--gffdir", default=None,
help="The directory that contain GFF files [default: %default]")
p.add_option("--sizes", default=None,
help="The FASTA file or the sizes file [default: %default]")
p.add_option("--SH", default=None, type="string",
help="SH test p-value [default: %default]")
p.add_option("--scutoff", default=0, type="int",
help="cutoff for displaying node support, 0-100 [default: %default]")
p.add_option("--barcode", default=None,
help="path to seq names barcode mapping file: "
"barcode<tab>new_name [default: %default]")
p.add_option("--leafcolor", default="k",
help="Font color for the OTUs, or path to a file "
"containing color mappings: leafname<tab>color [default: %default]")
p.add_option("--leaffont", default=12, help="Font size for the OTUs")
p.add_option("--geoscale", default=False, action="store_true",
help="Plot geological scale")
opts, args, iopts = p.set_image_options(args, figsize="8x6")
if len(args) != 1:
sys.exit(not p.print_help())
datafile, = args
outgroup = None
reroot = not opts.noreroot
if opts.outgroup:
outgroup = opts.outgroup.split(",")
if datafile == "demo":
tx = """(((Os02g0681100:0.1151,Sb04g031800:0.11220)1.0:0.0537,
(Os04g0578800:0.04318,Sb06g026210:0.04798)-1.0:0.08870)1.0:0.06985,
((Os03g0124100:0.08845,Sb01g048930:0.09055)1.0:0.05332,
(Os10g0534700:0.06592,Sb01g030630:0.04824)-1.0:0.07886):0.09389);"""
else:
logging.debug("Load tree file `{0}`.".format(datafile))
tx = open(datafile).read()
pf = datafile.rsplit(".", 1)[0]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
if opts.geoscale:
draw_geoscale(root)
else:
if op.isfile(opts.leafcolor):
leafcolor = "k"
leafcolorfile = opts.leafcolor
else:
leafcolor = opts.leafcolor
leafcolorfile = None
draw_tree(root, tx, rmargin=opts.rmargin, leafcolor=leafcolor,
outgroup=outgroup, reroot=reroot, gffdir=opts.gffdir,
sizes=opts.sizes, SH=opts.SH, scutoff=opts.scutoff,
barcodefile=opts.barcode, leafcolorfile=leafcolorfile,
leaffont=opts.leaffont)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) |
def QA_indicator_BOLL(DataFrame, N=20, P=2):
'Bollinger Bands (布林线)'
C = DataFrame['close']
boll = MA(C, N)
UB = boll + P * STD(C, N)
LB = boll - P * STD(C, N)
DICT = {'BOLL': boll, 'UB': UB, 'LB': LB}
return pd.DataFrame(DICT) | Bollinger Bands (布林线) | Below is the the instruction that describes the task:
### Input:
Bollinger Bands (布林线)
### Response:
def QA_indicator_BOLL(DataFrame, N=20, P=2):
'Bollinger Bands (布林线)'
C = DataFrame['close']
boll = MA(C, N)
UB = boll + P * STD(C, N)
LB = boll - P * STD(C, N)
DICT = {'BOLL': boll, 'UB': UB, 'LB': LB}
return pd.DataFrame(DICT) |
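The same band arithmetic written with plain pandas rolling statistics, assuming MA/STD above are simple rolling mean/std helpers; the close prices are random stand-ins:

# Equivalent Bollinger computation with pandas rolling windows (N=20, P=2).
import numpy as np
import pandas as pd

close = pd.Series(np.random.RandomState(1).rand(60).cumsum() + 100, name='close')
N, P = 20, 2
mid = close.rolling(N).mean()
std = close.rolling(N).std()

bands = pd.DataFrame({'BOLL': mid, 'UB': mid + P * std, 'LB': mid - P * std})
print(bands.tail())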
def get_git_repositories_activity_metrics(self, project, from_date, aggregation_type, skip, top):
"""GetGitRepositoriesActivityMetrics.
[Preview API] Retrieves git activity metrics for repositories matching a specified criteria.
:param str project: Project ID or project name
:param datetime from_date: Date from which the trends are to be fetched.
:param str aggregation_type: Bucket size on which trends are to be aggregated.
:param int skip: The number of repositories to ignore.
:param int top: The number of repositories for which activity metrics are to be retrieved.
:rtype: [RepositoryActivityMetrics]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if from_date is not None:
query_parameters['fromDate'] = self._serialize.query('from_date', from_date, 'iso-8601')
if aggregation_type is not None:
query_parameters['aggregationType'] = self._serialize.query('aggregation_type', aggregation_type, 'str')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
response = self._send(http_method='GET',
location_id='df7fbbca-630a-40e3-8aa3-7a3faf66947e',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[RepositoryActivityMetrics]', self._unwrap_collection(response)) | GetGitRepositoriesActivityMetrics.
[Preview API] Retrieves git activity metrics for repositories matching a specified criteria.
:param str project: Project ID or project name
:param datetime from_date: Date from which the trends are to be fetched.
:param str aggregation_type: Bucket size on which trends are to be aggregated.
:param int skip: The number of repositories to ignore.
:param int top: The number of repositories for which activity metrics are to be retrieved.
:rtype: [RepositoryActivityMetrics] | Below is the the instruction that describes the task:
### Input:
GetGitRepositoriesActivityMetrics.
[Preview API] Retrieves git activity metrics for repositories matching a specified criteria.
:param str project: Project ID or project name
:param datetime from_date: Date from which the trends are to be fetched.
:param str aggregation_type: Bucket size on which trends are to be aggregated.
:param int skip: The number of repositories to ignore.
:param int top: The number of repositories for which activity metrics are to be retrieved.
:rtype: [RepositoryActivityMetrics]
### Response:
def get_git_repositories_activity_metrics(self, project, from_date, aggregation_type, skip, top):
"""GetGitRepositoriesActivityMetrics.
[Preview API] Retrieves git activity metrics for repositories matching a specified criteria.
:param str project: Project ID or project name
:param datetime from_date: Date from which the trends are to be fetched.
:param str aggregation_type: Bucket size on which trends are to be aggregated.
:param int skip: The number of repositories to ignore.
:param int top: The number of repositories for which activity metrics are to be retrieved.
:rtype: [RepositoryActivityMetrics]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if from_date is not None:
query_parameters['fromDate'] = self._serialize.query('from_date', from_date, 'iso-8601')
if aggregation_type is not None:
query_parameters['aggregationType'] = self._serialize.query('aggregation_type', aggregation_type, 'str')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
response = self._send(http_method='GET',
location_id='df7fbbca-630a-40e3-8aa3-7a3faf66947e',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[RepositoryActivityMetrics]', self._unwrap_collection(response)) |
def clone(self, *args, **overrides):
"""
Returns a clone of the object with matching parameter values
containing the specified args and kwargs.
"""
link = overrides.pop('link', True)
settings = dict(self.get_param_values(), **overrides)
if 'id' not in settings:
settings['id'] = self.id
if not args and link:
settings['plot_id'] = self._plot_id
pos_args = getattr(self, '_' + type(self).__name__ + '__pos_params', [])
return self.__class__(*(settings[n] for n in pos_args),
**{k:v for k,v in settings.items()
if k not in pos_args}) | Returns a clone of the object with matching parameter values
containing the specified args and kwargs. | Below is the the instruction that describes the task:
### Input:
Returns a clone of the object with matching parameter values
containing the specified args and kwargs.
### Response:
def clone(self, *args, **overrides):
"""
Returns a clone of the object with matching parameter values
containing the specified args and kwargs.
"""
link = overrides.pop('link', True)
settings = dict(self.get_param_values(), **overrides)
if 'id' not in settings:
settings['id'] = self.id
if not args and link:
settings['plot_id'] = self._plot_id
pos_args = getattr(self, '_' + type(self).__name__ + '__pos_params', [])
return self.__class__(*(settings[n] for n in pos_args),
**{k:v for k,v in settings.items()
if k not in pos_args}) |
def _get_popularity_baseline(self):
"""
Returns a new popularity model matching the data set this model was
trained with. Can be used for comparison purposes.
"""
response = self.__proxy__.get_popularity_baseline()
from .popularity_recommender import PopularityRecommender
return PopularityRecommender(response) | Returns a new popularity model matching the data set this model was
trained with. Can be used for comparison purposes. | Below is the the instruction that describes the task:
### Input:
Returns a new popularity model matching the data set this model was
trained with. Can be used for comparison purposes.
### Response:
def _get_popularity_baseline(self):
"""
Returns a new popularity model matching the data set this model was
trained with. Can be used for comparison purposes.
"""
response = self.__proxy__.get_popularity_baseline()
from .popularity_recommender import PopularityRecommender
return PopularityRecommender(response) |
def setCheckedItems(self, items):
"""
Sets the checked items for this combobox.
:param items | [<str>, ..]
"""
if not self.isCheckable():
return
model = self.model()
for i in range(self.count()):
item_text = self.itemText(i)
if not item_text:
continue
if nativestring(item_text) in items:
state = Qt.Checked
else:
state = Qt.Unchecked
model.item(i).setCheckState(state) | Sets the checked items for this combobox.
:param items | [<str>, ..]
### Input:
Sets the checked items for this combobox.
:param items | [<str>, ..]
### Response:
def setCheckedItems(self, items):
"""
Sets the checked items for this combobox.
:param items | [<str>, ..]
"""
if not self.isCheckable():
return
model = self.model()
for i in range(self.count()):
item_text = self.itemText(i)
if not item_text:
continue
if nativestring(item_text) in items:
state = Qt.Checked
else:
state = Qt.Unchecked
model.item(i).setCheckState(state) |
def get_binary_dist(self, requirement):
"""
Get or create a cached binary distribution archive.
:param requirement: A :class:`.Requirement` object.
:returns: An iterable of tuples with two values each: A
:class:`tarfile.TarInfo` object and a file-like object.
Gets the cached binary distribution that was previously built for the
given requirement. If no binary distribution has been cached yet, a new
binary distribution is built and added to the cache.
Uses :func:`build_binary_dist()` to build binary distribution
archives. If this fails with a build error :func:`get_binary_dist()`
will use :class:`.SystemPackageManager` to check for and install
missing system packages and retry the build when missing system
packages were installed.
"""
cache_file = self.cache.get(requirement)
if cache_file:
if self.needs_invalidation(requirement, cache_file):
logger.info("Invalidating old %s binary (source has changed) ..", requirement)
cache_file = None
else:
logger.debug("%s hasn't been cached yet, doing so now.", requirement)
if not cache_file:
# Build the binary distribution.
try:
raw_file = self.build_binary_dist(requirement)
except BuildFailed:
logger.warning("Build of %s failed, checking for missing dependencies ..", requirement)
if self.system_package_manager.install_dependencies(requirement):
raw_file = self.build_binary_dist(requirement)
else:
raise
# Transform the binary distribution archive into a form that we can re-use.
fd, transformed_file = tempfile.mkstemp(prefix='pip-accel-bdist-', suffix='.tar.gz')
try:
archive = tarfile.open(transformed_file, 'w:gz')
try:
for member, from_handle in self.transform_binary_dist(raw_file):
archive.addfile(member, from_handle)
finally:
archive.close()
# Push the binary distribution archive to all available backends.
with open(transformed_file, 'rb') as handle:
self.cache.put(requirement, handle)
finally:
# Close file descriptor before removing the temporary file.
# Without closing it first, Windows complains that the file cannot
# be removed because it is used by another process.
os.close(fd)
# Cleanup the temporary file.
os.remove(transformed_file)
# Get the absolute pathname of the file in the local cache.
cache_file = self.cache.get(requirement)
# Enable checksum based cache invalidation.
self.persist_checksum(requirement, cache_file)
archive = tarfile.open(cache_file, 'r:gz')
try:
for member in archive.getmembers():
yield member, archive.extractfile(member.name)
finally:
archive.close() | Get or create a cached binary distribution archive.
:param requirement: A :class:`.Requirement` object.
:returns: An iterable of tuples with two values each: A
:class:`tarfile.TarInfo` object and a file-like object.
Gets the cached binary distribution that was previously built for the
given requirement. If no binary distribution has been cached yet, a new
binary distribution is built and added to the cache.
Uses :func:`build_binary_dist()` to build binary distribution
archives. If this fails with a build error :func:`get_binary_dist()`
will use :class:`.SystemPackageManager` to check for and install
missing system packages and retry the build when missing system
packages were installed. | Below is the the instruction that describes the task:
### Input:
Get or create a cached binary distribution archive.
:param requirement: A :class:`.Requirement` object.
:returns: An iterable of tuples with two values each: A
:class:`tarfile.TarInfo` object and a file-like object.
Gets the cached binary distribution that was previously built for the
given requirement. If no binary distribution has been cached yet, a new
binary distribution is built and added to the cache.
Uses :func:`build_binary_dist()` to build binary distribution
archives. If this fails with a build error :func:`get_binary_dist()`
will use :class:`.SystemPackageManager` to check for and install
missing system packages and retry the build when missing system
packages were installed.
### Response:
def get_binary_dist(self, requirement):
"""
Get or create a cached binary distribution archive.
:param requirement: A :class:`.Requirement` object.
:returns: An iterable of tuples with two values each: A
:class:`tarfile.TarInfo` object and a file-like object.
Gets the cached binary distribution that was previously built for the
given requirement. If no binary distribution has been cached yet, a new
binary distribution is built and added to the cache.
Uses :func:`build_binary_dist()` to build binary distribution
archives. If this fails with a build error :func:`get_binary_dist()`
will use :class:`.SystemPackageManager` to check for and install
missing system packages and retry the build when missing system
packages were installed.
"""
cache_file = self.cache.get(requirement)
if cache_file:
if self.needs_invalidation(requirement, cache_file):
logger.info("Invalidating old %s binary (source has changed) ..", requirement)
cache_file = None
else:
logger.debug("%s hasn't been cached yet, doing so now.", requirement)
if not cache_file:
# Build the binary distribution.
try:
raw_file = self.build_binary_dist(requirement)
except BuildFailed:
logger.warning("Build of %s failed, checking for missing dependencies ..", requirement)
if self.system_package_manager.install_dependencies(requirement):
raw_file = self.build_binary_dist(requirement)
else:
raise
# Transform the binary distribution archive into a form that we can re-use.
fd, transformed_file = tempfile.mkstemp(prefix='pip-accel-bdist-', suffix='.tar.gz')
try:
archive = tarfile.open(transformed_file, 'w:gz')
try:
for member, from_handle in self.transform_binary_dist(raw_file):
archive.addfile(member, from_handle)
finally:
archive.close()
# Push the binary distribution archive to all available backends.
with open(transformed_file, 'rb') as handle:
self.cache.put(requirement, handle)
finally:
# Close file descriptor before removing the temporary file.
# Without closing it first, Windows complains that the file cannot
# be removed because it is used by another process.
os.close(fd)
# Cleanup the temporary file.
os.remove(transformed_file)
# Get the absolute pathname of the file in the local cache.
cache_file = self.cache.get(requirement)
# Enable checksum based cache invalidation.
self.persist_checksum(requirement, cache_file)
archive = tarfile.open(cache_file, 'r:gz')
try:
for member in archive.getmembers():
yield member, archive.extractfile(member.name)
finally:
archive.close() |
def row_col_maker(app, fromdocname, all_needs, need_info, need_key, make_ref=False, ref_lookup=False, prefix=''):
"""
Creates and returns a column.
:param app: current sphinx app
:param fromdocname: current document
:param all_needs: Dictionary of all need objects
:param need_info: need_info object, which stores all related need data
:param need_key: The key to access the needed data from need_info
:param make_ref: If true, creates a reference for the given data in need_key
:param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference
:param prefix: string, which is used as prefix for the text output
:return: column object (nodes.entry)
"""
row_col = nodes.entry()
para_col = nodes.paragraph()
if need_key in need_info and need_info[need_key] is not None:
if not isinstance(need_info[need_key], (list, set)):
data = [need_info[need_key]]
else:
data = need_info[need_key]
for index, datum in enumerate(data):
link_id = datum
link_part = None
if need_key in ['links', 'back_links']:
if '.' in datum:
link_id = datum.split('.')[0]
link_part = datum.split('.')[1]
datum_text = prefix + datum
text_col = nodes.Text(datum_text, datum_text)
if make_ref or ref_lookup:
try:
ref_col = nodes.reference("", "")
if not ref_lookup:
ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, need_info['docname'])
ref_col['refuri'] += "#" + datum
else:
temp_need = all_needs[link_id]
ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, temp_need['docname'])
ref_col['refuri'] += "#" + temp_need["id"]
if link_part is not None:
ref_col['refuri'] += '.' + link_part
except KeyError:
para_col += text_col
else:
ref_col.append(text_col)
para_col += ref_col
else:
para_col += text_col
if index + 1 < len(data):
para_col += nodes.emphasis("; ", "; ")
row_col += para_col
return row_col | Creates and returns a column.
:param app: current sphinx app
:param fromdocname: current document
:param all_needs: Dictionary of all need objects
:param need_info: need_info object, which stores all related need data
:param need_key: The key to access the needed data from need_info
:param make_ref: If true, creates a reference for the given data in need_key
:param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference
:param prefix: string, which is used as prefix for the text output
:return: column object (nodes.entry) | Below is the the instruction that describes the task:
### Input:
Creates and returns a column.
:param app: current sphinx app
:param fromdocname: current document
:param all_needs: Dictionary of all need objects
:param need_info: need_info object, which stores all related need data
:param need_key: The key to access the needed data from need_info
:param make_ref: If true, creates a reference for the given data in need_key
:param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference
:param prefix: string, which is used as prefix for the text output
:return: column object (nodes.entry)
### Response:
def row_col_maker(app, fromdocname, all_needs, need_info, need_key, make_ref=False, ref_lookup=False, prefix=''):
"""
Creates and returns a column.
:param app: current sphinx app
:param fromdocname: current document
:param all_needs: Dictionary of all need objects
:param need_info: need_info object, which stores all related need data
:param need_key: The key to access the needed data from need_info
:param make_ref: If true, creates a reference for the given data in need_key
:param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference
:param prefix: string, which is used as prefix for the text output
:return: column object (nodes.entry)
"""
row_col = nodes.entry()
para_col = nodes.paragraph()
if need_key in need_info and need_info[need_key] is not None:
if not isinstance(need_info[need_key], (list, set)):
data = [need_info[need_key]]
else:
data = need_info[need_key]
for index, datum in enumerate(data):
link_id = datum
link_part = None
if need_key in ['links', 'back_links']:
if '.' in datum:
link_id = datum.split('.')[0]
link_part = datum.split('.')[1]
datum_text = prefix + datum
text_col = nodes.Text(datum_text, datum_text)
if make_ref or ref_lookup:
try:
ref_col = nodes.reference("", "")
if not ref_lookup:
ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, need_info['docname'])
ref_col['refuri'] += "#" + datum
else:
temp_need = all_needs[link_id]
ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, temp_need['docname'])
ref_col['refuri'] += "#" + temp_need["id"]
if link_part is not None:
ref_col['refuri'] += '.' + link_part
except KeyError:
para_col += text_col
else:
ref_col.append(text_col)
para_col += ref_col
else:
para_col += text_col
if index + 1 < len(data):
para_col += nodes.emphasis("; ", "; ")
row_col += para_col
return row_col |
def at_time(self, time, nearest_sample=False):
""" Return the value at the specified gps time
"""
if nearest_sample:
time += self.delta_t / 2.0
return self[int((time-self.start_time)*self.sample_rate)] | Return the value at the specified gps time | Below is the the instruction that describes the task:
### Input:
Return the value at the specified gps time
### Response:
def at_time(self, time, nearest_sample=False):
""" Return the value at the specified gps time
"""
if nearest_sample:
time += self.delta_t / 2.0
return self[int((time-self.start_time)*self.sample_rate)] |
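A quick worked example of the index arithmetic above, as a standalone sketch; the start time, sample rate and query time are made-up values, not taken from any particular series:
# Illustrative numbers only: how at_time maps a GPS time to a sample index.
start_time = 100.0          # series start (GPS seconds)
sample_rate = 4096.0        # samples per second
delta_t = 1.0 / sample_rate
time = 100.50015            # falls between two samples
print(int((time - start_time) * sample_rate))                  # 2048 (truncates)
print(int((time + delta_t / 2.0 - start_time) * sample_rate))  # 2049 (nearest sample)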
def route_request(self, request_json, metadata=None):
''' Route the request object to the right handler function '''
request = Request(request_json)
request.metadata = metadata
# add reprompt handler or some such for default?
handler_fn = self._handlers[self._default] # Set default handling for noisy requests
if not request.is_intent() and (request.request_type() in self._handlers):
''' Route request to a non intent handler '''
handler_fn = self._handlers[request.request_type()]
elif request.is_intent() and request.intent_name() in self._handlers['IntentRequest']:
''' Route to right intent handler '''
handler_fn = self._handlers['IntentRequest'][request.intent_name()]
response = handler_fn(request)
response.set_session(request.session)
return response.to_json() | Route the request object to the right handler function | Below is the the instruction that describes the task:
### Input:
Route the request object to the right handler function
### Response:
def route_request(self, request_json, metadata=None):
''' Route the request object to the right handler function '''
request = Request(request_json)
request.metadata = metadata
# add reprompt handler or some such for default?
handler_fn = self._handlers[self._default] # Set default handling for noisy requests
if not request.is_intent() and (request.request_type() in self._handlers):
''' Route request to a non intent handler '''
handler_fn = self._handlers[request.request_type()]
elif request.is_intent() and request.intent_name() in self._handlers['IntentRequest']:
''' Route to right intent handler '''
handler_fn = self._handlers['IntentRequest'][request.intent_name()]
response = handler_fn(request)
response.set_session(request.session)
return response.to_json() |
def find_commands(command_dir: str) -> List[str]:
"""
    Get all command names in a folder
    :return: List of command names
"""
if not command_dir:
return []
return [name for _, name, is_pkg in pkgutil.iter_modules([command_dir])
            if not is_pkg and not name.startswith('_')] | Get all command names in a folder
:return: List of command names | Below is the the instruction that describes the task:
### Input:
Get all command names in a folder
:return: List of command names
### Response:
def find_commands(command_dir: str) -> List[str]:
"""
    Get all command names in a folder
    :return: List of command names
"""
if not command_dir:
return []
return [name for _, name, is_pkg in pkgutil.iter_modules([command_dir])
if not is_pkg and not name.startswith('_')] |
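As a usage sketch (the 'commands' directory layout is hypothetical and find_commands above is assumed to be importable):
import os
# Discover command modules in a package's "commands" directory, e.g.
# myapp/commands/{migrate.py, shell.py, _private.py} -> ['migrate', 'shell'].
command_dir = os.path.join(os.path.dirname(__file__), "commands")
print(find_commands(command_dir))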
def loadbinary(fname):
"""
Load a numpy binary file or archive created by tabular.io.savebinary.
Load a numpy binary file (``.npy``) or archive (``.npz``) created by
:func:`tabular.io.savebinary`.
The data and associated data type (e.g. `dtype`, including if given, column
names) are loaded and reconstituted.
If `fname` is a numpy archive, it may contain additional data giving
hierarchical column-oriented structure (e.g. `coloring`). See
:func:`tabular.tab.tabarray.__new__` for more information about coloring.
The ``.npz`` file is a zipped archive created using :func:`numpy.savez` and
containing one or more ``.npy`` files, which are NumPy binary files created
by :func:`numpy.save`.
**Parameters**
**fname** : string or file-like object
File name or open numpy binary file (``.npy``) or archive
(``.npz``) created by :func:`tabular.io.savebinary`.
* When `fname` is a ``.npy`` binary file, it is reconstituted as a
flat ndarray of data, with structured dtype.
* When `fname` is a ``.npz`` archive, it contains at least one
``.npy`` binary file and optionally another:
* ``data.npy`` must be in the archive, and is reconstituted as `X`,
a flat ndarray of data, with structured dtype, `dtype`.
            * ``coloring.npy``, if present, is reconstituted as `coloring`, a
dictionary.
**Returns**
**X** : numpy ndarray with structured dtype
The data, where each column is named and is of a uniform NumPy data
type.
**dtype** : numpy dtype object
The data type of `X`, e.g. `X.dtype`.
**coloring** : dictionary, or None
Hierarchical structure on the columns given in the header of the
file; an attribute of tabarrays.
See :func:`tabular.tab.tabarray.__new__` for more information about
coloring.
**See Also:**
:func:`tabular.io.savebinary`, :func:`numpy.load`,
:func:`numpy.save`, :func:`numpy.savez`
"""
X = np.load(fname)
if isinstance(X, np.lib.npyio.NpzFile):
if 'coloring' in X.files:
coloring = X['coloring'].tolist()
else:
coloring = None
if 'data' in X.files:
return [X['data'], X['data'].dtype, coloring]
else:
return [None, None, coloring]
else:
return [X, X.dtype, None] | Load a numpy binary file or archive created by tabular.io.savebinary.
Load a numpy binary file (``.npy``) or archive (``.npz``) created by
:func:`tabular.io.savebinary`.
The data and associated data type (e.g. `dtype`, including if given, column
names) are loaded and reconstituted.
If `fname` is a numpy archive, it may contain additional data giving
hierarchical column-oriented structure (e.g. `coloring`). See
:func:`tabular.tab.tabarray.__new__` for more information about coloring.
The ``.npz`` file is a zipped archive created using :func:`numpy.savez` and
containing one or more ``.npy`` files, which are NumPy binary files created
by :func:`numpy.save`.
**Parameters**
**fname** : string or file-like object
File name or open numpy binary file (``.npy``) or archive
(``.npz``) created by :func:`tabular.io.savebinary`.
* When `fname` is a ``.npy`` binary file, it is reconstituted as a
flat ndarray of data, with structured dtype.
* When `fname` is a ``.npz`` archive, it contains at least one
``.npy`` binary file and optionally another:
* ``data.npy`` must be in the archive, and is reconstituted as `X`,
a flat ndarray of data, with structured dtype, `dtype`.
        * ``coloring.npy``, if present, is reconstituted as `coloring`, a
dictionary.
**Returns**
**X** : numpy ndarray with structured dtype
The data, where each column is named and is of a uniform NumPy data
type.
**dtype** : numpy dtype object
The data type of `X`, e.g. `X.dtype`.
**coloring** : dictionary, or None
Hierarchical structure on the columns given in the header of the
file; an attribute of tabarrays.
See :func:`tabular.tab.tabarray.__new__` for more information about
coloring.
**See Also:**
:func:`tabular.io.savebinary`, :func:`numpy.load`,
:func:`numpy.save`, :func:`numpy.savez` | Below is the the instruction that describes the task:
### Input:
Load a numpy binary file or archive created by tabular.io.savebinary.
Load a numpy binary file (``.npy``) or archive (``.npz``) created by
:func:`tabular.io.savebinary`.
The data and associated data type (e.g. `dtype`, including if given, column
names) are loaded and reconstituted.
If `fname` is a numpy archive, it may contain additional data giving
hierarchical column-oriented structure (e.g. `coloring`). See
:func:`tabular.tab.tabarray.__new__` for more information about coloring.
The ``.npz`` file is a zipped archive created using :func:`numpy.savez` and
containing one or more ``.npy`` files, which are NumPy binary files created
by :func:`numpy.save`.
**Parameters**
**fname** : string or file-like object
File name or open numpy binary file (``.npy``) or archive
(``.npz``) created by :func:`tabular.io.savebinary`.
* When `fname` is a ``.npy`` binary file, it is reconstituted as a
flat ndarray of data, with structured dtype.
* When `fname` is a ``.npz`` archive, it contains at least one
``.npy`` binary file and optionally another:
* ``data.npy`` must be in the archive, and is reconstituted as `X`,
a flat ndarray of data, with structured dtype, `dtype`.
        * ``coloring.npy``, if present, is reconstituted as `coloring`, a
dictionary.
**Returns**
**X** : numpy ndarray with structured dtype
The data, where each column is named and is of a uniform NumPy data
type.
**dtype** : numpy dtype object
The data type of `X`, e.g. `X.dtype`.
**coloring** : dictionary, or None
Hierarchical structure on the columns given in the header of the
file; an attribute of tabarrays.
See :func:`tabular.tab.tabarray.__new__` for more information about
coloring.
**See Also:**
:func:`tabular.io.savebinary`, :func:`numpy.load`,
:func:`numpy.save`, :func:`numpy.savez`
### Response:
def loadbinary(fname):
"""
Load a numpy binary file or archive created by tabular.io.savebinary.
Load a numpy binary file (``.npy``) or archive (``.npz``) created by
:func:`tabular.io.savebinary`.
The data and associated data type (e.g. `dtype`, including if given, column
names) are loaded and reconstituted.
If `fname` is a numpy archive, it may contain additional data giving
hierarchical column-oriented structure (e.g. `coloring`). See
:func:`tabular.tab.tabarray.__new__` for more information about coloring.
The ``.npz`` file is a zipped archive created using :func:`numpy.savez` and
containing one or more ``.npy`` files, which are NumPy binary files created
by :func:`numpy.save`.
**Parameters**
**fname** : string or file-like object
File name or open numpy binary file (``.npy``) or archive
(``.npz``) created by :func:`tabular.io.savebinary`.
* When `fname` is a ``.npy`` binary file, it is reconstituted as a
flat ndarray of data, with structured dtype.
* When `fname` is a ``.npz`` archive, it contains at least one
``.npy`` binary file and optionally another:
* ``data.npy`` must be in the archive, and is reconstituted as `X`,
a flat ndarray of data, with structured dtype, `dtype`.
            * ``coloring.npy``, if present, is reconstituted as `coloring`, a
dictionary.
**Returns**
**X** : numpy ndarray with structured dtype
The data, where each column is named and is of a uniform NumPy data
type.
**dtype** : numpy dtype object
The data type of `X`, e.g. `X.dtype`.
**coloring** : dictionary, or None
Hierarchical structure on the columns given in the header of the
file; an attribute of tabarrays.
See :func:`tabular.tab.tabarray.__new__` for more information about
coloring.
**See Also:**
:func:`tabular.io.savebinary`, :func:`numpy.load`,
:func:`numpy.save`, :func:`numpy.savez`
"""
X = np.load(fname)
if isinstance(X, np.lib.npyio.NpzFile):
if 'coloring' in X.files:
coloring = X['coloring'].tolist()
else:
coloring = None
if 'data' in X.files:
return [X['data'], X['data'].dtype, coloring]
else:
return [None, None, coloring]
else:
return [X, X.dtype, None] |
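A minimal round-trip sketch, assuming loadbinary above is importable; numpy.savez stands in for tabular.io.savebinary and the file name is made up:
import numpy as np
# Save a small structured array the way savebinary would (a 'data' entry in an .npz archive).
arr = np.array([(1, 2.5), (3, 4.0)], dtype=[("a", "i4"), ("b", "f8")])
np.savez("example.npz", data=arr)
X, dtype, coloring = loadbinary("example.npz")
print(X["a"], dtype, coloring)  # coloring is None because none was stored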
def add_attribute_model(self,
name, # type: str
attr, # type: AttributeModel
writeable_func=None, # type: Optional[Callable]
):
# type: (...) -> AttributeModel
"""Register a pre-existing AttributeModel to be added to the Block"""
return self._field_registry.add_attribute_model(
name, attr, writeable_func, self._part) | Register a pre-existing AttributeModel to be added to the Block | Below is the the instruction that describes the task:
### Input:
Register a pre-existing AttributeModel to be added to the Block
### Response:
def add_attribute_model(self,
name, # type: str
attr, # type: AttributeModel
writeable_func=None, # type: Optional[Callable]
):
# type: (...) -> AttributeModel
"""Register a pre-existing AttributeModel to be added to the Block"""
return self._field_registry.add_attribute_model(
name, attr, writeable_func, self._part) |
def queue_instances(instances):
'''
Queue a set of instances to be provisioned later. Expects a list.
Currently this only queries node data, and then places it in the cloud
cache (if configured). If the salt-cloud-reactor is being used, these
instances will be automatically provisioned using that.
For more information about the salt-cloud-reactor, see:
https://github.com/saltstack-formulas/salt-cloud-reactor
'''
for instance_id in instances:
node = _get_node(instance_id=instance_id)
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) | Queue a set of instances to be provisioned later. Expects a list.
Currently this only queries node data, and then places it in the cloud
cache (if configured). If the salt-cloud-reactor is being used, these
instances will be automatically provisioned using that.
For more information about the salt-cloud-reactor, see:
https://github.com/saltstack-formulas/salt-cloud-reactor | Below is the the instruction that describes the task:
### Input:
Queue a set of instances to be provisioned later. Expects a list.
Currently this only queries node data, and then places it in the cloud
cache (if configured). If the salt-cloud-reactor is being used, these
instances will be automatically provisioned using that.
For more information about the salt-cloud-reactor, see:
https://github.com/saltstack-formulas/salt-cloud-reactor
### Response:
def queue_instances(instances):
'''
Queue a set of instances to be provisioned later. Expects a list.
Currently this only queries node data, and then places it in the cloud
cache (if configured). If the salt-cloud-reactor is being used, these
instances will be automatically provisioned using that.
For more information about the salt-cloud-reactor, see:
https://github.com/saltstack-formulas/salt-cloud-reactor
'''
for instance_id in instances:
node = _get_node(instance_id=instance_id)
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__) |
def _load_entities(self):
"""
Must load all the entities it needs from cache, and
return ``False`` if it could not find all of them.
"""
if not self._chat_peer:
return True # Nothing to load (e.g. MessageDeleted)
self._chat, self._input_chat = self._get_entity_pair(self.chat_id)
return self._input_chat is not None | Must load all the entities it needs from cache, and
return ``False`` if it could not find all of them. | Below is the the instruction that describes the task:
### Input:
Must load all the entities it needs from cache, and
return ``False`` if it could not find all of them.
### Response:
def _load_entities(self):
"""
Must load all the entities it needs from cache, and
return ``False`` if it could not find all of them.
"""
if not self._chat_peer:
return True # Nothing to load (e.g. MessageDeleted)
self._chat, self._input_chat = self._get_entity_pair(self.chat_id)
return self._input_chat is not None |
def print_stats(correctness, confidence, name):
"""
Prints out accuracy, coverage, etc. statistics
:param correctness: ndarray
One bool per example specifying whether it was correctly classified
:param confidence: ndarray
The probability associated with each prediction
:param name: str
The name of this type of data (e.g. "clean", "MaxConfidence")
"""
accuracy = correctness.mean()
wrongness = 1 - correctness
denom1 = np.maximum(1, wrongness.sum())
ave_prob_on_mistake = (wrongness * confidence).sum() / denom1
assert ave_prob_on_mistake <= 1., ave_prob_on_mistake
denom2 = np.maximum(1, correctness.sum())
ave_prob_on_correct = (correctness * confidence).sum() / denom2
covered = confidence > 0.5
cov_half = covered.mean()
acc_half = (correctness * covered).sum() / np.maximum(1, covered.sum())
print('Accuracy on %s examples: %0.4f' % (name, accuracy))
print("Average prob on mistakes: %0.4f" % ave_prob_on_mistake)
print("Average prob on correct: %0.4f" % ave_prob_on_correct)
print("Accuracy when prob thresholded at .5: %0.4f" % acc_half)
print("Coverage when prob thresholded at .5: %0.4f" % cov_half)
success_rate = acc_half * cov_half
# Success is correctly classifying a covered example
print("Success rate at .5: %0.4f" % success_rate)
# Failure is misclassifying a covered example
failure_rate = (1. - acc_half) * cov_half
print("Failure rate at .5: %0.4f" % failure_rate)
print() | Prints out accuracy, coverage, etc. statistics
:param correctness: ndarray
One bool per example specifying whether it was correctly classified
:param confidence: ndarray
The probability associated with each prediction
:param name: str
The name of this type of data (e.g. "clean", "MaxConfidence") | Below is the the instruction that describes the task:
### Input:
Prints out accuracy, coverage, etc. statistics
:param correctness: ndarray
One bool per example specifying whether it was correctly classified
:param confidence: ndarray
The probability associated with each prediction
:param name: str
The name of this type of data (e.g. "clean", "MaxConfidence")
### Response:
def print_stats(correctness, confidence, name):
"""
Prints out accuracy, coverage, etc. statistics
:param correctness: ndarray
One bool per example specifying whether it was correctly classified
:param confidence: ndarray
The probability associated with each prediction
:param name: str
The name of this type of data (e.g. "clean", "MaxConfidence")
"""
accuracy = correctness.mean()
wrongness = 1 - correctness
denom1 = np.maximum(1, wrongness.sum())
ave_prob_on_mistake = (wrongness * confidence).sum() / denom1
assert ave_prob_on_mistake <= 1., ave_prob_on_mistake
denom2 = np.maximum(1, correctness.sum())
ave_prob_on_correct = (correctness * confidence).sum() / denom2
covered = confidence > 0.5
cov_half = covered.mean()
acc_half = (correctness * covered).sum() / np.maximum(1, covered.sum())
print('Accuracy on %s examples: %0.4f' % (name, accuracy))
print("Average prob on mistakes: %0.4f" % ave_prob_on_mistake)
print("Average prob on correct: %0.4f" % ave_prob_on_correct)
print("Accuracy when prob thresholded at .5: %0.4f" % acc_half)
print("Coverage when prob thresholded at .5: %0.4f" % cov_half)
success_rate = acc_half * cov_half
# Success is correctly classifying a covered example
print("Success rate at .5: %0.4f" % success_rate)
# Failure is misclassifying a covered example
failure_rate = (1. - acc_half) * cov_half
print("Failure rate at .5: %0.4f" % failure_rate)
print() |
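For illustration, a tiny call with made-up arrays, assuming print_stats above is importable:
import numpy as np
# Three predictions: two correct, one wrong, with varying confidence.
correctness = np.array([1.0, 1.0, 0.0])
confidence = np.array([0.9, 0.4, 0.8])
print_stats(correctness, confidence, "clean")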
def origin_central_asia(origin):
"""\
Returns if the origin is located in Central Asia.
Holds true for the following countries:
* Afghanistan
* Kazakhstan
* Kyrgyzstan
* Tajikistan
* Turkmenistan
* Uzbekistan
`origin`
The origin to check.
"""
return origin_afghanistan(origin) or origin_kazakhstan(origin) \
or origin_kyrgyzstan(origin) or origin_tajikistan(origin) \
or origin_turkmenistan(origin) or origin_uzbekistan(origin) | \
Returns if the origin is located in Central Asia.
Holds true for the following countries:
* Afghanistan
* Kazakhstan
* Kyrgyzstan
* Tajikistan
* Turkmenistan
* Uzbekistan
`origin`
The origin to check. | Below is the the instruction that describes the task:
### Input:
\
Returns if the origin is located in Central Asia.
Holds true for the following countries:
* Afghanistan
* Kazakhstan
* Kyrgyzstan
* Tajikistan
* Turkmenistan
* Uzbekistan
`origin`
The origin to check.
### Response:
def origin_central_asia(origin):
"""\
Returns if the origin is located in Central Asia.
Holds true for the following countries:
* Afghanistan
* Kazakhstan
* Kyrgyzstan
* Tajikistan
* Turkmenistan
* Uzbekistan
`origin`
The origin to check.
"""
return origin_afghanistan(origin) or origin_kazakhstan(origin) \
or origin_kyrgyzstan(origin) or origin_tajikistan(origin) \
or origin_turkmenistan(origin) or origin_uzbekistan(origin) |
def derivatives(self, x, y, Rs, theta_Rs, e1, e2, center_x=0, center_y=0):
"""
returns df/dx and df/dy of the function (integral of NFW)
"""
phi_G, q = param_util.ellipticity2phi_q(e1, e2)
x_shift = x - center_x
y_shift = y - center_y
cos_phi = np.cos(phi_G)
sin_phi = np.sin(phi_G)
e = min(abs(1. - q), 0.99)
xt1 = (cos_phi*x_shift+sin_phi*y_shift)*np.sqrt(1 - e)
xt2 = (-sin_phi*x_shift+cos_phi*y_shift)*np.sqrt(1 + e)
R_ = np.sqrt(xt1**2 + xt2**2)
rho0_input = self.nfw._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs)
if Rs < 0.0000001:
Rs = 0.0000001
f_x_prim, f_y_prim = self.nfw.nfwAlpha(R_, Rs, rho0_input, xt1, xt2)
f_x_prim *= np.sqrt(1 - e)
f_y_prim *= np.sqrt(1 + e)
f_x = cos_phi*f_x_prim-sin_phi*f_y_prim
f_y = sin_phi*f_x_prim+cos_phi*f_y_prim
return f_x, f_y | returns df/dx and df/dy of the function (integral of NFW) | Below is the the instruction that describes the task:
### Input:
returns df/dx and df/dy of the function (integral of NFW)
### Response:
def derivatives(self, x, y, Rs, theta_Rs, e1, e2, center_x=0, center_y=0):
"""
returns df/dx and df/dy of the function (integral of NFW)
"""
phi_G, q = param_util.ellipticity2phi_q(e1, e2)
x_shift = x - center_x
y_shift = y - center_y
cos_phi = np.cos(phi_G)
sin_phi = np.sin(phi_G)
e = min(abs(1. - q), 0.99)
xt1 = (cos_phi*x_shift+sin_phi*y_shift)*np.sqrt(1 - e)
xt2 = (-sin_phi*x_shift+cos_phi*y_shift)*np.sqrt(1 + e)
R_ = np.sqrt(xt1**2 + xt2**2)
rho0_input = self.nfw._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs)
if Rs < 0.0000001:
Rs = 0.0000001
f_x_prim, f_y_prim = self.nfw.nfwAlpha(R_, Rs, rho0_input, xt1, xt2)
f_x_prim *= np.sqrt(1 - e)
f_y_prim *= np.sqrt(1 + e)
f_x = cos_phi*f_x_prim-sin_phi*f_y_prim
f_y = sin_phi*f_x_prim+cos_phi*f_y_prim
return f_x, f_y |
def exception(self, timeout=None):
"""Return the exception raised by the call, if any.
This blocks until the message has successfully been published, and
returns the exception. If the call succeeded, return None.
Args:
timeout (Union[int, float]): The number of seconds before this call
times out and raises TimeoutError.
Raises:
TimeoutError: If the request times out.
Returns:
Exception: The exception raised by the call, if any.
"""
# Wait until the future is done.
if not self._completed.wait(timeout=timeout):
raise exceptions.TimeoutError("Timed out waiting for result.")
# If the batch completed successfully, this should return None.
if self._result != self._SENTINEL:
return None
# Okay, this batch had an error; this should return it.
return self._exception | Return the exception raised by the call, if any.
This blocks until the message has successfully been published, and
returns the exception. If the call succeeded, return None.
Args:
timeout (Union[int, float]): The number of seconds before this call
times out and raises TimeoutError.
Raises:
TimeoutError: If the request times out.
Returns:
Exception: The exception raised by the call, if any. | Below is the the instruction that describes the task:
### Input:
Return the exception raised by the call, if any.
This blocks until the message has successfully been published, and
returns the exception. If the call succeeded, return None.
Args:
timeout (Union[int, float]): The number of seconds before this call
times out and raises TimeoutError.
Raises:
TimeoutError: If the request times out.
Returns:
Exception: The exception raised by the call, if any.
### Response:
def exception(self, timeout=None):
"""Return the exception raised by the call, if any.
This blocks until the message has successfully been published, and
returns the exception. If the call succeeded, return None.
Args:
timeout (Union[int, float]): The number of seconds before this call
times out and raises TimeoutError.
Raises:
TimeoutError: If the request times out.
Returns:
Exception: The exception raised by the call, if any.
"""
# Wait until the future is done.
if not self._completed.wait(timeout=timeout):
raise exceptions.TimeoutError("Timed out waiting for result.")
# If the batch completed successfully, this should return None.
if self._result != self._SENTINEL:
return None
# Okay, this batch had an error; this should return it.
return self._exception |
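A hedged usage sketch: in google-cloud-pubsub a future of this kind is returned by PublisherClient.publish, so a caller can block on it; the publisher client, topic path and payload below are assumptions, not taken from the source:
# publisher and topic_path are assumed to exist (e.g. from a PublisherClient).
future = publisher.publish(topic_path, b"payload")
error = future.exception(timeout=60)  # None on success, the raised exception otherwise
if error is not None:
    print("publish failed:", error)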
def add_host(host):
""" Put your host information in the prefix object. """
p = new_prefix()
p.prefix = str(host['ipaddr'])
p.type = "host"
p.description = host['description']
p.node = host['fqdn']
p.avps = {}
# Use remaining data from ipplan to populate comment field.
if 'additional' in host:
p.comment = host['additional']
# Use specific info to create extra attributes.
if len(host['location']) > 0:
p.avps['location'] = host['location']
if len(host['mac']) > 0:
p.avps['mac'] = host['mac']
if len(host['phone']) > 0:
p.avps['phone'] = host['phone']
if len(host['user']) > 0:
p.avps['user'] = host['user']
return p | Put your host information in the prefix object. | Below is the the instruction that describes the task:
### Input:
Put your host information in the prefix object.
### Response:
def add_host(host):
""" Put your host information in the prefix object. """
p = new_prefix()
p.prefix = str(host['ipaddr'])
p.type = "host"
p.description = host['description']
p.node = host['fqdn']
p.avps = {}
# Use remaining data from ipplan to populate comment field.
if 'additional' in host:
p.comment = host['additional']
# Use specific info to create extra attributes.
if len(host['location']) > 0:
p.avps['location'] = host['location']
if len(host['mac']) > 0:
p.avps['mac'] = host['mac']
if len(host['phone']) > 0:
p.avps['phone'] = host['phone']
if len(host['user']) > 0:
p.avps['user'] = host['user']
return p |
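A sketch of the host dict shape add_host expects, with made-up values; it assumes new_prefix() from the same module is available:
host = {
    "ipaddr": "192.0.2.10",
    "description": "lab switch",
    "fqdn": "sw01.example.net",
    "location": "rack 4",
    "mac": "00:11:22:33:44:55",
    "phone": "",
    "user": "netops",
    "additional": "imported from ipplan",
}
prefix = add_host(host)  # prefix.avps now carries location, mac and user; phone is skipped because it is empty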
def build_vocab(self, *args, **kwargs):
"""Construct the Vocab object for this field from one or more datasets.
Arguments:
Positional arguments: Dataset objects or other iterable data
sources from which to construct the Vocab object that
represents the set of possible values for this field. If
a Dataset object is provided, all columns corresponding
to this field are used; individual columns can also be
provided directly.
Remaining keyword arguments: Passed to the constructor of Vocab.
"""
counter = Counter()
sources = []
for arg in args:
if isinstance(arg, Dataset):
sources += [getattr(arg, name) for name, field in
arg.fields.items() if field is self]
else:
sources.append(arg)
for data in sources:
for x in data:
if not self.sequential:
x = [x]
try:
counter.update(x)
except TypeError:
counter.update(chain.from_iterable(x))
specials = list(OrderedDict.fromkeys(
tok for tok in [self.unk_token, self.pad_token, self.init_token,
self.eos_token] + kwargs.pop('specials', [])
if tok is not None))
self.vocab = self.vocab_cls(counter, specials=specials, **kwargs) | Construct the Vocab object for this field from one or more datasets.
Arguments:
Positional arguments: Dataset objects or other iterable data
sources from which to construct the Vocab object that
represents the set of possible values for this field. If
a Dataset object is provided, all columns corresponding
to this field are used; individual columns can also be
provided directly.
Remaining keyword arguments: Passed to the constructor of Vocab. | Below is the the instruction that describes the task:
### Input:
Construct the Vocab object for this field from one or more datasets.
Arguments:
Positional arguments: Dataset objects or other iterable data
sources from which to construct the Vocab object that
represents the set of possible values for this field. If
a Dataset object is provided, all columns corresponding
to this field are used; individual columns can also be
provided directly.
Remaining keyword arguments: Passed to the constructor of Vocab.
### Response:
def build_vocab(self, *args, **kwargs):
"""Construct the Vocab object for this field from one or more datasets.
Arguments:
Positional arguments: Dataset objects or other iterable data
sources from which to construct the Vocab object that
represents the set of possible values for this field. If
a Dataset object is provided, all columns corresponding
to this field are used; individual columns can also be
provided directly.
Remaining keyword arguments: Passed to the constructor of Vocab.
"""
counter = Counter()
sources = []
for arg in args:
if isinstance(arg, Dataset):
sources += [getattr(arg, name) for name, field in
arg.fields.items() if field is self]
else:
sources.append(arg)
for data in sources:
for x in data:
if not self.sequential:
x = [x]
try:
counter.update(x)
except TypeError:
counter.update(chain.from_iterable(x))
specials = list(OrderedDict.fromkeys(
tok for tok in [self.unk_token, self.pad_token, self.init_token,
self.eos_token] + kwargs.pop('specials', [])
if tok is not None))
self.vocab = self.vocab_cls(counter, specials=specials, **kwargs) |
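A hedged sketch in the style of legacy torchtext, where this method lives on a Field; TEXT and train are assumed to be an existing Field and Dataset, and the extra keyword arguments are forwarded to Vocab:
# Build the vocabulary from the training split only; tokens seen fewer than
# twice are dropped, and two extra special tokens are appended.
TEXT.build_vocab(train, min_freq=2, specials=["<bos>", "<eos>"])
print(len(TEXT.vocab), TEXT.vocab.itos[:10])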
def update_asset_ddo(self, did, ddo):
"""
Update the ddo of a did already registered.
:param did: Asset DID string
:param ddo: DDO instance
:return: API response (depends on implementation)
"""
response = self.requests_session.put(f'{self.url}/{did}', data=ddo.as_text(),
headers=self._headers)
if response.status_code == 200 or response.status_code == 201:
return json.loads(response.content)
else:
raise Exception(f'Unable to update DDO: {response.content}') | Update the ddo of a did already registered.
:param did: Asset DID string
:param ddo: DDO instance
:return: API response (depends on implementation) | Below is the the instruction that describes the task:
### Input:
Update the ddo of a did already registered.
:param did: Asset DID string
:param ddo: DDO instance
:return: API response (depends on implementation)
### Response:
def update_asset_ddo(self, did, ddo):
"""
Update the ddo of a did already registered.
:param did: Asset DID string
:param ddo: DDO instance
:return: API response (depends on implementation)
"""
response = self.requests_session.put(f'{self.url}/{did}', data=ddo.as_text(),
headers=self._headers)
if response.status_code == 200 or response.status_code == 201:
return json.loads(response.content)
else:
raise Exception(f'Unable to update DDO: {response.content}') |
def create_group(self, name):
"""Create a new group.
Args:
name (string): Name of the group to create.
Raises:
requests.HTTPError on failure.
"""
self.service.create_group(
name, self.url_prefix, self.auth, self.session,
self.session_send_opts) | Create a new group.
Args:
name (string): Name of the group to create.
Raises:
requests.HTTPError on failure. | Below is the the instruction that describes the task:
### Input:
Create a new group.
Args:
name (string): Name of the group to create.
Raises:
requests.HTTPError on failure.
### Response:
def create_group(self, name):
"""Create a new group.
Args:
name (string): Name of the group to create.
Raises:
requests.HTTPError on failure.
"""
self.service.create_group(
name, self.url_prefix, self.auth, self.session,
self.session_send_opts) |
def handle_relative(self, event):
"""Relative mouse movement."""
delta_x, delta_y = self._get_relative(event)
if delta_x:
self.events.append(
self.emulate_rel(0x00,
delta_x,
self.timeval))
if delta_y:
self.events.append(
self.emulate_rel(0x01,
delta_y,
self.timeval)) | Relative mouse movement. | Below is the the instruction that describes the task:
### Input:
Relative mouse movement.
### Response:
def handle_relative(self, event):
"""Relative mouse movement."""
delta_x, delta_y = self._get_relative(event)
if delta_x:
self.events.append(
self.emulate_rel(0x00,
delta_x,
self.timeval))
if delta_y:
self.events.append(
self.emulate_rel(0x01,
delta_y,
self.timeval)) |
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
pretty = '{}namespace {}:\n'.format(spaces, self.name)
pretty += '\n\n'.join(c.pretty_str(indent + 2) for c in self.children)
return pretty | Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation. | Below is the the instruction that describes the task:
### Input:
Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
### Response:
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
pretty = '{}namespace {}:\n'.format(spaces, self.name)
pretty += '\n\n'.join(c.pretty_str(indent + 2) for c in self.children)
return pretty |
def _parse_proc_pid_cgroup(content):
"""
Parse a /proc/*/cgroup file into tuples of (subsystem,cgroup).
@param content: An iterable over the lines of the file.
@return: a generator of tuples
"""
for ownCgroup in content:
#each line is "id:subsystem,subsystem:path"
ownCgroup = ownCgroup.strip().split(':')
try:
path = ownCgroup[2][1:] # remove leading /
except IndexError:
raise IndexError("index out of range for " + str(ownCgroup))
for subsystem in ownCgroup[1].split(','):
yield (subsystem, path) | Parse a /proc/*/cgroup file into tuples of (subsystem,cgroup).
@param content: An iterable over the lines of the file.
@return: a generator of tuples | Below is the the instruction that describes the task:
### Input:
Parse a /proc/*/cgroup file into tuples of (subsystem,cgroup).
@param content: An iterable over the lines of the file.
@return: a generator of tuples
### Response:
def _parse_proc_pid_cgroup(content):
"""
Parse a /proc/*/cgroup file into tuples of (subsystem,cgroup).
@param content: An iterable over the lines of the file.
@return: a generator of tuples
"""
for ownCgroup in content:
#each line is "id:subsystem,subsystem:path"
ownCgroup = ownCgroup.strip().split(':')
try:
path = ownCgroup[2][1:] # remove leading /
except IndexError:
raise IndexError("index out of range for " + str(ownCgroup))
for subsystem in ownCgroup[1].split(','):
yield (subsystem, path) |
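A quick self-contained check with sample /proc/<pid>/cgroup lines, assuming the function above is importable:
sample = [
    "7:cpuacct,cpu:/user.slice\n",
    "4:memory:/user.slice\n",
]
print(list(_parse_proc_pid_cgroup(sample)))
# [('cpuacct', 'user.slice'), ('cpu', 'user.slice'), ('memory', 'user.slice')]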
def find(file_node, dirs=ICON_DIRS, default_name=None, file_ext='.png'):
"""
Iterating all icon dirs, try to find a file called like the node's
extension / mime subtype / mime type (in that order).
For instance, for an MP3 file ("audio/mpeg"), this would look for:
"mp3.png" / "audio/mpeg.png" / "audio.png"
"""
names = []
for attr_name in ('extension', 'mimetype', 'mime_supertype'):
attr = getattr(file_node, attr_name)
if attr:
names.append(attr)
if default_name:
names.append(default_name)
icon_path = StaticPathFinder.find(names, dirs, file_ext)
if icon_path:
        return StaticIconFile(file_node, icon_path) | Iterating all icon dirs, try to find a file named after the node's
extension / mime subtype / mime type (in that order).
For instance, for an MP3 file ("audio/mpeg"), this would look for:
"mp3.png" / "audio/mpeg.png" / "audio.png" | Below is the the instruction that describes the task:
### Input:
Iterating all icon dirs, try to find a file named after the node's
extension / mime subtype / mime type (in that order).
For instance, for an MP3 file ("audio/mpeg"), this would look for:
"mp3.png" / "audio/mpeg.png" / "audio.png"
### Response:
def find(file_node, dirs=ICON_DIRS, default_name=None, file_ext='.png'):
"""
    Iterating all icon dirs, try to find a file named after the node's
extension / mime subtype / mime type (in that order).
For instance, for an MP3 file ("audio/mpeg"), this would look for:
"mp3.png" / "audio/mpeg.png" / "audio.png"
"""
names = []
for attr_name in ('extension', 'mimetype', 'mime_supertype'):
attr = getattr(file_node, attr_name)
if attr:
names.append(attr)
if default_name:
names.append(default_name)
icon_path = StaticPathFinder.find(names, dirs, file_ext)
if icon_path:
return StaticIconFile(file_node, icon_path) |
def lookup_field_class(self, field, obj=None, default=None):
"""
Looks up any additional class we should include when rendering this field
"""
css = ""
# is there a class specified for this field
if field in self.field_config and 'class' in self.field_config[field]:
css = self.field_config[field]['class']
# if we were given a default, use that
elif default:
css = default
return css | Looks up any additional class we should include when rendering this field | Below is the the instruction that describes the task:
### Input:
Looks up any additional class we should include when rendering this field
### Response:
def lookup_field_class(self, field, obj=None, default=None):
"""
Looks up any additional class we should include when rendering this field
"""
css = ""
# is there a class specified for this field
if field in self.field_config and 'class' in self.field_config[field]:
css = self.field_config[field]['class']
# if we were given a default, use that
elif default:
css = default
return css |
def decode_timeseries_row(self, tsrow, tscols=None,
convert_timestamp=False):
"""
Decodes a TsRow into a list
:param tsrow: the protobuf TsRow to decode.
:type tsrow: riak.pb.riak_ts_pb2.TsRow
:param tscols: the protobuf TsColumn data to help decode.
:type tscols: list
:rtype list
"""
row = []
for i, cell in enumerate(tsrow.cells):
col = None
if tscols is not None:
col = tscols[i]
if cell.HasField('varchar_value'):
if col and not (col.type == TsColumnType.Value('VARCHAR') or
col.type == TsColumnType.Value('BLOB')):
raise TypeError('expected VARCHAR or BLOB column')
else:
row.append(cell.varchar_value)
elif cell.HasField('sint64_value'):
if col and col.type != TsColumnType.Value('SINT64'):
raise TypeError('expected SINT64 column')
else:
row.append(cell.sint64_value)
elif cell.HasField('double_value'):
if col and col.type != TsColumnType.Value('DOUBLE'):
raise TypeError('expected DOUBLE column')
else:
row.append(cell.double_value)
elif cell.HasField('timestamp_value'):
if col and col.type != TsColumnType.Value('TIMESTAMP'):
raise TypeError('expected TIMESTAMP column')
else:
dt = cell.timestamp_value
if convert_timestamp:
dt = datetime_from_unix_time_millis(
cell.timestamp_value)
row.append(dt)
elif cell.HasField('boolean_value'):
if col and col.type != TsColumnType.Value('BOOLEAN'):
raise TypeError('expected BOOLEAN column')
else:
row.append(cell.boolean_value)
else:
row.append(None)
return row | Decodes a TsRow into a list
:param tsrow: the protobuf TsRow to decode.
:type tsrow: riak.pb.riak_ts_pb2.TsRow
:param tscols: the protobuf TsColumn data to help decode.
:type tscols: list
:rtype list | Below is the the instruction that describes the task:
### Input:
Decodes a TsRow into a list
:param tsrow: the protobuf TsRow to decode.
:type tsrow: riak.pb.riak_ts_pb2.TsRow
:param tscols: the protobuf TsColumn data to help decode.
:type tscols: list
:rtype list
### Response:
def decode_timeseries_row(self, tsrow, tscols=None,
convert_timestamp=False):
"""
Decodes a TsRow into a list
:param tsrow: the protobuf TsRow to decode.
:type tsrow: riak.pb.riak_ts_pb2.TsRow
:param tscols: the protobuf TsColumn data to help decode.
:type tscols: list
:rtype list
"""
row = []
for i, cell in enumerate(tsrow.cells):
col = None
if tscols is not None:
col = tscols[i]
if cell.HasField('varchar_value'):
if col and not (col.type == TsColumnType.Value('VARCHAR') or
col.type == TsColumnType.Value('BLOB')):
raise TypeError('expected VARCHAR or BLOB column')
else:
row.append(cell.varchar_value)
elif cell.HasField('sint64_value'):
if col and col.type != TsColumnType.Value('SINT64'):
raise TypeError('expected SINT64 column')
else:
row.append(cell.sint64_value)
elif cell.HasField('double_value'):
if col and col.type != TsColumnType.Value('DOUBLE'):
raise TypeError('expected DOUBLE column')
else:
row.append(cell.double_value)
elif cell.HasField('timestamp_value'):
if col and col.type != TsColumnType.Value('TIMESTAMP'):
raise TypeError('expected TIMESTAMP column')
else:
dt = cell.timestamp_value
if convert_timestamp:
dt = datetime_from_unix_time_millis(
cell.timestamp_value)
row.append(dt)
elif cell.HasField('boolean_value'):
if col and col.type != TsColumnType.Value('BOOLEAN'):
raise TypeError('expected BOOLEAN column')
else:
row.append(cell.boolean_value)
else:
row.append(None)
return row |
def _parse_total_magnetization(line, lines):
"""Parse the total magnetization, which is somewhat hidden"""
toks = line.split()
res = {"number of electrons": float(toks[3])}
if len(toks) > 5:
res["total magnetization"] = float(toks[5])
return res | Parse the total magnetization, which is somewhat hidden | Below is the the instruction that describes the task:
### Input:
Parse the total magnetization, which is somewhat hidden
### Response:
def _parse_total_magnetization(line, lines):
"""Parse the total magnetization, which is somewhat hidden"""
toks = line.split()
res = {"number of electrons": float(toks[3])}
if len(toks) > 5:
res["total magnetization"] = float(toks[5])
return res |
def get_labels(self, depth=None):
"""
Returns a list with a copy of the labels in this cell.
Parameters
----------
depth : integer or ``None``
If not ``None``, defines from how many reference levels to
retrieve labels from.
Returns
-------
out : list of ``Label``
List containing the labels in this cell and its references.
"""
labels = libCopy.deepcopy(self.labels)
if depth is None or depth > 0:
for element in self.elements:
if isinstance(element, CellReference):
labels.extend(
element.get_labels(None if depth is None else depth -
1))
elif isinstance(element, CellArray):
labels.extend(
element.get_labels(None if depth is None else depth -
1))
return labels | Returns a list with a copy of the labels in this cell.
Parameters
----------
depth : integer or ``None``
If not ``None``, defines from how many reference levels to
retrieve labels from.
Returns
-------
out : list of ``Label``
List containing the labels in this cell and its references. | Below is the the instruction that describes the task:
### Input:
Returns a list with a copy of the labels in this cell.
Parameters
----------
depth : integer or ``None``
If not ``None``, defines from how many reference levels to
retrieve labels from.
Returns
-------
out : list of ``Label``
List containing the labels in this cell and its references.
### Response:
def get_labels(self, depth=None):
"""
Returns a list with a copy of the labels in this cell.
Parameters
----------
depth : integer or ``None``
If not ``None``, defines from how many reference levels to
retrieve labels from.
Returns
-------
out : list of ``Label``
List containing the labels in this cell and its references.
"""
labels = libCopy.deepcopy(self.labels)
if depth is None or depth > 0:
for element in self.elements:
if isinstance(element, CellReference):
labels.extend(
element.get_labels(None if depth is None else depth -
1))
elif isinstance(element, CellArray):
labels.extend(
element.get_labels(None if depth is None else depth -
1))
return labels |
def compare(array, other, op, ty_str):
"""
Performs passed-in comparison op between every element in the passed-in
array and other, and returns an array of booleans.
Args:
array (WeldObject / Numpy.ndarray): Input array
other (WeldObject / Numpy.ndarray): Second input array
op (str): Op string used for element-wise comparison (== >= <= !=)
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
# Strings need to be encoded into vec[char] array.
# Constants can be added directly to NVL snippet.
if isinstance(other, str) or isinstance(other, WeldObject):
other_var = weld_obj.update(other)
if isinstance(other, WeldObject):
            other_var = other.obj_id
weld_obj.dependencies[other_var] = other
else:
other_var = "%s(%s)" % (ty_str, str(other))
weld_template = """
map(
%(array)s,
|a: %(ty)s| a %(op)s %(other)s
)
"""
weld_obj.weld_code = weld_template % {"array": array_var,
"other": other_var,
"op": op, "ty": ty_str}
return weld_obj | Performs passed-in comparison op between every element in the passed-in
array and other, and returns an array of booleans.
Args:
array (WeldObject / Numpy.ndarray): Input array
other (WeldObject / Numpy.ndarray): Second input array
op (str): Op string used for element-wise comparison (== >= <= !=)
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation | Below is the the instruction that describes the task:
### Input:
Performs passed-in comparison op between every element in the passed-in
array and other, and returns an array of booleans.
Args:
array (WeldObject / Numpy.ndarray): Input array
other (WeldObject / Numpy.ndarray): Second input array
op (str): Op string used for element-wise comparison (== >= <= !=)
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
### Response:
def compare(array, other, op, ty_str):
"""
Performs passed-in comparison op between every element in the passed-in
array and other, and returns an array of booleans.
Args:
array (WeldObject / Numpy.ndarray): Input array
other (WeldObject / Numpy.ndarray): Second input array
op (str): Op string used for element-wise comparison (== >= <= !=)
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
# Strings need to be encoded into vec[char] array.
# Constants can be added directly to NVL snippet.
if isinstance(other, str) or isinstance(other, WeldObject):
other_var = weld_obj.update(other)
if isinstance(other, WeldObject):
            other_var = other.obj_id
weld_obj.dependencies[other_var] = other
else:
other_var = "%s(%s)" % (ty_str, str(other))
weld_template = """
map(
%(array)s,
|a: %(ty)s| a %(op)s %(other)s
)
"""
weld_obj.weld_code = weld_template % {"array": array_var,
"other": other_var,
"op": op, "ty": ty_str}
return weld_obj |
def mean_length(infile, limit=None):
'''Returns the mean length of the sequences in the input file. By default uses all sequences. To limit to the first N sequences, use limit=N'''
total = 0
count = 0
seq_reader = sequences.file_reader(infile)
for seq in seq_reader:
total += len(seq)
count += 1
if limit is not None and count >= limit:
break
assert count > 0
return total / count | Returns the mean length of the sequences in the input file. By default uses all sequences. To limit to the first N sequences, use limit=N | Below is the the instruction that describes the task:
### Input:
Returns the mean length of the sequences in the input file. By default uses all sequences. To limit to the first N sequences, use limit=N
### Response:
def mean_length(infile, limit=None):
'''Returns the mean length of the sequences in the input file. By default uses all sequences. To limit to the first N sequences, use limit=N'''
total = 0
count = 0
seq_reader = sequences.file_reader(infile)
for seq in seq_reader:
total += len(seq)
count += 1
if limit is not None and count >= limit:
break
assert count > 0
return total / count |
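A brief usage sketch; the file name is hypothetical and pyfastaq's sequences module must be available for the reader used above:
# Mean read length over the first 1000 records of a FASTA/FASTQ file.
print(mean_length("reads.fastq", limit=1000))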
def get_file_extension(file_path):
"""
>>> get_file_extension("/a/b/c")
''
>>> get_file_extension("/a/b.txt")
'txt'
>>> get_file_extension("/a/b/c.tar.xz")
'xz'
"""
_ext = os.path.splitext(file_path)[-1]
if _ext:
return _ext[1:] if _ext.startswith('.') else _ext
return "" | >>> get_file_extension("/a/b/c")
''
>>> get_file_extension("/a/b.txt")
'txt'
>>> get_file_extension("/a/b/c.tar.xz")
'xz' | Below is the the instruction that describes the task:
### Input:
>>> get_file_extension("/a/b/c")
''
>>> get_file_extension("/a/b.txt")
'txt'
>>> get_file_extension("/a/b/c.tar.xz")
'xz'
### Response:
def get_file_extension(file_path):
"""
>>> get_file_extension("/a/b/c")
''
>>> get_file_extension("/a/b.txt")
'txt'
>>> get_file_extension("/a/b/c.tar.xz")
'xz'
"""
_ext = os.path.splitext(file_path)[-1]
if _ext:
return _ext[1:] if _ext.startswith('.') else _ext
return "" |
def p_jsonpath_named_operator(self, p):
"jsonpath : NAMED_OPERATOR"
if p[1] == 'this':
p[0] = This()
elif p[1] == 'parent':
p[0] = Parent()
else:
raise Exception('Unknown named operator `%s` at %s:%s' % (p[1], p.lineno(1), p.lexpos(1))) | jsonpath : NAMED_OPERATOR | Below is the the instruction that describes the task:
### Input:
jsonpath : NAMED_OPERATOR
### Response:
def p_jsonpath_named_operator(self, p):
"jsonpath : NAMED_OPERATOR"
if p[1] == 'this':
p[0] = This()
elif p[1] == 'parent':
p[0] = Parent()
else:
raise Exception('Unknown named operator `%s` at %s:%s' % (p[1], p.lineno(1), p.lexpos(1))) |
def sim(self, src, tar):
"""Return the prefix similarity of two strings.
Prefix similarity is the ratio of the length of the shorter term that
exactly matches the longer term to the length of the shorter term,
beginning at the start of both terms.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Prefix similarity
Examples
--------
>>> cmp = Prefix()
>>> cmp.sim('cat', 'hat')
0.0
>>> cmp.sim('Niall', 'Neil')
0.25
>>> cmp.sim('aluminum', 'Catalan')
0.0
>>> cmp.sim('ATCG', 'TAGC')
0.0
"""
if src == tar:
return 1.0
if not src or not tar:
return 0.0
min_word, max_word = (src, tar) if len(src) < len(tar) else (tar, src)
min_len = len(min_word)
for i in range(min_len, 0, -1):
if min_word[:i] == max_word[:i]:
return i / min_len
return 0.0 | Return the prefix similarity of two strings.
Prefix similarity is the ratio of the length of the shorter term that
exactly matches the longer term to the length of the shorter term,
beginning at the start of both terms.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Prefix similarity
Examples
--------
>>> cmp = Prefix()
>>> cmp.sim('cat', 'hat')
0.0
>>> cmp.sim('Niall', 'Neil')
0.25
>>> cmp.sim('aluminum', 'Catalan')
0.0
>>> cmp.sim('ATCG', 'TAGC')
0.0 | Below is the the instruction that describes the task:
### Input:
Return the prefix similarity of two strings.
Prefix similarity is the ratio of the length of the shorter term that
exactly matches the longer term to the length of the shorter term,
beginning at the start of both terms.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Prefix similarity
Examples
--------
>>> cmp = Prefix()
>>> cmp.sim('cat', 'hat')
0.0
>>> cmp.sim('Niall', 'Neil')
0.25
>>> cmp.sim('aluminum', 'Catalan')
0.0
>>> cmp.sim('ATCG', 'TAGC')
0.0
### Response:
def sim(self, src, tar):
"""Return the prefix similarity of two strings.
Prefix similarity is the ratio of the length of the shorter term that
exactly matches the longer term to the length of the shorter term,
beginning at the start of both terms.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Prefix similarity
Examples
--------
>>> cmp = Prefix()
>>> cmp.sim('cat', 'hat')
0.0
>>> cmp.sim('Niall', 'Neil')
0.25
>>> cmp.sim('aluminum', 'Catalan')
0.0
>>> cmp.sim('ATCG', 'TAGC')
0.0
"""
if src == tar:
return 1.0
if not src or not tar:
return 0.0
min_word, max_word = (src, tar) if len(src) < len(tar) else (tar, src)
min_len = len(min_word)
for i in range(min_len, 0, -1):
if min_word[:i] == max_word[:i]:
return i / min_len
return 0.0 |
def delete_firewall_rule(self, server_uuid, firewall_rule_position):
"""
Delete a firewall rule based on a server uuid and rule position.
"""
url = '/server/{0}/firewall_rule/{1}'.format(server_uuid, firewall_rule_position)
return self.request('DELETE', url) | Delete a firewall rule based on a server uuid and rule position. | Below is the the instruction that describes the task:
### Input:
Delete a firewall rule based on a server uuid and rule position.
### Response:
def delete_firewall_rule(self, server_uuid, firewall_rule_position):
"""
Delete a firewall rule based on a server uuid and rule position.
"""
url = '/server/{0}/firewall_rule/{1}'.format(server_uuid, firewall_rule_position)
return self.request('DELETE', url) |
def executable_path(conn, executable):
"""
Remote validator that accepts a connection object to ensure that a certain
executable is available returning its full path if so.
Otherwise an exception with thorough details will be raised, informing the
user that the executable was not found.
"""
executable_path = conn.remote_module.which(executable)
if not executable_path:
raise ExecutableNotFound(executable, conn.hostname)
return executable_path | Remote validator that accepts a connection object to ensure that a certain
executable is available returning its full path if so.
Otherwise an exception with thorough details will be raised, informing the
user that the executable was not found. | Below is the the instruction that describes the task:
### Input:
Remote validator that accepts a connection object to ensure that a certain
executable is available returning its full path if so.
Otherwise an exception with thorough details will be raised, informing the
user that the executable was not found.
### Response:
def executable_path(conn, executable):
"""
Remote validator that accepts a connection object to ensure that a certain
executable is available returning its full path if so.
Otherwise an exception with thorough details will be raised, informing the
user that the executable was not found.
"""
executable_path = conn.remote_module.which(executable)
if not executable_path:
raise ExecutableNotFound(executable, conn.hostname)
return executable_path |
def buffer_typechecks_and_display(self, call_id, payload):
"""Adds typecheck events to the buffer, and displays them right away.
This is a workaround for this issue:
https://github.com/ensime/ensime-server/issues/1616
"""
self.buffer_typechecks(call_id, payload)
self.editor.display_notes(self.buffered_notes) | Adds typecheck events to the buffer, and displays them right away.
This is a workaround for this issue:
https://github.com/ensime/ensime-server/issues/1616 | Below is the the instruction that describes the task:
### Input:
Adds typecheck events to the buffer, and displays them right away.
This is a workaround for this issue:
https://github.com/ensime/ensime-server/issues/1616
### Response:
def buffer_typechecks_and_display(self, call_id, payload):
"""Adds typecheck events to the buffer, and displays them right away.
This is a workaround for this issue:
https://github.com/ensime/ensime-server/issues/1616
"""
self.buffer_typechecks(call_id, payload)
self.editor.display_notes(self.buffered_notes) |
def setCurrentAction(self, action):
"""
Sets the current action for this widget that highlights the size
for this toolbar.
:param action | <QAction>
"""
if action == self._currentAction:
return
self._currentAction = action
self.currentActionChanged.emit(action)
labels = self.actionLabels()
anim_grp = QParallelAnimationGroup(self)
max_size = self.maximumPixmapSize()
min_size = self.minimumPixmapSize()
if action:
label = self.labelForAction(action)
index = labels.index(label)
# create the highlight effect
palette = self.palette()
effect = QGraphicsDropShadowEffect(label)
effect.setXOffset(0)
effect.setYOffset(0)
effect.setBlurRadius(20)
effect.setColor(QColor(40, 40, 40))
label.setGraphicsEffect(effect)
offset = self.padding()
if self.position() in (XDockToolbar.Position.East,
XDockToolbar.Position.West):
self.resize(max_size.width() + offset, self.height())
elif self.position() in (XDockToolbar.Position.North,
XDockToolbar.Position.South):
self.resize(self.width(), max_size.height() + offset)
w = max_size.width()
h = max_size.height()
dw = (max_size.width() - min_size.width()) / 3
dh = (max_size.height() - min_size.height()) / 3
for i in range(4):
before = index - i
after = index + i
if 0 <= before and before < len(labels):
anim = XObjectAnimation(labels[before],
'setPixmapSize',
anim_grp)
anim.setEasingCurve(self.easingCurve())
anim.setStartValue(labels[before].pixmapSize())
anim.setEndValue(QSize(w, h))
anim.setDuration(self.duration())
anim_grp.addAnimation(anim)
if i:
labels[before].setGraphicsEffect(None)
if after != before and 0 <= after and after < len(labels):
anim = XObjectAnimation(labels[after],
'setPixmapSize',
anim_grp)
anim.setEasingCurve(self.easingCurve())
anim.setStartValue(labels[after].pixmapSize())
anim.setEndValue(QSize(w, h))
anim.setDuration(self.duration())
anim_grp.addAnimation(anim)
if i:
labels[after].setGraphicsEffect(None)
w -= dw
h -= dh
else:
offset = self.padding()
for label in self.actionLabels():
# clear the graphics effect
label.setGraphicsEffect(None)
# create the animation
anim = XObjectAnimation(label, 'setPixmapSize', self)
anim.setEasingCurve(self.easingCurve())
anim.setStartValue(label.pixmapSize())
anim.setEndValue(min_size)
anim.setDuration(self.duration())
anim_grp.addAnimation(anim)
anim_grp.finished.connect(self.resizeToMinimum)
anim_grp.start()
self._animating = True
anim_grp.finished.connect(anim_grp.deleteLater)
anim_grp.finished.connect(self.__markAnimatingFinished)
if self._currentAction:
self._hoverTimer.start()
else:
self._hoverTimer.stop() | Sets the current action for this widget that highlights the size
for this toolbar.
:param action | <QAction> | Below is the instruction that describes the task:
### Input:
Sets the current action for this widget that highlights the size
for this toolbar.
:param action | <QAction>
### Response:
def setCurrentAction(self, action):
"""
Sets the current action for this widget that highlights the size
for this toolbar.
:param action | <QAction>
"""
if action == self._currentAction:
return
self._currentAction = action
self.currentActionChanged.emit(action)
labels = self.actionLabels()
anim_grp = QParallelAnimationGroup(self)
max_size = self.maximumPixmapSize()
min_size = self.minimumPixmapSize()
if action:
label = self.labelForAction(action)
index = labels.index(label)
# create the highlight effect
palette = self.palette()
effect = QGraphicsDropShadowEffect(label)
effect.setXOffset(0)
effect.setYOffset(0)
effect.setBlurRadius(20)
effect.setColor(QColor(40, 40, 40))
label.setGraphicsEffect(effect)
offset = self.padding()
if self.position() in (XDockToolbar.Position.East,
XDockToolbar.Position.West):
self.resize(max_size.width() + offset, self.height())
elif self.position() in (XDockToolbar.Position.North,
XDockToolbar.Position.South):
self.resize(self.width(), max_size.height() + offset)
w = max_size.width()
h = max_size.height()
dw = (max_size.width() - min_size.width()) / 3
dh = (max_size.height() - min_size.height()) / 3
for i in range(4):
before = index - i
after = index + i
if 0 <= before and before < len(labels):
anim = XObjectAnimation(labels[before],
'setPixmapSize',
anim_grp)
anim.setEasingCurve(self.easingCurve())
anim.setStartValue(labels[before].pixmapSize())
anim.setEndValue(QSize(w, h))
anim.setDuration(self.duration())
anim_grp.addAnimation(anim)
if i:
labels[before].setGraphicsEffect(None)
if after != before and 0 <= after and after < len(labels):
anim = XObjectAnimation(labels[after],
'setPixmapSize',
anim_grp)
anim.setEasingCurve(self.easingCurve())
anim.setStartValue(labels[after].pixmapSize())
anim.setEndValue(QSize(w, h))
anim.setDuration(self.duration())
anim_grp.addAnimation(anim)
if i:
labels[after].setGraphicsEffect(None)
w -= dw
h -= dh
else:
offset = self.padding()
for label in self.actionLabels():
# clear the graphics effect
label.setGraphicsEffect(None)
# create the animation
anim = XObjectAnimation(label, 'setPixmapSize', self)
anim.setEasingCurve(self.easingCurve())
anim.setStartValue(label.pixmapSize())
anim.setEndValue(min_size)
anim.setDuration(self.duration())
anim_grp.addAnimation(anim)
anim_grp.finished.connect(self.resizeToMinimum)
anim_grp.start()
self._animating = True
anim_grp.finished.connect(anim_grp.deleteLater)
anim_grp.finished.connect(self.__markAnimatingFinished)
if self._currentAction:
self._hoverTimer.start()
else:
self._hoverTimer.stop() |
def _get_all(self, *args, **kwargs):
"""If 'force' is in the headers, retrieve the list of keys from S3.
Otherwise, use the list() function to retrieve the keys from MimicDB.
"""
headers = kwargs.get('headers', args[2] if len(args) > 2 else None) or dict()
if 'force' in headers:
keys = super(Bucket, self)._get_all(*args, **kwargs)
for key in keys:
mimicdb.backend.sadd(tpl.bucket % self.name, key.name)
mimicdb.backend.hmset(tpl.key % (self.name, key.name), dict(size=key.size, md5=key.etag.strip('"')))
key.name = key.name
return keys
prefix = kwargs.get('prefix', '')
return list(self.list(prefix=prefix)) | If 'force' is in the headers, retrieve the list of keys from S3.
Otherwise, use the list() function to retrieve the keys from MimicDB. | Below is the instruction that describes the task:
### Input:
If 'force' is in the headers, retrieve the list of keys from S3.
Otherwise, use the list() function to retrieve the keys from MimicDB.
### Response:
def _get_all(self, *args, **kwargs):
"""If 'force' is in the headers, retrieve the list of keys from S3.
Otherwise, use the list() function to retrieve the keys from MimicDB.
"""
headers = kwargs.get('headers', args[2] if len(args) > 2 else None) or dict()
if 'force' in headers:
keys = super(Bucket, self)._get_all(*args, **kwargs)
for key in keys:
mimicdb.backend.sadd(tpl.bucket % self.name, key.name)
mimicdb.backend.hmset(tpl.key % (self.name, key.name), dict(size=key.size, md5=key.etag.strip('"')))
key.name = key.name
return keys
prefix = kwargs.get('prefix', '')
return list(self.list(prefix=prefix)) |
def price_rounding(price, decimals=2):
"""Takes a decimal price and rounds to a number of decimal places"""
try:
exponent = D('.' + decimals * '0')
except InvalidOperation:
# Currencies with no decimal places, ex. JPY, HUF
exponent = D()
return price.quantize(exponent, rounding=ROUND_UP) | Takes a decimal price and rounds to a number of decimal places | Below is the instruction that describes the task:
### Input:
Takes a decimal price and rounds to a number of decimal places
### Response:
def price_rounding(price, decimals=2):
"""Takes a decimal price and rounds to a number of decimal places"""
try:
exponent = D('.' + decimals * '0')
except InvalidOperation:
# Currencies with no decimal places, ex. JPY, HUF
exponent = D()
return price.quantize(exponent, rounding=ROUND_UP) |
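A small self-contained sketch of the Decimal.quantize behaviour this rounding helper relies on; the sample prices below are made up for illustration:
from decimal import Decimal, ROUND_UP
# Quantizing to Decimal('0.00') keeps two decimal places; ROUND_UP always rounds away from zero.
print(Decimal('19.991').quantize(Decimal('0.00'), rounding=ROUND_UP))  # 20.00
# An empty Decimal() has exponent 0, so prices quantize to whole units (e.g. JPY, HUF).
print(Decimal('19.991').quantize(Decimal(), rounding=ROUND_UP))        # 20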
def double_width(self, action):
'''Enable/cancel doublewidth printing
Args:
action: Enable or disable doublewidth printing. Options are 'on' and 'off'
Returns:
None
Raises:
RuntimeError: Invalid action.
'''
if action == 'on':
action = '1'
elif action == 'off':
action = '0'
else:
raise RuntimeError('Invalid action for function doubleWidth. Options are on and off')
self.send(chr(27)+'W'+action) | Enable/cancel doublewidth printing
Args:
action: Enable or disable doublewidth printing. Options are 'on' and 'off'
Returns:
None
Raises:
RuntimeError: Invalid action. | Below is the instruction that describes the task:
### Input:
Enable/cancel doublewidth printing
Args:
action: Enable or disable doublewidth printing. Options are 'on' and 'off'
Returns:
None
Raises:
RuntimeError: Invalid action.
### Response:
def double_width(self, action):
'''Enable/cancel doublewidth printing
Args:
action: Enable or disable doublewidth printing. Options are 'on' and 'off'
Returns:
None
Raises:
RuntimeError: Invalid action.
'''
if action == 'on':
action = '1'
elif action == 'off':
action = '0'
else:
raise RuntimeError('Invalid action for function doubleWidth. Options are on and off')
self.send(chr(27)+'W'+action) |
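A standalone sketch of the escape sequence the method above sends; the helper name is invented, and it only rebuilds the same ESC 'W' command string without any printer connection:
ESC = chr(27)
def double_width_command(action):
    # ESC 'W' '1' selects double-width printing, ESC 'W' '0' cancels it.
    flags = {'on': '1', 'off': '0'}
    if action not in flags:
        raise RuntimeError('Invalid action for function doubleWidth. Options are on and off')
    return ESC + 'W' + flags[action]
print(repr(double_width_command('on')))   # '\x1bW1'
print(repr(double_width_command('off')))  # '\x1bW0'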
def main(input_filename, songname, format, counter):
"""
Calculate the fingerprint hashes of the referenced audio file and save
to disk as a pickle file
"""
# open the file & convert to wav
song_data = AudioSegment.from_file(input_filename, format=format)
song_data = song_data.set_channels(1) # convert to mono
wav_tmp = song_data.export(format="wav") # write to a tmp file buffer
wav_tmp.seek(0)
rate, wav_data = wavfile.read(wav_tmp)
# extract peaks and compute constellation hashes & offsets
peaks = resound.get_peaks(np.array(wav_data))
fingerprints = list(resound.hashes(peaks)) # hash, offset pairs
if not fingerprints:
raise RuntimeError("No fingerprints detected in source file - check your parameters passed to Resound.")
# Combine duplicate keys
for fp, abs_offset in fingerprints:
counter[fp].append((abs_offset, songname))
print " Identified {} keypoints in '{}'.".format(len(counter), songname)
return counter | Calculate the fingerprint hashes of the referenced audio file and save
to disk as a pickle file | Below is the instruction that describes the task:
### Input:
Calculate the fingerprint hashes of the referenced audio file and save
to disk as a pickle file
### Response:
def main(input_filename, songname, format, counter):
"""
Calculate the fingerprint hashes of the referenced audio file and save
to disk as a pickle file
"""
# open the file & convert to wav
song_data = AudioSegment.from_file(input_filename, format=format)
song_data = song_data.set_channels(1) # convert to mono
wav_tmp = song_data.export(format="wav") # write to a tmp file buffer
wav_tmp.seek(0)
rate, wav_data = wavfile.read(wav_tmp)
# extract peaks and compute constellation hashes & offsets
peaks = resound.get_peaks(np.array(wav_data))
fingerprints = list(resound.hashes(peaks)) # hash, offset pairs
if not fingerprints:
raise RuntimeError("No fingerprints detected in source file - check your parameters passed to Resound.")
# Combine duplicate keys
for fp, abs_offset in fingerprints:
counter[fp].append((abs_offset, songname))
print " Identified {} keypoints in '{}'.".format(len(counter), songname)
return counter |
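The counter argument above is expected to behave like a collections.defaultdict(list); a tiny sketch of how duplicate hashes accumulate (hash values and song name are invented):
import collections
fingerprints = [('a1b2', 10), ('ffee', 42), ('a1b2', 95)]  # (hash, offset) pairs
counter = collections.defaultdict(list)
songname = 'my-song'
for fp, abs_offset in fingerprints:
    counter[fp].append((abs_offset, songname))
print(dict(counter))
# {'a1b2': [(10, 'my-song'), (95, 'my-song')], 'ffee': [(42, 'my-song')]}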
def control(self, on=[], off=[]):
"""
This method serves as the primary interaction point
to the controls interface.
- The 'on' and 'off' arguments can either be a list or a single string.
This allows for both individual device control and batch controls.
Note:
Both the onlist and offlist are optional.
If only one item is being managed, it can be passed as a string.
Usage:
- Turning off all devices:
ctrlobj.control(off="all")
- Turning on all devices:
ctrlobj.control(on="all")
- Turning on the light and fan ONLY (for example)
ctrlobj.control(on=["light", "fan"])
- Turning on the light and turning off the fan (for example)
ctrlobj.control(on="light", off="fan")
"""
controls = {"light", "valve", "fan", "pump"}
def cast_arg(arg):
if type(arg) is str:
if arg == "all":
return controls
else:
return {arg} & controls
else:
return set(arg) & controls
# User has requested individual controls.
for item in cast_arg(on):
self.manage(item, "on")
for item in cast_arg(off):
self.manage(item, "off")
sleep(.01) # Force delay to throttle requests
return self.update() | This method serves as the primary interaction point
to the controls interface.
- The 'on' and 'off' arguments can either be a list or a single string.
This allows for both individual device control and batch controls.
Note:
Both the onlist and offlist are optional.
If only one item is being managed, it can be passed as a string.
Usage:
- Turning off all devices:
ctrlobj.control(off="all")
- Turning on all devices:
ctrlobj.control(on="all")
- Turning on the light and fan ONLY (for example)
ctrlobj.control(on=["light", "fan"])
- Turning on the light and turning off the fan (for example)
ctrlobj.control(on="light", off="fan") | Below is the instruction that describes the task:
### Input:
This method serves as the primary interaction point
to the controls interface.
- The 'on' and 'off' arguments can either be a list or a single string.
This allows for both individual device control and batch controls.
Note:
Both the onlist and offlist are optional.
If only one item is being managed, it can be passed as a string.
Usage:
- Turning off all devices:
ctrlobj.control(off="all")
- Turning on all devices:
ctrlobj.control(on="all")
- Turning on the light and fan ONLY (for example)
ctrlobj.control(on=["light", "fan"])
- Turning on the light and turning off the fan (for example)
ctrlobj.control(on="light", off="fan")
### Response:
def control(self, on=[], off=[]):
"""
This method serves as the primary interaction point
to the controls interface.
- The 'on' and 'off' arguments can either be a list or a single string.
This allows for both individual device control and batch controls.
Note:
Both the onlist and offlist are optional.
If only one item is being managed, it can be passed as a string.
Usage:
- Turning off all devices:
ctrlobj.control(off="all")
- Turning on all devices:
ctrlobj.control(on="all")
- Turning on the light and fan ONLY (for example)
ctrlobj.control(on=["light", "fan"])
- Turning on the light and turning off the fan (for example)
ctrlobj.control(on="light", off="fan")
"""
controls = {"light", "valve", "fan", "pump"}
def cast_arg(arg):
if type(arg) is str:
if arg == "all":
return controls
else:
return {arg} & controls
else:
return set(arg) & controls
# User has requested individual controls.
for item in cast_arg(on):
self.manage(item, "on")
for item in cast_arg(off):
self.manage(item, "off")
sleep(.01) # Force delay to throttle requests
return self.update() |
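A minimal sketch of the cast_arg normalisation used above, lifted out of the class; the device names follow the docstring and the call values are illustrative:
controls = {"light", "valve", "fan", "pump"}
def cast_arg(arg):
    # Accept "all", a single device name, or an iterable of names,
    # and return a set restricted to the known controls.
    if isinstance(arg, str):
        return controls if arg == "all" else {arg} & controls
    return set(arg) & controls
print(sorted(cast_arg("all")))              # ['fan', 'light', 'pump', 'valve']
print(sorted(cast_arg("fan")))              # ['fan']
print(sorted(cast_arg(["light", "typo"])))  # ['light'] -- unknown names are dropped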
def build(
src, requirements=None, local_package=None,
config_file='config.yaml', profile_name=None,
):
"""Builds the file bundle.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPi)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Get the absolute path to the output directory and create it if it doesn't
# already exist.
dist_directory = cfg.get('dist_directory', 'dist')
path_to_dist = os.path.join(src, dist_directory)
mkdir(path_to_dist)
# Combine the name of the Lambda function with the current timestamp to use
# for the output filename.
function_name = cfg.get('function_name')
output_filename = '{0}-{1}.zip'.format(timestamp(), function_name)
path_to_temp = mkdtemp(prefix='aws-lambda')
pip_install_to_target(
path_to_temp,
requirements=requirements,
local_package=local_package,
)
# Hack for Zope.
if 'zope' in os.listdir(path_to_temp):
print(
'Zope packages detected; fixing Zope package paths to '
'make them importable.',
)
# Touch.
with open(os.path.join(path_to_temp, 'zope/__init__.py'), 'wb'):
pass
# Gracefully handle whether ".zip" was included in the filename or not.
output_filename = (
'{0}.zip'.format(output_filename)
if not output_filename.endswith('.zip')
else output_filename
)
# Allow definition of source code directories we want to build into our
# zipped package.
build_config = defaultdict(**cfg.get('build', {}))
build_source_directories = build_config.get('source_directories', '')
build_source_directories = (
build_source_directories
if build_source_directories is not None
else ''
)
source_directories = [
d.strip() for d in build_source_directories.split(',')
]
files = []
for filename in os.listdir(src):
if os.path.isfile(filename):
if filename == '.DS_Store':
continue
if filename == config_file:
continue
print('Bundling: %r' % filename)
files.append(os.path.join(src, filename))
elif os.path.isdir(filename) and filename in source_directories:
print('Bundling directory: %r' % filename)
files.append(os.path.join(src, filename))
# "cd" into `temp_path` directory.
os.chdir(path_to_temp)
for f in files:
if os.path.isfile(f):
_, filename = os.path.split(f)
# Copy handler file into root of the packages folder.
copyfile(f, os.path.join(path_to_temp, filename))
copystat(f, os.path.join(path_to_temp, filename))
elif os.path.isdir(f):
destination_folder = os.path.join(path_to_temp, f[len(src) + 1:])
copytree(f, destination_folder)
# Zip them together into a single file.
# TODO: Delete temp directory created once the archive has been compiled.
path_to_zip_file = archive('./', path_to_dist, output_filename)
return path_to_zip_file | Builds the file bundle.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPi) | Below is the instruction that describes the task:
### Input:
Builds the file bundle.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPi)
### Response:
def build(
src, requirements=None, local_package=None,
config_file='config.yaml', profile_name=None,
):
"""Builds the file bundle.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPi)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Get the absolute path to the output directory and create it if it doesn't
# already exist.
dist_directory = cfg.get('dist_directory', 'dist')
path_to_dist = os.path.join(src, dist_directory)
mkdir(path_to_dist)
# Combine the name of the Lambda function with the current timestamp to use
# for the output filename.
function_name = cfg.get('function_name')
output_filename = '{0}-{1}.zip'.format(timestamp(), function_name)
path_to_temp = mkdtemp(prefix='aws-lambda')
pip_install_to_target(
path_to_temp,
requirements=requirements,
local_package=local_package,
)
# Hack for Zope.
if 'zope' in os.listdir(path_to_temp):
print(
'Zope packages detected; fixing Zope package paths to '
'make them importable.',
)
# Touch.
with open(os.path.join(path_to_temp, 'zope/__init__.py'), 'wb'):
pass
# Gracefully handle whether ".zip" was included in the filename or not.
output_filename = (
'{0}.zip'.format(output_filename)
if not output_filename.endswith('.zip')
else output_filename
)
# Allow definition of source code directories we want to build into our
# zipped package.
build_config = defaultdict(**cfg.get('build', {}))
build_source_directories = build_config.get('source_directories', '')
build_source_directories = (
build_source_directories
if build_source_directories is not None
else ''
)
source_directories = [
d.strip() for d in build_source_directories.split(',')
]
files = []
for filename in os.listdir(src):
if os.path.isfile(filename):
if filename == '.DS_Store':
continue
if filename == config_file:
continue
print('Bundling: %r' % filename)
files.append(os.path.join(src, filename))
elif os.path.isdir(filename) and filename in source_directories:
print('Bundling directory: %r' % filename)
files.append(os.path.join(src, filename))
# "cd" into `temp_path` directory.
os.chdir(path_to_temp)
for f in files:
if os.path.isfile(f):
_, filename = os.path.split(f)
# Copy handler file into root of the packages folder.
copyfile(f, os.path.join(path_to_temp, filename))
copystat(f, os.path.join(path_to_temp, filename))
elif os.path.isdir(f):
destination_folder = os.path.join(path_to_temp, f[len(src) + 1:])
copytree(f, destination_folder)
# Zip them together into a single file.
# TODO: Delete temp directory created once the archive has been compiled.
path_to_zip_file = archive('./', path_to_dist, output_filename)
return path_to_zip_file |
def index(self, key):
""" Return the index of the given item.
:param key:
:return:
"""
if isinstance(key, int):
if 0 <= key < len(self.__keys):
return key
raise IndexError(key)
elif isinstance(key, str):
try:
return self.__keys.index(key)
except ValueError:
raise KeyError(key)
else:
raise TypeError(key) | Return the index of the given item.
:param key:
:return: | Below is the instruction that describes the task:
### Input:
Return the index of the given item.
:param key:
:return:
### Response:
def index(self, key):
""" Return the index of the given item.
:param key:
:return:
"""
if isinstance(key, int):
if 0 <= key < len(self.__keys):
return key
raise IndexError(key)
elif isinstance(key, str):
try:
return self.__keys.index(key)
except ValueError:
raise KeyError(key)
else:
raise TypeError(key) |
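The same lookup rules sketched as a plain function over a hypothetical key list, purely for illustration:
keys = ['name', 'age', 'city']
def resolve_index(key):
    # Integers are range-checked positions; strings are resolved by value.
    if isinstance(key, int):
        if 0 <= key < len(keys):
            return key
        raise IndexError(key)
    if isinstance(key, str):
        try:
            return keys.index(key)
        except ValueError:
            raise KeyError(key)
    raise TypeError(key)
print(resolve_index(1))       # 1
print(resolve_index('city'))  # 2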
def receive(args, reactor=reactor, _debug_stash_wormhole=None):
"""I implement 'wormhole receive'. I return a Deferred that fires with
None (for success), or signals one of the following errors:
* WrongPasswordError: the two sides didn't use matching passwords
* Timeout: something didn't happen fast enough for our tastes
* TransferError: the sender rejected the transfer: verifier mismatch
* any other error: something unexpected happened
"""
r = Receiver(args, reactor)
d = r.go()
if _debug_stash_wormhole is not None:
_debug_stash_wormhole.append(r._w)
return d | I implement 'wormhole receive'. I return a Deferred that fires with
None (for success), or signals one of the following errors:
* WrongPasswordError: the two sides didn't use matching passwords
* Timeout: something didn't happen fast enough for our tastes
* TransferError: the sender rejected the transfer: verifier mismatch
* any other error: something unexpected happened | Below is the instruction that describes the task:
### Input:
I implement 'wormhole receive'. I return a Deferred that fires with
None (for success), or signals one of the following errors:
* WrongPasswordError: the two sides didn't use matching passwords
* Timeout: something didn't happen fast enough for our tastes
* TransferError: the sender rejected the transfer: verifier mismatch
* any other error: something unexpected happened
### Response:
def receive(args, reactor=reactor, _debug_stash_wormhole=None):
"""I implement 'wormhole receive'. I return a Deferred that fires with
None (for success), or signals one of the following errors:
* WrongPasswordError: the two sides didn't use matching passwords
* Timeout: something didn't happen fast enough for our tastes
* TransferError: the sender rejected the transfer: verifier mismatch
* any other error: something unexpected happened
"""
r = Receiver(args, reactor)
d = r.go()
if _debug_stash_wormhole is not None:
_debug_stash_wormhole.append(r._w)
return d |
def _add_chrome_proxy_extension(
chrome_options, proxy_string, proxy_user, proxy_pass):
""" Implementation of https://stackoverflow.com/a/35293284 for
https://stackoverflow.com/questions/12848327/
(Run Selenium on a proxy server that requires authentication.) """
if not "".join(sys.argv) == "-c":
# Single-threaded
proxy_helper.create_proxy_zip(proxy_string, proxy_user, proxy_pass)
else:
# Pytest multi-threaded test
lock = threading.Lock()
with lock:
time.sleep(random.uniform(0.02, 0.15))
if not os.path.exists(PROXY_ZIP_PATH):
proxy_helper.create_proxy_zip(
proxy_string, proxy_user, proxy_pass)
time.sleep(random.uniform(0.1, 0.2))
proxy_zip = PROXY_ZIP_PATH
if not os.path.exists(PROXY_ZIP_PATH):
# Handle "Permission denied" on the default proxy.zip path
proxy_zip = PROXY_ZIP_PATH_2
chrome_options.add_extension(proxy_zip)
return chrome_options | Implementation of https://stackoverflow.com/a/35293284 for
https://stackoverflow.com/questions/12848327/
(Run Selenium on a proxy server that requires authentication.) | Below is the instruction that describes the task:
### Input:
Implementation of https://stackoverflow.com/a/35293284 for
https://stackoverflow.com/questions/12848327/
(Run Selenium on a proxy server that requires authentication.)
### Response:
def _add_chrome_proxy_extension(
chrome_options, proxy_string, proxy_user, proxy_pass):
""" Implementation of https://stackoverflow.com/a/35293284 for
https://stackoverflow.com/questions/12848327/
(Run Selenium on a proxy server that requires authentication.) """
if not "".join(sys.argv) == "-c":
# Single-threaded
proxy_helper.create_proxy_zip(proxy_string, proxy_user, proxy_pass)
else:
# Pytest multi-threaded test
lock = threading.Lock()
with lock:
time.sleep(random.uniform(0.02, 0.15))
if not os.path.exists(PROXY_ZIP_PATH):
proxy_helper.create_proxy_zip(
proxy_string, proxy_user, proxy_pass)
time.sleep(random.uniform(0.1, 0.2))
proxy_zip = PROXY_ZIP_PATH
if not os.path.exists(PROXY_ZIP_PATH):
# Handle "Permission denied" on the default proxy.zip path
proxy_zip = PROXY_ZIP_PATH_2
chrome_options.add_extension(proxy_zip)
return chrome_options |
async def monitor_status(self, alarm_status_callback=None,
zone_changed_callback=None,
output_changed_callback=None):
"""Start monitoring of the alarm status.
Send command to satel integra to start sending updates. Read in a
loop and call respective callbacks when received messages.
"""
self._alarm_status_callback = alarm_status_callback
self._zone_changed_callback = zone_changed_callback
self._output_changed_callback = output_changed_callback
_LOGGER.info("Starting monitor_status loop")
while not self.closed:
_LOGGER.debug("Iteration... ")
while not self.connected:
_LOGGER.info("Not connected, re-connecting... ")
await self.connect()
if not self.connected:
_LOGGER.warning("Not connected, sleeping for 10s... ")
await asyncio.sleep(self._reconnection_timeout)
continue
await self.start_monitoring()
if not self.connected:
_LOGGER.warning("Start monitoring failed, sleeping for 10s...")
await asyncio.sleep(self._reconnection_timeout)
continue
while True:
await self._update_status()
_LOGGER.debug("Got status!")
if not self.connected:
_LOGGER.info("Got connection broken, reconnecting!")
break
_LOGGER.info("Closed, quit monitoring.") | Start monitoring of the alarm status.
Send command to satel integra to start sending updates. Read in a
loop and call respective callbacks when received messages. | Below is the instruction that describes the task:
### Input:
Start monitoring of the alarm status.
Send command to satel integra to start sending updates. Read in a
loop and call respective callbacks when received messages.
### Response:
async def monitor_status(self, alarm_status_callback=None,
zone_changed_callback=None,
output_changed_callback=None):
"""Start monitoring of the alarm status.
Send command to satel integra to start sending updates. Read in a
loop and call respective callbacks when received messages.
"""
self._alarm_status_callback = alarm_status_callback
self._zone_changed_callback = zone_changed_callback
self._output_changed_callback = output_changed_callback
_LOGGER.info("Starting monitor_status loop")
while not self.closed:
_LOGGER.debug("Iteration... ")
while not self.connected:
_LOGGER.info("Not connected, re-connecting... ")
await self.connect()
if not self.connected:
_LOGGER.warning("Not connected, sleeping for 10s... ")
await asyncio.sleep(self._reconnection_timeout)
continue
await self.start_monitoring()
if not self.connected:
_LOGGER.warning("Start monitoring failed, sleeping for 10s...")
await asyncio.sleep(self._reconnection_timeout)
continue
while True:
await self._update_status()
_LOGGER.debug("Got status!")
if not self.connected:
_LOGGER.info("Got connection broken, reconnecting!")
break
_LOGGER.info("Closed, quit monitoring.") |
def _set_avg_session_metrics(session_group):
"""Sets the metrics for the group to be the average of its sessions.
The resulting session group metrics consist of the union of metrics across
the group's sessions. The value of each session group metric is the average
of that metric values across the sessions in the group. The 'step' and
'wall_time_secs' fields of the resulting MetricValue field in the session
group are populated with the corresponding averages (truncated for 'step')
as well.
Args:
session_group: A SessionGroup protobuffer.
"""
assert session_group.sessions, 'SessionGroup cannot be empty.'
# Algorithm: Iterate over all (session, metric) pairs and maintain a
# dict from _MetricIdentifier to _MetricStats objects.
# Then use the final dict state to compute the average for each metric.
metric_stats = collections.defaultdict(_MetricStats)
for session in session_group.sessions:
for metric_value in session.metric_values:
metric_name = _MetricIdentifier(group=metric_value.name.group,
tag=metric_value.name.tag)
stats = metric_stats[metric_name]
stats.total += metric_value.value
stats.count += 1
stats.total_step += metric_value.training_step
stats.total_wall_time_secs += metric_value.wall_time_secs
del session_group.metric_values[:]
for (metric_name, stats) in six.iteritems(metric_stats):
session_group.metric_values.add(
name=api_pb2.MetricName(group=metric_name.group, tag=metric_name.tag),
value=float(stats.total)/float(stats.count),
training_step=stats.total_step // stats.count,
wall_time_secs=stats.total_wall_time_secs / stats.count) | Sets the metrics for the group to be the average of its sessions.
The resulting session group metrics consist of the union of metrics across
the group's sessions. The value of each session group metric is the average
of that metric values across the sessions in the group. The 'step' and
'wall_time_secs' fields of the resulting MetricValue field in the session
group are populated with the corresponding averages (truncated for 'step')
as well.
Args:
session_group: A SessionGroup protobuffer. | Below is the instruction that describes the task:
### Input:
Sets the metrics for the group to be the average of its sessions.
The resulting session group metrics consist of the union of metrics across
the group's sessions. The value of each session group metric is the average
of that metric values across the sessions in the group. The 'step' and
'wall_time_secs' fields of the resulting MetricValue field in the session
group are populated with the corresponding averages (truncated for 'step')
as well.
Args:
session_group: A SessionGroup protobuffer.
### Response:
def _set_avg_session_metrics(session_group):
"""Sets the metrics for the group to be the average of its sessions.
The resulting session group metrics consist of the union of metrics across
the group's sessions. The value of each session group metric is the average
of that metric values across the sessions in the group. The 'step' and
'wall_time_secs' fields of the resulting MetricValue field in the session
group are populated with the corresponding averages (truncated for 'step')
as well.
Args:
session_group: A SessionGroup protobuffer.
"""
assert session_group.sessions, 'SessionGroup cannot be empty.'
# Algorithm: Iterate over all (session, metric) pairs and maintain a
# dict from _MetricIdentifier to _MetricStats objects.
# Then use the final dict state to compute the average for each metric.
metric_stats = collections.defaultdict(_MetricStats)
for session in session_group.sessions:
for metric_value in session.metric_values:
metric_name = _MetricIdentifier(group=metric_value.name.group,
tag=metric_value.name.tag)
stats = metric_stats[metric_name]
stats.total += metric_value.value
stats.count += 1
stats.total_step += metric_value.training_step
stats.total_wall_time_secs += metric_value.wall_time_secs
del session_group.metric_values[:]
for (metric_name, stats) in six.iteritems(metric_stats):
session_group.metric_values.add(
name=api_pb2.MetricName(group=metric_name.group, tag=metric_name.tag),
value=float(stats.total)/float(stats.count),
training_step=stats.total_step // stats.count,
wall_time_secs=stats.total_wall_time_secs / stats.count) |
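The same accumulate-then-average idea reduced to plain dictionaries instead of protobuf messages; the metric names and values here are invented:
import collections
sessions = [
    {('', 'loss'): 0.40, ('', 'accuracy'): 0.91},
    {('', 'loss'): 0.30, ('', 'accuracy'): 0.93},
]
totals = collections.defaultdict(lambda: [0.0, 0])  # metric -> [sum, count]
for session in sessions:
    for metric_name, value in session.items():
        totals[metric_name][0] += value
        totals[metric_name][1] += 1
averages = {name: total / count for name, (total, count) in totals.items()}
print(averages)  # {('', 'loss'): 0.35..., ('', 'accuracy'): 0.92...}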
def homogenize(series_dict):
"""
Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
corresponding to the locations where they all have data
Parameters
----------
series_dict : dict or DataFrame
Notes
-----
Using the dumbest algorithm I could think of. Should put some more thought
into this
Returns
-------
homogenized : dict of SparseSeries
"""
index = None
need_reindex = False
for _, series in series_dict.items():
if not np.isnan(series.fill_value):
raise TypeError('this method is only valid with NaN fill values')
if index is None:
index = series.sp_index
elif not series.sp_index.equals(index):
need_reindex = True
index = index.intersect(series.sp_index)
if need_reindex:
output = {}
for name, series in series_dict.items():
if not series.sp_index.equals(index):
series = series.sparse_reindex(index)
output[name] = series
else:
output = series_dict
return output | Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
corresponding to the locations where they all have data
Parameters
----------
series_dict : dict or DataFrame
Notes
-----
Using the dumbest algorithm I could think of. Should put some more thought
into this
Returns
-------
homogenized : dict of SparseSeries | Below is the instruction that describes the task:
### Input:
Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
corresponding to the locations where they all have data
Parameters
----------
series_dict : dict or DataFrame
Notes
-----
Using the dumbest algorithm I could think of. Should put some more thought
into this
Returns
-------
homogenized : dict of SparseSeries
### Response:
def homogenize(series_dict):
"""
Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
corresponding to the locations where they all have data
Parameters
----------
series_dict : dict or DataFrame
Notes
-----
Using the dumbest algorithm I could think of. Should put some more thought
into this
Returns
-------
homogenized : dict of SparseSeries
"""
index = None
need_reindex = False
for _, series in series_dict.items():
if not np.isnan(series.fill_value):
raise TypeError('this method is only valid with NaN fill values')
if index is None:
index = series.sp_index
elif not series.sp_index.equals(index):
need_reindex = True
index = index.intersect(series.sp_index)
if need_reindex:
output = {}
for name, series in series_dict.items():
if not series.sp_index.equals(index):
series = series.sparse_reindex(index)
output[name] = series
else:
output = series_dict
return output |
def _CalculateHashDataStream(self, file_entry, data_stream_name):
"""Calculates a message digest hash of the data of the file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): name of the data stream.
Returns:
bytes: digest hash or None.
"""
hash_context = hashlib.sha256()
try:
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
except IOError as exception:
logging.warning((
'Unable to open path specification:\n{0:s}'
'with error: {1!s}').format(
file_entry.path_spec.comparable, exception))
return None
if not file_object:
return None
try:
data = file_object.read(self._READ_BUFFER_SIZE)
while data:
hash_context.update(data)
data = file_object.read(self._READ_BUFFER_SIZE)
except IOError as exception:
logging.warning((
'Unable to read from path specification:\n{0:s}'
'with error: {1!s}').format(
file_entry.path_spec.comparable, exception))
return None
finally:
file_object.close()
return hash_context.hexdigest() | Calculates a message digest hash of the data of the file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): name of the data stream.
Returns:
bytes: digest hash or None. | Below is the instruction that describes the task:
### Input:
Calculates a message digest hash of the data of the file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): name of the data stream.
Returns:
bytes: digest hash or None.
### Response:
def _CalculateHashDataStream(self, file_entry, data_stream_name):
"""Calculates a message digest hash of the data of the file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): name of the data stream.
Returns:
bytes: digest hash or None.
"""
hash_context = hashlib.sha256()
try:
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
except IOError as exception:
logging.warning((
'Unable to open path specification:\n{0:s}'
'with error: {1!s}').format(
file_entry.path_spec.comparable, exception))
return None
if not file_object:
return None
try:
data = file_object.read(self._READ_BUFFER_SIZE)
while data:
hash_context.update(data)
data = file_object.read(self._READ_BUFFER_SIZE)
except IOError as exception:
logging.warning((
'Unable to read from path specification:\n{0:s}'
'with error: {1!s}').format(
file_entry.path_spec.comparable, exception))
return None
finally:
file_object.close()
return hash_context.hexdigest() |
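The read-update loop above is the usual hashlib streaming pattern; a simplified sketch that hashes an ordinary file on disk (the buffer size is arbitrary):
import hashlib
def sha256_of_file(path, buffer_size=32768):
    # Stream the file through the hash context so large files never sit in memory.
    hash_context = hashlib.sha256()
    with open(path, 'rb') as file_object:
        data = file_object.read(buffer_size)
        while data:
            hash_context.update(data)
            data = file_object.read(buffer_size)
    return hash_context.hexdigest()
print(sha256_of_file(__file__))  # hex digest of this script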
def Fritzsche(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
Ps=101325., Zavg=1, E=1):
r'''Calculation function for dealing with flow of a compressible gas in a
pipeline with the Fritzsche formula. Can calculate any of the following,
given all other inputs:
* Flow rate
* Upstream pressure
* Downstream pressure
* Diameter of pipe
* Length of pipe
A variety of different constants and expressions have been presented
for the Fritzsche formula. Here, the form as in [1]_
is used but with all inputs in base SI units.
.. math::
Q = 93.500 \frac{T_s}{P_s}\left(\frac{P_1^2 - P_2^2}
{L\cdot {SG}^{0.8587} \cdot T_{avg}}\right)^{0.538}D^{2.69}
Parameters
----------
SG : float
Specific gravity of fluid with respect to air at the reference
temperature and pressure `Ts` and `Ps`, [-]
Tavg : float
Average temperature of the fluid in the pipeline, [K]
L : float, optional
Length of pipe, [m]
D : float, optional
Diameter of pipe, [m]
P1 : float, optional
Inlet pressure to pipe, [Pa]
P2 : float, optional
Outlet pressure from pipe, [Pa]
Q : float, optional
Flow rate of gas through pipe, [m^3/s]
Ts : float, optional
Reference temperature for the specific gravity of the gas, [K]
Ps : float, optional
Reference pressure for the specific gravity of the gas, [Pa]
Zavg : float, optional
Average compressibility factor for gas, [-]
E : float, optional
Pipeline efficiency, a correction factor between 0 and 1
Returns
-------
Q, P1, P2, D, or L : float
The missing input which was solved for [base SI]
Notes
-----
This model is also presented in [1]_ with a leading constant of 2.827,
the same exponents as used here, units of mm (diameter), kPa, km (length),
and flow in m^3/hour.
This model is shown in base SI units in [2]_, and with a leading constant
of 94.2565, a diameter power of 2.6911, main group power of 0.5382
and a specific gravity power of 0.858. The difference is very small.
Examples
--------
>>> Fritzsche(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15)
39.421535157535565
References
----------
.. [1] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton,
FL: CRC Press, 2005.
.. [2] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations
for Steady State Flow in Natural Gas Pipelines." Journal of the
Brazilian Society of Mechanical Sciences and Engineering 29, no. 3
(September 2007): 262-73. doi:10.1590/S1678-58782007000300005.
'''
# Rational('2.827E-3')/(3600*24)*(1000)**Rational('2.69')*(1000)**Rational('0.538')*1000/(1000**2)**Rational('0.538')
c5 = 93.50009798751128188757518688244137811221 # 14135*10**(57/125)/432
c2 = 0.8587
c3 = 0.538
c4 = 2.69
if Q is None and (None not in [L, D, P1, P2]):
return c5*E*(Ts/Ps)*((P1**2 - P2**2)/(SG**c2*Tavg*L*Zavg))**c3*D**c4
elif D is None and (None not in [L, Q, P1, P2]):
return (Ps*Q*(SG**(-c2)*(P1**2 - P2**2)/(L*Tavg*Zavg))**(-c3)/(E*Ts*c5))**(1./c4)
elif P1 is None and (None not in [L, Q, D, P2]):
return (L*SG**c2*Tavg*Zavg*(D**(-c4)*Ps*Q/(E*Ts*c5))**(1./c3) + P2**2)**0.5
elif P2 is None and (None not in [L, Q, D, P1]):
return (-L*SG**c2*Tavg*Zavg*(D**(-c4)*Ps*Q/(E*Ts*c5))**(1./c3) + P1**2)**0.5
elif L is None and (None not in [P2, Q, D, P1]):
return SG**(-c2)*(D**(-c4)*Ps*Q/(E*Ts*c5))**(-1./c3)*(P1**2 - P2**2)/(Tavg*Zavg)
else:
raise Exception('This function solves for either flow, upstream pressure, downstream pressure, diameter, or length; all other inputs must be provided.') | r'''Calculation function for dealing with flow of a compressible gas in a
pipeline with the Fritzsche formula. Can calculate any of the following,
given all other inputs:
* Flow rate
* Upstream pressure
* Downstream pressure
* Diameter of pipe
* Length of pipe
A variety of different constants and expressions have been presented
for the Fritzsche formula. Here, the form as in [1]_
is used but with all inputs in base SI units.
.. math::
Q = 93.500 \frac{T_s}{P_s}\left(\frac{P_1^2 - P_2^2}
{L\cdot {SG}^{0.8587} \cdot T_{avg}}\right)^{0.538}D^{2.69}
Parameters
----------
SG : float
Specific gravity of fluid with respect to air at the reference
temperature and pressure `Ts` and `Ps`, [-]
Tavg : float
Average temperature of the fluid in the pipeline, [K]
L : float, optional
Length of pipe, [m]
D : float, optional
Diameter of pipe, [m]
P1 : float, optional
Inlet pressure to pipe, [Pa]
P2 : float, optional
Outlet pressure from pipe, [Pa]
Q : float, optional
Flow rate of gas through pipe, [m^3/s]
Ts : float, optional
Reference temperature for the specific gravity of the gas, [K]
Ps : float, optional
Reference pressure for the specific gravity of the gas, [Pa]
Zavg : float, optional
Average compressibility factor for gas, [-]
E : float, optional
Pipeline efficiency, a correction factor between 0 and 1
Returns
-------
Q, P1, P2, D, or L : float
The missing input which was solved for [base SI]
Notes
-----
This model is also presented in [1]_ with a leading constant of 2.827,
the same exponents as used here, units of mm (diameter), kPa, km (length),
and flow in m^3/hour.
This model is shown in base SI units in [2]_, and with a leading constant
of 94.2565, a diameter power of 2.6911, main group power of 0.5382
and a specific gravity power of 0.858. The difference is very small.
Examples
--------
>>> Fritzsche(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15)
39.421535157535565
References
----------
.. [1] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton,
FL: CRC Press, 2005.
.. [2] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations
for Steady State Flow in Natural Gas Pipelines." Journal of the
Brazilian Society of Mechanical Sciences and Engineering 29, no. 3
(September 2007): 262-73. doi:10.1590/S1678-58782007000300005. | Below is the instruction that describes the task:
### Input:
r'''Calculation function for dealing with flow of a compressible gas in a
pipeline with the Fritzsche formula. Can calculate any of the following,
given all other inputs:
* Flow rate
* Upstream pressure
* Downstream pressure
* Diameter of pipe
* Length of pipe
A variety of different constants and expressions have been presented
for the Fritzsche formula. Here, the form as in [1]_
is used but with all inputs in base SI units.
.. math::
Q = 93.500 \frac{T_s}{P_s}\left(\frac{P_1^2 - P_2^2}
{L\cdot {SG}^{0.8587} \cdot T_{avg}}\right)^{0.538}D^{2.69}
Parameters
----------
SG : float
Specific gravity of fluid with respect to air at the reference
temperature and pressure `Ts` and `Ps`, [-]
Tavg : float
Average temperature of the fluid in the pipeline, [K]
L : float, optional
Length of pipe, [m]
D : float, optional
Diameter of pipe, [m]
P1 : float, optional
Inlet pressure to pipe, [Pa]
P2 : float, optional
Outlet pressure from pipe, [Pa]
Q : float, optional
Flow rate of gas through pipe, [m^3/s]
Ts : float, optional
Reference temperature for the specific gravity of the gas, [K]
Ps : float, optional
Reference pressure for the specific gravity of the gas, [Pa]
Zavg : float, optional
Average compressibility factor for gas, [-]
E : float, optional
Pipeline efficiency, a correction factor between 0 and 1
Returns
-------
Q, P1, P2, D, or L : float
The missing input which was solved for [base SI]
Notes
-----
This model is also presented in [1]_ with a leading constant of 2.827,
the same exponents as used here, units of mm (diameter), kPa, km (length),
and flow in m^3/hour.
This model is shown in base SI units in [2]_, and with a leading constant
of 94.2565, a diameter power of 2.6911, main group power of 0.5382
and a specific gravity power of 0.858. The difference is very small.
Examples
--------
>>> Fritzsche(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15)
39.421535157535565
References
----------
.. [1] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton,
FL: CRC Press, 2005.
.. [2] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations
for Steady State Flow in Natural Gas Pipelines." Journal of the
Brazilian Society of Mechanical Sciences and Engineering 29, no. 3
(September 2007): 262-73. doi:10.1590/S1678-58782007000300005.
### Response:
def Fritzsche(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
Ps=101325., Zavg=1, E=1):
r'''Calculation function for dealing with flow of a compressible gas in a
pipeline with the Fritzsche formula. Can calculate any of the following,
given all other inputs:
* Flow rate
* Upstream pressure
* Downstream pressure
* Diameter of pipe
* Length of pipe
A variety of different constants and expressions have been presented
for the Fritzsche formula. Here, the form as in [1]_
is used but with all inputs in base SI units.
.. math::
Q = 93.500 \frac{T_s}{P_s}\left(\frac{P_1^2 - P_2^2}
{L\cdot {SG}^{0.8587} \cdot T_{avg}}\right)^{0.538}D^{2.69}
Parameters
----------
SG : float
Specific gravity of fluid with respect to air at the reference
temperature and pressure `Ts` and `Ps`, [-]
Tavg : float
Average temperature of the fluid in the pipeline, [K]
L : float, optional
Length of pipe, [m]
D : float, optional
Diameter of pipe, [m]
P1 : float, optional
Inlet pressure to pipe, [Pa]
P2 : float, optional
Outlet pressure from pipe, [Pa]
Q : float, optional
Flow rate of gas through pipe, [m^3/s]
Ts : float, optional
Reference temperature for the specific gravity of the gas, [K]
Ps : float, optional
Reference pressure for the specific gravity of the gas, [Pa]
Zavg : float, optional
Average compressibility factor for gas, [-]
E : float, optional
Pipeline efficiency, a correction factor between 0 and 1
Returns
-------
Q, P1, P2, D, or L : float
The missing input which was solved for [base SI]
Notes
-----
This model is also presented in [1]_ with a leading constant of 2.827,
the same exponents as used here, units of mm (diameter), kPa, km (length),
and flow in m^3/hour.
This model is shown in base SI units in [2]_, and with a leading constant
of 94.2565, a diameter power of 2.6911, main group power of 0.5382
and a specific gravity power of 0.858. The difference is very small.
Examples
--------
>>> Fritzsche(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15)
39.421535157535565
References
----------
.. [1] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton,
FL: CRC Press, 2005.
.. [2] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations
for Steady State Flow in Natural Gas Pipelines." Journal of the
Brazilian Society of Mechanical Sciences and Engineering 29, no. 3
(September 2007): 262-73. doi:10.1590/S1678-58782007000300005.
'''
# Rational('2.827E-3')/(3600*24)*(1000)**Rational('2.69')*(1000)**Rational('0.538')*1000/(1000**2)**Rational('0.538')
c5 = 93.50009798751128188757518688244137811221 # 14135*10**(57/125)/432
c2 = 0.8587
c3 = 0.538
c4 = 2.69
if Q is None and (None not in [L, D, P1, P2]):
return c5*E*(Ts/Ps)*((P1**2 - P2**2)/(SG**c2*Tavg*L*Zavg))**c3*D**c4
elif D is None and (None not in [L, Q, P1, P2]):
return (Ps*Q*(SG**(-c2)*(P1**2 - P2**2)/(L*Tavg*Zavg))**(-c3)/(E*Ts*c5))**(1./c4)
elif P1 is None and (None not in [L, Q, D, P2]):
return (L*SG**c2*Tavg*Zavg*(D**(-c4)*Ps*Q/(E*Ts*c5))**(1./c3) + P2**2)**0.5
elif P2 is None and (None not in [L, Q, D, P1]):
return (-L*SG**c2*Tavg*Zavg*(D**(-c4)*Ps*Q/(E*Ts*c5))**(1./c3) + P1**2)**0.5
elif L is None and (None not in [P2, Q, D, P1]):
return SG**(-c2)*(D**(-c4)*Ps*Q/(E*Ts*c5))**(-1./c3)*(P1**2 - P2**2)/(Tavg*Zavg)
else:
raise Exception('This function solves for either flow, upstream pressure, downstream pressure, diameter, or length; all other inputs must be provided.') |
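For the flow-rate branch alone, the relationship collapses to a single expression; this sketch simply re-evaluates the docstring example with the constants copied from the function above:
def fritzsche_flow(SG, Tavg, L, D, P1, P2, Ts=288.7, Ps=101325., Zavg=1, E=1):
    # Q = c5*E*(Ts/Ps)*((P1**2 - P2**2)/(SG**0.8587*Tavg*L*Zavg))**0.538*D**2.69
    c5, c2, c3, c4 = 93.50009798751128, 0.8587, 0.538, 2.69
    return c5*E*(Ts/Ps)*((P1**2 - P2**2)/(SG**c2*Tavg*L*Zavg))**c3*D**c4
print(fritzsche_flow(SG=0.693, Tavg=277.15, L=160E3, D=0.340, P1=90E5, P2=20E5))
# ~39.42 m^3/s, matching the docstring example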
def has_more_pages(self):
"""
:return: ``True`` if there are more pages available on the server.
"""
# if has_next property exists, it represents whether more pages exist
if self.has_next is not None:
return self.has_next
# otherwise, try to compute whether or not more pages exist
total_pages = self.get_total_pages()
if self.page_number is None or total_pages is None:
return None
else:
return self.page_number + 1 < total_pages | :return: ``True`` if there are more pages available on the server. | Below is the the instruction that describes the task:
### Input:
:return: ``True`` if there are more pages available on the server.
### Response:
def has_more_pages(self):
"""
:return: ``True`` if there are more pages available on the server.
"""
# if has_next property exists, it represents whether more pages exist
if self.has_next is not None:
return self.has_next
# otherwise, try to compute whether or not more pages exist
total_pages = self.get_total_pages()
if self.page_number is None or total_pages is None:
return None
else:
return self.page_number + 1 < total_pages |
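The fallback arithmetic is easy to check in isolation; this sketch assumes a zero-based page counter, as in the method above:
def more_pages(page_number, total_pages, has_next=None):
    # Prefer an explicit server-side flag; otherwise derive the answer from the counters.
    if has_next is not None:
        return has_next
    if page_number is None or total_pages is None:
        return None
    return page_number + 1 < total_pages
print(more_pages(0, 3))                 # True  -- two pages still ahead
print(more_pages(2, 3))                 # False -- already on the last page
print(more_pages(2, 3, has_next=True))  # True  -- the server flag wins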
def remove_library_from_file_system(self, library_path, library_name):
"""Remove library from hard disk."""
library_file_system_path = self.get_os_path_to_library(library_path, library_name)[0]
shutil.rmtree(library_file_system_path)
self.refresh_libraries() | Remove library from hard disk. | Below is the instruction that describes the task:
### Input:
Remove library from hard disk.
### Response:
def remove_library_from_file_system(self, library_path, library_name):
"""Remove library from hard disk."""
library_file_system_path = self.get_os_path_to_library(library_path, library_name)[0]
shutil.rmtree(library_file_system_path)
self.refresh_libraries() |
def summary(self):
"""
A succinct summary of the Launcher configuration. Unlike the
repr, a summary does not have to be complete but must supply
key information relevant to the user.
"""
print("Type: %s" % self.__class__.__name__)
print("Batch Name: %r" % self.batch_name)
if self.tag:
print("Tag: %s" % self.tag)
print("Root directory: %r" % self.get_root_directory())
print("Maximum concurrency: %s" % self.max_concurrency)
if self.description:
print("Description: %s" % self.description) | A succinct summary of the Launcher configuration. Unlike the
repr, a summary does not have to be complete but must supply
key information relevant to the user. | Below is the instruction that describes the task:
### Input:
A succinct summary of the Launcher configuration. Unlike the
repr, a summary does not have to be complete but must supply
key information relevant to the user.
### Response:
def summary(self):
"""
A succinct summary of the Launcher configuration. Unlike the
repr, a summary does not have to be complete but must supply
key information relevant to the user.
"""
print("Type: %s" % self.__class__.__name__)
print("Batch Name: %r" % self.batch_name)
if self.tag:
print("Tag: %s" % self.tag)
print("Root directory: %r" % self.get_root_directory())
print("Maximum concurrency: %s" % self.max_concurrency)
if self.description:
print("Description: %s" % self.description) |
def get_authzd_permissions(self, identifier, perm_domain):
"""
:type identifier: str
:type domain: str
:returns: a list of relevant json blobs, each a list of permission dicts
"""
related_perms = []
keys = ['*', perm_domain]
def query_permissions(self):
msg = ("Could not obtain cached permissions for [{0}]. "
"Will try to acquire permissions from account store."
.format(identifier))
logger.debug(msg)
# permissions is a dict: {'domain': json blob of lists of dicts}
permissions = self.account_store.get_authz_permissions(identifier)
if not permissions:
msg = "Could not get permissions from account_store for {0}".\
format(identifier)
raise ValueError(msg)
return permissions
try:
msg2 = ("Attempting to get cached authz_info for [{0}]"
.format(identifier))
logger.debug(msg2)
domain = 'authorization:permissions:' + self.name
# related_perms is a list of json blobs whose contents are ordered
# such that the order matches that in the keys parameter:
related_perms = self.cache_handler.\
hmget_or_create(domain=domain,
identifier=identifier,
keys=keys,
creator_func=query_permissions,
creator=self)
except ValueError:
msg3 = ("No permissions found for identifiers [{0}]. "
"Returning None.".format(identifier))
logger.warning(msg3)
except AttributeError:
# this means the cache_handler isn't configured
queried_permissions = query_permissions(self)
related_perms = [queried_permissions.get('*'),
queried_permissions.get(perm_domain)]
return related_perms | :type identifier: str
:type domain: str
:returns: a list of relevant json blobs, each a list of permission dicts | Below is the instruction that describes the task:
### Input:
:type identifier: str
:type domain: str
:returns: a list of relevant json blobs, each a list of permission dicts
### Response:
def get_authzd_permissions(self, identifier, perm_domain):
"""
:type identifier: str
:type domain: str
:returns: a list of relevant json blobs, each a list of permission dicts
"""
related_perms = []
keys = ['*', perm_domain]
def query_permissions(self):
msg = ("Could not obtain cached permissions for [{0}]. "
"Will try to acquire permissions from account store."
.format(identifier))
logger.debug(msg)
# permissions is a dict: {'domain': json blob of lists of dicts}
permissions = self.account_store.get_authz_permissions(identifier)
if not permissions:
msg = "Could not get permissions from account_store for {0}".\
format(identifier)
raise ValueError(msg)
return permissions
try:
msg2 = ("Attempting to get cached authz_info for [{0}]"
.format(identifier))
logger.debug(msg2)
domain = 'authorization:permissions:' + self.name
# related_perms is a list of json blobs whose contents are ordered
# such that the order matches that in the keys parameter:
related_perms = self.cache_handler.\
hmget_or_create(domain=domain,
identifier=identifier,
keys=keys,
creator_func=query_permissions,
creator=self)
except ValueError:
msg3 = ("No permissions found for identifiers [{0}]. "
"Returning None.".format(identifier))
logger.warning(msg3)
except AttributeError:
# this means the cache_handler isn't configured
queried_permissions = query_permissions(self)
related_perms = [queried_permissions.get('*'),
queried_permissions.get(perm_domain)]
return related_perms |
def future_import(feature, node):
"""
This seems to work
"""
root = find_root(node)
if does_tree_import(u"__future__", feature, node):
return
# Look for a shebang or encoding line
shebang_encoding_idx = None
for idx, node in enumerate(root.children):
# Is it a shebang or encoding line?
if is_shebang_comment(node) or is_encoding_comment(node):
shebang_encoding_idx = idx
if is_docstring(node):
# skip over docstring
continue
names = check_future_import(node)
if not names:
# not a future statement; need to insert before this
break
if feature in names:
# already imported
return
import_ = FromImport(u'__future__', [Leaf(token.NAME, feature, prefix=" ")])
if shebang_encoding_idx == 0 and idx == 0:
# If this __future__ import would go on the first line,
# detach the shebang / encoding prefix from the current first line.
# and attach it to our new __future__ import node.
import_.prefix = root.children[0].prefix
root.children[0].prefix = u''
# End the __future__ import line with a newline and add a blank line
# afterwards:
children = [import_ , Newline()]
root.insert_child(idx, Node(syms.simple_stmt, children)) | This seems to work | Below is the the instruction that describes the task:
### Input:
This seems to work
### Response:
def future_import(feature, node):
"""
This seems to work
"""
root = find_root(node)
if does_tree_import(u"__future__", feature, node):
return
# Look for a shebang or encoding line
shebang_encoding_idx = None
for idx, node in enumerate(root.children):
# Is it a shebang or encoding line?
if is_shebang_comment(node) or is_encoding_comment(node):
shebang_encoding_idx = idx
if is_docstring(node):
# skip over docstring
continue
names = check_future_import(node)
if not names:
# not a future statement; need to insert before this
break
if feature in names:
# already imported
return
import_ = FromImport(u'__future__', [Leaf(token.NAME, feature, prefix=" ")])
if shebang_encoding_idx == 0 and idx == 0:
# If this __future__ import would go on the first line,
# detach the shebang / encoding prefix from the current first line.
# and attach it to our new __future__ import node.
import_.prefix = root.children[0].prefix
root.children[0].prefix = u''
# End the __future__ import line with a newline and add a blank line
# afterwards:
children = [import_ , Newline()]
root.insert_child(idx, Node(syms.simple_stmt, children)) |
def explode(self, escalations):
"""Create instance of Escalation for each HostEscalation object
:param escalations: list of escalation, used to add new ones
:type escalations: alignak.objects.escalation.Escalations
:return: None
"""
# Now we explode all escalations (host_name, hostgroup_name) to escalations
for escalation in self:
properties = escalation.__class__.properties
name = getattr(escalation, 'host_name', getattr(escalation, 'hostgroup_name', ''))
creation_dict = {
'escalation_name':
'Generated-HE-%s-%s' % (name, escalation.uuid)
}
for prop in properties:
if hasattr(escalation, prop):
creation_dict[prop] = getattr(escalation, prop)
escalations.add_escalation(Escalation(creation_dict)) | Create instance of Escalation for each HostEscalation object
:param escalations: list of escalation, used to add new ones
:type escalations: alignak.objects.escalation.Escalations
:return: None | Below is the instruction that describes the task:
### Input:
Create instance of Escalation for each HostEscalation object
:param escalations: list of escalation, used to add new ones
:type escalations: alignak.objects.escalation.Escalations
:return: None
### Response:
def explode(self, escalations):
"""Create instance of Escalation for each HostEscalation object
:param escalations: list of escalation, used to add new ones
:type escalations: alignak.objects.escalation.Escalations
:return: None
"""
# Now we explode all escalations (host_name, hostgroup_name) to escalations
for escalation in self:
properties = escalation.__class__.properties
name = getattr(escalation, 'host_name', getattr(escalation, 'hostgroup_name', ''))
creation_dict = {
'escalation_name':
'Generated-HE-%s-%s' % (name, escalation.uuid)
}
for prop in properties:
if hasattr(escalation, prop):
creation_dict[prop] = getattr(escalation, prop)
escalations.add_escalation(Escalation(creation_dict)) |
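Because the method above leans on Alignak internals, a self-contained restatement of the same idiom follows; every class and value in it is made up for illustration, and it only shows the pattern of copying an object's declared properties into a creation dict keyed by a generated name, not the real Alignak API.

# Invented classes/values; illustrates the copy-then-generate idiom only.
class SourceRule(object):
    properties = ('first_notification', 'contacts')

    def __init__(self, host_name, uuid, **kwargs):
        self.host_name = host_name
        self.uuid = uuid
        for key, value in kwargs.items():
            setattr(self, key, value)

rule = SourceRule('srv-web-01', 'a1b2c3d4', first_notification=3)
creation_dict = {
    'escalation_name': 'Generated-HE-%s-%s' % (rule.host_name, rule.uuid)
}
for prop in SourceRule.properties:
    # Only copy properties that are actually set on the source object.
    if hasattr(rule, prop):
        creation_dict[prop] = getattr(rule, prop)
print(creation_dict)
# {'escalation_name': 'Generated-HE-srv-web-01-a1b2c3d4', 'first_notification': 3}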
def _init_idxs_strpat(self, usr_hdrs):
"""List of indexes whose values will be strings."""
strpat = self.strpat_hdrs.keys()
self.idxs_strpat = [
            Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in strpat] | List of indexes whose values will be strings. | Below is the instruction that describes the task:
### Input:
List of indexes whose values will be strings.
### Response:
def _init_idxs_strpat(self, usr_hdrs):
"""List of indexes whose values will be strings."""
strpat = self.strpat_hdrs.keys()
self.idxs_strpat = [
Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in strpat] |
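A small standalone illustration of the header-to-index filtering comprehension follows; the header names, pattern mapping, and user selection are invented for this example.

# Invented data; mirrors the filtering comprehension used above.
hdr2idx = {'NS': 0, 'GO': 1, 'Study': 2}        # header name -> column index
strpat_hdrs = {'GO': '{GO}', 'Study': '{STU}'}  # headers whose values stay strings
usr_hdrs = ['GO', 'NS']                         # headers the user asked for
idxs_strpat = [idx for hdr, idx in hdr2idx.items()
               if hdr in usr_hdrs and hdr in strpat_hdrs]
print(sorted(idxs_strpat))  # [1]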
def save(self, path=None, debug=False):
"""Save project to file.
:param path: Path or file pointer.
If you pass a file pointer, you're responsible for closing
it.
If path is not given, the :attr:`path` attribute is used,
usually the original path given to :attr:`load()`.
If `path` has the extension of an existing plugin, the
project will be converted using :attr:`convert`.
Otherwise, the extension will be replaced with the
extension of the current plugin.
(Note that log output for the conversion will be printed
to stdout. If you want to deal with the output, call
:attr:`convert` directly.)
If the path ends in a folder instead of a file, the
filename is based on the project's :attr:`name`.
:param debug: If true, return debugging information from the format
plugin instead of the path.
:raises: :py:class:`ValueError` if there's no path or name.
:returns: path to the saved file.
"""
p = self.copy()
plugin = p._plugin
# require path
p.path = path or self.path
if not p.path:
raise ValueError, "path is required"
if isinstance(p.path, basestring):
# split path
(folder, filename) = os.path.split(p.path)
(name, extension) = os.path.splitext(filename)
# get plugin from extension
if path: # only if not using self.path
try:
plugin = kurt.plugin.Kurt.get_plugin(extension=extension)
except ValueError:
pass
# build output path
if not name:
name = _clean_filename(self.name)
if not name:
raise ValueError, "name is required"
filename = name + plugin.extension
p.path = os.path.join(folder, filename)
# open
fp = open(p.path, "wb")
else:
fp = p.path
path = None
if not plugin:
raise ValueError, "must convert project to a format before saving"
for m in p.convert(plugin):
print m
result = p._save(fp)
if path:
fp.close()
return result if debug else p.path | Save project to file.
:param path: Path or file pointer.
If you pass a file pointer, you're responsible for closing
it.
If path is not given, the :attr:`path` attribute is used,
usually the original path given to :attr:`load()`.
If `path` has the extension of an existing plugin, the
project will be converted using :attr:`convert`.
Otherwise, the extension will be replaced with the
extension of the current plugin.
(Note that log output for the conversion will be printed
to stdout. If you want to deal with the output, call
:attr:`convert` directly.)
If the path ends in a folder instead of a file, the
filename is based on the project's :attr:`name`.
:param debug: If true, return debugging information from the format
plugin instead of the path.
:raises: :py:class:`ValueError` if there's no path or name.
:returns: path to the saved file. | Below is the instruction that describes the task:
### Input:
Save project to file.
:param path: Path or file pointer.
If you pass a file pointer, you're responsible for closing
it.
If path is not given, the :attr:`path` attribute is used,
usually the original path given to :attr:`load()`.
If `path` has the extension of an existing plugin, the
project will be converted using :attr:`convert`.
Otherwise, the extension will be replaced with the
extension of the current plugin.
(Note that log output for the conversion will be printed
to stdout. If you want to deal with the output, call
:attr:`convert` directly.)
If the path ends in a folder instead of a file, the
filename is based on the project's :attr:`name`.
:param debug: If true, return debugging information from the format
plugin instead of the path.
:raises: :py:class:`ValueError` if there's no path or name.
:returns: path to the saved file.
### Response:
def save(self, path=None, debug=False):
"""Save project to file.
:param path: Path or file pointer.
If you pass a file pointer, you're responsible for closing
it.
If path is not given, the :attr:`path` attribute is used,
usually the original path given to :attr:`load()`.
If `path` has the extension of an existing plugin, the
project will be converted using :attr:`convert`.
Otherwise, the extension will be replaced with the
extension of the current plugin.
(Note that log output for the conversion will be printed
to stdout. If you want to deal with the output, call
:attr:`convert` directly.)
If the path ends in a folder instead of a file, the
filename is based on the project's :attr:`name`.
:param debug: If true, return debugging information from the format
plugin instead of the path.
:raises: :py:class:`ValueError` if there's no path or name.
:returns: path to the saved file.
"""
p = self.copy()
plugin = p._plugin
# require path
p.path = path or self.path
if not p.path:
raise ValueError, "path is required"
if isinstance(p.path, basestring):
# split path
(folder, filename) = os.path.split(p.path)
(name, extension) = os.path.splitext(filename)
# get plugin from extension
if path: # only if not using self.path
try:
plugin = kurt.plugin.Kurt.get_plugin(extension=extension)
except ValueError:
pass
# build output path
if not name:
name = _clean_filename(self.name)
if not name:
raise ValueError, "name is required"
filename = name + plugin.extension
p.path = os.path.join(folder, filename)
# open
fp = open(p.path, "wb")
else:
fp = p.path
path = None
if not plugin:
raise ValueError, "must convert project to a format before saving"
for m in p.convert(plugin):
print m
result = p._save(fp)
if path:
fp.close()
return result if debug else p.path |
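As a brief usage sketch of the save() behaviour described in the docstring: the file names below are invented, kurt.Project.load() is assumed to be the library's usual loading entry point, and the record's code targets Python 2, so treat this as an outline rather than a verified recipe.

# Illustrative only: file names are made up; assumes kurt.Project.load().
import kurt

project = kurt.Project.load("game.sb")      # plugin inferred from the extension
saved_path = project.save("game_copy.sb")   # same plugin, new file; returns the path
print(saved_path)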