code | docstring | text
---|---|---
def xception_exit(inputs):
    """Xception exit flow."""
    with tf.variable_scope("xception_exit"):
        x = inputs
        x_shape = x.get_shape().as_list()
        if x_shape[1] is None or x_shape[2] is None:
            length_float = tf.to_float(tf.shape(x)[1])
            length_float *= tf.to_float(tf.shape(x)[2])
            spatial_dim_float = tf.sqrt(length_float)
            spatial_dim = tf.to_int32(spatial_dim_float)
            x_depth = x_shape[3]
            x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
        elif x_shape[1] != x_shape[2]:
            spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2])))
            if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]:
                raise ValueError("Assumed inputs were square-able but they were "
                                 "not. Shape: %s" % x_shape)
            x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
        x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME")
        return tf.nn.relu(x) | Xception exit flow. | Below is the instruction that describes the task:
### Input:
Xception exit flow.
### Response:
def xception_exit(inputs):
    """Xception exit flow."""
    with tf.variable_scope("xception_exit"):
        x = inputs
        x_shape = x.get_shape().as_list()
        if x_shape[1] is None or x_shape[2] is None:
            length_float = tf.to_float(tf.shape(x)[1])
            length_float *= tf.to_float(tf.shape(x)[2])
            spatial_dim_float = tf.sqrt(length_float)
            spatial_dim = tf.to_int32(spatial_dim_float)
            x_depth = x_shape[3]
            x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
        elif x_shape[1] != x_shape[2]:
            spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2])))
            if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]:
                raise ValueError("Assumed inputs were square-able but they were "
                                 "not. Shape: %s" % x_shape)
            x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
        x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME")
        return tf.nn.relu(x) |
def _convert_eq(self, eq):
    """WORKS INPLACE on eq
    """
    rename = dict(enumerate(self.index))
    eq['eq_sets'] = {rename[k]: {rename[x] for x in v}
                     for k, v in eq['eq_sets'].items()}
    eq['sym_ops'] = {rename[k]: {rename[x]: v[x] for x in v}
                     for k, v in eq['sym_ops'].items()}
    try:
        sym_mol = self.from_pymatgen_molecule(eq['sym_mol'])
        sym_mol.index = self.index
        eq['sym_mol'] = sym_mol._to_numeric()
    except KeyError:
        pass | WORKS INPLACE on eq | Below is the instruction that describes the task:
### Input:
WORKS INPLACE on eq
### Response:
def _convert_eq(self, eq):
    """WORKS INPLACE on eq
    """
    rename = dict(enumerate(self.index))
    eq['eq_sets'] = {rename[k]: {rename[x] for x in v}
                     for k, v in eq['eq_sets'].items()}
    eq['sym_ops'] = {rename[k]: {rename[x]: v[x] for x in v}
                     for k, v in eq['sym_ops'].items()}
    try:
        sym_mol = self.from_pymatgen_molecule(eq['sym_mol'])
        sym_mol.index = self.index
        eq['sym_mol'] = sym_mol._to_numeric()
    except KeyError:
        pass |
def load(self, specfiles=None):
"""Imports the specified ``fic`` files from the hard disk.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
"""
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error while calling "FiContainer.load()": "%s" is'\
' not present in "FiContainer.info"!'\
% (specfile, )
warnings.warn(warntext)
continue
else:
fiPath = aux.joinpath(self.info[specfile]['path'],
specfile+'.fic'
)
with zipfile.ZipFile(fiPath, 'r') as containerZip:
#Convert the zipfile data into a str object, necessary since
#containerZip.read() returns a bytes object.
jsonString = io.TextIOWrapper(containerZip.open('data'),
encoding='utf-8'
).read()
#infoString = io.TextIOWrapper(containerZip.open('info'),
# encoding='utf-8'
# ).read()
self.container[specfile] = json.loads(jsonString,
object_hook=Fi.jsonHook
) | Imports the specified ``fic`` files from the hard disk.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str] | Below is the instruction that describes the task:
### Input:
Imports the specified ``fic`` files from the hard disk.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
### Response:
def load(self, specfiles=None):
"""Imports the specified ``fic`` files from the hard disk.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
"""
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error while calling "FiContainer.load()": "%s" is'\
' not present in "FiContainer.info"!'\
% (specfile, )
warnings.warn(warntext)
continue
else:
fiPath = aux.joinpath(self.info[specfile]['path'],
specfile+'.fic'
)
with zipfile.ZipFile(fiPath, 'r') as containerZip:
#Convert the zipfile data into a str object, necessary since
#containerZip.read() returns a bytes object.
jsonString = io.TextIOWrapper(containerZip.open('data'),
encoding='utf-8'
).read()
#infoString = io.TextIOWrapper(containerZip.open('info'),
# encoding='utf-8'
# ).read()
self.container[specfile] = json.loads(jsonString,
object_hook=Fi.jsonHook
) |
def printAllColorsToConsole(cls):
    ''' A simple enumeration of the colors to the console to help decide :) '''
    for elem in cls.__dict__:
        # ignore specials such as __class__ or __module__
        if not elem.startswith("__"):
            color_fmt = cls.__dict__[elem]
            if isinstance(color_fmt, six.string_types) and color_fmt != "BOLD" and color_fmt != "DIM" and \
                    color_fmt != "UNDER" and color_fmt != "INV":
                print("\033[{fmt}AnsiColors.{name}\033[0m".format(fmt=color_fmt, name=elem)) | A simple enumeration of the colors to the console to help decide :) | Below is the instruction that describes the task:
### Input:
A simple enumeration of the colors to the console to help decide :)
### Response:
def printAllColorsToConsole(cls):
    ''' A simple enumeration of the colors to the console to help decide :) '''
    for elem in cls.__dict__:
        # ignore specials such as __class__ or __module__
        if not elem.startswith("__"):
            color_fmt = cls.__dict__[elem]
            if isinstance(color_fmt, six.string_types) and color_fmt != "BOLD" and color_fmt != "DIM" and \
                    color_fmt != "UNDER" and color_fmt != "INV":
                print("\033[{fmt}AnsiColors.{name}\033[0m".format(fmt=color_fmt, name=elem)) |
def get_widget_for(self, fieldname):
    """Lookup the widget
    """
    field = self.context.getField(fieldname)
    if not field:
        return None
    return field.widget | Lookup the widget | Below is the instruction that describes the task:
### Input:
Lookup the widget
### Response:
def get_widget_for(self, fieldname):
    """Lookup the widget
    """
    field = self.context.getField(fieldname)
    if not field:
        return None
    return field.widget |
def mavgen(opts, args):
"""Generate mavlink message formatters and parsers (C and Python ) using options
and args where args are a list of xml files. This function allows python
scripts under Windows to control mavgen using the same interface as
shell scripts under Unix"""
xml = []
# Enable validation by default, disabling it if explicitly requested
if opts.validate:
try:
from lxml import etree
with open(schemaFile, 'r') as f:
xmlschema_root = etree.parse(f)
xmlschema = etree.XMLSchema(xmlschema_root)
except:
print("WARNING: Unable to load XML validator libraries. XML validation will not be performed", file=sys.stderr)
opts.validate = False
def mavgen_validate(xmlfile):
"""Uses lxml to validate an XML file. We define mavgen_validate
here because it relies on the XML libs that were loaded in mavgen(), so it can't be called standalone"""
xmlvalid = True
try:
with open(xmlfile, 'r') as f:
xmldocument = etree.parse(f)
xmlschema.assertValid(xmldocument)
forbidden_names_re = re.compile("^(break$|case$|class$|catch$|const$|continue$|debugger$|default$|delete$|do$|else$|\
export$|extends$|finally$|for$|function$|if$|import$|in$|instanceof$|let$|new$|\
return$|super$|switch$|this$|throw$|try$|typeof$|var$|void$|while$|with$|yield$|\
enum$|await$|implements$|package$|protected$|static$|interface$|private$|public$|\
abstract$|boolean$|byte$|char$|double$|final$|float$|goto$|int$|long$|native$|\
short$|synchronized$|transient$|volatile$).*", re.IGNORECASE)
for element in xmldocument.iter('enum', 'entry', 'message', 'field'):
if forbidden_names_re.search(element.get('name')):
print("Validation error:", file=sys.stderr)
print("Element : %s at line : %s contains forbidden word" % (element.tag, element.sourceline), file=sys.stderr)
xmlvalid = False
return xmlvalid
except etree.XMLSchemaError:
return False
# Process all XML files, validating them as necessary.
for fname in args:
if opts.validate:
print("Validating %s" % fname)
if not mavgen_validate(fname):
return False
else:
print("Validation skipped for %s." % fname)
print("Parsing %s" % fname)
xml.append(mavparse.MAVXML(fname, opts.wire_protocol))
# expand includes
for x in xml[:]:
for i in x.include:
fname = os.path.join(os.path.dirname(x.filename), i)
# Validate XML file with XSD file if possible.
if opts.validate:
print("Validating %s" % fname)
if not mavgen_validate(fname):
return False
else:
print("Validation skipped for %s." % fname)
# Parsing
print("Parsing %s" % fname)
xml.append(mavparse.MAVXML(fname, opts.wire_protocol))
# include message lengths and CRCs too
x.message_crcs.update(xml[-1].message_crcs)
x.message_lengths.update(xml[-1].message_lengths)
x.message_min_lengths.update(xml[-1].message_min_lengths)
x.message_flags.update(xml[-1].message_flags)
x.message_target_system_ofs.update(xml[-1].message_target_system_ofs)
x.message_target_component_ofs.update(xml[-1].message_target_component_ofs)
x.message_names.update(xml[-1].message_names)
x.largest_payload = max(x.largest_payload, xml[-1].largest_payload)
# work out max payload size across all includes
largest_payload = 0
for x in xml:
if x.largest_payload > largest_payload:
largest_payload = x.largest_payload
for x in xml:
x.largest_payload = largest_payload
if mavparse.check_duplicates(xml):
sys.exit(1)
print("Found %u MAVLink message types in %u XML files" % (
mavparse.total_msgs(xml), len(xml)))
# Convert language option to lowercase and validate
opts.language = opts.language.lower()
if opts.language == 'python':
from . import mavgen_python
mavgen_python.generate(opts.output, xml)
elif opts.language == 'c':
from . import mavgen_c
mavgen_c.generate(opts.output, xml)
elif opts.language == 'wlua':
from . import mavgen_wlua
mavgen_wlua.generate(opts.output, xml)
elif opts.language == 'cs':
from . import mavgen_cs
mavgen_cs.generate(opts.output, xml)
elif opts.language == 'javascript':
from . import mavgen_javascript
mavgen_javascript.generate(opts.output, xml)
elif opts.language == 'objc':
from . import mavgen_objc
mavgen_objc.generate(opts.output, xml)
elif opts.language == 'swift':
from . import mavgen_swift
mavgen_swift.generate(opts.output, xml)
elif opts.language == 'java':
from . import mavgen_java
mavgen_java.generate(opts.output, xml)
else:
print("Unsupported language %s" % opts.language)
return True | Generate mavlink message formatters and parsers (C and Python ) using options
and args where args are a list of xml files. This function allows python
scripts under Windows to control mavgen using the same interface as
shell scripts under Unix | Below is the instruction that describes the task:
### Input:
Generate mavlink message formatters and parsers (C and Python ) using options
and args where args are a list of xml files. This function allows python
scripts under Windows to control mavgen using the same interface as
shell scripts under Unix
### Response:
def mavgen(opts, args):
"""Generate mavlink message formatters and parsers (C and Python ) using options
and args where args are a list of xml files. This function allows python
scripts under Windows to control mavgen using the same interface as
shell scripts under Unix"""
xml = []
# Enable validation by default, disabling it if explicitly requested
if opts.validate:
try:
from lxml import etree
with open(schemaFile, 'r') as f:
xmlschema_root = etree.parse(f)
xmlschema = etree.XMLSchema(xmlschema_root)
except:
print("WARNING: Unable to load XML validator libraries. XML validation will not be performed", file=sys.stderr)
opts.validate = False
def mavgen_validate(xmlfile):
"""Uses lxml to validate an XML file. We define mavgen_validate
here because it relies on the XML libs that were loaded in mavgen(), so it can't be called standalone"""
xmlvalid = True
try:
with open(xmlfile, 'r') as f:
xmldocument = etree.parse(f)
xmlschema.assertValid(xmldocument)
forbidden_names_re = re.compile("^(break$|case$|class$|catch$|const$|continue$|debugger$|default$|delete$|do$|else$|\
export$|extends$|finally$|for$|function$|if$|import$|in$|instanceof$|let$|new$|\
return$|super$|switch$|this$|throw$|try$|typeof$|var$|void$|while$|with$|yield$|\
enum$|await$|implements$|package$|protected$|static$|interface$|private$|public$|\
abstract$|boolean$|byte$|char$|double$|final$|float$|goto$|int$|long$|native$|\
short$|synchronized$|transient$|volatile$).*", re.IGNORECASE)
for element in xmldocument.iter('enum', 'entry', 'message', 'field'):
if forbidden_names_re.search(element.get('name')):
print("Validation error:", file=sys.stderr)
print("Element : %s at line : %s contains forbidden word" % (element.tag, element.sourceline), file=sys.stderr)
xmlvalid = False
return xmlvalid
except etree.XMLSchemaError:
return False
# Process all XML files, validating them as necessary.
for fname in args:
if opts.validate:
print("Validating %s" % fname)
if not mavgen_validate(fname):
return False
else:
print("Validation skipped for %s." % fname)
print("Parsing %s" % fname)
xml.append(mavparse.MAVXML(fname, opts.wire_protocol))
# expand includes
for x in xml[:]:
for i in x.include:
fname = os.path.join(os.path.dirname(x.filename), i)
# Validate XML file with XSD file if possible.
if opts.validate:
print("Validating %s" % fname)
if not mavgen_validate(fname):
return False
else:
print("Validation skipped for %s." % fname)
# Parsing
print("Parsing %s" % fname)
xml.append(mavparse.MAVXML(fname, opts.wire_protocol))
# include message lengths and CRCs too
x.message_crcs.update(xml[-1].message_crcs)
x.message_lengths.update(xml[-1].message_lengths)
x.message_min_lengths.update(xml[-1].message_min_lengths)
x.message_flags.update(xml[-1].message_flags)
x.message_target_system_ofs.update(xml[-1].message_target_system_ofs)
x.message_target_component_ofs.update(xml[-1].message_target_component_ofs)
x.message_names.update(xml[-1].message_names)
x.largest_payload = max(x.largest_payload, xml[-1].largest_payload)
# work out max payload size across all includes
largest_payload = 0
for x in xml:
if x.largest_payload > largest_payload:
largest_payload = x.largest_payload
for x in xml:
x.largest_payload = largest_payload
if mavparse.check_duplicates(xml):
sys.exit(1)
print("Found %u MAVLink message types in %u XML files" % (
mavparse.total_msgs(xml), len(xml)))
# Convert language option to lowercase and validate
opts.language = opts.language.lower()
if opts.language == 'python':
from . import mavgen_python
mavgen_python.generate(opts.output, xml)
elif opts.language == 'c':
from . import mavgen_c
mavgen_c.generate(opts.output, xml)
elif opts.language == 'wlua':
from . import mavgen_wlua
mavgen_wlua.generate(opts.output, xml)
elif opts.language == 'cs':
from . import mavgen_cs
mavgen_cs.generate(opts.output, xml)
elif opts.language == 'javascript':
from . import mavgen_javascript
mavgen_javascript.generate(opts.output, xml)
elif opts.language == 'objc':
from . import mavgen_objc
mavgen_objc.generate(opts.output, xml)
elif opts.language == 'swift':
from . import mavgen_swift
mavgen_swift.generate(opts.output, xml)
elif opts.language == 'java':
from . import mavgen_java
mavgen_java.generate(opts.output, xml)
else:
print("Unsupported language %s" % opts.language)
return True |
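The function above only reads a handful of attributes from opts (validate, wire_protocol, language, output), so a minimal driver can be sketched as follows. The SimpleNamespace values and the XML path are illustrative assumptions; the real pymavlink tooling builds opts with its own option parser.

from types import SimpleNamespace

# Illustrative options object; the attribute names mirror what mavgen() reads
# above, but the values themselves are assumptions.
opts = SimpleNamespace(validate=True, wire_protocol="2.0",
                       language="python", output="generated/mavlink")
# mavgen(opts, ["message_definitions/v1.0/common.xml"])  # hypothetical XML path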
def get_features(self, organism=None, sequence=None):
    """
    Get the features for an organism / sequence
    :type organism: str
    :param organism: Organism Common Name
    :type sequence: str
    :param sequence: Sequence Name
    :rtype: dict
    :return: A standard apollo feature dictionary ({"features": [{...}]})
    """
    data = {}
    data = self._update_data(data, organism, sequence)
    return self.post('getFeatures', data) | Get the features for an organism / sequence
:type organism: str
:param organism: Organism Common Name
:type sequence: str
:param sequence: Sequence Name
:rtype: dict
:return: A standard apollo feature dictionary ({"features": [{...}]}) | Below is the instruction that describes the task:
### Input:
Get the features for an organism / sequence
:type organism: str
:param organism: Organism Common Name
:type sequence: str
:param sequence: Sequence Name
:rtype: dict
:return: A standard apollo feature dictionary ({"features": [{...}]})
### Response:
def get_features(self, organism=None, sequence=None):
    """
    Get the features for an organism / sequence
    :type organism: str
    :param organism: Organism Common Name
    :type sequence: str
    :param sequence: Sequence Name
    :rtype: dict
    :return: A standard apollo feature dictionary ({"features": [{...}]})
    """
    data = {}
    data = self._update_data(data, organism, sequence)
    return self.post('getFeatures', data) |
def textrank(self, sentence, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'), withFlag=False):
"""
Extract keywords from sentence using TextRank algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v'].
if the POS of w is not in this list, it will be filtered.
- withFlag: if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words
"""
self.pos_filt = frozenset(allowPOS)
g = UndirectWeightedGraph()
cm = defaultdict(int)
words = tuple(self.tokenizer.cut(sentence))
for i, wp in enumerate(words):
if self.pairfilter(wp):
for j in xrange(i + 1, i + self.span):
if j >= len(words):
break
if not self.pairfilter(words[j]):
continue
if allowPOS and withFlag:
cm[(wp, words[j])] += 1
else:
cm[(wp.word, words[j].word)] += 1
for terms, w in cm.items():
g.addEdge(terms[0], terms[1], w)
nodes_rank = g.rank()
if withWeight:
tags = sorted(nodes_rank.items(), key=itemgetter(1), reverse=True)
else:
tags = sorted(nodes_rank, key=nodes_rank.__getitem__, reverse=True)
if topK:
return tags[:topK]
else:
return tags | Extract keywords from sentence using TextRank algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v'].
if the POS of w is not in this list, it will be filtered.
- withFlag: if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words | Below is the instruction that describes the task:
### Input:
Extract keywords from sentence using TextRank algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v'].
if the POS of w is not in this list, it will be filtered.
- withFlag: if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words
### Response:
def textrank(self, sentence, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'), withFlag=False):
"""
Extract keywords from sentence using TextRank algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v'].
if the POS of w is not in this list, it will be filtered.
- withFlag: if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words
"""
self.pos_filt = frozenset(allowPOS)
g = UndirectWeightedGraph()
cm = defaultdict(int)
words = tuple(self.tokenizer.cut(sentence))
for i, wp in enumerate(words):
if self.pairfilter(wp):
for j in xrange(i + 1, i + self.span):
if j >= len(words):
break
if not self.pairfilter(words[j]):
continue
if allowPOS and withFlag:
cm[(wp, words[j])] += 1
else:
cm[(wp.word, words[j].word)] += 1
for terms, w in cm.items():
g.addEdge(terms[0], terms[1], w)
nodes_rank = g.rank()
if withWeight:
tags = sorted(nodes_rank.items(), key=itemgetter(1), reverse=True)
else:
tags = sorted(nodes_rank, key=nodes_rank.__getitem__, reverse=True)
if topK:
return tags[:topK]
else:
return tags |
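This extractor matches the TextRank implementation shipped with the jieba library; assuming that is the source, a minimal usage sketch looks like the following (the sample sentence and topK value are purely illustrative).

import jieba.analyse

sentence = "此外,公司拟对全资子公司吉林欧亚置业有限公司增资4.3亿元"
# Top 5 keywords together with their TextRank weights.
for word, weight in jieba.analyse.textrank(sentence, topK=5, withWeight=True):
    print(word, weight)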
def process_value_pairs(self, tokens, type_):
"""
Metadata, Values, and Validation blocks can either
have string pairs or attributes
Attributes will already be processed
"""
key, body = self.check_composite_tokens(type_, tokens)
key_name = self.key_name(key)
d = CaseInsensitiveOrderedDict(CaseInsensitiveOrderedDict)
for t in body:
k = self.clean_string(t[0].value).lower()
v = self.clean_string(t[1].value)
if k in d.keys():
log.warning("A duplicate key ({}) was found in {}. Only the last value ({}) will be used. ".format(
k, type_, v))
d[k] = v
if self.include_position:
pd = self.create_position_dict(key, body)
d["__position__"] = pd
d["__type__"] = key_name
# return the token as well as the processed dict so the
# composites function works the same way
return d | Metadata, Values, and Validation blocks can either
have string pairs or attributes
Attributes will already be processed | Below is the instruction that describes the task:
### Input:
Metadata, Values, and Validation blocks can either
have string pairs or attributes
Attributes will already be processed
### Response:
def process_value_pairs(self, tokens, type_):
"""
Metadata, Values, and Validation blocks can either
have string pairs or attributes
Attributes will already be processed
"""
key, body = self.check_composite_tokens(type_, tokens)
key_name = self.key_name(key)
d = CaseInsensitiveOrderedDict(CaseInsensitiveOrderedDict)
for t in body:
k = self.clean_string(t[0].value).lower()
v = self.clean_string(t[1].value)
if k in d.keys():
log.warning("A duplicate key ({}) was found in {}. Only the last value ({}) will be used. ".format(
k, type_, v))
d[k] = v
if self.include_position:
pd = self.create_position_dict(key, body)
d["__position__"] = pd
d["__type__"] = key_name
# return the token as well as the processed dict so the
# composites function works the same way
return d |
def bottom(self, features):
"""Transforms features to feed into body.
Args:
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
transformed_features: dict of same key-value pairs as features. The value
Tensors are newly transformed.
"""
if not self._problem_hparams:
log_warn("Without a Problem, T2TModel.bottom is a passthrough.")
return features
transformed_features = collections.OrderedDict()
all_previous_modalities = []
target_modality = _create_target_modality(self._problem_hparams.modality)
# Transform features via its corresponding modality.
for feature_name, modality in sorted(
six.iteritems(self._problem_hparams.modality)):
if feature_name not in features:
tf.logging.warning("Missing feature %s - ignoring." % feature_name)
continue
vocab_size = self._problem_hparams.vocab_size[feature_name]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
modality_name = self._hparams.name.get(
feature_name,
modalities.get_name(modality))(self._hparams, vocab_size)
# Use if-else clauses to preserve behavior of previous changes: namely,
# the variable scope name for the targets feature if there is only one
# target modality; and to reuse variable scopes for only input modalities.
if feature_name in target_modality:
if len(target_modality) > 1:
variable_scope_name = "%s/%s" % (modality_name, feature_name)
else:
variable_scope_name = modality_name
bottom = self._hparams.bottom.get(
feature_name,
modalities.get_targets_bottom(modality))
# TODO(aidangomez): share variables?
with tf.variable_scope(variable_scope_name) as vs:
self._add_variable_scope(variable_scope_name, vs)
log_info("Transforming feature '%s' with %s.targets_bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
else:
bottom = self._hparams.bottom.get(feature_name,
modalities.get_bottom(modality))
do_reuse = modality_name in all_previous_modalities
with tf.variable_scope(modality_name, reuse=do_reuse) as vs:
self._add_variable_scope(modality_name, vs)
log_info("Transforming feature '%s' with %s.bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
all_previous_modalities.append(modality_name)
for key in features:
if key not in transformed_features:
# For features without a modality, we pass them along as is
transformed_features[key] = features[key]
else:
# Other features get passed along with the "raw" suffix
transformed_features[key + "_raw"] = features[key]
return transformed_features | Transforms features to feed into body.
Args:
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
transformed_features: dict of same key-value pairs as features. The value
Tensors are newly transformed. | Below is the instruction that describes the task:
### Input:
Transforms features to feed into body.
Args:
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
transformed_features: dict of same key-value pairs as features. The value
Tensors are newly transformed.
### Response:
def bottom(self, features):
"""Transforms features to feed into body.
Args:
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
transformed_features: dict of same key-value pairs as features. The value
Tensors are newly transformed.
"""
if not self._problem_hparams:
log_warn("Without a Problem, T2TModel.bottom is a passthrough.")
return features
transformed_features = collections.OrderedDict()
all_previous_modalities = []
target_modality = _create_target_modality(self._problem_hparams.modality)
# Transform features via its corresponding modality.
for feature_name, modality in sorted(
six.iteritems(self._problem_hparams.modality)):
if feature_name not in features:
tf.logging.warning("Missing feature %s - ignoring." % feature_name)
continue
vocab_size = self._problem_hparams.vocab_size[feature_name]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
modality_name = self._hparams.name.get(
feature_name,
modalities.get_name(modality))(self._hparams, vocab_size)
# Use if-else clauses to preserve behavior of previous changes: namely,
# the variable scope name for the targets feature if there is only one
# target modality; and to reuse variable scopes for only input modalities.
if feature_name in target_modality:
if len(target_modality) > 1:
variable_scope_name = "%s/%s" % (modality_name, feature_name)
else:
variable_scope_name = modality_name
bottom = self._hparams.bottom.get(
feature_name,
modalities.get_targets_bottom(modality))
# TODO(aidangomez): share variables?
with tf.variable_scope(variable_scope_name) as vs:
self._add_variable_scope(variable_scope_name, vs)
log_info("Transforming feature '%s' with %s.targets_bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
else:
bottom = self._hparams.bottom.get(feature_name,
modalities.get_bottom(modality))
do_reuse = modality_name in all_previous_modalities
with tf.variable_scope(modality_name, reuse=do_reuse) as vs:
self._add_variable_scope(modality_name, vs)
log_info("Transforming feature '%s' with %s.bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
all_previous_modalities.append(modality_name)
for key in features:
if key not in transformed_features:
# For features without a modality, we pass them along as is
transformed_features[key] = features[key]
else:
# Other features get passed along with the "raw" suffix
transformed_features[key + "_raw"] = features[key]
return transformed_features |
def post_public(self, path, data, is_json=True):
    '''Make a post request requiring no auth.'''
    return self._post(path, data, is_json) | Make a post request requiring no auth. | Below is the instruction that describes the task:
### Input:
Make a post request requiring no auth.
### Response:
def post_public(self, path, data, is_json=True):
    '''Make a post request requiring no auth.'''
    return self._post(path, data, is_json) |
def get_available_references(self, datas):
    """
    Get available manifest reference names.
    Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
    are available references.
    Only name validation is performed on these references.
    Arguments:
        datas (dict): Data where to search for reference declarations.
    Returns:
        list: List of every available reference names. This is the real
        name unprefixed.
    """
    names = []
    for k, v in datas.items():
        if k.startswith(RULE_REFERENCE):
            names.append(k[len(RULE_REFERENCE)+1:])
    return names | Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed. | Below is the instruction that describes the task:
### Input:
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
### Response:
def get_available_references(self, datas):
    """
    Get available manifest reference names.
    Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
    are available references.
    Only name validation is performed on these references.
    Arguments:
        datas (dict): Data where to search for reference declarations.
    Returns:
        list: List of every available reference names. This is the real
        name unprefixed.
    """
    names = []
    for k, v in datas.items():
        if k.startswith(RULE_REFERENCE):
            names.append(k[len(RULE_REFERENCE)+1:])
    return names |
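To make the prefix handling above concrete, here is a standalone sketch of the same key filtering; the RULE_REFERENCE value and the datas keys are hypothetical, the real prefix comes from nomenclature.RULE_REFERENCE.

RULE_REFERENCE = "css_reference"  # assumed value, for illustration only
datas = {
    "css_reference-header": {},
    "css_reference-footer": {},
    "some_other_rule": {},
}
# The prefix plus its one-character separator is stripped from matching keys.
names = [k[len(RULE_REFERENCE) + 1:] for k in datas if k.startswith(RULE_REFERENCE)]
print(names)  # ['header', 'footer']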
def moses_multi_bleu(hypotheses, references, lowercase=False):
"""Calculate the bleu score for hypotheses and references
using the MOSES multi-bleu.perl script.
Parameters
------------
hypotheses : numpy.array.string
A numpy array of strings where each string is a single example.
references : numpy.array.string
A numpy array of strings where each string is a single example.
lowercase : boolean
If True, pass the "-lc" flag to the multi-bleu script
Examples
---------
>>> hypotheses = ["a bird is flying on the sky"]
>>> references = ["two birds are flying on the sky", "a bird is on the top of the tree", "an airplane is on the sky",]
>>> score = tl.nlp.moses_multi_bleu(hypotheses, references)
Returns
--------
float
The BLEU score
References
----------
- `Google/seq2seq/metric/bleu <https://github.com/google/seq2seq>`__
"""
if np.size(hypotheses) == 0:
return np.float32(0.0)
# Get MOSES multi-bleu script
try:
multi_bleu_path, _ = urllib.request.urlretrieve(
"https://raw.githubusercontent.com/moses-smt/mosesdecoder/"
"master/scripts/generic/multi-bleu.perl"
)
os.chmod(multi_bleu_path, 0o755)
except Exception: # pylint: disable=W0702
tl.logging.info("Unable to fetch multi-bleu.perl script, using local.")
metrics_dir = os.path.dirname(os.path.realpath(__file__))
bin_dir = os.path.abspath(os.path.join(metrics_dir, "..", "..", "bin"))
multi_bleu_path = os.path.join(bin_dir, "tools/multi-bleu.perl")
# Dump hypotheses and references to tempfiles
hypothesis_file = tempfile.NamedTemporaryFile()
hypothesis_file.write("\n".join(hypotheses).encode("utf-8"))
hypothesis_file.write(b"\n")
hypothesis_file.flush()
reference_file = tempfile.NamedTemporaryFile()
reference_file.write("\n".join(references).encode("utf-8"))
reference_file.write(b"\n")
reference_file.flush()
# Calculate BLEU using multi-bleu script
with open(hypothesis_file.name, "r") as read_pred:
bleu_cmd = [multi_bleu_path]
if lowercase:
bleu_cmd += ["-lc"]
bleu_cmd += [reference_file.name]
try:
bleu_out = subprocess.check_output(bleu_cmd, stdin=read_pred, stderr=subprocess.STDOUT)
bleu_out = bleu_out.decode("utf-8")
bleu_score = re.search(r"BLEU = (.+?),", bleu_out).group(1)
bleu_score = float(bleu_score)
except subprocess.CalledProcessError as error:
if error.output is not None:
tl.logging.warning("multi-bleu.perl script returned non-zero exit code")
tl.logging.warning(error.output)
bleu_score = np.float32(0.0)
# Close temp files
hypothesis_file.close()
reference_file.close()
return np.float32(bleu_score) | Calculate the bleu score for hypotheses and references
using the MOSES multi-bleu.perl script.
Parameters
------------
hypotheses : numpy.array.string
A numpy array of strings where each string is a single example.
references : numpy.array.string
A numpy array of strings where each string is a single example.
lowercase : boolean
If True, pass the "-lc" flag to the multi-bleu script
Examples
---------
>>> hypotheses = ["a bird is flying on the sky"]
>>> references = ["two birds are flying on the sky", "a bird is on the top of the tree", "an airplane is on the sky",]
>>> score = tl.nlp.moses_multi_bleu(hypotheses, references)
Returns
--------
float
The BLEU score
References
----------
- `Google/seq2seq/metric/bleu <https://github.com/google/seq2seq>`__ | Below is the instruction that describes the task:
### Input:
Calculate the bleu score for hypotheses and references
using the MOSES multi-bleu.perl script.
Parameters
------------
hypotheses : numpy.array.string
A numpy array of strings where each string is a single example.
references : numpy.array.string
A numpy array of strings where each string is a single example.
lowercase : boolean
If True, pass the "-lc" flag to the multi-bleu script
Examples
---------
>>> hypotheses = ["a bird is flying on the sky"]
>>> references = ["two birds are flying on the sky", "a bird is on the top of the tree", "an airplane is on the sky",]
>>> score = tl.nlp.moses_multi_bleu(hypotheses, references)
Returns
--------
float
The BLEU score
References
----------
- `Google/seq2seq/metric/bleu <https://github.com/google/seq2seq>`__
### Response:
def moses_multi_bleu(hypotheses, references, lowercase=False):
"""Calculate the bleu score for hypotheses and references
using the MOSES multi-bleu.perl script.
Parameters
------------
hypotheses : numpy.array.string
A numpy array of strings where each string is a single example.
references : numpy.array.string
A numpy array of strings where each string is a single example.
lowercase : boolean
If True, pass the "-lc" flag to the multi-bleu script
Examples
---------
>>> hypotheses = ["a bird is flying on the sky"]
>>> references = ["two birds are flying on the sky", "a bird is on the top of the tree", "an airplane is on the sky",]
>>> score = tl.nlp.moses_multi_bleu(hypotheses, references)
Returns
--------
float
The BLEU score
References
----------
- `Google/seq2seq/metric/bleu <https://github.com/google/seq2seq>`__
"""
if np.size(hypotheses) == 0:
return np.float32(0.0)
# Get MOSES multi-bleu script
try:
multi_bleu_path, _ = urllib.request.urlretrieve(
"https://raw.githubusercontent.com/moses-smt/mosesdecoder/"
"master/scripts/generic/multi-bleu.perl"
)
os.chmod(multi_bleu_path, 0o755)
except Exception: # pylint: disable=W0702
tl.logging.info("Unable to fetch multi-bleu.perl script, using local.")
metrics_dir = os.path.dirname(os.path.realpath(__file__))
bin_dir = os.path.abspath(os.path.join(metrics_dir, "..", "..", "bin"))
multi_bleu_path = os.path.join(bin_dir, "tools/multi-bleu.perl")
# Dump hypotheses and references to tempfiles
hypothesis_file = tempfile.NamedTemporaryFile()
hypothesis_file.write("\n".join(hypotheses).encode("utf-8"))
hypothesis_file.write(b"\n")
hypothesis_file.flush()
reference_file = tempfile.NamedTemporaryFile()
reference_file.write("\n".join(references).encode("utf-8"))
reference_file.write(b"\n")
reference_file.flush()
# Calculate BLEU using multi-bleu script
with open(hypothesis_file.name, "r") as read_pred:
bleu_cmd = [multi_bleu_path]
if lowercase:
bleu_cmd += ["-lc"]
bleu_cmd += [reference_file.name]
try:
bleu_out = subprocess.check_output(bleu_cmd, stdin=read_pred, stderr=subprocess.STDOUT)
bleu_out = bleu_out.decode("utf-8")
bleu_score = re.search(r"BLEU = (.+?),", bleu_out).group(1)
bleu_score = float(bleu_score)
except subprocess.CalledProcessError as error:
if error.output is not None:
tl.logging.warning("multi-bleu.perl script returned non-zero exit code")
tl.logging.warning(error.output)
bleu_score = np.float32(0.0)
# Close temp files
hypothesis_file.close()
reference_file.close()
return np.float32(bleu_score) |
def add_query(self, name, filter, **kwargs):
"""Add a new query to device query service.
.. code-block:: python
f = api.add_query(
name = "Query name",
filter = {
"device_id": {"$eq": "01234"},
custom_attributes = {
"foo": {"$eq": "bar"}
}
}
)
print(f.created_at)
:param str name: Name of query (Required)
:param dict filter: Filter properties to apply (Required)
:param return: The newly created query object.
:return: the newly created query object
:rtype: Query
"""
# Ensure we have the correct types and get the new query object
filter_obj = filters.legacy_filter_formatter(
dict(filter=filter),
Device._get_attributes_map()
) if filter else None
query_map = Query._create_request_map(kwargs)
# Create the DeviceQuery object
f = DeviceQuery(name=name, query=filter_obj['filter'], **query_map)
api = self._get_api(device_directory.DefaultApi)
return Query(api.device_query_create(f)) | Add a new query to device query service.
.. code-block:: python
f = api.add_query(
name = "Query name",
filter = {
"device_id": {"$eq": "01234"},
custom_attributes = {
"foo": {"$eq": "bar"}
}
}
)
print(f.created_at)
:param str name: Name of query (Required)
:param dict filter: Filter properties to apply (Required)
:param return: The newly created query object.
:return: the newly created query object
:rtype: Query | Below is the instruction that describes the task:
### Input:
Add a new query to device query service.
.. code-block:: python
f = api.add_query(
name = "Query name",
filter = {
"device_id": {"$eq": "01234"},
custom_attributes = {
"foo": {"$eq": "bar"}
}
}
)
print(f.created_at)
:param str name: Name of query (Required)
:param dict filter: Filter properties to apply (Required)
:param return: The newly created query object.
:return: the newly created query object
:rtype: Query
### Response:
def add_query(self, name, filter, **kwargs):
"""Add a new query to device query service.
.. code-block:: python
f = api.add_query(
name = "Query name",
filter = {
"device_id": {"$eq": "01234"},
custom_attributes = {
"foo": {"$eq": "bar"}
}
}
)
print(f.created_at)
:param str name: Name of query (Required)
:param dict filter: Filter properties to apply (Required)
:param return: The newly created query object.
:return: the newly created query object
:rtype: Query
"""
# Ensure we have the correct types and get the new query object
filter_obj = filters.legacy_filter_formatter(
dict(filter=filter),
Device._get_attributes_map()
) if filter else None
query_map = Query._create_request_map(kwargs)
# Create the DeviceQuery object
f = DeviceQuery(name=name, query=filter_obj['filter'], **query_map)
api = self._get_api(device_directory.DefaultApi)
return Query(api.device_query_create(f)) |
def get_dashboard_history(self, id, **kwargs): # noqa: E501
"""Get the version history of a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dashboard_history(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param int offset:
:param int limit:
:return: ResponseContainerHistoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_dashboard_history_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_dashboard_history_with_http_info(id, **kwargs) # noqa: E501
return data | Get the version history of a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dashboard_history(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param int offset:
:param int limit:
:return: ResponseContainerHistoryResponse
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Get the version history of a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dashboard_history(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param int offset:
:param int limit:
:return: ResponseContainerHistoryResponse
If the method is called asynchronously,
returns the request thread.
### Response:
def get_dashboard_history(self, id, **kwargs): # noqa: E501
"""Get the version history of a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dashboard_history(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param int offset:
:param int limit:
:return: ResponseContainerHistoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_dashboard_history_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_dashboard_history_with_http_info(id, **kwargs) # noqa: E501
return data |
def get_hash(name, password=None):
'''
Returns the hash of a certificate in the keychain.
name
The name of the certificate (which you can get from keychain.get_friendly_name) or the
location of a p12 file.
password
The password that is used in the certificate. Only required if you're passing a p12 file.
Note: This will be outputted to logs
CLI Example:
.. code-block:: bash
salt '*' keychain.get_hash /tmp/test.p12 test123
'''
if '.p12' in name[-4:]:
cmd = 'openssl pkcs12 -in {0} -passin pass:{1} -passout pass:{1}'.format(name, password)
else:
cmd = 'security find-certificate -c "{0}" -m -p'.format(name)
out = __salt__['cmd.run'](cmd)
matches = re.search('-----BEGIN CERTIFICATE-----(.*)-----END CERTIFICATE-----', out, re.DOTALL | re.MULTILINE)
if matches:
return matches.group(1)
else:
return False | Returns the hash of a certificate in the keychain.
name
The name of the certificate (which you can get from keychain.get_friendly_name) or the
location of a p12 file.
password
The password that is used in the certificate. Only required if you're passing a p12 file.
Note: This will be outputted to logs
CLI Example:
.. code-block:: bash
salt '*' keychain.get_hash /tmp/test.p12 test123 | Below is the instruction that describes the task:
### Input:
Returns the hash of a certificate in the keychain.
name
The name of the certificate (which you can get from keychain.get_friendly_name) or the
location of a p12 file.
password
The password that is used in the certificate. Only required if you're passing a p12 file.
Note: This will be outputted to logs
CLI Example:
.. code-block:: bash
salt '*' keychain.get_hash /tmp/test.p12 test123
### Response:
def get_hash(name, password=None):
'''
Returns the hash of a certificate in the keychain.
name
The name of the certificate (which you can get from keychain.get_friendly_name) or the
location of a p12 file.
password
The password that is used in the certificate. Only required if you're passing a p12 file.
Note: This will be outputted to logs
CLI Example:
.. code-block:: bash
salt '*' keychain.get_hash /tmp/test.p12 test123
'''
if '.p12' in name[-4:]:
cmd = 'openssl pkcs12 -in {0} -passin pass:{1} -passout pass:{1}'.format(name, password)
else:
cmd = 'security find-certificate -c "{0}" -m -p'.format(name)
out = __salt__['cmd.run'](cmd)
matches = re.search('-----BEGIN CERTIFICATE-----(.*)-----END CERTIFICATE-----', out, re.DOTALL | re.MULTILINE)
if matches:
return matches.group(1)
else:
return False |
def contains(self, time: datetime.datetime,
             inclusive: bool = True) -> bool:
    """
    Does the interval contain a momentary time?
    Args:
        time: the ``datetime.datetime`` to check
        inclusive: use inclusive rather than exclusive range checks?
    """
    if inclusive:
        return self.start <= time <= self.end
    else:
        return self.start < time < self.end | Does the interval contain a momentary time?
Args:
time: the ``datetime.datetime`` to check
inclusive: use inclusive rather than exclusive range checks? | Below is the instruction that describes the task:
### Input:
Does the interval contain a momentary time?
Args:
time: the ``datetime.datetime`` to check
inclusive: use inclusive rather than exclusive range checks?
### Response:
def contains(self, time: datetime.datetime,
             inclusive: bool = True) -> bool:
    """
    Does the interval contain a momentary time?
    Args:
        time: the ``datetime.datetime`` to check
        inclusive: use inclusive rather than exclusive range checks?
    """
    if inclusive:
        return self.start <= time <= self.end
    else:
        return self.start < time < self.end |
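A short, self-contained usage sketch of the method above; Interval here is a hypothetical minimal container with start and end datetimes, standing in for whatever class actually defines contains().

import datetime
from dataclasses import dataclass

@dataclass
class Interval:
    # Hypothetical stand-in for the real interval class.
    start: datetime.datetime
    end: datetime.datetime

    def contains(self, time, inclusive=True):
        if inclusive:
            return self.start <= time <= self.end
        return self.start < time < self.end

interval = Interval(datetime.datetime(2023, 1, 1), datetime.datetime(2023, 1, 31))
print(interval.contains(datetime.datetime(2023, 1, 15)))   # True
print(interval.contains(interval.end, inclusive=False))    # False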
def set_client_cert(self, cert):
"""*Sets the client cert for the requests.*
The cert is either a path to a .pem file, or a JSON array, or a list
having the cert path and the key path.
Values ``null`` and ``${None}`` can be used for clearing the cert.
*Examples*
| `Set Client Cert` | ${CURDIR}/client.pem |
| `Set Client Cert` | ["${CURDIR}/client.cert", "${CURDIR}/client.key"] |
| `Set Client Cert` | ${paths_list} |
"""
self.request["cert"] = self._input_client_cert(cert)
return self.request["cert"] | *Sets the client cert for the requests.*
The cert is either a path to a .pem file, or a JSON array, or a list
having the cert path and the key path.
Values ``null`` and ``${None}`` can be used for clearing the cert.
*Examples*
| `Set Client Cert` | ${CURDIR}/client.pem |
| `Set Client Cert` | ["${CURDIR}/client.cert", "${CURDIR}/client.key"] |
| `Set Client Cert` | ${paths_list} | | Below is the instruction that describes the task:
### Input:
*Sets the client cert for the requests.*
The cert is either a path to a .pem file, or a JSON array, or a list
having the cert path and the key path.
Values ``null`` and ``${None}`` can be used for clearing the cert.
*Examples*
| `Set Client Cert` | ${CURDIR}/client.pem |
| `Set Client Cert` | ["${CURDIR}/client.cert", "${CURDIR}/client.key"] |
| `Set Client Cert` | ${paths_list} |
### Response:
def set_client_cert(self, cert):
"""*Sets the client cert for the requests.*
The cert is either a path to a .pem file, or a JSON array, or a list
having the cert path and the key path.
Values ``null`` and ``${None}`` can be used for clearing the cert.
*Examples*
| `Set Client Cert` | ${CURDIR}/client.pem |
| `Set Client Cert` | ["${CURDIR}/client.cert", "${CURDIR}/client.key"] |
| `Set Client Cert` | ${paths_list} |
"""
self.request["cert"] = self._input_client_cert(cert)
return self.request["cert"] |
def fetch_items(self, category, **kwargs):
    """Fetch the bugs
    :param category: the category of items to fetch
    :param kwargs: backend arguments
    :returns: a generator of items
    """
    from_date = kwargs['from_date']
    logger.info("Looking for bugs: '%s' updated from '%s'",
                self.url, str(from_date))
    nbugs = 0
    for bug in self.__fetch_and_parse_bugs(from_date):
        nbugs += 1
        yield bug
    logger.info("Fetch process completed: %s bugs fetched", nbugs) | Fetch the bugs
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items | Below is the instruction that describes the task:
### Input:
Fetch the bugs
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
### Response:
def fetch_items(self, category, **kwargs):
    """Fetch the bugs
    :param category: the category of items to fetch
    :param kwargs: backend arguments
    :returns: a generator of items
    """
    from_date = kwargs['from_date']
    logger.info("Looking for bugs: '%s' updated from '%s'",
                self.url, str(from_date))
    nbugs = 0
    for bug in self.__fetch_and_parse_bugs(from_date):
        nbugs += 1
        yield bug
    logger.info("Fetch process completed: %s bugs fetched", nbugs) |
def _disable(name, started, result=True, skip_verify=False, **kwargs):
'''
Disable the service
'''
ret = {}
if not skip_verify:
# is service available?
try:
if not _available(name, ret):
ret['result'] = True
return ret
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = exc.strerror
return ret
# Set default expected result
ret['result'] = result
# is enable/disable available?
if 'service.disable' not in __salt__ or 'service.disabled' not in __salt__:
if started is True:
ret['comment'] = ('Disable is not available on this minion,'
' service {0} started').format(name)
elif started is None:
ret['comment'] = ('Disable is not available on this minion,'
' service {0} is in the desired state'
).format(name)
else:
ret['comment'] = ('Disable is not available on this minion,'
' service {0} is dead').format(name)
return ret
# Service can be disabled
if salt.utils.platform.is_windows():
# service.disabled in Windows returns True for services that are set to
# Manual start, so we need to check specifically for Disabled
before_toggle_disable_status = __salt__['service.info'](name)['StartType'] in ['Disabled']
else:
before_toggle_disable_status = __salt__['service.disabled'](name)
if before_toggle_disable_status:
# Service is disabled
if started is True:
ret['comment'] = ('Service {0} is already disabled,'
' and is running').format(name)
elif started is None:
# always be sure in this case to reset the changes dict
ret['changes'] = {}
ret['comment'] = ('Service {0} is already disabled,'
' and is in the desired state').format(name)
else:
ret['comment'] = ('Service {0} is already disabled,'
' and is dead').format(name)
return ret
# Service needs to be disabled
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Service {0} set to be disabled'.format(name)
return ret
if __salt__['service.disable'](name, **kwargs):
# Service has been disabled
ret['changes'] = {}
after_toggle_disable_status = __salt__['service.disabled'](name)
# on upstart, certain services like apparmor will always return
# False, even if correctly activated
# do not trigger a change
if before_toggle_disable_status != after_toggle_disable_status:
ret['changes'][name] = True
if started is True:
ret['comment'] = ('Service {0} has been disabled,'
' and is running').format(name)
elif started is None:
ret['comment'] = ('Service {0} has been disabled,'
' and is in the desired state').format(name)
else:
ret['comment'] = ('Service {0} has been disabled,'
' and is dead').format(name)
return ret
# Service failed to be disabled
ret['result'] = False
if started is True:
ret['comment'] = ('Failed when setting service {0} to not start'
' at boot, and is running').format(name)
elif started is None:
ret['comment'] = ('Failed when setting service {0} to not start'
' at boot, but the service was already running'
).format(name)
else:
ret['comment'] = ('Failed when setting service {0} to not start'
' at boot, and the service is dead').format(name)
return ret | Disable the service | Below is the instruction that describes the task:
### Input:
Disable the service
### Response:
def _disable(name, started, result=True, skip_verify=False, **kwargs):
'''
Disable the service
'''
ret = {}
if not skip_verify:
# is service available?
try:
if not _available(name, ret):
ret['result'] = True
return ret
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = exc.strerror
return ret
# Set default expected result
ret['result'] = result
# is enable/disable available?
if 'service.disable' not in __salt__ or 'service.disabled' not in __salt__:
if started is True:
ret['comment'] = ('Disable is not available on this minion,'
' service {0} started').format(name)
elif started is None:
ret['comment'] = ('Disable is not available on this minion,'
' service {0} is in the desired state'
).format(name)
else:
ret['comment'] = ('Disable is not available on this minion,'
' service {0} is dead').format(name)
return ret
# Service can be disabled
if salt.utils.platform.is_windows():
# service.disabled in Windows returns True for services that are set to
# Manual start, so we need to check specifically for Disabled
before_toggle_disable_status = __salt__['service.info'](name)['StartType'] in ['Disabled']
else:
before_toggle_disable_status = __salt__['service.disabled'](name)
if before_toggle_disable_status:
# Service is disabled
if started is True:
ret['comment'] = ('Service {0} is already disabled,'
' and is running').format(name)
elif started is None:
# always be sure in this case to reset the changes dict
ret['changes'] = {}
ret['comment'] = ('Service {0} is already disabled,'
' and is in the desired state').format(name)
else:
ret['comment'] = ('Service {0} is already disabled,'
' and is dead').format(name)
return ret
# Service needs to be disabled
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Service {0} set to be disabled'.format(name)
return ret
if __salt__['service.disable'](name, **kwargs):
# Service has been disabled
ret['changes'] = {}
after_toggle_disable_status = __salt__['service.disabled'](name)
# on upstart, certain services like apparmor will always return
# False, even if correctly activated
# do not trigger a change
if before_toggle_disable_status != after_toggle_disable_status:
ret['changes'][name] = True
if started is True:
ret['comment'] = ('Service {0} has been disabled,'
' and is running').format(name)
elif started is None:
ret['comment'] = ('Service {0} has been disabled,'
' and is in the desired state').format(name)
else:
ret['comment'] = ('Service {0} has been disabled,'
' and is dead').format(name)
return ret
# Service failed to be disabled
ret['result'] = False
if started is True:
ret['comment'] = ('Failed when setting service {0} to not start'
' at boot, and is running').format(name)
elif started is None:
ret['comment'] = ('Failed when setting service {0} to not start'
' at boot, but the service was already running'
).format(name)
else:
ret['comment'] = ('Failed when setting service {0} to not start'
' at boot, and the service is dead').format(name)
return ret |
def position(self):
    """
    Returns the current position of the motor in pulses of the rotary
    encoder. When the motor rotates clockwise, the position will increase.
    Likewise, rotating counter-clockwise causes the position to decrease.
    Writing will set the position to that value.
    """
    self._position, value = self.get_attr_int(self._position, 'position')
    return value | Returns the current position of the motor in pulses of the rotary
encoder. When the motor rotates clockwise, the position will increase.
Likewise, rotating counter-clockwise causes the position to decrease.
Writing will set the position to that value. | Below is the instruction that describes the task:
### Input:
Returns the current position of the motor in pulses of the rotary
encoder. When the motor rotates clockwise, the position will increase.
Likewise, rotating counter-clockwise causes the position to decrease.
Writing will set the position to that value.
### Response:
def position(self):
    """
    Returns the current position of the motor in pulses of the rotary
    encoder. When the motor rotates clockwise, the position will increase.
    Likewise, rotating counter-clockwise causes the position to decrease.
    Writing will set the position to that value.
    """
    self._position, value = self.get_attr_int(self._position, 'position')
    return value |
def setup_icons(self, ):
"""Set all icons on buttons
:returns: None
:rtype: None
:raises: None
"""
folder_icon = get_icon('glyphicons_144_folder_open.png', asicon=True)
self.asset_open_path_tb.setIcon(folder_icon)
self.shot_open_path_tb.setIcon(folder_icon)
current_icon = get_icon('glyphicons_181_download_alt.png', asicon=True)
self.current_pb.setIcon(current_icon)
refresh_icon = get_icon('refresh.png', asicon=True)
self.refresh_tb.setIcon(refresh_icon) | Set all icons on buttons
:returns: None
:rtype: None
:raises: None | Below is the the instruction that describes the task:
### Input:
Set all icons on buttons
:returns: None
:rtype: None
:raises: None
### Response:
def setup_icons(self, ):
"""Set all icons on buttons
:returns: None
:rtype: None
:raises: None
"""
folder_icon = get_icon('glyphicons_144_folder_open.png', asicon=True)
self.asset_open_path_tb.setIcon(folder_icon)
self.shot_open_path_tb.setIcon(folder_icon)
current_icon = get_icon('glyphicons_181_download_alt.png', asicon=True)
self.current_pb.setIcon(current_icon)
refresh_icon = get_icon('refresh.png', asicon=True)
self.refresh_tb.setIcon(refresh_icon) |
def epsilon_crit(self):
"""
returns the critical projected mass density in units of M_sun/Mpc^2 (physical units)
:return: critical projected mass density
"""
if not hasattr(self, '_Epsilon_Crit'):
const_SI = const.c ** 2 / (4 * np.pi * const.G) #c^2/(4*pi*G) in units of [kg/m]
conversion = const.Mpc / const.M_sun # converts [kg/m] to [M_sun/Mpc]
factor = const_SI*conversion #c^2/(4*pi*G) in units of [M_sun/Mpc]
self._Epsilon_Crit = self.D_s/(self.D_d*self.D_ds) * factor #[M_sun/Mpc^2]
return self._Epsilon_Crit | returns the critical projected mass density in units of M_sun/Mpc^2 (physical units)
:return: critical projected mass density | Below is the the instruction that describes the task:
### Input:
returns the critical projected mass density in units of M_sun/Mpc^2 (physical units)
:return: critical projected mass density
### Response:
def epsilon_crit(self):
"""
returns the critical projected mass density in units of M_sun/Mpc^2 (physical units)
:return: critical projected mass density
"""
if not hasattr(self, '_Epsilon_Crit'):
const_SI = const.c ** 2 / (4 * np.pi * const.G) #c^2/(4*pi*G) in units of [kg/m]
conversion = const.Mpc / const.M_sun # converts [kg/m] to [M_sun/Mpc]
factor = const_SI*conversion #c^2/(4*pi*G) in units of [M_sun/Mpc]
self._Epsilon_Crit = self.D_s/(self.D_d*self.D_ds) * factor #[M_sun/Mpc^2]
return self._Epsilon_Crit |
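For illustration, a standalone numeric sketch of the same Sigma_crit computation; the SI constants and the Mpc-scale distances below are stand-in values, not the library's own constants module.
import math

c = 299792458.0          # speed of light [m/s]
G = 6.67430e-11          # gravitational constant [m^3 kg^-1 s^-2]
Mpc = 3.085677581e22     # one megaparsec [m]
M_sun = 1.98847e30       # one solar mass [kg]

def sigma_crit(D_d, D_s, D_ds):
    # c^2/(4*pi*G) is in kg/m; multiplying by Mpc/M_sun converts that to M_sun/Mpc
    factor = c ** 2 / (4 * math.pi * G) * (Mpc / M_sun)
    # distances given in Mpc, so the result is in M_sun/Mpc^2
    return D_s / (D_d * D_ds) * factor

print("%.3e M_sun/Mpc^2" % sigma_crit(1000.0, 1600.0, 900.0))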
def assert_any_call(self, *args, **kwargs):
"""assert the mock has been called with the specified arguments.
The assert passes if the mock has *ever* been called, unlike
`assert_called_with` and `assert_called_once_with` that only pass if
the call is the most recent one."""
expected = self._call_matcher((args, kwargs))
actual = [self._call_matcher(c) for c in self.call_args_list]
if expected not in actual:
cause = expected if isinstance(expected, Exception) else None
expected_string = self._format_mock_call_signature(args, kwargs)
six.raise_from(AssertionError(
'%s call not found' % expected_string
), cause) | assert the mock has been called with the specified arguments.
The assert passes if the mock has *ever* been called, unlike
`assert_called_with` and `assert_called_once_with` that only pass if
the call is the most recent one. | Below is the the instruction that describes the task:
### Input:
assert the mock has been called with the specified arguments.
The assert passes if the mock has *ever* been called, unlike
`assert_called_with` and `assert_called_once_with` that only pass if
the call is the most recent one.
### Response:
def assert_any_call(self, *args, **kwargs):
"""assert the mock has been called with the specified arguments.
The assert passes if the mock has *ever* been called, unlike
`assert_called_with` and `assert_called_once_with` that only pass if
the call is the most recent one."""
expected = self._call_matcher((args, kwargs))
actual = [self._call_matcher(c) for c in self.call_args_list]
if expected not in actual:
cause = expected if isinstance(expected, Exception) else None
expected_string = self._format_mock_call_signature(args, kwargs)
six.raise_from(AssertionError(
'%s call not found' % expected_string
), cause) |
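A usage sketch with the standard library's unittest.mock; the method above is the mock backport's implementation, but the observable behaviour is the same.
from unittest.mock import Mock

m = Mock()
m(1, foo="bar")
m(2)

m.assert_any_call(1, foo="bar")   # passes: an earlier call matches
try:
    m.assert_any_call(3)          # the mock was never called with 3
except AssertionError as exc:
    print(exc)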
def calc_significand(prefix, dpd_bits, num_bits):
"""
prefix: High bits integer value
dpd_bits: dpd encoded bits
num_bits: bit length of dpd_bits
"""
# https://en.wikipedia.org/wiki/Decimal128_floating-point_format#Densely_packed_decimal_significand_field
num_segments = num_bits // 10
segments = []
for i in range(num_segments):
segments.append(dpd_bits & 0b1111111111)
dpd_bits >>= 10
segments.reverse()
v = prefix
for dpd in segments:
v = v * 1000 + dpd_to_int(dpd)
return v | prefix: High bits integer value
dpd_bits: dpd encoded bits
num_bits: bit length of dpd_bits | Below is the the instruction that describes the task:
### Input:
prefix: High bits integer value
dpd_bits: dpd encoded bits
num_bits: bit length of dpd_bits
### Response:
def calc_significand(prefix, dpd_bits, num_bits):
"""
prefix: High bits integer value
dpd_bits: dpd encoded bits
num_bits: bit length of dpd_bits
"""
# https://en.wikipedia.org/wiki/Decimal128_floating-point_format#Densely_packed_decimal_significand_field
num_segments = num_bits // 10
segments = []
for i in range(num_segments):
segments.append(dpd_bits & 0b1111111111)
dpd_bits >>= 10
segments.reverse()
v = prefix
for dpd in segments:
v = v * 1000 + dpd_to_int(dpd)
return v |
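A worked example of the combination step alone, with the per-group outputs of dpd_to_int assumed rather than decoded.
prefix = 1
decoded_groups = [2, 3]    # assumed dpd_to_int results for two 10-bit groups
v = prefix
for group in decoded_groups:
    v = v * 1000 + group   # 1 -> 1002 -> 1002003
print(v)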
def on_message(self, ws, message):
"""Websocket on_message event handler
Saves message as RTMMessage in self._inbox
"""
try:
data = json.loads(message)
except Exception:
self._set_error(message, "decode message failed")
else:
self._inbox.put(RTMMessage(data)) | Websocket on_message event handler
Saves message as RTMMessage in self._inbox | Below is the the instruction that describes the task:
### Input:
Websocket on_message event handler
Saves message as RTMMessage in self._inbox
### Response:
def on_message(self, ws, message):
"""Websocket on_message event handler
Saves message as RTMMessage in self._inbox
"""
try:
data = json.loads(message)
except Exception:
self._set_error(message, "decode message failed")
else:
self._inbox.put(RTMMessage(data)) |
def set_link(self, link,y=0,page=-1):
"Set destination of internal link"
if(y==-1):
y=self.y
if(page==-1):
page=self.page
self.links[link]=[page,y] | Set destination of internal link | Below is the the instruction that describes the task:
### Input:
Set destination of internal link
### Response:
def set_link(self, link,y=0,page=-1):
"Set destination of internal link"
if(y==-1):
y=self.y
if(page==-1):
page=self.page
self.links[link]=[page,y] |
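A usage sketch assuming the classic PyFPDF API (add_link, cell, output); the file name and cell text are placeholders.
from fpdf import FPDF

pdf = FPDF()
pdf.add_page()
pdf.set_font("Arial", size=12)
link = pdf.add_link()                         # reserve an internal link id
pdf.cell(0, 10, "Jump to page 2", ln=1, link=link)

pdf.add_page()
pdf.set_link(link)                            # destination: this page, top
pdf.cell(0, 10, "Link target", ln=1)
pdf.output("links.pdf")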
def await_flush_completion(self, timeout=None):
"""
Mark all partitions as ready to send and block until the send is complete
"""
try:
for batch in self._incomplete.all():
log.debug('Waiting on produce to %s',
batch.produce_future.topic_partition)
if not batch.produce_future.wait(timeout=timeout):
raise Errors.KafkaTimeoutError('Timeout waiting for future')
if not batch.produce_future.is_done:
raise Errors.UnknownError('Future not done')
if batch.produce_future.failed():
log.warning(batch.produce_future.exception)
finally:
self._flushes_in_progress.decrement() | Mark all partitions as ready to send and block until the send is complete | Below is the the instruction that describes the task:
### Input:
Mark all partitions as ready to send and block until the send is complete
### Response:
def await_flush_completion(self, timeout=None):
"""
Mark all partitions as ready to send and block until the send is complete
"""
try:
for batch in self._incomplete.all():
log.debug('Waiting on produce to %s',
batch.produce_future.topic_partition)
if not batch.produce_future.wait(timeout=timeout):
raise Errors.KafkaTimeoutError('Timeout waiting for future')
if not batch.produce_future.is_done:
raise Errors.UnknownError('Future not done')
if batch.produce_future.failed():
log.warning(batch.produce_future.exception)
finally:
self._flushes_in_progress.decrement() |
def autodiscover():
"""
Auto-discover INSTALLED_APPS backbone_api.py modules.
"""
# This code is based off django.contrib.admin.__init__
from django.conf import settings
try:
# Django versions >= 1.9
from django.utils.module_loading import import_module
except ImportError:
# Django versions < 1.9
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from backbone.views import BackboneAPIView # This is to prevent a circular import issue
for app in settings.INSTALLED_APPS:
mod = import_module(app)
# Attempt to import the app's backbone module.
try:
import_module('%s.backbone_api' % app)
except:
# Decide whether to bubble up this error. If the app just
# doesn't have an backbone module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(mod, 'backbone_api'):
raise | Auto-discover INSTALLED_APPS backbone_api.py modules. | Below is the the instruction that describes the task:
### Input:
Auto-discover INSTALLED_APPS backbone_api.py modules.
### Response:
def autodiscover():
"""
Auto-discover INSTALLED_APPS backbone_api.py modules.
"""
# This code is based off django.contrib.admin.__init__
from django.conf import settings
try:
# Django versions >= 1.9
from django.utils.module_loading import import_module
except ImportError:
# Django versions < 1.9
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from backbone.views import BackboneAPIView # This is to prevent a circular import issue
for app in settings.INSTALLED_APPS:
mod = import_module(app)
# Attempt to import the app's backbone module.
try:
import_module('%s.backbone_api' % app)
except:
# Decide whether to bubble up this error. If the app just
# doesn't have an backbone module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(mod, 'backbone_api'):
raise |
def DeleteUser(self, user_link, options=None):
"""Deletes a user.
:param str user_link:
The link to the user entity.
:param dict options:
The request options for the request.
:return:
The deleted user.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(user_link)
user_id = base.GetResourceIdOrFullNameFromLink(user_link)
return self.DeleteResource(path,
'users',
user_id,
None,
options) | Deletes a user.
:param str user_link:
The link to the user entity.
:param dict options:
The request options for the request.
:return:
The deleted user.
:rtype:
dict | Below is the the instruction that describes the task:
### Input:
Deletes a user.
:param str user_link:
The link to the user entity.
:param dict options:
The request options for the request.
:return:
The deleted user.
:rtype:
dict
### Response:
def DeleteUser(self, user_link, options=None):
"""Deletes a user.
:param str user_link:
The link to the user entity.
:param dict options:
The request options for the request.
:return:
The deleted user.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(user_link)
user_id = base.GetResourceIdOrFullNameFromLink(user_link)
return self.DeleteResource(path,
'users',
user_id,
None,
options) |
def visit_image(self, node):
"""
Image directive
"""
uri = node.attributes['uri']
doc_folder = os.path.dirname(self.builder.current_docname)
if uri.startswith(doc_folder):
# drop docname prefix
uri = uri[len(doc_folder):]
if uri.startswith("/"):
uri = "." + uri
        self.add('\n\n![image](%s)\n\n' % uri) | Image directive | Below is the the instruction that describes the task:
### Input:
Image directive
### Response:
def visit_image(self, node):
"""
Image directive
"""
uri = node.attributes['uri']
doc_folder = os.path.dirname(self.builder.current_docname)
if uri.startswith(doc_folder):
# drop docname prefix
uri = uri[len(doc_folder):]
if uri.startswith("/"):
uri = "." + uri
        self.add('\n\n![image](%s)\n\n' % uri)
def push_broks_to_broker(self): # pragma: no cover - not used!
"""Send all broks from arbiter internal list to broker
        The arbiter gets some broks and then pushes them to all the brokers.
:return: None
"""
someone_is_concerned = False
sent = False
for broker_link in self.conf.brokers:
# Send only if the broker is concerned...
if not broker_link.manage_arbiters:
continue
someone_is_concerned = True
if broker_link.reachable:
logger.debug("Sending %d broks to the broker %s", len(self.broks), broker_link.name)
if broker_link.push_broks(self.broks):
statsmgr.counter('broks.pushed.count', len(self.broks))
sent = True
if not someone_is_concerned or sent:
# No one is anymore interested with...
del self.broks[:] | Send all broks from arbiter internal list to broker
    The arbiter gets some broks and then pushes them to all the brokers.
:return: None | Below is the the instruction that describes the task:
### Input:
Send all broks from arbiter internal list to broker
    The arbiter gets some broks and then pushes them to all the brokers.
:return: None
### Response:
def push_broks_to_broker(self): # pragma: no cover - not used!
"""Send all broks from arbiter internal list to broker
        The arbiter gets some broks and then pushes them to all the brokers.
:return: None
"""
someone_is_concerned = False
sent = False
for broker_link in self.conf.brokers:
# Send only if the broker is concerned...
if not broker_link.manage_arbiters:
continue
someone_is_concerned = True
if broker_link.reachable:
logger.debug("Sending %d broks to the broker %s", len(self.broks), broker_link.name)
if broker_link.push_broks(self.broks):
statsmgr.counter('broks.pushed.count', len(self.broks))
sent = True
if not someone_is_concerned or sent:
# No one is anymore interested with...
del self.broks[:] |
def absent(name, force=False):
'''
Ensure that a container is absent
name
Name of the container
force : False
Set to ``True`` to remove the container even if it is running
Usage Examples:
.. code-block:: yaml
mycontainer:
docker_container.absent
multiple_containers:
docker_container.absent:
- names:
- foo
- bar
- baz
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if name not in __salt__['docker.list_containers'](all=True):
ret['result'] = True
ret['comment'] = 'Container \'{0}\' does not exist'.format(name)
return ret
pre_state = __salt__['docker.state'](name)
if pre_state != 'stopped' and not force:
ret['comment'] = ('Container is running, set force to True to '
'forcibly remove it')
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Container \'{0}\' will be removed'.format(name))
return ret
try:
ret['changes']['removed'] = __salt__['docker.rm'](name, force=force)
except Exception as exc:
ret['comment'] = ('Failed to remove container \'{0}\': {1}'
.format(name, exc))
return ret
if name in __salt__['docker.list_containers'](all=True):
ret['comment'] = 'Failed to remove container \'{0}\''.format(name)
else:
if force and pre_state != 'stopped':
method = 'Forcibly'
else:
method = 'Successfully'
ret['comment'] = '{0} removed container \'{1}\''.format(method, name)
ret['result'] = True
return ret | Ensure that a container is absent
name
Name of the container
force : False
Set to ``True`` to remove the container even if it is running
Usage Examples:
.. code-block:: yaml
mycontainer:
docker_container.absent
multiple_containers:
docker_container.absent:
- names:
- foo
- bar
- baz | Below is the the instruction that describes the task:
### Input:
Ensure that a container is absent
name
Name of the container
force : False
Set to ``True`` to remove the container even if it is running
Usage Examples:
.. code-block:: yaml
mycontainer:
docker_container.absent
multiple_containers:
docker_container.absent:
- names:
- foo
- bar
- baz
### Response:
def absent(name, force=False):
'''
Ensure that a container is absent
name
Name of the container
force : False
Set to ``True`` to remove the container even if it is running
Usage Examples:
.. code-block:: yaml
mycontainer:
docker_container.absent
multiple_containers:
docker_container.absent:
- names:
- foo
- bar
- baz
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if name not in __salt__['docker.list_containers'](all=True):
ret['result'] = True
ret['comment'] = 'Container \'{0}\' does not exist'.format(name)
return ret
pre_state = __salt__['docker.state'](name)
if pre_state != 'stopped' and not force:
ret['comment'] = ('Container is running, set force to True to '
'forcibly remove it')
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Container \'{0}\' will be removed'.format(name))
return ret
try:
ret['changes']['removed'] = __salt__['docker.rm'](name, force=force)
except Exception as exc:
ret['comment'] = ('Failed to remove container \'{0}\': {1}'
.format(name, exc))
return ret
if name in __salt__['docker.list_containers'](all=True):
ret['comment'] = 'Failed to remove container \'{0}\''.format(name)
else:
if force and pre_state != 'stopped':
method = 'Forcibly'
else:
method = 'Successfully'
ret['comment'] = '{0} removed container \'{1}\''.format(method, name)
ret['result'] = True
return ret |
def write_training_data(self, features, targets):
""" Writes data dictionary to filename """
assert len(features) == len(targets)
data = dict(zip(features, targets))
with open(os.path.join(self.repopath, 'training.pkl'), 'w') as fp:
pickle.dump(data, fp) | Writes data dictionary to filename | Below is the the instruction that describes the task:
### Input:
Writes data dictionary to filename
### Response:
def write_training_data(self, features, targets):
""" Writes data dictionary to filename """
assert len(features) == len(targets)
data = dict(zip(features, targets))
with open(os.path.join(self.repopath, 'training.pkl'), 'w') as fp:
pickle.dump(data, fp) |
def prediction_model_dict(self):
"""
Converts the list of prediction_models passed in into properly formatted dictionaries
:return: formatted prediction model dict
"""
models = {}
for model in self.predictions_models:
models[model.name] = model.keywords
return models | Converts the list of prediction_models passed in into properly formatted dictionaries
:return: formatted prediction model dict | Below is the the instruction that describes the task:
### Input:
Converts the list of prediction_models passed in into properly formatted dictionaries
:return: formatted prediction model dict
### Response:
def prediction_model_dict(self):
"""
Converts the list of prediction_models passed in into properly formatted dictionaries
:return: formatted prediction model dict
"""
models = {}
for model in self.predictions_models:
models[model.name] = model.keywords
return models |
def basicauthfail(self, realm = b'all'):
"""
Return 401 for authentication failure. This will end the handler.
"""
if not isinstance(realm, bytes):
realm = realm.encode('ascii')
self.start_response(401, [(b'WWW-Authenticate', b'Basic realm="' + realm + b'"')])
self.exit(b'<h1>' + _createstatus(401) + b'</h1>') | Return 401 for authentication failure. This will end the handler. | Below is the the instruction that describes the task:
### Input:
Return 401 for authentication failure. This will end the handler.
### Response:
def basicauthfail(self, realm = b'all'):
"""
Return 401 for authentication failure. This will end the handler.
"""
if not isinstance(realm, bytes):
realm = realm.encode('ascii')
self.start_response(401, [(b'WWW-Authenticate', b'Basic realm="' + realm + b'"')])
self.exit(b'<h1>' + _createstatus(401) + b'</h1>') |
def check_throttles(self, request):
"""
Check if request should be throttled.
Raises an appropriate exception if the request is throttled.
"""
for throttle in self.get_throttles():
if not throttle.allow_request(request, self):
self.throttled(request, throttle.wait()) | Check if request should be throttled.
Raises an appropriate exception if the request is throttled. | Below is the the instruction that describes the task:
### Input:
Check if request should be throttled.
Raises an appropriate exception if the request is throttled.
### Response:
def check_throttles(self, request):
"""
Check if request should be throttled.
Raises an appropriate exception if the request is throttled.
"""
for throttle in self.get_throttles():
if not throttle.allow_request(request, self):
self.throttled(request, throttle.wait()) |
def video_category(self):
"""doc: http://open.youku.com/docs/doc?id=90
"""
url = 'https://openapi.youku.com/v2/schemas/video/category.json'
r = requests.get(url)
check_error(r)
return r.json() | doc: http://open.youku.com/docs/doc?id=90 | Below is the the instruction that describes the task:
### Input:
doc: http://open.youku.com/docs/doc?id=90
### Response:
def video_category(self):
"""doc: http://open.youku.com/docs/doc?id=90
"""
url = 'https://openapi.youku.com/v2/schemas/video/category.json'
r = requests.get(url)
check_error(r)
return r.json() |
def add_code_cell(work_notebook, code):
"""Add a code cell to the notebook
Parameters
----------
code : str
Cell content
"""
code_cell = {
"cell_type": "code",
"execution_count": None,
"metadata": {"collapsed": False},
"outputs": [],
"source": [code.strip()]
}
work_notebook["cells"].append(code_cell) | Add a code cell to the notebook
Parameters
----------
code : str
Cell content | Below is the the instruction that describes the task:
### Input:
Add a code cell to the notebook
Parameters
----------
code : str
Cell content
### Response:
def add_code_cell(work_notebook, code):
"""Add a code cell to the notebook
Parameters
----------
code : str
Cell content
"""
code_cell = {
"cell_type": "code",
"execution_count": None,
"metadata": {"collapsed": False},
"outputs": [],
"source": [code.strip()]
}
work_notebook["cells"].append(code_cell) |
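A usage sketch with a minimal notebook dictionary; only the keys this helper touches plus the usual nbformat fields are included.
work_notebook = {
    "cells": [],
    "metadata": {},
    "nbformat": 4,
    "nbformat_minor": 2,
}
add_code_cell(work_notebook, "import numpy as np\nprint(np.arange(3))")
print(len(work_notebook["cells"]))           # 1
print(work_notebook["cells"][0]["source"])   # the stripped code as a one-item list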
def _get_acceleration(self, data):
'''Return acceleration mG'''
if (data[7:8] == 0x7FFF or
data[9:10] == 0x7FFF or
data[11:12] == 0x7FFF):
return (None, None, None)
acc_x = twos_complement((data[7] << 8) + data[8], 16)
acc_y = twos_complement((data[9] << 8) + data[10], 16)
acc_z = twos_complement((data[11] << 8) + data[12], 16)
return (acc_x, acc_y, acc_z) | Return acceleration mG | Below is the the instruction that describes the task:
### Input:
Return acceleration mG
### Response:
def _get_acceleration(self, data):
'''Return acceleration mG'''
if (data[7:8] == 0x7FFF or
data[9:10] == 0x7FFF or
data[11:12] == 0x7FFF):
return (None, None, None)
acc_x = twos_complement((data[7] << 8) + data[8], 16)
acc_y = twos_complement((data[9] << 8) + data[10], 16)
acc_z = twos_complement((data[11] << 8) + data[12], 16)
return (acc_x, acc_y, acc_z) |
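A standalone sketch of the same decoding; the library ships its own twos_complement helper, and the byte values below are made up.
def twos_complement(value, bits):
    if value >= 1 << (bits - 1):
        value -= 1 << bits
    return value

def decode_acceleration(data):
    acc_x = twos_complement((data[7] << 8) + data[8], 16)
    acc_y = twos_complement((data[9] << 8) + data[10], 16)
    acc_z = twos_complement((data[11] << 8) + data[12], 16)
    return acc_x, acc_y, acc_z

# X = 0x03E8 (+1000 mG), Y = 0xFC18 (-1000 mG), Z = 0x0000
sample = bytes([0, 0, 0, 0, 0, 0, 0, 0x03, 0xE8, 0xFC, 0x18, 0x00, 0x00])
print(decode_acceleration(sample))   # (1000, -1000, 0)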
def start(self):
"""
start
"""
def main_thread():
# create resp, req thread pool
self._create_thread_pool()
# start connection, this will block until stop()
self.conn_thread = Thread(target=self._conn.connect)
self.conn_thread.daemon = True
self.conn_thread.start()
# register model to controller...
self.is_ready.wait()
if hasattr(self, 'run'):
_logger.debug("Start running...")
self.run()
# start main_thread
self.main_thread = Thread(target=main_thread)
self.main_thread.daemon = True
self.main_thread.start()
if threading.current_thread().__class__.__name__ == '_MainThread':
# control this bundle stop or not
while not self.stop_event.wait(1):
sleep(1)
else:
self.stop_event.wait()
self.stop()
_logger.debug("Shutdown successfully") | start | Below is the the instruction that describes the task:
### Input:
start
### Response:
def start(self):
"""
start
"""
def main_thread():
# create resp, req thread pool
self._create_thread_pool()
# start connection, this will block until stop()
self.conn_thread = Thread(target=self._conn.connect)
self.conn_thread.daemon = True
self.conn_thread.start()
# register model to controller...
self.is_ready.wait()
if hasattr(self, 'run'):
_logger.debug("Start running...")
self.run()
# start main_thread
self.main_thread = Thread(target=main_thread)
self.main_thread.daemon = True
self.main_thread.start()
if threading.current_thread().__class__.__name__ == '_MainThread':
# control this bundle stop or not
while not self.stop_event.wait(1):
sleep(1)
else:
self.stop_event.wait()
self.stop()
_logger.debug("Shutdown successfully") |
def calc_fracrain_v1(self):
"""Determine the temperature-dependent fraction of (liquid) rainfall
and (total) precipitation.
Required control parameters:
|NmbZones|
|TT|,
|TTInt|
Required flux sequence:
|TC|
Calculated flux sequences:
|FracRain|
Basic equation:
:math:`FracRain = \\frac{TC-(TT-\\frac{TTInt}{2})}{TTInt}`
Restriction:
:math:`0 \\leq FracRain \\leq 1`
Examples:
The threshold temperature of seven zones is 0°C and the corresponding
temperature interval of mixed precipitation 2°C:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(7)
>>> tt(0.0)
>>> ttint(2.0)
The fraction of rainfall is zero below -1°C, is one above 1°C and
increases linearly in between:
>>> fluxes.tc = -10.0, -1.0, -0.5, 0.0, 0.5, 1.0, 10.0
>>> model.calc_fracrain_v1()
>>> fluxes.fracrain
fracrain(0.0, 0.0, 0.25, 0.5, 0.75, 1.0, 1.0)
        Note the special case of a zero temperature interval. With an
actual temperature being equal to the threshold temperature, the
rainfall fraction is one:
>>> ttint(0.0)
>>> model.calc_fracrain_v1()
>>> fluxes.fracrain
fracrain(0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0)
"""
con = self.parameters.control.fastaccess
flu = self.sequences.fluxes.fastaccess
for k in range(con.nmbzones):
if flu.tc[k] >= (con.tt[k]+con.ttint[k]/2.):
flu.fracrain[k] = 1.
elif flu.tc[k] <= (con.tt[k]-con.ttint[k]/2.):
flu.fracrain[k] = 0.
else:
flu.fracrain[k] = ((flu.tc[k]-(con.tt[k]-con.ttint[k]/2.)) /
con.ttint[k]) | Determine the temperature-dependent fraction of (liquid) rainfall
and (total) precipitation.
Required control parameters:
|NmbZones|
|TT|,
|TTInt|
Required flux sequence:
|TC|
Calculated flux sequences:
|FracRain|
Basic equation:
:math:`FracRain = \\frac{TC-(TT-\\frac{TTInt}{2})}{TTInt}`
Restriction:
:math:`0 \\leq FracRain \\leq 1`
Examples:
The threshold temperature of seven zones is 0°C and the corresponding
temperature interval of mixed precipitation 2°C:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(7)
>>> tt(0.0)
>>> ttint(2.0)
The fraction of rainfall is zero below -1°C, is one above 1°C and
increases linearly in between:
>>> fluxes.tc = -10.0, -1.0, -0.5, 0.0, 0.5, 1.0, 10.0
>>> model.calc_fracrain_v1()
>>> fluxes.fracrain
fracrain(0.0, 0.0, 0.25, 0.5, 0.75, 1.0, 1.0)
    Note the special case of a zero temperature interval. With an
actual temperature being equal to the threshold temperature, the
rainfall fraction is one:
>>> ttint(0.0)
>>> model.calc_fracrain_v1()
>>> fluxes.fracrain
fracrain(0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0) | Below is the the instruction that describes the task:
### Input:
Determine the temperature-dependent fraction of (liquid) rainfall
and (total) precipitation.
Required control parameters:
|NmbZones|
|TT|,
|TTInt|
Required flux sequence:
|TC|
Calculated flux sequences:
|FracRain|
Basic equation:
:math:`FracRain = \\frac{TC-(TT-\\frac{TTInt}{2})}{TTInt}`
Restriction:
:math:`0 \\leq FracRain \\leq 1`
Examples:
The threshold temperature of seven zones is 0°C and the corresponding
temperature interval of mixed precipitation 2°C:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(7)
>>> tt(0.0)
>>> ttint(2.0)
The fraction of rainfall is zero below -1°C, is one above 1°C and
increases linearly in between:
>>> fluxes.tc = -10.0, -1.0, -0.5, 0.0, 0.5, 1.0, 10.0
>>> model.calc_fracrain_v1()
>>> fluxes.fracrain
fracrain(0.0, 0.0, 0.25, 0.5, 0.75, 1.0, 1.0)
    Note the special case of a zero temperature interval. With an
actual temperature being equal to the threshold temperature, the
rainfall fraction is one:
>>> ttint(0.0)
>>> model.calc_fracrain_v1()
>>> fluxes.fracrain
fracrain(0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0)
### Response:
def calc_fracrain_v1(self):
"""Determine the temperature-dependent fraction of (liquid) rainfall
and (total) precipitation.
Required control parameters:
|NmbZones|
|TT|,
|TTInt|
Required flux sequence:
|TC|
Calculated flux sequences:
|FracRain|
Basic equation:
:math:`FracRain = \\frac{TC-(TT-\\frac{TTInt}{2})}{TTInt}`
Restriction:
:math:`0 \\leq FracRain \\leq 1`
Examples:
The threshold temperature of seven zones is 0°C and the corresponding
temperature interval of mixed precipitation 2°C:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(7)
>>> tt(0.0)
>>> ttint(2.0)
The fraction of rainfall is zero below -1°C, is one above 1°C and
increases linearly in between:
>>> fluxes.tc = -10.0, -1.0, -0.5, 0.0, 0.5, 1.0, 10.0
>>> model.calc_fracrain_v1()
>>> fluxes.fracrain
fracrain(0.0, 0.0, 0.25, 0.5, 0.75, 1.0, 1.0)
        Note the special case of a zero temperature interval. With an
actual temperature being equal to the threshold temperature, the
rainfall fraction is one:
>>> ttint(0.0)
>>> model.calc_fracrain_v1()
>>> fluxes.fracrain
fracrain(0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0)
"""
con = self.parameters.control.fastaccess
flu = self.sequences.fluxes.fastaccess
for k in range(con.nmbzones):
if flu.tc[k] >= (con.tt[k]+con.ttint[k]/2.):
flu.fracrain[k] = 1.
elif flu.tc[k] <= (con.tt[k]-con.ttint[k]/2.):
flu.fracrain[k] = 0.
else:
flu.fracrain[k] = ((flu.tc[k]-(con.tt[k]-con.ttint[k]/2.)) /
con.ttint[k]) |
def _raise_error(self, status_code, raw_data):
"""
Locate appropriate exception and raise it.
"""
error_message = raw_data
additional_info = None
try:
additional_info = json.loads(raw_data)
error_message = additional_info.get('error', error_message)
if isinstance(error_message, dict) and 'type' in error_message:
error_message = error_message['type']
except:
pass
raise HTTP_EXCEPTIONS.get(status_code, TransportError)(status_code, error_message, additional_info) | Locate appropriate exception and raise it. | Below is the the instruction that describes the task:
### Input:
Locate appropriate exception and raise it.
### Response:
def _raise_error(self, status_code, raw_data):
"""
Locate appropriate exception and raise it.
"""
error_message = raw_data
additional_info = None
try:
additional_info = json.loads(raw_data)
error_message = additional_info.get('error', error_message)
if isinstance(error_message, dict) and 'type' in error_message:
error_message = error_message['type']
except:
pass
raise HTTP_EXCEPTIONS.get(status_code, TransportError)(status_code, error_message, additional_info) |
def extend(self, table, keys=None):
"""Extends all rows in the texttable.
The rows are extended with the new columns from the table.
Args:
table: A texttable, the table to extend this table by.
keys: A set, the set of columns to use as the key. If None, the
row index is used.
Raises:
IndexError: If key is not a valid column name.
"""
if keys:
for k in keys:
if k not in self._Header():
raise IndexError("Unknown key: '%s'", k)
extend_with = []
for column in table.header:
if column not in self.header:
extend_with.append(column)
if not extend_with:
return
for column in extend_with:
self.AddColumn(column)
if not keys:
for row1, row2 in zip(self, table):
for column in extend_with:
row1[column] = row2[column]
return
for row1 in self:
for row2 in table:
for k in keys:
if row1[k] != row2[k]:
break
else:
for column in extend_with:
row1[column] = row2[column]
break | Extends all rows in the texttable.
The rows are extended with the new columns from the table.
Args:
table: A texttable, the table to extend this table by.
keys: A set, the set of columns to use as the key. If None, the
row index is used.
Raises:
IndexError: If key is not a valid column name. | Below is the the instruction that describes the task:
### Input:
Extends all rows in the texttable.
The rows are extended with the new columns from the table.
Args:
table: A texttable, the table to extend this table by.
keys: A set, the set of columns to use as the key. If None, the
row index is used.
Raises:
IndexError: If key is not a valid column name.
### Response:
def extend(self, table, keys=None):
"""Extends all rows in the texttable.
The rows are extended with the new columns from the table.
Args:
table: A texttable, the table to extend this table by.
keys: A set, the set of columns to use as the key. If None, the
row index is used.
Raises:
IndexError: If key is not a valid column name.
"""
if keys:
for k in keys:
if k not in self._Header():
raise IndexError("Unknown key: '%s'", k)
extend_with = []
for column in table.header:
if column not in self.header:
extend_with.append(column)
if not extend_with:
return
for column in extend_with:
self.AddColumn(column)
if not keys:
for row1, row2 in zip(self, table):
for column in extend_with:
row1[column] = row2[column]
return
for row1 in self:
for row2 in table:
for k in keys:
if row1[k] != row2[k]:
break
else:
for column in extend_with:
row1[column] = row2[column]
break |
def color_scale_HSV(c: Color, scoef: float, vcoef: float) -> None:
"""Scale a color's saturation and value.
Does not return a new Color. ``c`` is modified inplace.
Args:
c (Union[Color, List[int]]): A Color instance, or an [r, g, b] list.
scoef (float): Saturation multiplier, from 0 to 1.
Use 1 to keep current saturation.
vcoef (float): Value multiplier, from 0 to 1.
Use 1 to keep current value.
"""
color_p = ffi.new("TCOD_color_t*")
color_p.r, color_p.g, color_p.b = c.r, c.g, c.b
lib.TCOD_color_scale_HSV(color_p, scoef, vcoef)
c[:] = color_p.r, color_p.g, color_p.b | Scale a color's saturation and value.
Does not return a new Color. ``c`` is modified inplace.
Args:
c (Union[Color, List[int]]): A Color instance, or an [r, g, b] list.
scoef (float): Saturation multiplier, from 0 to 1.
Use 1 to keep current saturation.
vcoef (float): Value multiplier, from 0 to 1.
Use 1 to keep current value. | Below is the the instruction that describes the task:
### Input:
Scale a color's saturation and value.
Does not return a new Color. ``c`` is modified inplace.
Args:
c (Union[Color, List[int]]): A Color instance, or an [r, g, b] list.
scoef (float): Saturation multiplier, from 0 to 1.
Use 1 to keep current saturation.
vcoef (float): Value multiplier, from 0 to 1.
Use 1 to keep current value.
### Response:
def color_scale_HSV(c: Color, scoef: float, vcoef: float) -> None:
"""Scale a color's saturation and value.
Does not return a new Color. ``c`` is modified inplace.
Args:
c (Union[Color, List[int]]): A Color instance, or an [r, g, b] list.
scoef (float): Saturation multiplier, from 0 to 1.
Use 1 to keep current saturation.
vcoef (float): Value multiplier, from 0 to 1.
Use 1 to keep current value.
"""
color_p = ffi.new("TCOD_color_t*")
color_p.r, color_p.g, color_p.b = c.r, c.g, c.b
lib.TCOD_color_scale_HSV(color_p, scoef, vcoef)
c[:] = color_p.r, color_p.g, color_p.b |
def write_file(path, content, mode='w'):
# type: (Text, Union[Text,bytes], Text) -> None
""" --pretend aware file writing.
You can always write files manually but you should always handle the
--pretend case.
Args:
path (str):
content (str):
mode (str):
"""
from peltak.core import context
from peltak.core import log
if context.get('pretend', False):
log.info("Would overwrite <34>{path}<32> with:\n<90>{content}",
path=path,
content=content)
else:
with open(path, mode) as fp:
fp.write(content) | --pretend aware file writing.
You can always write files manually but you should always handle the
--pretend case.
Args:
path (str):
content (str):
mode (str): | Below is the the instruction that describes the task:
### Input:
--pretend aware file writing.
You can always write files manually but you should always handle the
--pretend case.
Args:
path (str):
content (str):
mode (str):
### Response:
def write_file(path, content, mode='w'):
# type: (Text, Union[Text,bytes], Text) -> None
""" --pretend aware file writing.
You can always write files manually but you should always handle the
--pretend case.
Args:
path (str):
content (str):
mode (str):
"""
from peltak.core import context
from peltak.core import log
if context.get('pretend', False):
log.info("Would overwrite <34>{path}<32> with:\n<90>{content}",
path=path,
content=content)
else:
with open(path, mode) as fp:
fp.write(content) |
def decode(tokens):
"""Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string
"""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret) | Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string | Below is the the instruction that describes the task:
### Input:
Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string
### Response:
def decode(tokens):
"""Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string
"""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret) |
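A self-contained sketch of the same joining rule; the real module's _ALPHANUMERIC_CHAR_SET covers Unicode letters and digits, which the simplified set below only approximates.
ALNUM = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")

def decode_tokens(tokens):
    is_alnum = [t[0] in ALNUM for t in tokens]
    out = []
    for i, token in enumerate(tokens):
        if i > 0 and is_alnum[i - 1] and is_alnum[i]:
            out.append(" ")    # a space is re-inserted only between two alphanumeric tokens
        out.append(token)
    return "".join(out)

print(decode_tokens(["Hello", "world", "!"]))   # Hello world!
print(decode_tokens(["3", ".", "14"]))          # 3.14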
def parse_classname(self, tup_tree):
"""
Parse a CLASSNAME element and return the class path it represents as a
CIMClassName object.
::
<!ELEMENT CLASSNAME EMPTY>
<!ATTLIST CLASSNAME
%CIMName;>
Returns:
CIMClassName object (without namespace or host)
"""
self.check_node(tup_tree, 'CLASSNAME', ('NAME',), (), ())
classname = attrs(tup_tree)['NAME']
class_path = CIMClassName(classname)
return class_path | Parse a CLASSNAME element and return the class path it represents as a
CIMClassName object.
::
<!ELEMENT CLASSNAME EMPTY>
<!ATTLIST CLASSNAME
%CIMName;>
Returns:
CIMClassName object (without namespace or host) | Below is the the instruction that describes the task:
### Input:
Parse a CLASSNAME element and return the class path it represents as a
CIMClassName object.
::
<!ELEMENT CLASSNAME EMPTY>
<!ATTLIST CLASSNAME
%CIMName;>
Returns:
CIMClassName object (without namespace or host)
### Response:
def parse_classname(self, tup_tree):
"""
Parse a CLASSNAME element and return the class path it represents as a
CIMClassName object.
::
<!ELEMENT CLASSNAME EMPTY>
<!ATTLIST CLASSNAME
%CIMName;>
Returns:
CIMClassName object (without namespace or host)
"""
self.check_node(tup_tree, 'CLASSNAME', ('NAME',), (), ())
classname = attrs(tup_tree)['NAME']
class_path = CIMClassName(classname)
return class_path |
def start_kline_socket(self, symbol, callback, interval=Client.KLINE_INTERVAL_1MINUTE):
"""Start a websocket for symbol kline data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#klinecandlestick-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:param interval: Kline interval, default KLINE_INTERVAL_1MINUTE
:type interval: str
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "kline", # event type
"E": 1499404907056, # event time
"s": "ETHBTC", # symbol
"k": {
"t": 1499404860000, # start time of this bar
"T": 1499404919999, # end time of this bar
"s": "ETHBTC", # symbol
"i": "1m", # interval
"f": 77462, # first trade id
"L": 77465, # last trade id
"o": "0.10278577", # open
"c": "0.10278645", # close
"h": "0.10278712", # high
"l": "0.10278518", # low
"v": "17.47929838", # volume
"n": 4, # number of trades
"x": false, # whether this bar is final
"q": "1.79662878", # quote volume
"V": "2.34879839", # volume of active buy
"Q": "0.24142166", # quote volume of active buy
"B": "13279784.01349473" # can be ignored
}
}
"""
socket_name = '{}@kline_{}'.format(symbol.lower(), interval)
return self._start_socket(socket_name, callback) | Start a websocket for symbol kline data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#klinecandlestick-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:param interval: Kline interval, default KLINE_INTERVAL_1MINUTE
:type interval: str
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "kline", # event type
"E": 1499404907056, # event time
"s": "ETHBTC", # symbol
"k": {
"t": 1499404860000, # start time of this bar
"T": 1499404919999, # end time of this bar
"s": "ETHBTC", # symbol
"i": "1m", # interval
"f": 77462, # first trade id
"L": 77465, # last trade id
"o": "0.10278577", # open
"c": "0.10278645", # close
"h": "0.10278712", # high
"l": "0.10278518", # low
"v": "17.47929838", # volume
"n": 4, # number of trades
"x": false, # whether this bar is final
"q": "1.79662878", # quote volume
"V": "2.34879839", # volume of active buy
"Q": "0.24142166", # quote volume of active buy
"B": "13279784.01349473" # can be ignored
}
} | Below is the the instruction that describes the task:
### Input:
Start a websocket for symbol kline data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#klinecandlestick-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:param interval: Kline interval, default KLINE_INTERVAL_1MINUTE
:type interval: str
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "kline", # event type
"E": 1499404907056, # event time
"s": "ETHBTC", # symbol
"k": {
"t": 1499404860000, # start time of this bar
"T": 1499404919999, # end time of this bar
"s": "ETHBTC", # symbol
"i": "1m", # interval
"f": 77462, # first trade id
"L": 77465, # last trade id
"o": "0.10278577", # open
"c": "0.10278645", # close
"h": "0.10278712", # high
"l": "0.10278518", # low
"v": "17.47929838", # volume
"n": 4, # number of trades
"x": false, # whether this bar is final
"q": "1.79662878", # quote volume
"V": "2.34879839", # volume of active buy
"Q": "0.24142166", # quote volume of active buy
"B": "13279784.01349473" # can be ignored
}
}
### Response:
def start_kline_socket(self, symbol, callback, interval=Client.KLINE_INTERVAL_1MINUTE):
"""Start a websocket for symbol kline data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#klinecandlestick-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:param interval: Kline interval, default KLINE_INTERVAL_1MINUTE
:type interval: str
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "kline", # event type
"E": 1499404907056, # event time
"s": "ETHBTC", # symbol
"k": {
"t": 1499404860000, # start time of this bar
"T": 1499404919999, # end time of this bar
"s": "ETHBTC", # symbol
"i": "1m", # interval
"f": 77462, # first trade id
"L": 77465, # last trade id
"o": "0.10278577", # open
"c": "0.10278645", # close
"h": "0.10278712", # high
"l": "0.10278518", # low
"v": "17.47929838", # volume
"n": 4, # number of trades
"x": false, # whether this bar is final
"q": "1.79662878", # quote volume
"V": "2.34879839", # volume of active buy
"Q": "0.24142166", # quote volume of active buy
"B": "13279784.01349473" # can be ignored
}
}
"""
socket_name = '{}@kline_{}'.format(symbol.lower(), interval)
return self._start_socket(socket_name, callback) |
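A usage sketch assuming the classic threaded python-binance API (module paths differ in newer releases); the API credentials are placeholders.
from binance.client import Client
from binance.websockets import BinanceSocketManager

client = Client("API_KEY", "API_SECRET")
bm = BinanceSocketManager(client)

def handle_kline(msg):
    k = msg["k"]
    print(msg["s"], k["i"], "close:", k["c"], "final bar:", k["x"])

conn_key = bm.start_kline_socket("ETHBTC", handle_kline,
                                 interval=Client.KLINE_INTERVAL_1MINUTE)
bm.start()   # runs in a background thread; call bm.close() to stop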
def _set_vcs_cluster_type_info(self, v, load=False):
"""
Setter method for vcs_cluster_type_info, mapped from YANG variable /brocade_vcs_rpc/show_vcs/output/vcs_cluster_type_info (vcs-cluster-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_vcs_cluster_type_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vcs_cluster_type_info() directly.
YANG Description: Vcs Type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'vcs-fabric-cluster': {'value': 3}, u'vcs-unknown-cluster': {'value': 1}, u'vcs-stand-alone': {'value': 2}, u'vcs-management-cluster': {'value': 4}},), is_leaf=True, yang_name="vcs-cluster-type-info", rest_name="vcs-cluster-type-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='vcs-cluster-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vcs_cluster_type_info must be of a type compatible with vcs-cluster-type""",
'defined-type': "brocade-vcs:vcs-cluster-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'vcs-fabric-cluster': {'value': 3}, u'vcs-unknown-cluster': {'value': 1}, u'vcs-stand-alone': {'value': 2}, u'vcs-management-cluster': {'value': 4}},), is_leaf=True, yang_name="vcs-cluster-type-info", rest_name="vcs-cluster-type-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='vcs-cluster-type', is_config=True)""",
})
self.__vcs_cluster_type_info = t
if hasattr(self, '_set'):
self._set() | Setter method for vcs_cluster_type_info, mapped from YANG variable /brocade_vcs_rpc/show_vcs/output/vcs_cluster_type_info (vcs-cluster-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_vcs_cluster_type_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vcs_cluster_type_info() directly.
YANG Description: Vcs Type | Below is the the instruction that describes the task:
### Input:
Setter method for vcs_cluster_type_info, mapped from YANG variable /brocade_vcs_rpc/show_vcs/output/vcs_cluster_type_info (vcs-cluster-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_vcs_cluster_type_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vcs_cluster_type_info() directly.
YANG Description: Vcs Type
### Response:
def _set_vcs_cluster_type_info(self, v, load=False):
"""
Setter method for vcs_cluster_type_info, mapped from YANG variable /brocade_vcs_rpc/show_vcs/output/vcs_cluster_type_info (vcs-cluster-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_vcs_cluster_type_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vcs_cluster_type_info() directly.
YANG Description: Vcs Type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'vcs-fabric-cluster': {'value': 3}, u'vcs-unknown-cluster': {'value': 1}, u'vcs-stand-alone': {'value': 2}, u'vcs-management-cluster': {'value': 4}},), is_leaf=True, yang_name="vcs-cluster-type-info", rest_name="vcs-cluster-type-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='vcs-cluster-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vcs_cluster_type_info must be of a type compatible with vcs-cluster-type""",
'defined-type': "brocade-vcs:vcs-cluster-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'vcs-fabric-cluster': {'value': 3}, u'vcs-unknown-cluster': {'value': 1}, u'vcs-stand-alone': {'value': 2}, u'vcs-management-cluster': {'value': 4}},), is_leaf=True, yang_name="vcs-cluster-type-info", rest_name="vcs-cluster-type-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='vcs-cluster-type', is_config=True)""",
})
self.__vcs_cluster_type_info = t
if hasattr(self, '_set'):
self._set() |
def check(self, check_all=True, do_reload=True):
"""Check whether some modules need to be reloaded."""
with enaml.imports():
super(EnamlReloader, self).check(check_all=check_all, do_reload=do_reload) | Check whether some modules need to be reloaded. | Below is the the instruction that describes the task:
### Input:
Check whether some modules need to be reloaded.
### Response:
def check(self, check_all=True, do_reload=True):
"""Check whether some modules need to be reloaded."""
with enaml.imports():
super(EnamlReloader, self).check(check_all=check_all, do_reload=do_reload) |
def convert_choices_to_dict(choices):
""" Takes a list of tuples and converts it to a list of dictionaries of
where each dictionary has a value and text key. This is the expected format
of question choices to be returned by self.ask()
    :param choices:
    :type choices: list
:return:
:rtype:
"""
formatted_choices = []
for x in choices:
formatted_choices.append({
'value': x[0],
'text': x[1],
})
return formatted_choices | Takes a list of tuples and converts it to a list of dictionaries of
where each dictionary has a value and text key. This is the expected format
of question choices to be returned by self.ask()
    :param choices:
    :type choices: list
:return:
:rtype: | Below is the the instruction that describes the task:
### Input:
Takes a list of tuples and converts it to a list of dictionaries of
where each dictionary has a value and text key. This is the expected format
of question choices to be returned by self.ask()
    :param choices:
    :type choices: list
:return:
:rtype:
### Response:
def convert_choices_to_dict(choices):
""" Takes a list of tuples and converts it to a list of dictionaries of
where each dictionary has a value and text key. This is the expected format
of question choices to be returned by self.ask()
    :param choices:
    :type choices: list
:return:
:rtype:
"""
formatted_choices = []
for x in choices:
formatted_choices.append({
'value': x[0],
'text': x[1],
})
return formatted_choices |
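A worked example with Django-style choice tuples.
choices = [("s", "Small"), ("m", "Medium"), ("l", "Large")]
print(convert_choices_to_dict(choices))
# [{'value': 's', 'text': 'Small'}, {'value': 'm', 'text': 'Medium'},
#  {'value': 'l', 'text': 'Large'}]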
def partial_fit(self, X):
"""Train model based on mini-batch of input data.
Return cost of mini-batch.
"""
opt, cost = self.sess.run((self.optimizer, self.cost),
feed_dict={self.x: X})
return cost | Train model based on mini-batch of input data.
Return cost of mini-batch. | Below is the the instruction that describes the task:
### Input:
Train model based on mini-batch of input data.
Return cost of mini-batch.
### Response:
def partial_fit(self, X):
"""Train model based on mini-batch of input data.
Return cost of mini-batch.
"""
opt, cost = self.sess.run((self.optimizer, self.cost),
feed_dict={self.x: X})
return cost |
def update_observation(observation_id: int, params: Dict[str, Any], access_token: str) -> List[Dict[str, Any]]:
"""
Update a single observation. See https://www.inaturalist.org/pages/api+reference#put-observations-id
:param observation_id: the ID of the observation to update
:param params: to be passed to iNaturalist API
:param access_token: the access token, as returned by :func:`get_access_token()`
:return: iNaturalist's JSON response, as a Python object
:raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 410 if the observation
    doesn't exist or belongs to another user (as of November 2018).
"""
response = requests.put(url="{base_url}/observations/{id}.json".format(base_url=INAT_BASE_URL, id=observation_id),
json=params,
headers=_build_auth_header(access_token))
response.raise_for_status()
return response.json() | Update a single observation. See https://www.inaturalist.org/pages/api+reference#put-observations-id
:param observation_id: the ID of the observation to update
:param params: to be passed to iNaturalist API
:param access_token: the access token, as returned by :func:`get_access_token()`
:return: iNaturalist's JSON response, as a Python object
:raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 410 if the observation
    doesn't exist or belongs to another user (as of November 2018). | Below is the the instruction that describes the task:
### Input:
Update a single observation. See https://www.inaturalist.org/pages/api+reference#put-observations-id
:param observation_id: the ID of the observation to update
:param params: to be passed to iNaturalist API
:param access_token: the access token, as returned by :func:`get_access_token()`
:return: iNaturalist's JSON response, as a Python object
:raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 410 if the observation
    doesn't exist or belongs to another user (as of November 2018).
### Response:
def update_observation(observation_id: int, params: Dict[str, Any], access_token: str) -> List[Dict[str, Any]]:
"""
Update a single observation. See https://www.inaturalist.org/pages/api+reference#put-observations-id
:param observation_id: the ID of the observation to update
:param params: to be passed to iNaturalist API
:param access_token: the access token, as returned by :func:`get_access_token()`
:return: iNaturalist's JSON response, as a Python object
:raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 410 if the observation
    doesn't exist or belongs to another user (as of November 2018).
"""
response = requests.put(url="{base_url}/observations/{id}.json".format(base_url=INAT_BASE_URL, id=observation_id),
json=params,
headers=_build_auth_header(access_token))
response.raise_for_status()
return response.json() |
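A usage sketch; get_access_token is assumed to come from the same module, and the observation id, credentials, and payload are placeholders.
token = get_access_token("username", "password", "app_id", "app_secret")
updated = update_observation(
    17932425,                                                # placeholder id
    params={"observation": {"description": "updated description"}},
    access_token=token,
)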
def resample(grid, wl, flux):
""" Resample spectrum onto desired grid """
flux_rs = (interpolate.interp1d(wl, flux))(grid)
return flux_rs | Resample spectrum onto desired grid | Below is the the instruction that describes the task:
### Input:
Resample spectrum onto desired grid
### Response:
def resample(grid, wl, flux):
""" Resample spectrum onto desired grid """
flux_rs = (interpolate.interp1d(wl, flux))(grid)
return flux_rs |
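A usage sketch with a toy spectrum; the target grid must lie inside the original wavelength range, since interp1d does not extrapolate by default.
import numpy as np

wl = np.linspace(4000.0, 7000.0, 3001)      # original wavelength grid
flux = np.sin(wl / 500.0) + 2.0             # toy spectrum
grid = np.linspace(4100.0, 6900.0, 500)     # coarser target grid inside wl

flux_rs = resample(grid, wl, flux)
print(flux_rs.shape)                        # (500,)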
def data_nodes(self):
"""
Returns all data nodes of the dispatcher.
:return:
All data nodes of the dispatcher.
:rtype: dict[str, dict]
"""
return {k: v for k, v in self.nodes.items() if v['type'] == 'data'} | Returns all data nodes of the dispatcher.
:return:
All data nodes of the dispatcher.
:rtype: dict[str, dict] | Below is the the instruction that describes the task:
### Input:
Returns all data nodes of the dispatcher.
:return:
All data nodes of the dispatcher.
:rtype: dict[str, dict]
### Response:
def data_nodes(self):
"""
Returns all data nodes of the dispatcher.
:return:
All data nodes of the dispatcher.
:rtype: dict[str, dict]
"""
return {k: v for k, v in self.nodes.items() if v['type'] == 'data'} |
def unpack_results(
data: bytes,
repetitions: int,
key_sizes: Sequence[Tuple[str, int]]
) -> Dict[str, np.ndarray]:
"""Unpack data from a bitstring into individual measurement results.
Args:
data: Packed measurement results, in the form <rep0><rep1>...
where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...
with bits packed in little-endian order in each byte.
repetitions: number of repetitions.
key_sizes: Keys and sizes of the measurements in the data.
Returns:
Dict mapping measurement key to a 2D array of boolean results. Each
array has shape (repetitions, size) with size for that measurement.
"""
bits_per_rep = sum(size for _, size in key_sizes)
total_bits = repetitions * bits_per_rep
byte_arr = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))
bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1).astype(bool)
bits = bits[:total_bits].reshape((repetitions, bits_per_rep))
results = {}
ofs = 0
for key, size in key_sizes:
results[key] = bits[:, ofs:ofs + size]
ofs += size
return results | Unpack data from a bitstring into individual measurement results.
Args:
data: Packed measurement results, in the form <rep0><rep1>...
where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...
with bits packed in little-endian order in each byte.
repetitions: number of repetitions.
key_sizes: Keys and sizes of the measurements in the data.
Returns:
Dict mapping measurement key to a 2D array of boolean results. Each
array has shape (repetitions, size) with size for that measurement. | Below is the the instruction that describes the task:
### Input:
Unpack data from a bitstring into individual measurement results.
Args:
data: Packed measurement results, in the form <rep0><rep1>...
where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...
with bits packed in little-endian order in each byte.
repetitions: number of repetitions.
key_sizes: Keys and sizes of the measurements in the data.
Returns:
Dict mapping measurement key to a 2D array of boolean results. Each
array has shape (repetitions, size) with size for that measurement.
### Response:
def unpack_results(
data: bytes,
repetitions: int,
key_sizes: Sequence[Tuple[str, int]]
) -> Dict[str, np.ndarray]:
"""Unpack data from a bitstring into individual measurement results.
Args:
data: Packed measurement results, in the form <rep0><rep1>...
where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...
with bits packed in little-endian order in each byte.
repetitions: number of repetitions.
key_sizes: Keys and sizes of the measurements in the data.
Returns:
Dict mapping measurement key to a 2D array of boolean results. Each
array has shape (repetitions, size) with size for that measurement.
"""
bits_per_rep = sum(size for _, size in key_sizes)
total_bits = repetitions * bits_per_rep
byte_arr = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))
bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1).astype(bool)
bits = bits[:total_bits].reshape((repetitions, bits_per_rep))
results = {}
ofs = 0
for key, size in key_sizes:
results[key] = bits[:, ofs:ofs + size]
ofs += size
return results |
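A small worked example, assuming numpy is installed and unpack_results is importable; one 3-bit measurement keyed 'm', two repetitions, packed little-endian into a single byte:
# rep0 = [1, 0, 1], rep1 = [0, 1, 1]  ->  bit sequence 1,0,1,0,1,1
# little-endian packing: 1*1 + 0*2 + 1*4 + 0*8 + 1*16 + 1*32 = 53
out = unpack_results(bytes([53]), repetitions=2, key_sizes=[('m', 3)])
# out['m'] is a (2, 3) boolean array:
# [[ True, False,  True],
#  [False,  True,  True]]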
def find_faces(self, image, draw_box=False):
"""Uses a haarcascade to detect faces inside an image.
Args:
image: The image.
draw_box: If True, the image will be marked with a rectangle.
Return:
The faces as returned by OpenCV's detectMultiScale method for
cascades.
"""
frame_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
faces = self.cascade.detectMultiScale(
frame_gray,
scaleFactor=1.3,
minNeighbors=5,
minSize=(50, 50),
flags=0)
if draw_box:
for x, y, w, h in faces:
cv2.rectangle(image, (x, y),
(x + w, y + h), (0, 255, 0), 2)
return faces | Uses a haarcascade to detect faces inside an image.
Args:
image: The image.
draw_box: If True, the image will be marked with a rectangle.
Return:
The faces as returned by OpenCV's detectMultiScale method for
cascades. | Below is the the instruction that describes the task:
### Input:
Uses a haarcascade to detect faces inside an image.
Args:
image: The image.
draw_box: If True, the image will be marked with a rectangle.
Return:
The faces as returned by OpenCV's detectMultiScale method for
cascades.
### Response:
def find_faces(self, image, draw_box=False):
"""Uses a haarcascade to detect faces inside an image.
Args:
image: The image.
draw_box: If True, the image will be marked with a rectangle.
Return:
The faces as returned by OpenCV's detectMultiScale method for
cascades.
"""
frame_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
faces = self.cascade.detectMultiScale(
frame_gray,
scaleFactor=1.3,
minNeighbors=5,
minSize=(50, 50),
flags=0)
if draw_box:
for x, y, w, h in faces:
cv2.rectangle(image, (x, y),
(x + w, y + h), (0, 255, 0), 2)
return faces |
def init_default_config(self, path):
''' Initialize the config object and load the default configuration.
The path to the config file must be provided. The name of the
application is read from the config file.
The config file stores the description and the default values for
all configurations including the application name.
@param path: The path to the config config file.
'''
if not (os.path.exists(path) and os.path.isfile(path)):
raise AppConfigValueException('The given config config file does '
'not exist. ({0})'.format(path))
cfl = open(path, 'r')
data = json.load(cfl)
cfl.close()
for key in data.keys():
if 'application_name' == key:
self.application_name = data[key].lower()
continue
if 'application_author' == key:
self.application_author = data[key].lower()
continue
if 'application_version' == key:
self.application_version = data[key].lower()
continue
self._add_section_default(key, data[key]) | Initialize the config object and load the default configuration.
The path to the config file must be provided. The name of the
application is read from the config file.
The config file stores the description and the default values for
all configurations including the application name.
@param path: The path to the config config file. | Below is the the instruction that describes the task:
### Input:
Initialize the config object and load the default configuration.
The path to the config file must be provided. The name of the
application is read from the config file.
The config file stores the description and the default values for
all configurations including the application name.
@param path: The path to the config config file.
### Response:
def init_default_config(self, path):
''' Initialize the config object and load the default configuration.
The path to the config file must be provided. The name of the
application is read from the config file.
The config file stores the description and the default values for
all configurations including the application name.
@param path: The path to the config config file.
'''
if not (os.path.exists(path) and os.path.isfile(path)):
raise AppConfigValueException('The given config config file does '
'not exist. ({0})'.format(path))
cfl = open(path, 'r')
data = json.load(cfl)
cfl.close()
for key in data.keys():
if 'application_name' == key:
self.application_name = data[key].lower()
continue
if 'application_author' == key:
self.application_author = data[key].lower()
continue
if 'application_version' == key:
self.application_version = data[key].lower()
continue
self._add_section_default(key, data[key]) |
def set_style(self):
"""
Set font style with the following attributes:
'foreground_color', 'background_color', 'italic',
'bold' and 'underline'
"""
if self.current_format is None:
assert self.base_format is not None
self.current_format = QTextCharFormat(self.base_format)
# Foreground color
if self.foreground_color is None:
qcolor = self.base_format.foreground()
else:
cstr = self.ANSI_COLORS[self.foreground_color-30][self.intensity]
qcolor = QColor(cstr)
self.current_format.setForeground(qcolor)
# Background color
if self.background_color is None:
qcolor = self.base_format.background()
else:
cstr = self.ANSI_COLORS[self.background_color-40][self.intensity]
qcolor = QColor(cstr)
self.current_format.setBackground(qcolor)
font = self.current_format.font()
# Italic
if self.italic is None:
italic = self.base_format.fontItalic()
else:
italic = self.italic
font.setItalic(italic)
# Bold
if self.bold is None:
bold = self.base_format.font().bold()
else:
bold = self.bold
font.setBold(bold)
# Underline
if self.underline is None:
underline = self.base_format.font().underline()
else:
underline = self.underline
font.setUnderline(underline)
self.current_format.setFont(font) | Set font style with the following attributes:
'foreground_color', 'background_color', 'italic',
'bold' and 'underline' | Below is the the instruction that describes the task:
### Input:
Set font style with the following attributes:
'foreground_color', 'background_color', 'italic',
'bold' and 'underline'
### Response:
def set_style(self):
"""
Set font style with the following attributes:
'foreground_color', 'background_color', 'italic',
'bold' and 'underline'
"""
if self.current_format is None:
assert self.base_format is not None
self.current_format = QTextCharFormat(self.base_format)
# Foreground color
if self.foreground_color is None:
qcolor = self.base_format.foreground()
else:
cstr = self.ANSI_COLORS[self.foreground_color-30][self.intensity]
qcolor = QColor(cstr)
self.current_format.setForeground(qcolor)
# Background color
if self.background_color is None:
qcolor = self.base_format.background()
else:
cstr = self.ANSI_COLORS[self.background_color-40][self.intensity]
qcolor = QColor(cstr)
self.current_format.setBackground(qcolor)
font = self.current_format.font()
# Italic
if self.italic is None:
italic = self.base_format.fontItalic()
else:
italic = self.italic
font.setItalic(italic)
# Bold
if self.bold is None:
bold = self.base_format.font().bold()
else:
bold = self.bold
font.setBold(bold)
# Underline
if self.underline is None:
underline = self.base_format.font().underline()
else:
underline = self.underline
font.setUnderline(underline)
self.current_format.setFont(font) |
def normalize(body_part_tup,):
"""Normalize a tuple of BodyPart objects to a string.
Normalization is done by sorting the body_parts by the Content-Disposition headers,
which is typically of the form ``form-data; name="name_of_part"``.
"""
return '\n\n'.join(
[
'{}\n\n{}'.format(
str(p.headers[b'Content-Disposition'], p.encoding), p.text
)
for p in sorted(
body_part_tup, key=lambda p: p.headers[b'Content-Disposition']
)
]
) | Normalize a tuple of BodyPart objects to a string.
Normalization is done by sorting the body_parts by the Content-Disposition headers,
which is typically of the form ``form-data; name="name_of_part"``. | Below is the the instruction that describes the task:
### Input:
Normalize a tuple of BodyPart objects to a string.
Normalization is done by sorting the body_parts by the Content-Disposition headers,
which is typically of the form ``form-data; name="name_of_part"``.
### Response:
def normalize(body_part_tup,):
"""Normalize a tuple of BodyPart objects to a string.
Normalization is done by sorting the body_parts by the Content-Disposition headers,
which is typically of the form ``form-data; name="name_of_part"``.
"""
return '\n\n'.join(
[
'{}\n\n{}'.format(
str(p.headers[b'Content-Disposition'], p.encoding), p.text
)
for p in sorted(
body_part_tup, key=lambda p: p.headers[b'Content-Disposition']
)
]
) |
def run_subprocess(command, cwd=None, stdout=None, stderr=None, shell=False, beat_freq=None):
"""
Parameters
----------
command: command
cwd: current working directory
stdout: output info stream (must have 'write' method)
stderr: output error stream (must have 'write' method)
shell: see subprocess.Popen
beat_freq: if not none, stdout will be used at least every beat_freq (in seconds)
"""
sys.encoding = CONF.encoding
# prepare variables
stdout = sys.stdout if stdout is None else stdout
stderr = sys.stderr if stderr is None else stderr
# run subprocess
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
shell=shell,
universal_newlines=True
) as sub_p:
# link output streams
with redirect_stream(sub_p.stdout, stdout), redirect_stream(sub_p.stderr, stderr):
while True:
try:
sub_p.wait(timeout=beat_freq)
break
except subprocess.TimeoutExpired:
stdout.write("subprocess is still running\n")
if hasattr(sys.stdout, "flush"):
sys.stdout.flush()
return sub_p.returncode | Parameters
----------
command: command
cwd: current working directory
stdout: output info stream (must have 'write' method)
stderr: output error stream (must have 'write' method)
shell: see subprocess.Popen
beat_freq: if not none, stdout will be used at least every beat_freq (in seconds) | Below is the the instruction that describes the task:
### Input:
Parameters
----------
command: command
cwd: current working directory
stdout: output info stream (must have 'write' method)
stderr: output error stream (must have 'write' method)
shell: see subprocess.Popen
beat_freq: if not none, stdout will be used at least every beat_freq (in seconds)
### Response:
def run_subprocess(command, cwd=None, stdout=None, stderr=None, shell=False, beat_freq=None):
"""
Parameters
----------
command: command
cwd: current working directory
stdout: output info stream (must have 'write' method)
stderr: output error stream (must have 'write' method)
shell: see subprocess.Popen
beat_freq: if not none, stdout will be used at least every beat_freq (in seconds)
"""
sys.encoding = CONF.encoding
# prepare variables
stdout = sys.stdout if stdout is None else stdout
stderr = sys.stderr if stderr is None else stderr
# run subprocess
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
shell=shell,
universal_newlines=True
) as sub_p:
# link output streams
with redirect_stream(sub_p.stdout, stdout), redirect_stream(sub_p.stderr, stderr):
while True:
try:
sub_p.wait(timeout=beat_freq)
break
except subprocess.TimeoutExpired:
stdout.write("subprocess is still running\n")
if hasattr(sys.stdout, "flush"):
sys.stdout.flush()
return sub_p.returncode |
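A rough usage sketch, assuming this function's module-level helpers (CONF and redirect_stream) are importable alongside it; the command and streams here are illustrative:
import io
import sys
buf = io.StringIO()
rc = run_subprocess([sys.executable, '-c', 'print("hello")'], stdout=buf, stderr=sys.stderr, beat_freq=5)
# rc is the child's return code (0 on success); buf may receive the child's output
# plus "subprocess is still running" heartbeat lines, depending on how
# redirect_stream copies the pipes.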
def _shares_exec_prefix(basedir):
''' Whether a given base directory is on the system exec prefix
'''
import sys
prefix = sys.exec_prefix
return (prefix is not None and basedir.startswith(prefix)) | Whether a given base directory is on the system exec prefix | Below is the the instruction that describes the task:
### Input:
Whether a given base directory is on the system exec prefix
### Response:
def _shares_exec_prefix(basedir):
''' Whether a given base directory is on the system exec prefix
'''
import sys
prefix = sys.exec_prefix
return (prefix is not None and basedir.startswith(prefix)) |
def create(self, email, device_name, passphrase=None,
api_token=None, redirect_uri=None, **kwargs):
"""Create a new User object and add it to this Users collection.
In addition to creating a user, this call will create a device for that
user, whose device_token will be returned from this call. Store the
device_token, as it's required to complete Gem-Device authentication
after the user approves the device at the end of their signup flow.
If you lose the device_token returned from users.create, you'll have to
create a new device for the user to gain access to their account again.
Also, after this call, be sure to redirect the user to the location in
`mfa_uri` (second return value of this function) to complete their
account.
If you get a 409 Conflict error, then the user already exists in the Gem
system and you'll want to do a
`client.user(email).devices.create(device_name)`
Args:
email (str)
device_name (str): Human-readable name for the device through which
your Application will be authorized to access the new User's account.
passphrase (str, optional): A passphrase with which to encrypt a user
wallet. If not provided, a default_wallet parameter must be passed in
kwargs.
api_token (str, optional): Your app's API token. This is optional if
and only if the Client which will be calling this function already
has Gem-Application or Gem-Identify authentication.
redirect_uri (str, optional): A URI to which to redirect the User after
they confirm their Gem account.
**kwargs
Returns: device_token
"""
if not passphrase and u'default_wallet' not in kwargs:
raise ValueError("Usage: users.create(email, passphrase, device_name"
", api_token, redirect_uri)")
elif passphrase:
default_wallet = generate(passphrase, ['primary'])['primary']
else:
default_wallet = kwargs['default_wallet']
default_wallet['name'] = 'default'
default_wallet['primary_private_seed'] = default_wallet['encrypted_seed']
default_wallet['primary_public_seed'] = default_wallet['public_seed']
del default_wallet['encrypted_seed']
del default_wallet['public_seed']
del default_wallet['private_seed']
# If not supplied, we assume the client already has an api_token param.
if api_token:
self.client.authenticate_identify(api_token)
user_data = dict(email=email,
default_wallet=default_wallet,
device_name=device_name)
if redirect_uri:
user_data['redirect_uri'] = redirect_uri
if 'first_name' in kwargs:
user_data['first_name'] = kwargs['first_name']
if 'last_name' in kwargs:
user_data['last_name'] = kwargs['last_name']
try:
resource = self.resource.create(user_data)
except ResponseError as e:
if "conflict" in e.message:
raise ConflictError(
"This user already exists. Use "
"client.user(email).devices.create(name) to request "
"authorization from the user.")
raise e
return resource.attributes['metadata']['device_token'] | Create a new User object and add it to this Users collection.
In addition to creating a user, this call will create a device for that
user, whose device_token will be returned from this call. Store the
device_token, as it's required to complete Gem-Device authentication
after the user approves the device at the end of their signup flow.
If you lose the device_token returned from users.create, you'll have to
create a new device for the user to gain access to their account again.
Also, after this call, be sure to redirect the user to the location in
`mfa_uri` (second return value of this function) to complete their
account.
If you get a 409 Conflict error, then the user already exists in the Gem
system and you'll want to do a
`client.user(email).devices.create(device_name)`
Args:
email (str)
device_name (str): Human-readable name for the device through which
your Application will be authorized to access the new User's account.
passphrase (str, optional): A passphrase with which to encrypt a user
wallet. If not provided, a default_wallet parameter must be passed in
kwargs.
api_token (str, optional): Your app's API token. This is optional if
and only if the Client which will be calling this function already
has Gem-Application or Gem-Identify authentication.
redirect_uri (str, optional): A URI to which to redirect the User after
they confirm their Gem account.
**kwargs
Returns: device_token | Below is the the instruction that describes the task:
### Input:
Create a new User object and add it to this Users collection.
In addition to creating a user, this call will create a device for that
user, whose device_token will be returned from this call. Store the
device_token, as it's required to complete Gem-Device authentication
after the user approves the device at the end of their signup flow.
If you lose the device_token returned from users.create, you'll have to
create a new device for the user to gain access to their account again.
Also, after this call, be sure to redirect the user to the location in
`mfa_uri` (second return value of this function) to complete their
account.
If you get a 409 Conflict error, then the user already exists in the Gem
system and you'll want to do a
`client.user(email).devices.create(device_name)`
Args:
email (str)
device_name (str): Human-readable name for the device through which
your Application will be authorized to access the new User's account.
passphrase (str, optional): A passphrase with which to encrypt a user
wallet. If not provided, a default_wallet parameter must be passed in
kwargs.
api_token (str, optional): Your app's API token. This is optional if
and only if the Client which will be calling this function already
has Gem-Application or Gem-Identify authentication.
redirect_uri (str, optional): A URI to which to redirect the User after
they confirm their Gem account.
**kwargs
Returns: device_token
### Response:
def create(self, email, device_name, passphrase=None,
api_token=None, redirect_uri=None, **kwargs):
"""Create a new User object and add it to this Users collection.
In addition to creating a user, this call will create a device for that
user, whose device_token will be returned from this call. Store the
device_token, as it's required to complete Gem-Device authentication
after the user approves the device at the end of their signup flow.
If you lose the device_token returned from users.create, you'll have to
create a new device for the user to gain access to their account again.
Also, after this call, be sure to redirect the user to the location in
`mfa_uri` (second return value of this function) to complete their
account.
If you get a 409 Conflict error, then the user already exists in the Gem
system and you'll want to do a
`client.user(email).devices.create(device_name)`
Args:
email (str)
device_name (str): Human-readable name for the device through which
your Application will be authorized to access the new User's account.
passphrase (str, optional): A passphrase with which to encrypt a user
wallet. If not provided, a default_wallet parameter must be passed in
kwargs.
api_token (str, optional): Your app's API token. This is optional if
and only if the Client which will be calling this function already
has Gem-Application or Gem-Identify authentication.
redirect_uri (str, optional): A URI to which to redirect the User after
they confirm their Gem account.
**kwargs
Returns: device_token
"""
if not passphrase and u'default_wallet' not in kwargs:
raise ValueError("Usage: users.create(email, passphrase, device_name"
", api_token, redirect_uri)")
elif passphrase:
default_wallet = generate(passphrase, ['primary'])['primary']
else:
default_wallet = kwargs['default_wallet']
default_wallet['name'] = 'default'
default_wallet['primary_private_seed'] = default_wallet['encrypted_seed']
default_wallet['primary_public_seed'] = default_wallet['public_seed']
del default_wallet['encrypted_seed']
del default_wallet['public_seed']
del default_wallet['private_seed']
# If not supplied, we assume the client already has an api_token param.
if api_token:
self.client.authenticate_identify(api_token)
user_data = dict(email=email,
default_wallet=default_wallet,
device_name=device_name)
if redirect_uri:
user_data['redirect_uri'] = redirect_uri
if 'first_name' in kwargs:
user_data['first_name'] = kwargs['first_name']
if 'last_name' in kwargs:
user_data['last_name'] = kwargs['last_name']
try:
resource = self.resource.create(user_data)
except ResponseError as e:
if "conflict" in e.message:
raise ConflictError(
"This user already exists. Use "
"client.user(email).devices.create(name) to request "
"authorization from the user.")
raise e
return resource.attributes['metadata']['device_token'] |
def spia_matrices_to_excel(spia_matrices: Mapping[str, pd.DataFrame], path: str) -> None:
"""Export a SPIA data dictionary into an Excel sheet at the given path.
.. note::
# The R import should add the values:
# ["nodes"] from the columns
# ["title"] from the name of the file
# ["NumberOfReactions"] set to "0"
"""
writer = pd.ExcelWriter(path, engine='xlsxwriter')
for relation, df in spia_matrices.items():
df.to_excel(writer, sheet_name=relation, index=False)
# Save excel
writer.save() | Export a SPIA data dictionary into an Excel sheet at the given path.
.. note::
# The R import should add the values:
# ["nodes"] from the columns
# ["title"] from the name of the file
# ["NumberOfReactions"] set to "0" | Below is the the instruction that describes the task:
### Input:
Export a SPIA data dictionary into an Excel sheet at the given path.
.. note::
# The R import should add the values:
# ["nodes"] from the columns
# ["title"] from the name of the file
# ["NumberOfReactions"] set to "0"
### Response:
def spia_matrices_to_excel(spia_matrices: Mapping[str, pd.DataFrame], path: str) -> None:
"""Export a SPIA data dictionary into an Excel sheet at the given path.
.. note::
# The R import should add the values:
# ["nodes"] from the columns
# ["title"] from the name of the file
# ["NumberOfReactions"] set to "0"
"""
writer = pd.ExcelWriter(path, engine='xlsxwriter')
for relation, df in spia_matrices.items():
df.to_excel(writer, sheet_name=relation, index=False)
# Save excel
writer.save() |
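A small usage sketch, assuming pandas and the xlsxwriter engine are installed and the function is importable; the relation names and adjacency values are placeholders:
import pandas as pd
matrices = {'activation': pd.DataFrame([[0, 1], [0, 0]], columns=['A', 'B']),
            'inhibition': pd.DataFrame([[0, 0], [1, 0]], columns=['A', 'B'])}
spia_matrices_to_excel(matrices, 'spia_matrices.xlsx')  # writes one sheet per relation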
def _run_env(self):
"""
Augment the current environment providing the PYTHONUSERBASE.
"""
env = dict(os.environ)
env.update(
getattr(self, 'env', {}),
PYTHONUSERBASE=self.env_path,
PIP_USER="1",
)
self._disable_venv(env)
return env | Augment the current environment providing the PYTHONUSERBASE. | Below is the the instruction that describes the task:
### Input:
Augment the current environment providing the PYTHONUSERBASE.
### Response:
def _run_env(self):
"""
Augment the current environment providing the PYTHONUSERBASE.
"""
env = dict(os.environ)
env.update(
getattr(self, 'env', {}),
PYTHONUSERBASE=self.env_path,
PIP_USER="1",
)
self._disable_venv(env)
return env |
def p_casecontent_statement(self, p):
'casecontent_statement : casecontent_condition COLON basic_statement'
p[0] = Case(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | casecontent_statement : casecontent_condition COLON basic_statement | Below is the the instruction that describes the task:
### Input:
casecontent_statement : casecontent_condition COLON basic_statement
### Response:
def p_casecontent_statement(self, p):
'casecontent_statement : casecontent_condition COLON basic_statement'
p[0] = Case(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def write(self, output):
"""
Writes specified text to the underlying stream
Parameters
----------
output bytes-like object
Bytes to write
"""
self._stream.write(output)
if self._auto_flush:
self._stream.flush() | Writes specified text to the underlying stream
Parameters
----------
output bytes-like object
Bytes to write | Below is the the instruction that describes the task:
### Input:
Writes specified text to the underlying stream
Parameters
----------
output bytes-like object
Bytes to write
### Response:
def write(self, output):
"""
Writes specified text to the underlying stream
Parameters
----------
output bytes-like object
Bytes to write
"""
self._stream.write(output)
if self._auto_flush:
self._stream.flush() |
def L(self,*args,**kwargs):
"""
NAME:
L
PURPOSE:
calculate the angular momentum
INPUT:
(none)
OUTPUT:
angular momentum
HISTORY:
2010-09-15 - Written - Bovy (NYU)
"""
#Make sure you are not using physical coordinates
old_physical= kwargs.get('use_physical',None)
kwargs['use_physical']= False
Omega= kwargs.pop('Omega',None)
t= kwargs.pop('t',None)
thiso= self(*args,**kwargs)
if not len(thiso.shape) == 2: thiso= thiso.reshape((thiso.shape[0],1))
if len(thiso[:,0]) < 3:
raise AttributeError("'linearOrbit has no angular momentum")
elif len(thiso[:,0]) == 3 or len(thiso[:,0]) == 4:
if Omega is None:
out= thiso[0,:]*thiso[2,:]
else:
out= thiso[0,:]*(thiso[2,:]-Omega*t*thiso[0,:])
elif len(thiso[:,0]) == 5:
raise AttributeError("You must track the azimuth to get the angular momentum of a 3D Orbit")
else: #len(thiso[:,0]) == 6
vx= self.vx(*args,**kwargs)
vy= self.vy(*args,**kwargs)
vz= self.vz(*args,**kwargs)
x= self.x(*args,**kwargs)
y= self.y(*args,**kwargs)
z= self.z(*args,**kwargs)
out= nu.zeros((len(x),3))
out[:,0]= y*vz-z*vy
out[:,1]= z*vx-x*vz
out[:,2]= x*vy-y*vx
if not old_physical is None:
kwargs['use_physical']= old_physical
else:
kwargs.pop('use_physical')
return out | NAME:
L
PURPOSE:
calculate the angular momentum
INPUT:
(none)
OUTPUT:
angular momentum
HISTORY:
2010-09-15 - Written - Bovy (NYU) | Below is the the instruction that describes the task:
### Input:
NAME:
L
PURPOSE:
calculate the angular momentum
INPUT:
(none)
OUTPUT:
angular momentum
HISTORY:
2010-09-15 - Written - Bovy (NYU)
### Response:
def L(self,*args,**kwargs):
"""
NAME:
L
PURPOSE:
calculate the angular momentum
INPUT:
(none)
OUTPUT:
angular momentum
HISTORY:
2010-09-15 - Written - Bovy (NYU)
"""
#Make sure you are not using physical coordinates
old_physical= kwargs.get('use_physical',None)
kwargs['use_physical']= False
Omega= kwargs.pop('Omega',None)
t= kwargs.pop('t',None)
thiso= self(*args,**kwargs)
if not len(thiso.shape) == 2: thiso= thiso.reshape((thiso.shape[0],1))
if len(thiso[:,0]) < 3:
raise AttributeError("'linearOrbit has no angular momentum")
elif len(thiso[:,0]) == 3 or len(thiso[:,0]) == 4:
if Omega is None:
out= thiso[0,:]*thiso[2,:]
else:
out= thiso[0,:]*(thiso[2,:]-Omega*t*thiso[0,:])
elif len(thiso[:,0]) == 5:
raise AttributeError("You must track the azimuth to get the angular momentum of a 3D Orbit")
else: #len(thiso[:,0]) == 6
vx= self.vx(*args,**kwargs)
vy= self.vy(*args,**kwargs)
vz= self.vz(*args,**kwargs)
x= self.x(*args,**kwargs)
y= self.y(*args,**kwargs)
z= self.z(*args,**kwargs)
out= nu.zeros((len(x),3))
out[:,0]= y*vz-z*vy
out[:,1]= z*vx-x*vz
out[:,2]= x*vy-y*vx
if not old_physical is None:
kwargs['use_physical']= old_physical
else:
kwargs.pop('use_physical')
return out |
def metadata_add_description(self):
""" Metadata: add description """
service_description = {}
if (self.args.json):
service_description = json.loads(self.args.json)
if (self.args.url):
if "url" in service_description:
raise Exception("json service description already contains url field")
service_description["url"] = self.args.url
if (self.args.description):
if "description" in service_description:
raise Exception("json service description already contains description field")
service_description["description"] = self.args.description
metadata = load_mpe_service_metadata(self.args.metadata_file)
# merge with old service_description if necessary
if ("service_description" in metadata):
service_description = {**metadata["service_description"], **service_description}
metadata.set_simple_field("service_description", service_description)
metadata.save_pretty(self.args.metadata_file) | Metadata: add description | Below is the the instruction that describes the task:
### Input:
Metadata: add description
### Response:
def metadata_add_description(self):
""" Metadata: add description """
service_description = {}
if (self.args.json):
service_description = json.loads(self.args.json)
if (self.args.url):
if "url" in service_description:
raise Exception("json service description already contains url field")
service_description["url"] = self.args.url
if (self.args.description):
if "description" in service_description:
raise Exception("json service description already contains description field")
service_description["description"] = self.args.description
metadata = load_mpe_service_metadata(self.args.metadata_file)
# merge with old service_description if necessary
if ("service_description" in metadata):
service_description = {**metadata["service_description"], **service_description}
metadata.set_simple_field("service_description", service_description)
metadata.save_pretty(self.args.metadata_file) |
def FromReadings(cls, uuid, readings, root_key=AuthProvider.NoKey, signer=None,
report_id=IOTileReading.InvalidReadingID, selector=0xFFFF, streamer=0, sent_timestamp=0):
"""Generate an instance of the report format from a list of readings and a uuid.
The signed list report is created using the passed readings and signed using the specified method
and AuthProvider. If no auth provider is specified, the report is signed using the default authorization
chain.
Args:
uuid (int): The uuid of the device that this report came from
readings (list): A list of IOTileReading objects containing the data in the report
root_key (int): The key that should be used to sign the report (must be supported
by an auth_provider)
signer (AuthProvider): An optional preconfigured AuthProvider that should be used to sign this
report. If no AuthProvider is provided, the default ChainedAuthProvider is used.
report_id (int): The id of the report. If not provided it defaults to IOTileReading.InvalidReadingID.
Note that you can specify anything you want for the report id but for actual IOTile devices
the report id will always be greater than the id of all of the readings contained in the report
since devices generate ids sequentially.
selector (int): The streamer selector of this report. This can be anything but if the report came from
a device, it would correspond with the query the device used to pick readings to go into the report.
streamer (int): The streamer id that this reading was sent from.
sent_timestamp (int): The device's uptime that sent this report.
"""
lowest_id = IOTileReading.InvalidReadingID
highest_id = IOTileReading.InvalidReadingID
report_len = 20 + 16*len(readings) + 24
len_low = report_len & 0xFF
len_high = report_len >> 8
unique_readings = [x.reading_id for x in readings if x.reading_id != IOTileReading.InvalidReadingID]
if len(unique_readings) > 0:
lowest_id = min(unique_readings)
highest_id = max(unique_readings)
header = struct.pack("<BBHLLLBBH", cls.ReportType, len_low, len_high, uuid, report_id,
sent_timestamp, root_key, streamer, selector)
header = bytearray(header)
packed_readings = bytearray()
for reading in readings:
packed_reading = struct.pack("<HHLLL", reading.stream, 0, reading.reading_id,
reading.raw_time, reading.value)
packed_readings += bytearray(packed_reading)
footer_stats = struct.pack("<LL", lowest_id, highest_id)
if signer is None:
signer = ChainedAuthProvider()
# If we are supposed to encrypt this report, do the encryption
if root_key != signer.NoKey:
enc_data = packed_readings
try:
result = signer.encrypt_report(uuid, root_key, enc_data, report_id=report_id,
sent_timestamp=sent_timestamp)
except NotFoundError:
raise ExternalError("Could not encrypt report because no AuthProvider supported "
"the requested encryption method for the requested device",
device_id=uuid, root_key=root_key)
signed_data = header + result['data'] + footer_stats
else:
signed_data = header + packed_readings + footer_stats
try:
signature = signer.sign_report(uuid, root_key, signed_data, report_id=report_id,
sent_timestamp=sent_timestamp)
except NotFoundError:
raise ExternalError("Could not sign report because no AuthProvider supported the requested "
"signature method for the requested device", device_id=uuid, root_key=root_key)
footer = struct.pack("16s", bytes(signature['signature'][:16]))
footer = bytearray(footer)
data = signed_data + footer
return SignedListReport(data) | Generate an instance of the report format from a list of readings and a uuid.
The signed list report is created using the passed readings and signed using the specified method
and AuthProvider. If no auth provider is specified, the report is signed using the default authorization
chain.
Args:
uuid (int): The uuid of the deviec that this report came from
readings (list): A list of IOTileReading objects containing the data in the report
root_key (int): The key that should be used to sign the report (must be supported
by an auth_provider)
signer (AuthProvider): An optional preconfigured AuthProvider that should be used to sign this
report. If no AuthProvider is provided, the default ChainedAuthProvider is used.
report_id (int): The id of the report. If not provided it defaults to IOTileReading.InvalidReadingID.
Note that you can specify anything you want for the report id but for actual IOTile devices
the report id will always be greater than the id of all of the readings contained in the report
since devices generate ids sequentially.
selector (int): The streamer selector of this report. This can be anything but if the report came from
a device, it would correspond with the query the device used to pick readings to go into the report.
streamer (int): The streamer id that this reading was sent from.
sent_timestamp (int): The device's uptime that sent this report. | Below is the the instruction that describes the task:
### Input:
Generate an instance of the report format from a list of readings and a uuid.
The signed list report is created using the passed readings and signed using the specified method
and AuthProvider. If no auth provider is specified, the report is signed using the default authorization
chain.
Args:
uuid (int): The uuid of the device that this report came from
readings (list): A list of IOTileReading objects containing the data in the report
root_key (int): The key that should be used to sign the report (must be supported
by an auth_provider)
signer (AuthProvider): An optional preconfigured AuthProvider that should be used to sign this
report. If no AuthProvider is provided, the default ChainedAuthProvider is used.
report_id (int): The id of the report. If not provided it defaults to IOTileReading.InvalidReadingID.
Note that you can specify anything you want for the report id but for actual IOTile devices
the report id will always be greater than the id of all of the readings contained in the report
since devices generate ids sequentially.
selector (int): The streamer selector of this report. This can be anything but if the report came from
a device, it would correspond with the query the device used to pick readings to go into the report.
streamer (int): The streamer id that this reading was sent from.
sent_timestamp (int): The device's uptime that sent this report.
### Response:
def FromReadings(cls, uuid, readings, root_key=AuthProvider.NoKey, signer=None,
report_id=IOTileReading.InvalidReadingID, selector=0xFFFF, streamer=0, sent_timestamp=0):
"""Generate an instance of the report format from a list of readings and a uuid.
The signed list report is created using the passed readings and signed using the specified method
and AuthProvider. If no auth provider is specified, the report is signed using the default authorization
chain.
Args:
uuid (int): The uuid of the deviec that this report came from
readings (list): A list of IOTileReading objects containing the data in the report
root_key (int): The key that should be used to sign the report (must be supported
by an auth_provider)
signer (AuthProvider): An optional preconfigured AuthProvider that should be used to sign this
report. If no AuthProvider is provided, the default ChainedAuthProvider is used.
report_id (int): The id of the report. If not provided it defaults to IOTileReading.InvalidReadingID.
Note that you can specify anything you want for the report id but for actual IOTile devices
the report id will always be greater than the id of all of the readings contained in the report
since devices generate ids sequentially.
selector (int): The streamer selector of this report. This can be anything but if the report came from
a device, it would correspond with the query the device used to pick readings to go into the report.
streamer (int): The streamer id that this reading was sent from.
sent_timestamp (int): The device's uptime that sent this report.
"""
lowest_id = IOTileReading.InvalidReadingID
highest_id = IOTileReading.InvalidReadingID
report_len = 20 + 16*len(readings) + 24
len_low = report_len & 0xFF
len_high = report_len >> 8
unique_readings = [x.reading_id for x in readings if x.reading_id != IOTileReading.InvalidReadingID]
if len(unique_readings) > 0:
lowest_id = min(unique_readings)
highest_id = max(unique_readings)
header = struct.pack("<BBHLLLBBH", cls.ReportType, len_low, len_high, uuid, report_id,
sent_timestamp, root_key, streamer, selector)
header = bytearray(header)
packed_readings = bytearray()
for reading in readings:
packed_reading = struct.pack("<HHLLL", reading.stream, 0, reading.reading_id,
reading.raw_time, reading.value)
packed_readings += bytearray(packed_reading)
footer_stats = struct.pack("<LL", lowest_id, highest_id)
if signer is None:
signer = ChainedAuthProvider()
# If we are supposed to encrypt this report, do the encryption
if root_key != signer.NoKey:
enc_data = packed_readings
try:
result = signer.encrypt_report(uuid, root_key, enc_data, report_id=report_id,
sent_timestamp=sent_timestamp)
except NotFoundError:
raise ExternalError("Could not encrypt report because no AuthProvider supported "
"the requested encryption method for the requested device",
device_id=uuid, root_key=root_key)
signed_data = header + result['data'] + footer_stats
else:
signed_data = header + packed_readings + footer_stats
try:
signature = signer.sign_report(uuid, root_key, signed_data, report_id=report_id,
sent_timestamp=sent_timestamp)
except NotFoundError:
raise ExternalError("Could not sign report because no AuthProvider supported the requested "
"signature method for the requested device", device_id=uuid, root_key=root_key)
footer = struct.pack("16s", bytes(signature['signature'][:16]))
footer = bytearray(footer)
data = signed_data + footer
return SignedListReport(data) |
def get_max_ptrm_check(ptrm_checks_included_temps, ptrm_checks_all_temps, ptrm_x, t_Arai, x_Arai):
"""
input: ptrm_checks_included_temps, ptrm_checks_all_temps, ptrm_x, t_Arai, x_Arai.
sorts through included ptrm_checks and finds the largest ptrm check diff,
the sum of the total diffs,
and the percentage of the largest check / original measurement at that temperature step
output: max_diff, sum_diffs, check_percent, sum_abs_diffs.
"""
if not ptrm_checks_included_temps:
return [], float('nan'), float('nan'), float('nan'), float('nan')
diffs = []
abs_diffs = []
x_Arai_compare = []
ptrm_compare = []
check_percents = []
ptrm_checks_all_temps = list(ptrm_checks_all_temps)
for check in ptrm_checks_included_temps: # goes through each included temperature step
ptrm_ind = ptrm_checks_all_temps.index(check) # indexes the number of the check
ptrm_check = ptrm_x[ptrm_ind] # x value at that temperature step
ptrm_compare.append(ptrm_check) #
arai_ind = t_Arai.index(check)
ptrm_orig = x_Arai[arai_ind]
x_Arai_compare.append(ptrm_orig)
diff = ptrm_orig - ptrm_check
diffs.append(diff)
abs_diffs.append(abs(diff))
if ptrm_orig == 0:
check_percents.append(0)
else:
check_percents.append((old_div(abs(diff), ptrm_orig)) * 100)
max_diff = max(abs_diffs)
check_percent = max(check_percents)
sum_diffs = abs(sum(diffs))
sum_abs_diffs = sum(abs_diffs)
return diffs, max_diff, sum_diffs, check_percent, sum_abs_diffs | input: ptrm_checks_included_temps, ptrm_checks_all_temps, ptrm_x, t_Arai, x_Arai.
sorts through included ptrm_checks and finds the largest ptrm check diff,
the sum of the total diffs,
and the percentage of the largest check / original measurement at that temperature step
output: max_diff, sum_diffs, check_percent, sum_abs_diffs. | Below is the the instruction that describes the task:
### Input:
input: ptrm_checks_included_temps, ptrm_checks_all_temps, ptrm_x, t_Arai, x_Arai.
sorts through included ptrm_checks and finds the largest ptrm check diff,
the sum of the total diffs,
and the percentage of the largest check / original measurement at that temperature step
output: max_diff, sum_diffs, check_percent, sum_abs_diffs.
### Response:
def get_max_ptrm_check(ptrm_checks_included_temps, ptrm_checks_all_temps, ptrm_x, t_Arai, x_Arai):
"""
input: ptrm_checks_included_temps, ptrm_checks_all_temps, ptrm_x, t_Arai, x_Arai.
sorts through included ptrm_checks and finds the largest ptrm check diff,
the sum of the total diffs,
and the percentage of the largest check / original measurement at that temperature step
output: max_diff, sum_diffs, check_percent, sum_abs_diffs.
"""
if not ptrm_checks_included_temps:
return [], float('nan'), float('nan'), float('nan'), float('nan')
diffs = []
abs_diffs = []
x_Arai_compare = []
ptrm_compare = []
check_percents = []
ptrm_checks_all_temps = list(ptrm_checks_all_temps)
for check in ptrm_checks_included_temps: # goes through each included temperature step
ptrm_ind = ptrm_checks_all_temps.index(check) # indexes the number of the check
ptrm_check = ptrm_x[ptrm_ind] # x value at that temperature step
ptrm_compare.append(ptrm_check) #
arai_ind = t_Arai.index(check)
ptrm_orig = x_Arai[arai_ind]
x_Arai_compare.append(ptrm_orig)
diff = ptrm_orig - ptrm_check
diffs.append(diff)
abs_diffs.append(abs(diff))
if ptrm_orig == 0:
check_percents.append(0)
else:
check_percents.append((old_div(abs(diff), ptrm_orig)) * 100)
max_diff = max(abs_diffs)
check_percent = max(check_percents)
sum_diffs = abs(sum(diffs))
sum_abs_diffs = sum(abs_diffs)
return diffs, max_diff, sum_diffs, check_percent, sum_abs_diffs |
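A tiny worked example, assuming the function and the old_div helper it calls (from past.utils) are importable; the values are chosen so the arithmetic is exact:
t_Arai = [0, 100, 200]           # Arai plot temperature steps
x_Arai = [2.0, 1.0, 0.5]         # pTRM gained at each step
ptrm_checks_all_temps = [100]    # temperatures where pTRM checks were performed
ptrm_x = [0.5]                   # pTRM measured during the check at 100
included = [100]                 # checks falling inside the chosen interval
get_max_ptrm_check(included, ptrm_checks_all_temps, ptrm_x, t_Arai, x_Arai)
# -> ([0.5], 0.5, 0.5, 50.0, 0.5)
#    i.e. diffs, max_diff, sum_diffs, check_percent (in %), sum_abs_diffs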
def cmRecall(cm, average=True):
"""
Calculates recall using :class:`~ignite.metrics.ConfusionMatrix` metric.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
average (bool, optional): if True metric value is averaged over all classes
Returns:
MetricsLambda
"""
# Increase floating point precision
cm = cm.type(torch.float64)
recall = cm.diag() / (cm.sum(dim=1) + 1e-15)
if average:
return recall.mean()
return recall | Calculates recall using :class:`~ignite.metrics.ConfusionMatrix` metric.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
average (bool, optional): if True metric value is averaged over all classes
Returns:
MetricsLambda | Below is the the instruction that describes the task:
### Input:
Calculates recall using :class:`~ignite.metrics.ConfusionMatrix` metric.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
average (bool, optional): if True metric value is averaged over all classes
Returns:
MetricsLambda
### Response:
def cmRecall(cm, average=True):
"""
Calculates recall using :class:`~ignite.metrics.ConfusionMatrix` metric.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
average (bool, optional): if True metric value is averaged over all classes
Returns:
MetricsLambda
"""
# Increase floating point precision
cm = cm.type(torch.float64)
recall = cm.diag() / (cm.sum(dim=1) + 1e-15)
if average:
return recall.mean()
return recall |
def set_state(self, site, timestamp=None):
"""Write status dict to client status file.
FIXME - should have some file lock to avoid race
"""
parser = ConfigParser()
parser.read(self.status_file)
status_section = 'incremental'
if (not parser.has_section(status_section)):
parser.add_section(status_section)
if (timestamp is None):
parser.remove_option(
status_section,
self.config_site_to_name(site))
else:
parser.set(
status_section,
self.config_site_to_name(site),
str(timestamp))
with open(self.status_file, 'w') as configfile:
parser.write(configfile)
configfile.close() | Write status dict to client status file.
FIXME - should have some file lock to avoid race | Below is the the instruction that describes the task:
### Input:
Write status dict to client status file.
FIXME - should have some file lock to avoid race
### Response:
def set_state(self, site, timestamp=None):
"""Write status dict to client status file.
FIXME - should have some file lock to avoid race
"""
parser = ConfigParser()
parser.read(self.status_file)
status_section = 'incremental'
if (not parser.has_section(status_section)):
parser.add_section(status_section)
if (timestamp is None):
parser.remove_option(
status_section,
self.config_site_to_name(site))
else:
parser.set(
status_section,
self.config_site_to_name(site),
str(timestamp))
with open(self.status_file, 'w') as configfile:
parser.write(configfile)
configfile.close() |
def get_blast(pdb_id, chain_id='A'):
"""
Return BLAST search results for a given PDB ID
The key of the output dict() that outputs the full search results is
'BlastOutput_iterations'
To get a list of just the results without the metadata of the search use:
hits = full_results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
chain_id : string
A single character designating the chain ID of interest
Returns
-------
out : dict()
A nested dict() consisting of the BLAST search results and all associated metadata
If you just want the hits, look under four levels of keys:
results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
Examples
--------
>>> blast_results = get_blast('2F5N', chain_id='A')
>>> just_hits = blast_results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
>>> print(just_hits[50]['Hit_hsps']['Hsp']['Hsp_hseq'])
PELPEVETVRRELEKRIVGQKIISIEATYPRMVL--TGFEQLKKELTGKTIQGISRRGKYLIFEIGDDFRLISHLRMEGKYRLATLDAPREKHDHL
TMKFADG-QLIYADVRKFGTWELISTDQVLPYFLKKKIGPEPTYEDFDEKLFREKLRKSTKKIKPYLLEQTLVAGLGNIYVDEVLWLAKIHPEKET
NQLIESSIHLLHDSIIEILQKAIKLGGSSIRTY-SALGSTGKMQNELQVYGKTGEKCSRCGAEIQKIKVAGRGTHFCPVCQQ
"""
raw_results = get_raw_blast(pdb_id, output_form='XML', chain_id=chain_id)
out = xmltodict.parse(raw_results, process_namespaces=True)
out = to_dict(out)
out = out['BlastOutput']
return out | Return BLAST search results for a given PDB ID
The key of the output dict() that outputs the full search results is
'BlastOutput_iterations'
To get a list of just the results without the metadata of the search use:
hits = full_results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
chain_id : string
A single character designating the chain ID of interest
Returns
-------
out : dict()
A nested dict() consisting of the BLAST search results and all associated metadata
If you just want the hits, look under four levels of keys:
results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
Examples
--------
>>> blast_results = get_blast('2F5N', chain_id='A')
>>> just_hits = blast_results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
>>> print(just_hits[50]['Hit_hsps']['Hsp']['Hsp_hseq'])
PELPEVETVRRELEKRIVGQKIISIEATYPRMVL--TGFEQLKKELTGKTIQGISRRGKYLIFEIGDDFRLISHLRMEGKYRLATLDAPREKHDHL
TMKFADG-QLIYADVRKFGTWELISTDQVLPYFLKKKIGPEPTYEDFDEKLFREKLRKSTKKIKPYLLEQTLVAGLGNIYVDEVLWLAKIHPEKET
NQLIESSIHLLHDSIIEILQKAIKLGGSSIRTY-SALGSTGKMQNELQVYGKTGEKCSRCGAEIQKIKVAGRGTHFCPVCQQ | Below is the the instruction that describes the task:
### Input:
Return BLAST search results for a given PDB ID
The key of the output dict() that outputs the full search results is
'BlastOutput_iterations'
To get a list of just the results without the metadata of the search use:
hits = full_results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
chain_id : string
A single character designating the chain ID of interest
Returns
-------
out : dict()
A nested dict() consisting of the BLAST search results and all associated metadata
If you just want the hits, look under four levels of keys:
results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
Examples
--------
>>> blast_results = get_blast('2F5N', chain_id='A')
>>> just_hits = blast_results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
>>> print(just_hits[50]['Hit_hsps']['Hsp']['Hsp_hseq'])
PELPEVETVRRELEKRIVGQKIISIEATYPRMVL--TGFEQLKKELTGKTIQGISRRGKYLIFEIGDDFRLISHLRMEGKYRLATLDAPREKHDHL
TMKFADG-QLIYADVRKFGTWELISTDQVLPYFLKKKIGPEPTYEDFDEKLFREKLRKSTKKIKPYLLEQTLVAGLGNIYVDEVLWLAKIHPEKET
NQLIESSIHLLHDSIIEILQKAIKLGGSSIRTY-SALGSTGKMQNELQVYGKTGEKCSRCGAEIQKIKVAGRGTHFCPVCQQ
### Response:
def get_blast(pdb_id, chain_id='A'):
"""
Return BLAST search results for a given PDB ID
The key of the output dict() that outputs the full search results is
'BlastOutput_iterations'
To get a list of just the results without the metadata of the search use:
hits = full_results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
chain_id : string
A single character designating the chain ID of interest
Returns
-------
out : dict()
A nested dict() consisting of the BLAST search results and all associated metadata
If you just want the hits, look under four levels of keys:
results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
Examples
--------
>>> blast_results = get_blast('2F5N', chain_id='A')
>>> just_hits = blast_results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
>>> print(just_hits[50]['Hit_hsps']['Hsp']['Hsp_hseq'])
PELPEVETVRRELEKRIVGQKIISIEATYPRMVL--TGFEQLKKELTGKTIQGISRRGKYLIFEIGDDFRLISHLRMEGKYRLATLDAPREKHDHL
TMKFADG-QLIYADVRKFGTWELISTDQVLPYFLKKKIGPEPTYEDFDEKLFREKLRKSTKKIKPYLLEQTLVAGLGNIYVDEVLWLAKIHPEKET
NQLIESSIHLLHDSIIEILQKAIKLGGSSIRTY-SALGSTGKMQNELQVYGKTGEKCSRCGAEIQKIKVAGRGTHFCPVCQQ
"""
raw_results = get_raw_blast(pdb_id, output_form='XML', chain_id=chain_id)
out = xmltodict.parse(raw_results, process_namespaces=True)
out = to_dict(out)
out = out['BlastOutput']
return out |
def check_include_exclude(attributes):
"""Check __include__ and __exclude__ attributes.
:type attributes: dict
"""
include = attributes.get('__include__', tuple())
exclude = attributes.get('__exclude__', tuple())
if not isinstance(include, tuple):
raise TypeError("Attribute __include__ must be a tuple.")
if not isinstance(exclude, tuple):
raise TypeError("Attribute __exclude__ must be a tuple.")
if all((not include, not exclude)):
return None
if all((include, exclude)):
raise AttributeError("Usage of __include__ and __exclude__ "
"at the same time is prohibited.") | Check __include__ and __exclude__ attributes.
:type attributes: dict | Below is the the instruction that describes the task:
### Input:
Check __include__ and __exclude__ attributes.
:type attributes: dict
### Response:
def check_include_exclude(attributes):
"""Check __include__ and __exclude__ attributes.
:type attributes: dict
"""
include = attributes.get('__include__', tuple())
exclude = attributes.get('__exclude__', tuple())
if not isinstance(include, tuple):
raise TypeError("Attribute __include__ must be a tuple.")
if not isinstance(exclude, tuple):
raise TypeError("Attribute __exclude__ must be a tuple.")
if all((not include, not exclude)):
return None
if all((include, exclude)):
raise AttributeError("Usage of __include__ and __exclude__ "
"at the same time is prohibited.") |
def get_sampletype_info(self, obj):
"""Returns the info for a Sample Type
"""
info = self.get_base_info(obj)
# Bika Setup folder
bika_setup = api.get_bika_setup()
# bika samplepoints
bika_samplepoints = bika_setup.bika_samplepoints
bika_samplepoints_uid = api.get_uid(bika_samplepoints)
# bika analysisspecs
bika_analysisspecs = bika_setup.bika_analysisspecs
bika_analysisspecs_uid = api.get_uid(bika_analysisspecs)
# client
client = self.get_client()
client_uid = client and api.get_uid(client) or ""
# sample matrix
sample_matrix = obj.getSampleMatrix()
sample_matrix_uid = sample_matrix and sample_matrix.UID() or ""
sample_matrix_title = sample_matrix and sample_matrix.Title() or ""
# container type
container_type = obj.getContainerType()
container_type_uid = container_type and container_type.UID() or ""
container_type_title = container_type and container_type.Title() or ""
# sample points
sample_points = obj.getSamplePoints()
sample_point_uids = map(lambda sp: sp.UID(), sample_points)
sample_point_titles = map(lambda sp: sp.Title(), sample_points)
info.update({
"prefix": obj.getPrefix(),
"minimum_volume": obj.getMinimumVolume(),
"hazardous": obj.getHazardous(),
"retention_period": obj.getRetentionPeriod(),
"sample_matrix_uid": sample_matrix_uid,
"sample_matrix_title": sample_matrix_title,
"container_type_uid": container_type_uid,
"container_type_title": container_type_title,
"sample_point_uids": sample_point_uids,
"sample_point_titles": sample_point_titles,
})
# catalog queries for UI field filtering
filter_queries = {
"samplepoint": {
"getSampleTypeTitles": [obj.Title(), ''],
"getClientUID": [client_uid, bika_samplepoints_uid],
"sort_order": "descending",
},
"specification": {
"getSampleTypeTitle": obj.Title(),
"getClientUID": [client_uid, bika_analysisspecs_uid],
"sort_order": "descending",
}
}
info["filter_queries"] = filter_queries
return info | Returns the info for a Sample Type | Below is the the instruction that describes the task:
### Input:
Returns the info for a Sample Type
### Response:
def get_sampletype_info(self, obj):
"""Returns the info for a Sample Type
"""
info = self.get_base_info(obj)
# Bika Setup folder
bika_setup = api.get_bika_setup()
# bika samplepoints
bika_samplepoints = bika_setup.bika_samplepoints
bika_samplepoints_uid = api.get_uid(bika_samplepoints)
# bika analysisspecs
bika_analysisspecs = bika_setup.bika_analysisspecs
bika_analysisspecs_uid = api.get_uid(bika_analysisspecs)
# client
client = self.get_client()
client_uid = client and api.get_uid(client) or ""
# sample matrix
sample_matrix = obj.getSampleMatrix()
sample_matrix_uid = sample_matrix and sample_matrix.UID() or ""
sample_matrix_title = sample_matrix and sample_matrix.Title() or ""
# container type
container_type = obj.getContainerType()
container_type_uid = container_type and container_type.UID() or ""
container_type_title = container_type and container_type.Title() or ""
# sample points
sample_points = obj.getSamplePoints()
sample_point_uids = map(lambda sp: sp.UID(), sample_points)
sample_point_titles = map(lambda sp: sp.Title(), sample_points)
info.update({
"prefix": obj.getPrefix(),
"minimum_volume": obj.getMinimumVolume(),
"hazardous": obj.getHazardous(),
"retention_period": obj.getRetentionPeriod(),
"sample_matrix_uid": sample_matrix_uid,
"sample_matrix_title": sample_matrix_title,
"container_type_uid": container_type_uid,
"container_type_title": container_type_title,
"sample_point_uids": sample_point_uids,
"sample_point_titles": sample_point_titles,
})
# catalog queries for UI field filtering
filter_queries = {
"samplepoint": {
"getSampleTypeTitles": [obj.Title(), ''],
"getClientUID": [client_uid, bika_samplepoints_uid],
"sort_order": "descending",
},
"specification": {
"getSampleTypeTitle": obj.Title(),
"getClientUID": [client_uid, bika_analysisspecs_uid],
"sort_order": "descending",
}
}
info["filter_queries"] = filter_queries
return info |
def templates(self):
"""Iterate over the defined Templates."""
template = lib.EnvGetNextDeftemplate(self._env, ffi.NULL)
while template != ffi.NULL:
yield Template(self._env, template)
template = lib.EnvGetNextDeftemplate(self._env, template) | Iterate over the defined Templates. | Below is the the instruction that describes the task:
### Input:
Iterate over the defined Templates.
### Response:
def templates(self):
"""Iterate over the defined Templates."""
template = lib.EnvGetNextDeftemplate(self._env, ffi.NULL)
while template != ffi.NULL:
yield Template(self._env, template)
template = lib.EnvGetNextDeftemplate(self._env, template) |
def validate_field_matches_type(field, value, field_type, select_items=None, _min=None, _max=None):
"""Validate a config field against a specific type."""
if (field_type == defs.TEXT_TYPE and not isinstance(value, six.string_types)) or \
(field_type == defs.STRING_TYPE and not isinstance(value, six.string_types)) or \
(field_type == defs.BOOLEAN_TYPE and not isinstance(value, bool)) or \
(field_type == defs.INTEGER_TYPE and not isinstance(value, int)):
raise exceptions.ConfigFieldTypeMismatch(field, value, field_type)
if field_type == defs.INTEGER_TYPE:
if _min and value < _min:
raise exceptions.ConfigFieldTypeMismatch(field, value, "must be higher than {}".format(_min))
if _max and value > _max:
raise exceptions.ConfigFieldTypeMismatch(field, value, "must be lower than {}".format(_max))
if field_type == defs.SELECT_TYPE:
from honeycomb.utils.plugin_utils import get_select_items
items = get_select_items(select_items)
if value not in items:
raise exceptions.ConfigFieldTypeMismatch(field, value, "one of: {}".format(", ".join(items))) | Validate a config field against a specific type. | Below is the the instruction that describes the task:
### Input:
Validate a config field against a specific type.
### Response:
def validate_field_matches_type(field, value, field_type, select_items=None, _min=None, _max=None):
"""Validate a config field against a specific type."""
if (field_type == defs.TEXT_TYPE and not isinstance(value, six.string_types)) or \
(field_type == defs.STRING_TYPE and not isinstance(value, six.string_types)) or \
(field_type == defs.BOOLEAN_TYPE and not isinstance(value, bool)) or \
(field_type == defs.INTEGER_TYPE and not isinstance(value, int)):
raise exceptions.ConfigFieldTypeMismatch(field, value, field_type)
if field_type == defs.INTEGER_TYPE:
if _min and value < _min:
raise exceptions.ConfigFieldTypeMismatch(field, value, "must be higher than {}".format(_min))
if _max and value > _max:
raise exceptions.ConfigFieldTypeMismatch(field, value, "must be lower than {}".format(_max))
if field_type == defs.SELECT_TYPE:
from honeycomb.utils.plugin_utils import get_select_items
items = get_select_items(select_items)
if value not in items:
raise exceptions.ConfigFieldTypeMismatch(field, value, "one of: {}".format(", ".join(items))) |
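A hedged example of calling the validator, assuming the type constants and exception live in honeycomb's defs and exceptions modules as referenced above; the field name and bounds are made up.
from honeycomb import defs, exceptions
# Passes silently: an integer within the made-up bounds.
validate_field_matches_type("port", 8080, defs.INTEGER_TYPE, _min=1, _max=65535)
try:
    validate_field_matches_type("port", 70000, defs.INTEGER_TYPE, _min=1, _max=65535)
except exceptions.ConfigFieldTypeMismatch as err:
    print(err)  # value exceeds the configured maximum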
def get(self, name: Text, final: C) -> C:
"""
Get the function to call which will run all middlewares.
:param name: Name of the function to be called
:param final: Function to call at the bottom of the stack (that's the
one provided by the implementer).
:return:
"""
# noinspection PyTypeChecker
return Caller(self, name, final) | Get the function to call which will run all middlewares.
:param name: Name of the function to be called
:param final: Function to call at the bottom of the stack (that's the
one provided by the implementer).
:return: | Below is the the instruction that describes the task:
### Input:
Get the function to call which will run all middlewares.
:param name: Name of the function to be called
:param final: Function to call at the bottom of the stack (that's the
one provided by the implementer).
:return:
### Response:
def get(self, name: Text, final: C) -> C:
"""
Get the function to call which will run all middlewares.
:param name: Name of the function to be called
:param final: Function to call at the bottom of the stack (that's the
one provided by the implementer).
:return:
"""
# noinspection PyTypeChecker
return Caller(self, name, final) |
def clone(self):
''' Returns a shallow copy of the current instance, except that all
variables are deep-cloned.
'''
clone = copy(self)
clone.variables = {k: v.clone() for (k, v) in self.variables.items()}
return clone | Returns a shallow copy of the current instance, except that all
variables are deep-cloned. | Below is the the instruction that describes the task:
### Input:
Returns a shallow copy of the current instance, except that all
variables are deep-cloned.
### Response:
def clone(self):
''' Returns a shallow copy of the current instance, except that all
variables are deep-cloned.
'''
clone = copy(self)
clone.variables = {k: v.clone() for (k, v) in self.variables.items()}
return clone |
def _run_link(self, stream=sys.stdout, dry_run=False,
stage_files=True, resubmit_failed=False):
"""Internal function that actually runs this link.
This checks if input and output files are present.
If input files are missing this will raise `OSError` if dry_run is False
If all output files are present this will skip execution.
Parameters
-----------
stream : `file`
Stream that this `Link` will print to,
must have 'write' function.
dry_run : bool
Print command but do not run it.
stage_files : bool
Stage files to and from the scratch area.
resubmit_failed : bool
Resubmit failed jobs.
"""
if resubmit_failed:
self.args['action'] = 'resubmit'
argv = self._make_argv()
if dry_run:
argv.append('--dry_run')
self._invoke(argv, stream, resubmit_failed=resubmit_failed) | Internal function that actually runs this link.
This checks if input and output files are present.
If input files are missing this will raise `OSError` if dry_run is False
If all output files are present this will skip execution.
Parameters
-----------
stream : `file`
Stream that this `Link` will print to,
must have 'write' function.
dry_run : bool
Print command but do not run it.
stage_files : bool
Stage files to and from the scratch area.
resubmit_failed : bool
Resubmit failed jobs. | Below is the the instruction that describes the task:
### Input:
Internal function that actually runs this link.
This checks if input and output files are present.
If input files are missing this will raise `OSError` if dry_run is False
If all output files are present this will skip execution.
Parameters
-----------
stream : `file`
Stream that this `Link` will print to,
must have 'write' function.
dry_run : bool
Print command but do not run it.
stage_files : bool
Stage files to and from the scratch area.
resubmit_failed : bool
Resubmit failed jobs.
### Response:
def _run_link(self, stream=sys.stdout, dry_run=False,
stage_files=True, resubmit_failed=False):
"""Internal function that actually runs this link.
This checks if input and output files are present.
If input files are missing this will raise `OSError` if dry_run is False
If all output files are present this will skip execution.
Parameters
-----------
stream : `file`
Stream that this `Link` will print to,
must have 'write' function.
dry_run : bool
Print command but do not run it.
stage_files : bool
Stage files to and from the scratch area.
resubmit_failed : bool
Resubmit failed jobs.
"""
if resubmit_failed:
self.args['action'] = 'resubmit'
argv = self._make_argv()
if dry_run:
argv.append('--dry_run')
self._invoke(argv, stream, resubmit_failed=resubmit_failed) |
def dasopr(fname):
"""
Open a DAS file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopr_c.html
:param fname: Name of a DAS file to be opened.
:type fname: str
:return: Handle assigned to the opened DAS file.
:rtype: int
"""
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int()
libspice.dasopr_c(fname, ctypes.byref(handle))
return handle.value | Open a DAS file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopr_c.html
:param fname: Name of a DAS file to be opened.
:type fname: str
:return: Handle assigned to the opened DAS file.
:rtype: int | Below is the the instruction that describes the task:
### Input:
Open a DAS file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopr_c.html
:param fname: Name of a DAS file to be opened.
:type fname: str
:return: Handle assigned to the opened DAS file.
:rtype: int
### Response:
def dasopr(fname):
"""
Open a DAS file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopr_c.html
:param fname: Name of a DAS file to be opened.
:type fname: str
:return: Handle assigned to the opened DAS file.
:rtype: int
"""
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int()
libspice.dasopr_c(fname, ctypes.byref(handle))
return handle.value |
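A usage sketch with spiceypy, assuming the toolkit is installed; the file path is illustrative and the handle should be released with dascls once reading is done.
import spiceypy
handle = spiceypy.dasopr("/path/to/file.das")  # illustrative path to an existing DAS file
# ... read records with other DAS routines using `handle` ...
spiceypy.dascls(handle)  # close the DAS file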
def enable_mfa_device(self,
user_name,
serial_number,
authentication_code_1,
authentication_code_2):
"""Enable MFA Device for user."""
user = self.get_user(user_name)
if serial_number in user.mfa_devices:
raise IAMConflictException(
"EntityAlreadyExists",
"Device {0} already exists".format(serial_number)
)
user.enable_mfa_device(
serial_number,
authentication_code_1,
authentication_code_2
) | Enable MFA Device for user. | Below is the the instruction that describes the task:
### Input:
Enable MFA Device for user.
### Response:
def enable_mfa_device(self,
user_name,
serial_number,
authentication_code_1,
authentication_code_2):
"""Enable MFA Device for user."""
user = self.get_user(user_name)
if serial_number in user.mfa_devices:
raise IAMConflictException(
"EntityAlreadyExists",
"Device {0} already exists".format(serial_number)
)
user.enable_mfa_device(
serial_number,
authentication_code_1,
authentication_code_2
) |
def s3_download(url, dst): # type: (str, str) -> None
"""Download a file from S3.
Args:
url (str): the s3 url of the file.
dst (str): the destination where the file will be saved.
"""
url = parse.urlparse(url)
if url.scheme != 's3':
raise ValueError("Expecting 's3' scheme, got: %s in %s" % (url.scheme, url))
bucket, key = url.netloc, url.path.lstrip('/')
region = os.environ.get('AWS_REGION', os.environ.get(_params.REGION_NAME_ENV))
s3 = boto3.resource('s3', region_name=region)
s3.Bucket(bucket).download_file(key, dst) | Download a file from S3.
Args:
url (str): the s3 url of the file.
dst (str): the destination where the file will be saved. | Below is the the instruction that describes the task:
### Input:
Download a file from S3.
Args:
url (str): the s3 url of the file.
dst (str): the destination where the file will be saved.
### Response:
def s3_download(url, dst): # type: (str, str) -> None
"""Download a file from S3.
Args:
url (str): the s3 url of the file.
dst (str): the destination where the file will be saved.
"""
url = parse.urlparse(url)
if url.scheme != 's3':
raise ValueError("Expecting 's3' scheme, got: %s in %s" % (url.scheme, url))
bucket, key = url.netloc, url.path.lstrip('/')
region = os.environ.get('AWS_REGION', os.environ.get(_params.REGION_NAME_ENV))
s3 = boto3.resource('s3', region_name=region)
s3.Bucket(bucket).download_file(key, dst) |
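A hedged call example; the bucket, key, region and destination below are placeholders.
import os
os.environ.setdefault("AWS_REGION", "us-east-1")  # region value is illustrative
s3_download("s3://example-bucket/model.tar.gz", "/tmp/model.tar.gz")
# A non-s3 scheme raises ValueError, e.g. s3_download("http://example.com/x", "/tmp/x")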
def job_terminate(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /job-xxxx/terminate API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fjob-xxxx%2Fterminate
"""
return DXHTTPRequest('/%s/terminate' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /job-xxxx/terminate API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fjob-xxxx%2Fterminate | Below is the the instruction that describes the task:
### Input:
Invokes the /job-xxxx/terminate API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fjob-xxxx%2Fterminate
### Response:
def job_terminate(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /job-xxxx/terminate API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fjob-xxxx%2Fterminate
"""
return DXHTTPRequest('/%s/terminate' % object_id, input_params, always_retry=always_retry, **kwargs) |
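A usage sketch, assuming the wrapper is reachable as dxpy.api.job_terminate; the job ID is a placeholder.
import dxpy
dxpy.api.job_terminate("job-xxxx")  # terminate the job with this (placeholder) ID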
def __liftover_coordinates_genomic_deletions(self, intersecting_region):
"""
A 'private' helper member function to perform liftover in coordinate space
    when the length of the genomic match is smaller than the consensus match.
We assume the genomic region contains deletions. In this case, we uniformly
distribute the deletions (gaps) through the genomic region. This is the
trickiest liftover case because it fragments the region after liftover.
    This method should only be called when the above condition is true (longer
    consensus region than genomic region), otherwise an assertion will fail.
:param intersecting_region: a region that intersects this occurrence.
:return: list of GenomicInterval objects. This is a list because a genomic
deletion of part of the retrotransposon can fragment the
intersecting region and result in more than one returned interval.
"""
# should never happen, but check anyway...
consensus_match_length = self.consensus_end - self.consensus_start
size_dif = consensus_match_length - len(self)
assert(size_dif > 0)
name = self.repeat_name()
gap_interval = int(math.ceil(len(self) / float(size_dif)))
s_dist_to_gen_start = max(intersecting_region.start - self.start, 0)
e_dist_to_gen_start = min(max(intersecting_region.end -
self.start, 0), len(self))
gaps_before = s_dist_to_gen_start / gap_interval
gaps_in = ((e_dist_to_gen_start - 1) / gap_interval) - gaps_before
gen_s_dist = s_dist_to_gen_start
left_to_lift = (min(self.end, intersecting_region.end) -
max(self.start, intersecting_region.start))
res = []
if self.consensus_match_strand is '+':
s = s_dist_to_gen_start + self.consensus_start
s = s + gaps_before
for i in range(0, gaps_in):
e = s + min((gap_interval - (gen_s_dist % gap_interval)), left_to_lift)
res.append(GenomicInterval(name, s, e, intersecting_region.name,
intersecting_region.score, self.strand))
gen_s_dist += (e - s)
left_to_lift -= (e - s)
s = e + 1
e = min(s + min(gap_interval, left_to_lift), self.consensus_end)
if e - s != 0:
res.append(GenomicInterval(name, s, e, intersecting_region.name,
intersecting_region.score, self.strand))
else:
e = self.consensus_end - s_dist_to_gen_start
e = e - gaps_before
for i in range(0, gaps_in):
s = e - min((gap_interval - (gen_s_dist % gap_interval)), left_to_lift)
res.append(GenomicInterval(name, s, e, intersecting_region.name,
intersecting_region.score, self.strand))
gen_s_dist += (e - s)
left_to_lift -= (e - s)
e = s - 1
s = max(e - min(gap_interval, left_to_lift), self.consensus_start)
if e - s != 0:
res.append(GenomicInterval(name, s, e, intersecting_region.name,
intersecting_region.score, self.strand))
return res | A 'private' helper member function to perform liftover in coordinate space
    when the length of the genomic match is smaller than the consensus match.
We assume the genomic region contains deletions. In this case, we uniformly
distribute the deletions (gaps) through the genomic region. This is the
trickiest liftover case because it fragments the region after liftover.
    This method should only be called when the above condition is true (longer
    consensus region than genomic region), otherwise an assertion will fail.
:param intersecting_region: a region that intersects this occurrence.
:return: list of GenomicInterval objects. This is a list because a genomic
deletion of part of the retrotransposon can fragment the
intersecting region and result in more than one returned interval. | Below is the the instruction that describes the task:
### Input:
A 'private' helper member function to perform liftover in coordinate space
    when the length of the genomic match is smaller than the consensus match.
We assume the genomic region contains deletions. In this case, we uniformly
distribute the deletions (gaps) through the genomic region. This is the
trickiest liftover case because it fragments the region after liftover.
    This method should only be called when the above condition is true (longer
    consensus region than genomic region), otherwise an assertion will fail.
:param intersecting_region: a region that intersects this occurrence.
:return: list of GenomicInterval objects. This is a list because a genomic
deletion of part of the retrotransposon can fragment the
intersecting region and result in more than one returned interval.
### Response:
def __liftover_coordinates_genomic_deletions(self, intersecting_region):
"""
A 'private' helper member function to perform liftover in coordinate space
    when the length of the genomic match is smaller than the consensus match.
We assume the genomic region contains deletions. In this case, we uniformly
distribute the deletions (gaps) through the genomic region. This is the
trickiest liftover case because it fragments the region after liftover.
    This method should only be called when the above condition is true (longer
    consensus region than genomic region), otherwise an assertion will fail.
:param intersecting_region: a region that intersects this occurrence.
:return: list of GenomicInterval objects. This is a list because a genomic
deletion of part of the retrotransposon can fragment the
intersecting region and result in more than one returned interval.
"""
# should never happen, but check anyway...
consensus_match_length = self.consensus_end - self.consensus_start
size_dif = consensus_match_length - len(self)
assert(size_dif > 0)
name = self.repeat_name()
gap_interval = int(math.ceil(len(self) / float(size_dif)))
s_dist_to_gen_start = max(intersecting_region.start - self.start, 0)
e_dist_to_gen_start = min(max(intersecting_region.end -
self.start, 0), len(self))
gaps_before = s_dist_to_gen_start / gap_interval
gaps_in = ((e_dist_to_gen_start - 1) / gap_interval) - gaps_before
gen_s_dist = s_dist_to_gen_start
left_to_lift = (min(self.end, intersecting_region.end) -
max(self.start, intersecting_region.start))
res = []
if self.consensus_match_strand is '+':
s = s_dist_to_gen_start + self.consensus_start
s = s + gaps_before
for i in range(0, gaps_in):
e = s + min((gap_interval - (gen_s_dist % gap_interval)), left_to_lift)
res.append(GenomicInterval(name, s, e, intersecting_region.name,
intersecting_region.score, self.strand))
gen_s_dist += (e - s)
left_to_lift -= (e - s)
s = e + 1
e = min(s + min(gap_interval, left_to_lift), self.consensus_end)
if e - s != 0:
res.append(GenomicInterval(name, s, e, intersecting_region.name,
intersecting_region.score, self.strand))
else:
e = self.consensus_end - s_dist_to_gen_start
e = e - gaps_before
for i in range(0, gaps_in):
s = e - min((gap_interval - (gen_s_dist % gap_interval)), left_to_lift)
res.append(GenomicInterval(name, s, e, intersecting_region.name,
intersecting_region.score, self.strand))
gen_s_dist += (e - s)
left_to_lift -= (e - s)
e = s - 1
s = max(e - min(gap_interval, left_to_lift), self.consensus_start)
if e - s != 0:
res.append(GenomicInterval(name, s, e, intersecting_region.name,
intersecting_region.score, self.strand))
return res |
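To make the gap spacing concrete, a small worked example of the arithmetic used above, with made-up match lengths.
import math
genomic_len = 100                                   # genomic match length (made up)
consensus_len = 110                                 # consensus match length (made up)
size_dif = consensus_len - genomic_len              # 10 bases deleted from the genomic copy
gap_interval = int(math.ceil(genomic_len / float(size_dif)))
print(size_dif, gap_interval)                       # -> 10 10: one gap roughly every 10 genomic bases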
def rawData(self, fileAdres=None):
# skip skiprows
skiprows = None
skip_from = [b'Field', b'Moment']
with open(fileAdres, 'rb') as fr:
#f = fr.read()
for i, line in enumerate(fr, 1):
# print(line.split())
if skip_from == line.split():
skiprows = i+2
break
# else:
# print('file format wrong, cannot find the data row.')
skiprows = 34 if skiprows == None else skiprows
df = pd.read_csv(fileAdres, skiprows=skiprows, sep='\s+',
delimiter=',', names=['H', 'M'], skipfooter=1,
engine='python')
H = df.H # measured field
M = df.M # measured magnetic moment
'''
#=================================================
/datainterval_H/_M
/slice the measured data into pieces
/for every measured FORC
#=================================================
'''
dataInterval_H = []
dataInterval_M = []
# print(H)
cretia = df.H.mean() # edge of linear programing for selecting data
H0 = df.H.max() # the maximum field
self.x, self.y, self.z = [[], [], []]
for i in np.arange(1, len(H)):
dataInterval_H.append(H[i])
dataInterval_M.append(M[i])
if abs(H[i]-H0) <= 0.001: # when the filed reach the max, a new forc
if len(dataInterval_H) >= 0 and len(dataInterval_H) <= 200:
# print(dataInterval_H)
Ha = dataInterval_H[0]
dataInterval_H.pop(-1)
dataInterval_M.pop(-1)
Hb = dataInterval_H[1:-1]
Hm = dataInterval_M[1:-1]
for t in np.arange(len(Hb)):
self.x.append(Hb[t])
self.y.append(Ha)
self.z.append(Hm[t])
# print(Ha)
dataInterval_H = []
dataInterval_M = []
self.rawdf = df
'''
#=================================================
transfer the data set to matrix as len(x)*len(y) with z value
/mesh up the rawdata
/select the data area by X,Y ranges
        /obtain regularly spaced data points by np.linspace
        /use interpolation to calculate the Hm values
/loop Ha(Y),Hb(X)
/fill every position with Hm, else with np.nan
#=================================================
'''
self.z = self.z/np.max(self.z)
# print(int(np.min(self.x)*100)/100,np.max(self.x))
xrange = [int((np.min(self.x)-0.1)*10)/10,
int((np.max(self.x)+0.1)*10)/10]
yrange = [int((np.min(self.y)-0.1)*10)/10,
int((np.max(self.y)+0.1)*10)/10]
X = np.linspace(xrange[0], xrange[1], 200)
Y = np.linspace(yrange[0], yrange[1], 200)
yi, xi = np.mgrid[yrange[0]:yrange[1]:200j, xrange[0]:xrange[1]:200j]
#X = np.linspace(-0.2,0.3,200)
#Y = np.linspace(-0.2,0.3,200)
#xi,yi = np.mgrid[-0.2:0.3:200j,-0.2:0.3:200j]
zi = griddata((self.x, self.y), self.z, (xi, yi),
method='linear') # !!! must linear
self.matrix_z = zi
self.x_range = X
self.y_range = Y | #=================================================
/datainterval_H/_M
/slice the measured data into pieces
/for every measured FORC
#================================================= | Below is the the instruction that describes the task:
### Input:
#=================================================
/datainterval_H/_M
/slice the measured data into pieces
/for every measured FORC
#=================================================
### Response:
def rawData(self, fileAdres=None):
# skip skiprows
skiprows = None
skip_from = [b'Field', b'Moment']
with open(fileAdres, 'rb') as fr:
#f = fr.read()
for i, line in enumerate(fr, 1):
# print(line.split())
if skip_from == line.split():
skiprows = i+2
break
# else:
# print('file format wrong, cannot find the data row.')
skiprows = 34 if skiprows == None else skiprows
df = pd.read_csv(fileAdres, skiprows=skiprows, sep='\s+',
delimiter=',', names=['H', 'M'], skipfooter=1,
engine='python')
H = df.H # measured field
M = df.M # measured magnetic moment
'''
#=================================================
/datainterval_H/_M
/slice the measured data into pieces
/for every measured FORC
#=================================================
'''
dataInterval_H = []
dataInterval_M = []
# print(H)
cretia = df.H.mean() # edge of linear programing for selecting data
H0 = df.H.max() # the maximum field
self.x, self.y, self.z = [[], [], []]
for i in np.arange(1, len(H)):
dataInterval_H.append(H[i])
dataInterval_M.append(M[i])
if abs(H[i]-H0) <= 0.001: # when the filed reach the max, a new forc
if len(dataInterval_H) >= 0 and len(dataInterval_H) <= 200:
# print(dataInterval_H)
Ha = dataInterval_H[0]
dataInterval_H.pop(-1)
dataInterval_M.pop(-1)
Hb = dataInterval_H[1:-1]
Hm = dataInterval_M[1:-1]
for t in np.arange(len(Hb)):
self.x.append(Hb[t])
self.y.append(Ha)
self.z.append(Hm[t])
# print(Ha)
dataInterval_H = []
dataInterval_M = []
self.rawdf = df
'''
#=================================================
transfer the data set to matrix as len(x)*len(y) with z value
/mesh up the rawdata
/select the data area by X,Y ranges
        /obtain regularly spaced data points by np.linspace
        /use interpolation to calculate the Hm values
/loop Ha(Y),Hb(X)
/fill every position with Hm, else with np.nan
#=================================================
'''
self.z = self.z/np.max(self.z)
# print(int(np.min(self.x)*100)/100,np.max(self.x))
xrange = [int((np.min(self.x)-0.1)*10)/10,
int((np.max(self.x)+0.1)*10)/10]
yrange = [int((np.min(self.y)-0.1)*10)/10,
int((np.max(self.y)+0.1)*10)/10]
X = np.linspace(xrange[0], xrange[1], 200)
Y = np.linspace(yrange[0], yrange[1], 200)
yi, xi = np.mgrid[yrange[0]:yrange[1]:200j, xrange[0]:xrange[1]:200j]
#X = np.linspace(-0.2,0.3,200)
#Y = np.linspace(-0.2,0.3,200)
#xi,yi = np.mgrid[-0.2:0.3:200j,-0.2:0.3:200j]
zi = griddata((self.x, self.y), self.z, (xi, yi),
method='linear') # !!! must linear
self.matrix_z = zi
self.x_range = X
self.y_range = Y |
def _get_all(self):
""" Get all users from db and turn into list of dicts """
return [self._to_dict(row) for row in models.User.objects.all()] | Get all users from db and turn into list of dicts | Below is the the instruction that describes the task:
### Input:
Get all users from db and turn into list of dicts
### Response:
def _get_all(self):
""" Get all users from db and turn into list of dicts """
return [self._to_dict(row) for row in models.User.objects.all()] |
def _get_access_token(self):
"""
Get IAM access token using API key.
"""
err = 'Failed to contact IAM token service'
try:
resp = super(IAMSession, self).request(
'POST',
self._token_url,
auth=self._token_auth,
headers={'Accepts': 'application/json'},
data={
'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',
'response_type': 'cloud_iam',
'apikey': self._api_key
}
)
err = response_to_json_dict(resp).get('errorMessage', err)
resp.raise_for_status()
return response_to_json_dict(resp)['access_token']
except KeyError:
raise CloudantException('Invalid response from IAM token service')
except RequestException:
raise CloudantException(err) | Get IAM access token using API key. | Below is the the instruction that describes the task:
### Input:
Get IAM access token using API key.
### Response:
def _get_access_token(self):
"""
Get IAM access token using API key.
"""
err = 'Failed to contact IAM token service'
try:
resp = super(IAMSession, self).request(
'POST',
self._token_url,
auth=self._token_auth,
headers={'Accepts': 'application/json'},
data={
'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',
'response_type': 'cloud_iam',
'apikey': self._api_key
}
)
err = response_to_json_dict(resp).get('errorMessage', err)
resp.raise_for_status()
return response_to_json_dict(resp)['access_token']
except KeyError:
raise CloudantException('Invalid response from IAM token service')
except RequestException:
raise CloudantException(err) |
def handle(self, request: HttpRequest) -> HttpResponse:
"""
Prepares for the CallBackResolver and handles the response and exceptions
:param request HttpRequest
:rtype: HttpResponse
"""
self.__request_start = datetime.now()
self.__request = request
self.__uri = request.path[1:]
self.__method = request.method
        # Initializes the callable controller and calls its connect method to get the mapped end-points.
controller: RouteMapping = self.__controller().connect(self.app)
self.__end_points = controller.get_routes()
indent = self.get_json_ident(request.META)
if self.set_end_point_uri() is False:
return self.set_response_headers(self.no_route_found(self.__request).render(indent))
response = HttpResponse(None)
try:
response = self.exec_route_callback()
except RinzlerHttpException as e:
client.captureException()
self.app.log.error(f"< {e.status_code}", exc_info=True)
response = Response(None, status=e.status_code)
except RequestDataTooBig:
client.captureException()
self.app.log.error("< 413", exc_info=True)
response = Response(None, status=413)
except BaseException:
client.captureException()
self.app.log.error("< 500", exc_info=True)
response = Response(None, status=500)
finally:
if type(response) == Response:
return self.set_response_headers(response.render(indent))
else:
return self.set_response_headers(response) | Prepares for the CallBackResolver and handles the response and exceptions
:param request HttpRequest
:rtype: HttpResponse | Below is the the instruction that describes the task:
### Input:
Prepares for the CallBackResolver and handles the response and exceptions
:param request HttpRequest
:rtype: HttpResponse
### Response:
def handle(self, request: HttpRequest) -> HttpResponse:
"""
Prepares for the CallBackResolver and handles the response and exceptions
:param request HttpRequest
:rtype: HttpResponse
"""
self.__request_start = datetime.now()
self.__request = request
self.__uri = request.path[1:]
self.__method = request.method
        # Initializes the callable controller and calls its connect method to get the mapped end-points.
controller: RouteMapping = self.__controller().connect(self.app)
self.__end_points = controller.get_routes()
indent = self.get_json_ident(request.META)
if self.set_end_point_uri() is False:
return self.set_response_headers(self.no_route_found(self.__request).render(indent))
response = HttpResponse(None)
try:
response = self.exec_route_callback()
except RinzlerHttpException as e:
client.captureException()
self.app.log.error(f"< {e.status_code}", exc_info=True)
response = Response(None, status=e.status_code)
except RequestDataTooBig:
client.captureException()
self.app.log.error("< 413", exc_info=True)
response = Response(None, status=413)
except BaseException:
client.captureException()
self.app.log.error("< 500", exc_info=True)
response = Response(None, status=500)
finally:
if type(response) == Response:
return self.set_response_headers(response.render(indent))
else:
return self.set_response_headers(response) |
def parse(filename, format=u"Jæren Sparebank", encoding="latin1"):
"""Parses bank CSV file and returns Transactions instance.
Args:
filename: Path to CSV file to read.
format: CSV format; one of the entries in `elv.formats`.
encoding: The CSV file encoding.
Returns:
A ``Transactions`` object.
"""
Class = formats[format.lower()]
if PY3:
kw = {"encoding": encoding}
else:
kw = {}
with open(filename, "rt", **kw) as f:
return Class.csv_to_transactions(f) | Parses bank CSV file and returns Transactions instance.
Args:
filename: Path to CSV file to read.
format: CSV format; one of the entries in `elv.formats`.
encoding: The CSV file encoding.
Returns:
A ``Transactions`` object. | Below is the the instruction that describes the task:
### Input:
Parses bank CSV file and returns Transactions instance.
Args:
filename: Path to CSV file to read.
format: CSV format; one of the entries in `elv.formats`.
encoding: The CSV file encoding.
Returns:
A ``Transactions`` object.
### Response:
def parse(filename, format=u"Jæren Sparebank", encoding="latin1"):
"""Parses bank CSV file and returns Transactions instance.
Args:
filename: Path to CSV file to read.
format: CSV format; one of the entries in `elv.formats`.
encoding: The CSV file encoding.
Returns:
A ``Transactions`` object.
"""
Class = formats[format.lower()]
if PY3:
kw = {"encoding": encoding}
else:
kw = {}
with open(filename, "rt", **kw) as f:
return Class.csv_to_transactions(f) |
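A hedged usage example; the CSV path is illustrative and the defaults match the signature above.
transactions = parse("statement.csv")  # default format u"Jæren Sparebank" and latin1 encoding
print(transactions)                    # a Transactions object built from the file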
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value) | Return a float as a IEEE 754 format bytes object. | Below is the the instruction that describes the task:
### Input:
Return a float as a IEEE 754 format bytes object.
### Response:
def _send_float(self,value):
"""
Return a float as a IEEE 754 format bytes object.
"""
# convert to float. this will throw a ValueError if the type is not
# readily converted
if type(value) != float:
value = float(value)
# Range check
if value > self.board.float_max or value < self.board.float_min:
err = "Value {} exceeds the size of the board's float.".format(value)
raise OverflowError(err)
return struct.pack(self.board.float_type,value) |
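For reference, the underlying struct packing can be tried standalone, independent of the board object.
import struct
packed = struct.pack("<f", 1.5)            # IEEE 754 single precision, little-endian
print(packed)                              # b'\x00\x00\xc0?'
print(struct.unpack("<f", packed)[0])      # 1.5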
def _CreateLineStringForShape(self, parent, shape):
"""Create a KML LineString using coordinates from a shape.
Args:
parent: The parent ElementTree.Element instance.
shape: The transitfeed.Shape instance.
Returns:
The LineString ElementTree.Element instance or None if coordinate_list is
empty.
"""
coordinate_list = [(longitude, latitude) for
(latitude, longitude, distance) in shape.points]
return self._CreateLineString(parent, coordinate_list) | Create a KML LineString using coordinates from a shape.
Args:
parent: The parent ElementTree.Element instance.
shape: The transitfeed.Shape instance.
Returns:
The LineString ElementTree.Element instance or None if coordinate_list is
empty. | Below is the the instruction that describes the task:
### Input:
Create a KML LineString using coordinates from a shape.
Args:
parent: The parent ElementTree.Element instance.
shape: The transitfeed.Shape instance.
Returns:
The LineString ElementTree.Element instance or None if coordinate_list is
empty.
### Response:
def _CreateLineStringForShape(self, parent, shape):
"""Create a KML LineString using coordinates from a shape.
Args:
parent: The parent ElementTree.Element instance.
shape: The transitfeed.Shape instance.
Returns:
The LineString ElementTree.Element instance or None if coordinate_list is
empty.
"""
coordinate_list = [(longitude, latitude) for
(latitude, longitude, distance) in shape.points]
return self._CreateLineString(parent, coordinate_list) |
def create(self, resource):
"""
Set all the labels for a resource.
Args:
resource: The object containing the resource URI and a list of labels
Returns:
dict: Resource Labels
"""
uri = self.URI + self.RESOURCES_PATH
return self._client.create(resource=resource, uri=uri) | Set all the labels for a resource.
Args:
resource: The object containing the resource URI and a list of labels
Returns:
dict: Resource Labels | Below is the the instruction that describes the task:
### Input:
Set all the labels for a resource.
Args:
resource: The object containing the resource URI and a list of labels
Returns:
dict: Resource Labels
### Response:
def create(self, resource):
"""
Set all the labels for a resource.
Args:
resource: The object containing the resource URI and a list of labels
Returns:
dict: Resource Labels
"""
uri = self.URI + self.RESOURCES_PATH
return self._client.create(resource=resource, uri=uri) |
def sanitize(self):
'''
Check if the current settings conform to the LISP specifications and
fix where possible.
'''
# WARNING: http://tools.ietf.org/html/draft-ietf-lisp-ddt-00
# does not define this field so the description is taken from
# http://tools.ietf.org/html/draft-ietf-lisp-24
#
# Record TTL: The time in minutes the recipient of the Map-Reply will
# store the mapping. If the TTL is 0, the entry SHOULD be removed
# from the cache immediately. If the value is 0xffffffff, the
# recipient can decide locally how long to store the mapping.
if not isinstance(self.ttl, numbers.Integral) \
or self.ttl < 0 or self.ttl > 0xffffffff:
raise ValueError('Invalid TTL')
# ACT: The "action" field of the mapping record in a Map-Referral
# message encodes 6 action types. The values for the action types are:
#
# NODE-REFERRAL (0): Sent by a DDT node with a child delegation which
# is authoritative for the EID.
#
# MS-REFERRAL (1): Sent by a DDT node that has information about Map
# Server(s) for the EID but it is not one of the Map Servers listed,
# i.e. the DDT-Node sending the referral is not a Map Server.
#
# MS-ACK (2): Sent by a DDT Map Server that has one or more ETR
# registered for the EID.
#
# MS-NOT-REGISTERED (3): Sent by a DDT Map Server that is configured
# for the EID-prefix but for which no ETRs are registered.
#
# DELEGATION-HOLE (4): Sent by an intermediate DDT node with
# authoritative configuration covering the requested EID but without
# any child delegation for the EID. Also sent by a DDT Map Server
# with authoritative configuration covering the requested EID but
# for which no specific site ETR is configured.
#
# NOT-AUTHORITATIVE (5): Sent by a DDT node that does not have
# authoritative configuration for the requested EID. The EID-prefix
# returned MUST be the original requested EID and the TTL MUST be
# set to 0. However, if such a DDT node has a child delegation
# covering the requested EID, it may choose to return NODE-REFERRAL
# or MS-REFERRAL as appropriate. A DDT Map Server with site
# information may choose to return of type MS-ACK or MS-NOT-
# REGISTERED as appropriate.
if self.action not in (self.ACT_NODE_REFERRAL,
self.ACT_MS_REFERRAL,
self.ACT_MS_ACK,
self.ACT_MS_NOT_REGISTERED,
self.ACT_DELEGATION_HOLE,
self.ACT_NOT_AUTHORITATIVE):
raise ValueError('Invalid action')
# WARNING: http://tools.ietf.org/html/draft-ietf-lisp-ddt-00
# does not define this field so the description is taken from
# http://tools.ietf.org/html/draft-ietf-lisp-24
#
# A: The Authoritative bit, when sent is always set to 1 by an ETR.
# When a Map-Server is proxy Map-Replying [LISP-MS] for a LISP site,
# the Authoritative bit is set to 0. This indicates to requesting
# ITRs that the Map-Reply was not originated by a LISP node managed
# at the site that owns the EID-prefix.
if not isinstance(self.authoritative, bool):
raise ValueError('Authoritative flag must be a boolean')
# Incomplete: The "I" bit indicates that a DDT node's referral-set of
# locators is incomplete and the receiver of this message should not
# cache the referral
if not isinstance(self.incomplete, bool):
raise ValueError('Incomplete flag must be a boolean')
# A DDT sets the "incomplete" flag, the TTL, and the Action Type field
# as follows:
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
# 1 MS-REFERRAL NO YES 1440
# 2 MS-ACK * * 1440
# 3 MS-NOT-REGISTERED * * 1
# 4 DELEGATION-HOLE NO NO 15
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
# *: The "Incomplete" flag setting on Map Server originated referral of
# MS-REFERRAL and MS-NOT-REGISTERED types depend on whether the Map
# Server has the full peer Map Server configuration for the same
# prefix and has encoded the information in the mapping record.
# Incomplete bit is not set when the Map Server has encoded the
# information, which means the referral-set includes all the RLOCs
# of all Map Servers that serve the prefix. It is set when the Map
# Server has not encoded the Map Server set information.
if self.action == self.ACT_NODE_REFERRAL:
if self.incomplete:
raise ValueError('NODE-REFERRAL messages cannot be incomplete')
if not self.locator_records:
raise ValueError('NODE-REFERRAL messages must have locators')
if self.ttl != 1440:
raise ValueError('NODE-REFERRAL messages must have TTL=1440')
elif self.action == self.ACT_MS_REFERRAL:
if self.incomplete:
raise ValueError('MS-REFERRAL messages cannot be incomplete')
if not self.locator_records:
raise ValueError('MS-REFERRAL messages must have locators')
if self.ttl != 1440:
raise ValueError('MS-REFERRAL messages must have TTL=1440')
elif self.action == self.ACT_MS_ACK:
if self.ttl != 1440:
raise ValueError('MS-ACK messages must have TTL=1440')
elif self.action == self.ACT_MS_NOT_REGISTERED:
if self.ttl != 1:
raise ValueError('MS-NOT-REGISTERED messages must have '
'TTL=1')
elif self.action == self.ACT_DELEGATION_HOLE:
if self.incomplete:
raise ValueError('DELEGATION-HOLE messages cannot be '
'incomplete')
if self.locator_records:
raise ValueError('DELEGATION-HOLE messages can not have '
'locators')
if self.ttl != 15:
raise ValueError('DELEGATION-HOLE messages must have TTL=15')
elif self.action == self.ACT_NOT_AUTHORITATIVE:
if not self.incomplete:
raise ValueError('NOT-AUTHORITATIVE messages must be '
'incomplete')
if self.locator_records:
raise ValueError('NOT-AUTHORITATIVE messages can not have '
'locators')
if self.ttl != 0:
raise ValueError('NOT-AUTHORITATIVE messages must have TTL=0')
# WARNING: http://tools.ietf.org/html/draft-ietf-lisp-ddt-00
# does not define this field so the description is taken from
# http://tools.ietf.org/html/draft-ietf-lisp-24
#
# Map-Version Number: When this 12-bit value is non-zero the Map-Reply
# sender is informing the ITR what the version number is for the
# EID-record contained in the Map-Reply. The ETR can allocate this
# number internally but MUST coordinate this value with other ETRs
# for the site. When this value is 0, there is no versioning
# information conveyed. The Map-Version Number can be included in
# Map-Request and Map-Register messages. See Section 6.6.3 for more
# details.
if not isinstance(self.map_version, numbers.Integral) \
or self.map_version < 0 \
or self.map_version >= 2 ** 12:
raise ValueError('Invalid map version')
# EID-prefix: 4 octets if an IPv4 address-family, 16 octets if an IPv6
# address-family.
if not isinstance(self.eid_prefix, LCAFInstanceAddress):
if not isinstance(self.eid_prefix, (IPv4Network, IPv6Network)):
raise ValueError("Unexpected EID prefix %r", self.eid_prefix)
# Wrap in LCAF address with Instance ID
self.eid_prefix = LCAFInstanceAddress(instance_id=0,
address=self.eid_prefix)
# Check locator records
# The probed_locator bits aren't used in this context
for locator_record in self.locator_records:
if not isinstance(locator_record, LocatorRecord) \
or locator_record.probed_locator:
raise ValueError('Invalid Locator record')
locator_record.sanitize()
# For each Map-Reply record, the list of Locators in a Locator-Set MUST
# appear in the same order for each ETR that originates a Map-Reply
# message. The Locator-Set MUST be sorted in order of ascending IP
# address where an IPv4 locator address is considered numerically 'less
# than' an IPv6 locator address.
self.locator_records.sort(key=LocatorRecord.sort_key)
# Check signatures
for dummy in self.signatures:
# TODO: Implement signatures [LISP-Security]
pass | Check if the current settings conform to the LISP specifications and
fix where possible. | Below is the the instruction that describes the task:
### Input:
Check if the current settings conform to the LISP specifications and
fix where possible.
### Response:
def sanitize(self):
'''
Check if the current settings conform to the LISP specifications and
fix where possible.
'''
# WARNING: http://tools.ietf.org/html/draft-ietf-lisp-ddt-00
# does not define this field so the description is taken from
# http://tools.ietf.org/html/draft-ietf-lisp-24
#
# Record TTL: The time in minutes the recipient of the Map-Reply will
# store the mapping. If the TTL is 0, the entry SHOULD be removed
# from the cache immediately. If the value is 0xffffffff, the
# recipient can decide locally how long to store the mapping.
if not isinstance(self.ttl, numbers.Integral) \
or self.ttl < 0 or self.ttl > 0xffffffff:
raise ValueError('Invalid TTL')
# ACT: The "action" field of the mapping record in a Map-Referral
# message encodes 6 action types. The values for the action types are:
#
# NODE-REFERRAL (0): Sent by a DDT node with a child delegation which
# is authoritative for the EID.
#
# MS-REFERRAL (1): Sent by a DDT node that has information about Map
# Server(s) for the EID but it is not one of the Map Servers listed,
# i.e. the DDT-Node sending the referral is not a Map Server.
#
# MS-ACK (2): Sent by a DDT Map Server that has one or more ETR
# registered for the EID.
#
# MS-NOT-REGISTERED (3): Sent by a DDT Map Server that is configured
# for the EID-prefix but for which no ETRs are registered.
#
# DELEGATION-HOLE (4): Sent by an intermediate DDT node with
# authoritative configuration covering the requested EID but without
# any child delegation for the EID. Also sent by a DDT Map Server
# with authoritative configuration covering the requested EID but
# for which no specific site ETR is configured.
#
# NOT-AUTHORITATIVE (5): Sent by a DDT node that does not have
# authoritative configuration for the requested EID. The EID-prefix
# returned MUST be the original requested EID and the TTL MUST be
# set to 0. However, if such a DDT node has a child delegation
# covering the requested EID, it may choose to return NODE-REFERRAL
# or MS-REFERRAL as appropriate. A DDT Map Server with site
# information may choose to return of type MS-ACK or MS-NOT-
# REGISTERED as appropriate.
if self.action not in (self.ACT_NODE_REFERRAL,
self.ACT_MS_REFERRAL,
self.ACT_MS_ACK,
self.ACT_MS_NOT_REGISTERED,
self.ACT_DELEGATION_HOLE,
self.ACT_NOT_AUTHORITATIVE):
raise ValueError('Invalid action')
# WARNING: http://tools.ietf.org/html/draft-ietf-lisp-ddt-00
# does not define this field so the description is taken from
# http://tools.ietf.org/html/draft-ietf-lisp-24
#
# A: The Authoritative bit, when sent is always set to 1 by an ETR.
# When a Map-Server is proxy Map-Replying [LISP-MS] for a LISP site,
# the Authoritative bit is set to 0. This indicates to requesting
# ITRs that the Map-Reply was not originated by a LISP node managed
# at the site that owns the EID-prefix.
if not isinstance(self.authoritative, bool):
raise ValueError('Authoritative flag must be a boolean')
# Incomplete: The "I" bit indicates that a DDT node's referral-set of
# locators is incomplete and the receiver of this message should not
# cache the referral
if not isinstance(self.incomplete, bool):
raise ValueError('Incomplete flag must be a boolean')
# A DDT sets the "incomplete" flag, the TTL, and the Action Type field
# as follows:
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
# 1 MS-REFERRAL NO YES 1440
# 2 MS-ACK * * 1440
# 3 MS-NOT-REGISTERED * * 1
# 4 DELEGATION-HOLE NO NO 15
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
# *: The "Incomplete" flag setting on Map Server originated referral of
# MS-REFERRAL and MS-NOT-REGISTERED types depend on whether the Map
# Server has the full peer Map Server configuration for the same
# prefix and has encoded the information in the mapping record.
# Incomplete bit is not set when the Map Server has encoded the
# information, which means the referral-set includes all the RLOCs
# of all Map Servers that serve the prefix. It is set when the Map
# Server has not encoded the Map Server set information.
if self.action == self.ACT_NODE_REFERRAL:
if self.incomplete:
raise ValueError('NODE-REFERRAL messages cannot be incomplete')
if not self.locator_records:
raise ValueError('NODE-REFERRAL messages must have locators')
if self.ttl != 1440:
raise ValueError('NODE-REFERRAL messages must have TTL=1440')
elif self.action == self.ACT_MS_REFERRAL:
if self.incomplete:
raise ValueError('MS-REFERRAL messages cannot be incomplete')
if not self.locator_records:
raise ValueError('MS-REFERRAL messages must have locators')
if self.ttl != 1440:
raise ValueError('MS-REFERRAL messages must have TTL=1440')
elif self.action == self.ACT_MS_ACK:
if self.ttl != 1440:
raise ValueError('MS-ACK messages must have TTL=1440')
elif self.action == self.ACT_MS_NOT_REGISTERED:
if self.ttl != 1:
raise ValueError('MS-NOT-REGISTERED messages must have '
'TTL=1')
elif self.action == self.ACT_DELEGATION_HOLE:
if self.incomplete:
raise ValueError('DELEGATION-HOLE messages cannot be '
'incomplete')
if self.locator_records:
raise ValueError('DELEGATION-HOLE messages can not have '
'locators')
if self.ttl != 15:
raise ValueError('DELEGATION-HOLE messages must have TTL=15')
elif self.action == self.ACT_NOT_AUTHORITATIVE:
if not self.incomplete:
raise ValueError('NOT-AUTHORITATIVE messages must be '
'incomplete')
if self.locator_records:
raise ValueError('NOT-AUTHORITATIVE messages can not have '
'locators')
if self.ttl != 0:
raise ValueError('NOT-AUTHORITATIVE messages must have TTL=0')
# WARNING: http://tools.ietf.org/html/draft-ietf-lisp-ddt-00
# does not define this field so the description is taken from
# http://tools.ietf.org/html/draft-ietf-lisp-24
#
# Map-Version Number: When this 12-bit value is non-zero the Map-Reply
# sender is informing the ITR what the version number is for the
# EID-record contained in the Map-Reply. The ETR can allocate this
# number internally but MUST coordinate this value with other ETRs
# for the site. When this value is 0, there is no versioning
# information conveyed. The Map-Version Number can be included in
# Map-Request and Map-Register messages. See Section 6.6.3 for more
# details.
if not isinstance(self.map_version, numbers.Integral) \
or self.map_version < 0 \
or self.map_version >= 2 ** 12:
raise ValueError('Invalid map version')
# EID-prefix: 4 octets if an IPv4 address-family, 16 octets if an IPv6
# address-family.
if not isinstance(self.eid_prefix, LCAFInstanceAddress):
if not isinstance(self.eid_prefix, (IPv4Network, IPv6Network)):
raise ValueError("Unexpected EID prefix %r", self.eid_prefix)
# Wrap in LCAF address with Instance ID
self.eid_prefix = LCAFInstanceAddress(instance_id=0,
address=self.eid_prefix)
# Check locator records
# The probed_locator bits aren't used in this context
for locator_record in self.locator_records:
if not isinstance(locator_record, LocatorRecord) \
or locator_record.probed_locator:
raise ValueError('Invalid Locator record')
locator_record.sanitize()
# For each Map-Reply record, the list of Locators in a Locator-Set MUST
# appear in the same order for each ETR that originates a Map-Reply
# message. The Locator-Set MUST be sorted in order of ascending IP
# address where an IPv4 locator address is considered numerically 'less
# than' an IPv6 locator address.
self.locator_records.sort(key=LocatorRecord.sort_key)
# Check signatures
for dummy in self.signatures:
# TODO: Implement signatures [LISP-Security]
pass |
def dcmanonym(
dcmpth,
displayonly=False,
patient='anonymised',
physician='anonymised',
dob='19800101',
verbose=True):
''' Anonymise DICOM file(s)
Arguments:
> dcmpth: it can be passed as a single DICOM file, or
a folder containing DICOM files, or a list of DICOM file paths.
> patient: the name of the patient.
> physician:the name of the referring physician.
> dob: patient's date of birth.
> verbose: display processing output.
'''
#> check if a single DICOM file
if isinstance(dcmpth, basestring) and os.path.isfile(dcmpth):
dcmlst = [dcmpth]
if verbose:
print 'i> recognised the input argument as a single DICOM file.'
#> check if a folder containing DICOM files
elif isinstance(dcmpth, basestring) and os.path.isdir(dcmpth):
dircontent = os.listdir(dcmpth)
#> create a list of DICOM files inside the folder
dcmlst = [os.path.join(dcmpth,d) for d in dircontent if os.path.isfile(os.path.join(dcmpth,d)) and d.endswith(dcmext)]
if verbose:
print 'i> recognised the input argument as the folder containing DICOM files.'
#> check if a folder containing DICOM files
elif isinstance(dcmpth, list):
if not all([os.path.isfile(d) and d.endswith(dcmext) for d in dcmpth]):
raise IOError('Not all files in the list are DICOM files.')
dcmlst = dcmpth
if verbose:
print 'i> recognised the input argument as the list of DICOM file paths.'
#> check if dictionary of data input <datain>
elif isinstance(dcmpth, dict) and 'corepath' in dcmpth:
dcmlst = list_dcm_datain(dcmpth)
if verbose:
print 'i> recognised the input argument as the dictionary of scanner data.'
else:
raise IOError('Unrecognised input!')
for dcmf in dcmlst:
#> read the file
dhdr = dcm.dcmread(dcmf)
#> get the basic info about the DICOM file
dcmtype = dcminfo(dhdr, verbose=False)
if verbose:
print '-------------------------------'
print 'i> the DICOM file is for:', dcmtype
#> anonymise mMR data.
if 'mmr' in dcmtype:
if [0x029, 0x1120] in dhdr and dhdr[0x029, 0x1120].name=='[CSA Series Header Info]':
csafield = dhdr[0x029, 0x1120]
csa = csafield.value
elif [0x029, 0x1020] in dhdr and dhdr[0x029, 0x1020].name=='[CSA Series Header Info]':
csafield = dhdr[0x029, 0x1020]
csa = csafield.value
else:
csa = ''
# string length considered for replacement
strlen = 200
idx = [m.start() for m in re.finditer(r'([Pp]atients{0,1}[Nn]ame)', csa)]
if idx and verbose:
print ' > found sensitive information deep in DICOM headers:', dcmtype
#> run the anonymisation
iupdate = 0
for i in idx:
ci = i - iupdate
if displayonly:
print ' > sensitive info:'
print ' ', csa[ci:ci+strlen]
continue
rplcmnt = re.sub( r'(\{\s*\"{1,2}\W*\w+\W*\w+\W*\"{1,2}\s*\})',
'{ ""' +patient+ '"" }',
csa[ci:ci+strlen]
)
#> update string
csa = csa[:ci] + rplcmnt + csa[ci+strlen:]
print ' > removed sensitive information.'
#> correct for the number of removed letters
iupdate = strlen-len(rplcmnt)
#> update DICOM
if not displayonly and csa!='':
csafield.value = csa
#> Patient's name
if [0x010,0x010] in dhdr:
if displayonly:
print ' > sensitive info:', dhdr[0x010,0x010].name
print ' ', dhdr[0x010,0x010].value
else:
dhdr[0x010,0x010].value = patient
if verbose: print ' > anonymised patients name'
#> date of birth
if [0x010,0x030] in dhdr:
if displayonly:
print ' > sensitive info:', dhdr[0x010,0x030].name
print ' ', dhdr[0x010,0x030].value
else:
dhdr[0x010,0x030].value = dob
if verbose: print ' > anonymised date of birh'
#> physician's name
if [0x008, 0x090] in dhdr:
if displayonly:
print ' > sensitive info:', dhdr[0x008,0x090].name
print ' ', dhdr[0x008,0x090].value
else:
dhdr[0x008,0x090].value = physician
if verbose: print ' > anonymised physician name'
dhdr.save_as(dcmf) | Anonymise DICOM file(s)
Arguments:
> dcmpth: it can be passed as a single DICOM file, or
a folder containing DICOM files, or a list of DICOM file paths.
> patient: the name of the patient.
> physician:the name of the referring physician.
> dob: patient's date of birth.
> verbose: display processing output. | Below is the the instruction that describes the task:
### Input:
Anonymise DICOM file(s)
Arguments:
> dcmpth: it can be passed as a single DICOM file, or
a folder containing DICOM files, or a list of DICOM file paths.
> patient: the name of the patient.
> physician:the name of the referring physician.
> dob: patient's date of birth.
> verbose: display processing output.
### Response:
def dcmanonym(
dcmpth,
displayonly=False,
patient='anonymised',
physician='anonymised',
dob='19800101',
verbose=True):
''' Anonymise DICOM file(s)
Arguments:
> dcmpth: it can be passed as a single DICOM file, or
a folder containing DICOM files, or a list of DICOM file paths.
> patient: the name of the patient.
> physician:the name of the referring physician.
> dob: patient's date of birth.
> verbose: display processing output.
'''
#> check if a single DICOM file
if isinstance(dcmpth, basestring) and os.path.isfile(dcmpth):
dcmlst = [dcmpth]
if verbose:
print 'i> recognised the input argument as a single DICOM file.'
#> check if a folder containing DICOM files
elif isinstance(dcmpth, basestring) and os.path.isdir(dcmpth):
dircontent = os.listdir(dcmpth)
#> create a list of DICOM files inside the folder
dcmlst = [os.path.join(dcmpth,d) for d in dircontent if os.path.isfile(os.path.join(dcmpth,d)) and d.endswith(dcmext)]
if verbose:
print 'i> recognised the input argument as the folder containing DICOM files.'
#> check if a folder containing DICOM files
elif isinstance(dcmpth, list):
if not all([os.path.isfile(d) and d.endswith(dcmext) for d in dcmpth]):
raise IOError('Not all files in the list are DICOM files.')
dcmlst = dcmpth
if verbose:
print 'i> recognised the input argument as the list of DICOM file paths.'
#> check if dictionary of data input <datain>
elif isinstance(dcmpth, dict) and 'corepath' in dcmpth:
dcmlst = list_dcm_datain(dcmpth)
if verbose:
print 'i> recognised the input argument as the dictionary of scanner data.'
else:
raise IOError('Unrecognised input!')
for dcmf in dcmlst:
#> read the file
dhdr = dcm.dcmread(dcmf)
#> get the basic info about the DICOM file
dcmtype = dcminfo(dhdr, verbose=False)
if verbose:
print '-------------------------------'
print 'i> the DICOM file is for:', dcmtype
#> anonymise mMR data.
if 'mmr' in dcmtype:
if [0x029, 0x1120] in dhdr and dhdr[0x029, 0x1120].name=='[CSA Series Header Info]':
csafield = dhdr[0x029, 0x1120]
csa = csafield.value
elif [0x029, 0x1020] in dhdr and dhdr[0x029, 0x1020].name=='[CSA Series Header Info]':
csafield = dhdr[0x029, 0x1020]
csa = csafield.value
else:
csa = ''
# string length considered for replacement
strlen = 200
idx = [m.start() for m in re.finditer(r'([Pp]atients{0,1}[Nn]ame)', csa)]
if idx and verbose:
print ' > found sensitive information deep in DICOM headers:', dcmtype
#> run the anonymisation
iupdate = 0
for i in idx:
ci = i - iupdate
if displayonly:
print ' > sensitive info:'
print ' ', csa[ci:ci+strlen]
continue
rplcmnt = re.sub( r'(\{\s*\"{1,2}\W*\w+\W*\w+\W*\"{1,2}\s*\})',
'{ ""' +patient+ '"" }',
csa[ci:ci+strlen]
)
#> update string
csa = csa[:ci] + rplcmnt + csa[ci+strlen:]
print ' > removed sensitive information.'
#> correct for the number of removed letters
iupdate = strlen-len(rplcmnt)
#> update DICOM
if not displayonly and csa!='':
csafield.value = csa
#> Patient's name
if [0x010,0x010] in dhdr:
if displayonly:
print ' > sensitive info:', dhdr[0x010,0x010].name
print ' ', dhdr[0x010,0x010].value
else:
dhdr[0x010,0x010].value = patient
if verbose: print ' > anonymised patients name'
#> date of birth
if [0x010,0x030] in dhdr:
if displayonly:
print ' > sensitive info:', dhdr[0x010,0x030].name
print ' ', dhdr[0x010,0x030].value
else:
dhdr[0x010,0x030].value = dob
if verbose: print ' > anonymised date of birh'
#> physician's name
if [0x008, 0x090] in dhdr:
if displayonly:
print ' > sensitive info:', dhdr[0x008,0x090].name
print ' ', dhdr[0x008,0x090].value
else:
dhdr[0x008,0x090].value = physician
if verbose: print ' > anonymised physician name'
dhdr.save_as(dcmf) |
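A hedged usage sketch; the folder path is illustrative, and displayonly=True only reports what would be changed.
# Inspect sensitive fields without modifying anything (path is illustrative):
dcmanonym('/data/subject01/dicom', displayonly=True)
# Anonymise in place with replacement values:
dcmanonym('/data/subject01/dicom', patient='anonymised', physician='anonymised', dob='19800101')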
def distribute(n, iterable):
"""Distribute the items from *iterable* among *n* smaller iterables.
>>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 3, 5]
>>> list(group_2)
[2, 4, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 4, 7], [2, 5], [3, 6]]
If the length of *iterable* is smaller than *n*, then the last returned
iterables will be empty:
>>> children = distribute(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function uses :func:`itertools.tee` and may require significant
storage. If you need the order of items in the smaller iterables to match the
original iterable, see :func:`divide`.
"""
if n < 1:
raise ValueError('n must be at least 1')
children = tee(iterable, n)
return [islice(it, index, None, n) for index, it in enumerate(children)] | Distribute the items from *iterable* among *n* smaller iterables.
>>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 3, 5]
>>> list(group_2)
[2, 4, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 4, 7], [2, 5], [3, 6]]
If the length of *iterable* is smaller than *n*, then the last returned
iterables will be empty:
>>> children = distribute(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function uses :func:`itertools.tee` and may require significant
storage. If you need the order of items in the smaller iterables to match the
original iterable, see :func:`divide`. | Below is the the instruction that describes the task:
### Input:
Distribute the items from *iterable* among *n* smaller iterables.
>>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 3, 5]
>>> list(group_2)
[2, 4, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 4, 7], [2, 5], [3, 6]]
If the length of *iterable* is smaller than *n*, then the last returned
iterables will be empty:
>>> children = distribute(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function uses :func:`itertools.tee` and may require significant
storage. If you need the order of items in the smaller iterables to match the
original iterable, see :func:`divide`.
### Response:
def distribute(n, iterable):
"""Distribute the items from *iterable* among *n* smaller iterables.
>>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 3, 5]
>>> list(group_2)
[2, 4, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 4, 7], [2, 5], [3, 6]]
If the length of *iterable* is smaller than *n*, then the last returned
iterables will be empty:
>>> children = distribute(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function uses :func:`itertools.tee` and may require significant
storage. If you need the order of items in the smaller iterables to match the
original iterable, see :func:`divide`.
"""
if n < 1:
raise ValueError('n must be at least 1')
children = tee(iterable, n)
return [islice(it, index, None, n) for index, it in enumerate(children)] |
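A brief usage sketch, not part of the source: it assumes the more_itertools package, where distribute() lives, and the worker framing and numbers are purely illustrative. Because the function is built on itertools.tee, consuming one of the returned iterables far ahead of the others makes tee buffer the skipped items.

from more_itertools import distribute

jobs = range(1, 11)                        # ten job ids
queues = distribute(3, jobs)               # lazy round-robin split into 3 iterators
for worker_id, queue in enumerate(queues):
    print(worker_id, list(queue))          # 0 [1, 4, 7, 10] / 1 [2, 5, 8] / 2 [3, 6, 9]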
def resource_request_encode(self, request_id, uri_type, uri, transfer_type, storage):
'''
The autopilot is requesting a resource (file, binary, other type of
data)
request_id : Request ID. This ID should be re-used when sending back URI contents (uint8_t)
uri_type : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t)
uri : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t)
transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t)
storage : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t)
'''
return MAVLink_resource_request_message(request_id, uri_type, uri, transfer_type, storage) | The autopilot is requesting a resource (file, binary, other type of
data)
request_id : Request ID. This ID should be re-used when sending back URI contents (uint8_t)
uri_type : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t)
uri : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t)
transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t)
storage : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t) | Below is the the instruction that describes the task:
### Input:
The autopilot is requesting a resource (file, binary, other type of
data)
request_id : Request ID. This ID should be re-used when sending back URI contents (uint8_t)
uri_type : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t)
uri : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t)
transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t)
storage : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t)
### Response:
def resource_request_encode(self, request_id, uri_type, uri, transfer_type, storage):
'''
The autopilot is requesting a resource (file, binary, other type of
data)
request_id : Request ID. This ID should be re-used when sending back URI contents (uint8_t)
uri_type : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t)
uri : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t)
transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t)
storage : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t)
'''
return MAVLink_resource_request_message(request_id, uri_type, uri, transfer_type, storage) |
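A hypothetical usage sketch, assuming pymavlink, where this generated encoder is exposed on a connection's mav object; the endpoint, request id, URI and storage path are all made up for illustration, and shorter byte strings for the fixed-length array fields are typically null-padded by the packer.

from pymavlink import mavutil

master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')   # illustrative link
msg = master.mav.resource_request_encode(
    request_id=7,                          # echoed back when the URI contents return
    uri_type=0,                            # 0 = uri holds a file URL
    uri=b'http://example.com/mission.dat',
    transfer_type=0,                       # 0 = deliver over MAVLink FTP
    storage=b'/fs/microsd/mission.dat')    # only meaningful for FTP-style transfers
master.mav.send(msg)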
def enable_command(self, command: str) -> None:
"""
Enable a command by restoring its functions
:param command: the command being enabled
"""
# If the command is already enabled, then return
if command not in self.disabled_commands:
return
help_func_name = HELP_FUNC_PREFIX + command
# Restore the command and help functions to their original values
dc = self.disabled_commands[command]
setattr(self, self.cmd_func_name(command), dc.command_function)
if dc.help_function is None:
delattr(self, help_func_name)
else:
setattr(self, help_func_name, dc.help_function)
# Remove the disabled command entry
del self.disabled_commands[command] | Enable a command by restoring its functions
:param command: the command being enabled | Below is the the instruction that describes the task:
### Input:
Enable a command by restoring its functions
:param command: the command being enabled
### Response:
def enable_command(self, command: str) -> None:
"""
Enable a command by restoring its functions
:param command: the command being enabled
"""
# If the command is already enabled, then return
if command not in self.disabled_commands:
return
help_func_name = HELP_FUNC_PREFIX + command
# Restore the command and help functions to their original values
dc = self.disabled_commands[command]
setattr(self, self.cmd_func_name(command), dc.command_function)
if dc.help_function is None:
delattr(self, help_func_name)
else:
setattr(self, help_func_name, dc.help_function)
# Remove the disabled command entry
del self.disabled_commands[command] |
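A short usage sketch, assuming the cmd2 package, where enable_command() and its counterpart disable_command() live on cmd2.Cmd; the 'deploy' command and the messages are illustrative.

import cmd2

class App(cmd2.Cmd):
    def do_deploy(self, args):
        """Deploy the current build."""
        self.poutput('deploying...')

app = App()
app.disable_command('deploy', 'deploy is unavailable in safe mode')
# ... later, lift the restriction; this restores do_deploy and its help
# function from the disabled_commands entry exactly as the method above does
app.enable_command('deploy')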