def increment(version):
"""Return an incremented version string."""
release_version = os.environ.get("RELEASE_VERSION", None)
if release_version is not None:
return release_version
if isinstance(version, LegacyVersion):
msg = """{0} is considered a legacy version and does not
support automatic incrementing. Please bring your version
numbering into PEP440 standards and then it can be
automatically incremented.
"""
raise Exception(msg.format(version))
release_type = os.environ.get("RELEASE_TYPE", "micro")
v = version._version
# epoch
epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, "!")
pre_name, pre = VersionUtils.get_version_number(v, 3, None, "pre")
post_name, post = VersionUtils.get_version_number(v, 4, None, "post")
dev_name, dev = VersionUtils.get_version_number(v, 2, None, "dev")
_, major = VersionUtils.get_version_number(v[1], 0, 0)
_, minor = VersionUtils.get_version_number(v[1], 1, None)
_, micro = VersionUtils.get_version_number(v[1], 2, None)
# Handle dev/pre/post
if release_type == "pre":
micro, post, pre = VersionUtils.process_pre(micro, post, pre)
if release_type == "post":
dev, post = VersionUtils.process_post(dev, post)
if release_type == "dev":
dev = VersionUtils.process_dev(dev)
if release_type == "micro":
dev, micro, minor, post, pre = VersionUtils.process_micro(
dev, micro, minor, post, pre
)
if release_type == "minor":
dev, micro, minor, post, pre = VersionUtils.process_minor(
dev, micro, minor, post, pre
)
if release_type == "major":
dev, major, micro, minor, post, pre = VersionUtils.process_major(
dev, major, micro, minor, post, pre
)
# Handle Epoch
if release_type == "epoch":
dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(
dev, epoch, major, micro, minor, post, pre
)
local = "".join(v[5] or []) or None
version_list = [major, minor, micro]
if release_type not in ["epoch", "major", "minor", "micro", "pre"]:
version_list += list(v[1][3:])
version_string = ".".join([str(x) for x in version_list if x or x == 0])
if epoch:
version_string = str(epoch) + epoch_name + version_string
if pre is not None:
version_string = VersionUtils.calc_pre_version_string(
pre, pre_name, version_string
)
if post is not None:
version_string += "." + post_name + str(post)
if dev is not None:
version_string += "." + dev_name + str(dev)
if local is not None:
version_string += "." + str(local)
return version_string
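
A minimal usage sketch, assuming increment() is called with a packaging.version.Version (the function reads its private _version tuple) and that VersionUtils is importable as above; the exact bumped string depends on VersionUtils.process_minor and friends:

import os
from packaging.version import Version

# RELEASE_VERSION short-circuits all other logic and is returned verbatim.
os.environ["RELEASE_VERSION"] = "2.0.0"
assert increment(Version("1.4.1")) == "2.0.0"

# Without the override, RELEASE_TYPE (default "micro") selects which part is bumped.
del os.environ["RELEASE_VERSION"]
os.environ["RELEASE_TYPE"] = "minor"
bumped = increment(Version("1.4.1"))  # result depends on VersionUtils.process_minor
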
def convert_batchnorm(node, **kwargs):
"""Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
momentum = float(attrs.get("momentum", 0.9))
eps = float(attrs.get("eps", 0.001))
bn_node = onnx.helper.make_node(
"BatchNormalization",
input_nodes,
[name],
name=name,
epsilon=eps,
momentum=momentum,
# MXNet computes mean and variance per feature for batchnorm
# Default for onnx is across all spatial features. So disabling the parameter.
spatial=0
)
return [bn_node]
def discover_all_plugins(self):
"""
Load all plugins from dgit extension
"""
for v in pkg_resources.iter_entry_points('dgit.plugins'):
m = v.load()
m.setup(self)
def align_unaligned_seqs(seqs, moltype=DNA, params=None):
"""Returns an Alignment object from seqs.
seqs: SequenceCollection object, or data that can be used to build one.
moltype: a MolType object. DNA, RNA, or PROTEIN.
params: dict of parameters to pass in to the Muscle app controller.
Result will be an Alignment object.
"""
if not params:
params = {}
#create SequenceCollection object from seqs
seq_collection = SequenceCollection(seqs,MolType=moltype)
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = seq_collection.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
#get temporary filename
params.update({'-out':get_tmp_filename()})
#Create Muscle app.
app = Muscle(InputHandler='_input_as_multiline_string',\
params=params, WorkingDir=tempfile.gettempdir())
#Get results using int_map as input to app
res = app(int_map.toFasta())
#Get alignment as dict out of results
alignment = dict(parse_fasta(res['MuscleOut']))
#Make new dict mapping original IDs
new_alignment = {}
for k,v in alignment.items():
new_alignment[int_keys[k]]=v
#Create an Alignment object from alignment dict
new_alignment = Alignment(new_alignment,MolType=moltype)
#Clean up
res.cleanUp()
del(seq_collection,int_map,int_keys,app,res,alignment,params)
return new_alignment
def dilated_attention_1d(x,
hparams,
attention_type="masked_dilated_1d",
q_padding="VALID",
kv_padding="VALID",
gap_size=2):
"""Dilated 1d self attention."""
# self-attention
x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
with tf.variable_scope("masked_dilated_1d"):
y = common_attention.multihead_attention(
x,
None,
None,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=attention_type,
block_width=hparams.block_width,
block_length=hparams.block_length,
q_padding=q_padding,
kv_padding=kv_padding,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
gap_size=gap_size,
num_memory_blocks=hparams.num_memory_blocks,
name="self_attention")
if is_4d:
y = tf.reshape(y, x_shape)
y.set_shape([None, None, None, hparams.hidden_size])
return y
def hasDependencyRecursively(self, name, target=None, test_dependencies=False):
''' Check if this module, or any of its dependencies, has a
dependency with the specified name in its dependencies, or in
its targetDependencies corresponding to the specified target.
Note that if recursive dependencies are not installed, this test
may return a false negative.
'''
# checking dependencies recursively isn't entirely straightforward, so
# use the existing method to resolve them all before checking:
dependencies = self.getDependenciesRecursive(
target = target,
test = test_dependencies
)
return (name in dependencies)
def raw_conf_process_pyramid(raw_conf):
"""
Loads the process pyramid of a raw configuration.
Parameters
----------
raw_conf : dict
Raw mapchete configuration as dictionary.
Returns
-------
BufferedTilePyramid
"""
return BufferedTilePyramid(
raw_conf["pyramid"]["grid"],
metatiling=raw_conf["pyramid"].get("metatiling", 1),
pixelbuffer=raw_conf["pyramid"].get("pixelbuffer", 0)
)
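
A sketch of the expected input shape; the grid name "geodetic" is only an illustration, any grid accepted by BufferedTilePyramid would do:

raw_conf = {"pyramid": {"grid": "geodetic", "metatiling": 2}}
# "pixelbuffer" is absent, so the .get() fallback of 0 is used
pyramid = raw_conf_process_pyramid(raw_conf)
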
def tag(collector, image, artifact, **kwargs):
"""Tag an image!"""
if artifact in (None, "", NotSpecified):
raise BadOption("Please specify a tag using the artifact option")
if image.image_index in (None, "", NotSpecified):
raise BadOption("Please specify an image with an image_index option")
tag = image.image_name
if collector.configuration["harpoon"].tag is not NotSpecified:
tag = "{0}:{1}".format(tag, collector.configuration["harpoon"].tag)
else:
tag = "{0}:latest".format(tag)
images = image.harpoon.docker_api.images()
current_tags = chain.from_iterable(image_conf["RepoTags"] for image_conf in images if image_conf["RepoTags"] is not None)
if tag not in current_tags:
raise BadOption("Please build or pull the image down to your local cache before tagging it")
for image_conf in images:
if image_conf["RepoTags"] is not None:
if tag in image_conf["RepoTags"]:
image_id = image_conf["Id"]
break
log.info("Tagging {0} ({1}) as {2}".format(image_id, image.image_name, artifact))
image.harpoon.docker_api.tag(image_id, repository=image.image_name, tag=artifact, force=True)
image.tag = artifact
Syncer().push(image)
def get(self, name, default=None):
"""Return the value of the requested parameter or `default` if None."""
value = self.parameters.get(name)
self._processed_parameters.append(name)
if value is None:
return default
return value
def _zeropad(sig, N, axis=0):
"""pads with N zeros at the end of the signal, along given axis"""
# ensures concatenation dimension is the first
sig = np.moveaxis(sig, axis, 0)
# zero pad
out = np.zeros((sig.shape[0] + N,) + sig.shape[1:])
out[:sig.shape[0], ...] = sig
# put back axis in place
out = np.moveaxis(out, 0, axis)
return out
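
A small self-contained check of the padding behaviour described in the docstring:

import numpy as np

sig = np.ones((3, 2))
padded = _zeropad(sig, N=2, axis=0)
assert padded.shape == (5, 2)      # two extra rows appended along axis 0
assert np.all(padded[:3] == sig)   # original data is untouched
assert np.all(padded[3:] == 0)     # the appended rows are zeros
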
def instantiate(repo, validator_name=None, filename=None, rulesfiles=None):
"""
Instantiate the validation specification
"""
default_validators = repo.options.get('validator', {})
validators = {}
if validator_name is not None:
# Handle the case validator is specified..
if validator_name in default_validators:
validators = {
validator_name : default_validators[validator_name]
}
else:
validators = {
validator_name : {
'files': [],
'rules': {},
'rules-files': []
}
}
else:
validators = default_validators
#=========================================
# Insert the file names
#=========================================
if filename is not None:
matching_files = repo.find_matching_files([filename])
if len(matching_files) == 0:
print("Filename could not be found", filename)
raise Exception("Invalid filename pattern")
for v in validators:
validators[v]['files'] = matching_files
else:
# Instantiate the files from the patterns specified
for v in validators:
if 'files' not in validators[v]:
validators[v]['files'] = []
elif len(validators[v]['files']) > 0:
matching_files = repo.find_matching_files(validators[v]['files'])
validators[v]['files'] = matching_files
#=========================================
# Insert the rules files..
#=========================================
if rulesfiles is not None:
# Command lines...
matching_files = repo.find_matching_files([rulesfiles])
if len(matching_files) == 0:
print("Could not find matching rules files ({}) for {}".format(rulesfiles,v))
raise Exception("Invalid rules")
for v in validators:
validators[v]['rules-files'] = matching_files
else:
# Instantiate the files from the patterns specified
for v in validators:
if 'rules-files' not in validators[v]:
validators[v]['rules-files'] = []
else:
rulesfiles = validators[v]['rules-files']
matching_files = repo.find_matching_files(rulesfiles)
validators[v]['rules-files'] = matching_files
return validators
def erosion(mapfile, dilated):
"""
We will continue to work with the modified Mapfile.
If we wanted to start from scratch, we could simply reread it.
"""
ll = mappyfile.find(mapfile["layers"], "name", "line")
ll["status"] = "OFF"
pl = mappyfile.find(mapfile["layers"], "name", "polygon")
# make a deep copy of the polygon layer in the Map
# so any modification are made to this layer only
pl2 = deepcopy(pl)
pl2["name"] = "newpolygon"
mapfile["layers"].append(pl2)
dilated = dilated.buffer(-0.3)
pl2["features"][0]["wkt"] = dilated.wkt
style = pl["classes"][0]["styles"][0]
style["color"] = "#999999"
style["outlinecolor"] = "#b2b2b2" | We will continue to work with the modified Mapfile
If we wanted to start from scratch we could simply reread it | Below is the the instruction that describes the task:
### Input:
We will continue to work with the modified Mapfile
If we wanted to start from scratch we could simply reread it
### Response:
def erosion(mapfile, dilated):
"""
We will continue to work with the modified Mapfile
If we wanted to start from scratch we could simply reread it
"""
ll = mappyfile.find(mapfile["layers"], "name", "line")
ll["status"] = "OFF"
pl = mappyfile.find(mapfile["layers"], "name", "polygon")
# make a deep copy of the polygon layer in the Map
# so any modification are made to this layer only
pl2 = deepcopy(pl)
pl2["name"] = "newpolygon"
mapfile["layers"].append(pl2)
dilated = dilated.buffer(-0.3)
pl2["features"][0]["wkt"] = dilated.wkt
style = pl["classes"][0]["styles"][0]
style["color"] = "#999999"
style["outlinecolor"] = "#b2b2b2" |
def set_emission_scenario_setup(self, scenario, config_dict):
"""Set the emissions flags correctly.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run.
config_dict : dict
Dictionary with current input configurations which is to be validated and
updated where necessary.
Returns
-------
dict
Updated configuration
"""
self.write(scenario, self._scen_file_name)
# can be lazy in this line as fix backwards key handles errors for us
config_dict["file_emissionscenario"] = self._scen_file_name
config_dict = self._fix_any_backwards_emissions_scen_key_in_config(config_dict)
return config_dict
def _make_child_iterator(node, with_links, current_depth=0):
"""Returns an iterator over a node's children.
In case of using a trajectory as a run (setting 'v_crun') some sub branches
that do not belong to the run are blinded out.
"""
cdp1 = current_depth + 1
if with_links:
iterator = ((cdp1, x[0], x[1]) for x in node._children.items())
else:
leaves = ((cdp1, x[0], x[1]) for x in node._leaves.items())
groups = ((cdp1, y[0], y[1]) for y in node._groups.items())
iterator = itools.chain(groups, leaves)
return iterator
def date_to_um_date(date):
"""
Convert a date object to 'year, month, day, hour, minute, second.'
"""
assert date.hour == 0 and date.minute == 0 and date.second == 0
return [date.year, date.month, date.day, 0, 0, 0]
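
Despite the parameter name, the assertion requires hour/minute/second attributes, so the argument is assumed to be a midnight datetime rather than a date:

from datetime import datetime

assert date_to_um_date(datetime(2001, 7, 1)) == [2001, 7, 1, 0, 0, 0]
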
def _shift2boolean(self,
q_mesh_shift,
is_gamma_center=False,
tolerance=1e-5):
"""
Tolerance is used to judge a zero/half grid shift.
This value does not usually need to be changed.
"""
if q_mesh_shift is None:
shift = np.zeros(3, dtype='double')
else:
shift = np.array(q_mesh_shift, dtype='double')
diffby2 = np.abs(shift * 2 - np.rint(shift * 2))
if (diffby2 < 0.01).all(): # zero or half shift
diff = np.abs(shift - np.rint(shift))
if is_gamma_center:
is_shift = list(diff > 0.1)
else: # Monkhorst-pack
is_shift = list(np.logical_xor((diff > 0.1),
(self._mesh % 2 == 0)) * 1)
else:
is_shift = None
return is_shift
def next(self):
"""Returns the next input from this input reader, a record.
Returns:
The next input from this input reader in the form of a record read from
a LevelDB file.
Raises:
StopIteration: The ordered set of records has been exhausted.
"""
while True:
if not hasattr(self, "_cur_handle") or self._cur_handle is None:
# If there are no more files, StopIteration is raised here
self._cur_handle = super(GCSRecordInputReader, self).next()
if not hasattr(self, "_record_reader") or self._record_reader is None:
self._record_reader = records.RecordsReader(self._cur_handle)
try:
start_time = time.time()
content = self._record_reader.read()
self._slice_ctx.incr(self.COUNTER_IO_READ_BYTE, len(content))
self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
int(time.time() - start_time) * 1000)
return content
except EOFError:
self._cur_handle = None
self._record_reader = None
def rpoplpush(self, src, dst):
"""
RPOP a value off of the ``src`` list and atomically LPUSH it
on to the ``dst`` list. Returns the value.
"""
with self.pipe as pipe:
f = Future()
res = pipe.rpoplpush(self.redis_key(src), self.redis_key(dst))
def cb():
f.set(self.valueparse.decode(res.result))
pipe.on_execute(cb)
return f
def load_srm(filename):
"""Load a Project from an ``.srm`` file.
:param filename: the name of the file from which to load
:rtype: :py:class:`pylsdj.Project`
"""
# .srm files are just decompressed projects without headers
# In order to determine the file's size in compressed blocks, we have to
# compress it first
with open(filename, 'rb') as fp:
raw_data = fp.read()
compressed_data = filepack.compress(raw_data)
factory = BlockFactory()
writer = BlockWriter()
writer.write(compressed_data, factory)
size_in_blocks = len(factory.blocks)
# We'll give the file a dummy name ("SRMLOAD") and version, since we know
# neither
name = "SRMLOAD"
version = 0
return Project(name, version, size_in_blocks, raw_data)
def equiv(self, other):
"""Return True if other is an equivalent weighting.
Returns
-------
equivalent : bool
``True`` if ``other`` is a `Weighting` instance with the same
`Weighting.impl`, which yields the same result as this
weighting for any input, ``False`` otherwise. This is checked
by entry-wise comparison of arrays/constants.
"""
# Optimization for equality
if self == other:
return True
elif (not isinstance(other, Weighting) or
self.exponent != other.exponent):
return False
elif isinstance(other, MatrixWeighting):
return other.equiv(self)
elif isinstance(other, ConstWeighting):
return np.array_equiv(self.array, other.const)
else:
return np.array_equal(self.array, other.array)
def __value_compare(self, target):
"""
Compare the target against the expectation when arg_type is "VALUE"
Args: Anything
Return: Boolean
"""
if self.expectation == "__ANY__":
return True
elif self.expectation == "__DEFINED__":
return True if target is not None else False
elif self.expectation == "__TYPE__":
return True if type(target) == self.target_type else False #pylint:disable=unidiomatic-typecheck
elif self.expectation == "__INSTANCE__":
return True if isinstance(target, self.target_type.__class__) else False
else:
return True if target == self.expectation else False
def is_active(self):
"""Determines whether this plugin is active.
This plugin is active if any health pills information is present for any
run.
Returns:
A boolean. Whether this plugin is active.
"""
return bool(
self._grpc_port is not None and
self._event_multiplexer and
self._event_multiplexer.PluginRunToTagToContent(
constants.DEBUGGER_PLUGIN_NAME))
def writexlsx(self, path, sheetname="default"):
"""
Writes this table to an .xlsx file at the specified path.
If you'd like to specify a sheetname, you may do so.
If you'd like to write one workbook with different DataTables
for each sheet, import the `excel` function from acrylic. You
can see that code in `utils.py`.
Note that the outgoing file is an .xlsx file, so it makes sense to
name it that way.
"""
writer = ExcelRW.UnicodeWriter(path)
writer.set_active_sheet(sheetname)
writer.writerow(self.fields)
writer.writerows(self)
writer.save()
def process_bind_param(self, value, dialect):
"""
Returns the integer value of the usage mask bitmask. This value is
stored in the database.
Args:
value(list<enums.CryptographicUsageMask>): list of enums in the
usage mask
dialect(string): SQL dialect
"""
bitmask = 0x00
for e in value:
bitmask = bitmask | e.value
return bitmask
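
An illustration of the OR-fold with hypothetical mask values; the real values come from enums.CryptographicUsageMask and may differ:

from enum import Enum

class FakeMask(Enum):
    ENCRYPT = 0x04
    DECRYPT = 0x08

bitmask = 0x00
for e in [FakeMask.ENCRYPT, FakeMask.DECRYPT]:
    bitmask = bitmask | e.value
assert bitmask == 0x0C
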
def list_packages(conn=None):
'''
List the installed packages
'''
close = False
if conn is None:
close = True
conn = init()
ret = []
data = conn.execute('SELECT package FROM packages')
for pkg in data.fetchall():
ret.append(pkg)
if close:
conn.close()
return ret
def _ReadPartial(self, length):
"""Read as much as possible, but not more than length."""
chunk = self.offset // self.chunksize
chunk_offset = self.offset % self.chunksize
# If we're past the end of the file, we don't have a chunk to read from, so
# we can't read anymore. We return the empty string here so we can read off
# the end of a file without raising, and get as much data as is there.
if chunk > self.last_chunk:
return ""
available_to_read = min(length, self.chunksize - chunk_offset)
fd = self._GetChunkForReading(chunk)
fd.seek(chunk_offset)
result = fd.read(available_to_read)
self.offset += len(result)
return result
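
A worked example of the chunk arithmetic, assuming a chunksize of 100:

chunksize, offset, length = 100, 250, 80
chunk = offset // chunksize                                # 2
chunk_offset = offset % chunksize                          # 50
available_to_read = min(length, chunksize - chunk_offset)  # 50, the read stops at the chunk boundary
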
def store_atomic(self, value, ptr, ordering, align):
"""
Store value to pointer, with optional guaranteed alignment:
*ptr = name
"""
if not isinstance(ptr.type, types.PointerType):
raise TypeError("cannot store to value of type %s (%r): not a pointer"
% (ptr.type, str(ptr)))
if ptr.type.pointee != value.type:
raise TypeError("cannot store %s to %s: mismatching types"
% (value.type, ptr.type))
st = instructions.StoreAtomicInstr(self.block, value, ptr, ordering, align)
self._insert(st)
return st
async def verify_credentials(self):
"""Verify credentials with device."""
_, public_key = self.srp.initialize()
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b'\x01',
tlv8.TLV_PUBLIC_KEY: public_key})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
resp = _get_pairing_data(resp)
session_pub_key = resp[tlv8.TLV_PUBLIC_KEY]
encrypted = resp[tlv8.TLV_ENCRYPTED_DATA]
log_binary(_LOGGER,
'Device',
Public=self.credentials.ltpk,
Encrypted=encrypted)
encrypted_data = self.srp.verify1(
self.credentials, session_pub_key, encrypted)
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b'\x03',
tlv8.TLV_ENCRYPTED_DATA: encrypted_data})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
# TODO: check status code
self._output_key, self._input_key = self.srp.verify2()
def normalize_range(e, n):
"""
Return the range tuple normalized for an ``n``-element object.
The semantics of a range is slightly different than that of a slice.
In particular, a range is similar to a list in meaning (and on Py2 it was
eagerly expanded into a list). Thus we do not allow the range to generate
indices that would be invalid for an ``n``-array. Furthermore, we restrict
the range to produce only positive or only negative indices. For example,
``range(2, -2, -1)`` expands into ``[2, 1, 0, -1]``, and it is confusing
to treat the last "-1" as the last element in the list.
:param e: a range object representing a selector
:param n: number of elements in a sequence to which ``e`` is applied
:returns: tuple ``(start, count, step)`` derived from ``e``, or None
if the range is invalid.
"""
if e.step > 0:
count = max(0, (e.stop - e.start - 1) // e.step + 1)
else:
count = max(0, (e.start - e.stop - 1) // -e.step + 1)
if count == 0:
return (0, 0, e.step)
start = e.start
finish = e.start + (count - 1) * e.step
if start >= 0:
if start >= n or finish < 0 or finish >= n:
return None
else:
start += n
finish += n
if start < 0 or start >= n or finish < 0 or finish >= n:
return None
assert count >= 0
return (start, count, e.step)
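
Two worked examples of the normalization:

# range(2, 10, 3) over 10 elements expands to [2, 5, 8]: start 2, count 3, step 3.
assert normalize_range(range(2, 10, 3), 10) == (2, 3, 3)

# range(2, -2, -1) would expand to [2, 1, 0, -1], mixing positive and negative
# indices, so it is rejected as described in the docstring.
assert normalize_range(range(2, -2, -1), 5) is None
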
def _control_longitude(self):
''' Control on longitude values '''
if self.lonm < 0.0:
self.lonm = 360.0 + self.lonm
if self.lonM < 0.0:
self.lonM = 360.0 + self.lonM
if self.lonm > 360.0:
self.lonm = self.lonm - 360.0
if self.lonM > 360.0:
self.lonM = self.lonM - 360.0
def cylinder(radius=1.0,
height=1.0,
sections=32,
segment=None,
transform=None,
**kwargs):
"""
Create a mesh of a cylinder along Z centered at the origin.
Parameters
----------
radius : float
The radius of the cylinder
height : float
The height of the cylinder
sections : int
How many pie wedges should the cylinder have
segment : (2, 3) float
Endpoints of axis, overrides transform and height
transform : (4, 4) float
Transform to apply
**kwargs:
passed to Trimesh to create cylinder
Returns
----------
cylinder: trimesh.Trimesh
Resulting mesh of a cylinder
"""
if segment is not None:
segment = np.asanyarray(segment, dtype=np.float64)
if segment.shape != (2, 3):
raise ValueError('segment must be 2 3D points!')
vector = segment[1] - segment[0]
# override height with segment length
height = np.linalg.norm(vector)
# point in middle of line
midpoint = segment[0] + (vector * 0.5)
# align Z with our desired direction
rotation = align_vectors([0, 0, 1], vector)
# translate to midpoint of segment
translation = transformations.translation_matrix(midpoint)
# compound the rotation and translation
transform = np.dot(translation, rotation)
# create a 2D pie out of wedges
theta = np.linspace(0, np.pi * 2, sections)
vertices = np.column_stack((np.sin(theta),
np.cos(theta))) * radius
# the single vertex at the center of the circle
# we're overwriting the duplicated start/end vertex
vertices[0] = [0, 0]
# whangle indexes into a triangulation of the pie wedges
index = np.arange(1, len(vertices) + 1).reshape((-1, 1))
index[-1] = 1
faces = np.tile(index, (1, 2)).reshape(-1)[1:-1].reshape((-1, 2))
faces = np.column_stack((np.zeros(len(faces), dtype=int), faces))
# extrude the 2D triangulation into a Trimesh object
cylinder = extrude_triangulation(vertices=vertices,
faces=faces,
height=height,
**kwargs)
# the extrusion was along +Z, so move the cylinder
# center of mass back to the origin
cylinder.vertices[:, 2] -= height * .5
if transform is not None:
# apply a transform here before any cache stuff is generated
# and would have to be dumped after the transform is applied
cylinder.apply_transform(transform)
return cylinder | Create a mesh of a cylinder along Z centered at the origin.
Parameters
----------
radius : float
The radius of the cylinder
height : float
The height of the cylinder
sections : int
How many pie wedges should the cylinder have
segment : (2, 3) float
Endpoints of axis, overrides transform and height
transform : (4, 4) float
Transform to apply
**kwargs:
passed to Trimesh to create cylinder
Returns
----------
cylinder: trimesh.Trimesh
Resulting mesh of a cylinder | Below is the the instruction that describes the task:
### Input:
Create a mesh of a cylinder along Z centered at the origin.
Parameters
----------
radius : float
The radius of the cylinder
height : float
The height of the cylinder
sections : int
How many pie wedges should the cylinder have
segment : (2, 3) float
Endpoints of axis, overrides transform and height
transform : (4, 4) float
Transform to apply
**kwargs:
passed to Trimesh to create cylinder
Returns
----------
cylinder: trimesh.Trimesh
Resulting mesh of a cylinder
### Response:
def cylinder(radius=1.0,
height=1.0,
sections=32,
segment=None,
transform=None,
**kwargs):
"""
Create a mesh of a cylinder along Z centered at the origin.
Parameters
----------
radius : float
The radius of the cylinder
height : float
The height of the cylinder
sections : int
How many pie wedges should the cylinder have
segment : (2, 3) float
Endpoints of axis, overrides transform and height
transform : (4, 4) float
Transform to apply
**kwargs:
passed to Trimesh to create cylinder
Returns
----------
cylinder: trimesh.Trimesh
Resulting mesh of a cylinder
"""
if segment is not None:
segment = np.asanyarray(segment, dtype=np.float64)
if segment.shape != (2, 3):
raise ValueError('segment must be 2 3D points!')
vector = segment[1] - segment[0]
# override height with segment length
height = np.linalg.norm(vector)
# point in middle of line
midpoint = segment[0] + (vector * 0.5)
# align Z with our desired direction
rotation = align_vectors([0, 0, 1], vector)
# translate to midpoint of segment
translation = transformations.translation_matrix(midpoint)
# compound the rotation and translation
transform = np.dot(translation, rotation)
# create a 2D pie out of wedges
theta = np.linspace(0, np.pi * 2, sections)
vertices = np.column_stack((np.sin(theta),
np.cos(theta))) * radius
# the single vertex at the center of the circle
# we're overwriting the duplicated start/end vertex
vertices[0] = [0, 0]
# whangle indexes into a triangulation of the pie wedges
index = np.arange(1, len(vertices) + 1).reshape((-1, 1))
index[-1] = 1
faces = np.tile(index, (1, 2)).reshape(-1)[1:-1].reshape((-1, 2))
faces = np.column_stack((np.zeros(len(faces), dtype=np.int), faces))
# extrude the 2D triangulation into a Trimesh object
cylinder = extrude_triangulation(vertices=vertices,
faces=faces,
height=height,
**kwargs)
# the extrusion was along +Z, so move the cylinder
# center of mass back to the origin
cylinder.vertices[:, 2] -= height * .5
if transform is not None:
# apply a transform here before any cache stuff is generated
# and would have to be dumped after the transform is applied
cylinder.apply_transform(transform)
return cylinder |
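A brief usage sketch of the cylinder() function above; the segment endpoints are made up for illustration and only the signature shown here is assumed:
>>> seg = [[0, 0, 0], [0, 0, 2.0]]            # hypothetical axis endpoints
>>> mesh = cylinder(radius=0.5, sections=64, segment=seg)
>>> # height is taken from the segment length (2.0), and the transform built from
>>> # align_vectors/translation_matrix moves the extruded mesh onto that axis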
def map_element(self, obj, name, event):
""" Handles mapping elements to diagram components """
canvas = self.diagram.diagram_canvas
parser = XDotParser()
for element in event.added:
logger.debug("Mapping new element [%s] to diagram node" % element)
for node_mapping in self.nodes:
ct = name[:-6] #strip '_items'
if node_mapping.containment_trait == ct:
dot_attrs = node_mapping.dot_node
dot = Dot()
graph_node = Node(str(id(element)))
self._style_node(graph_node, dot_attrs)
dot.add_node(graph_node)
xdot = graph_from_dot_data(dot.create(self.program,"xdot"))
diagram_nodes = parser.parse_nodes(xdot)#.get_node_list())
for dn in diagram_nodes:
if dn is not None:
dn.element = element
# Tools
for tool in node_mapping.tools:
dn.tools.append(tool(dn))
canvas.add(dn)
canvas.request_redraw()
for element in event.removed:
logger.debug("Unmapping element [%s] from diagram" % element)
for component in canvas.components:
if element == component.element:
canvas.remove(component)
canvas.request_redraw()
break | Handles mapping elements to diagram components | Below is the the instruction that describes the task:
### Input:
Handles mapping elements to diagram components
### Response:
def map_element(self, obj, name, event):
""" Handles mapping elements to diagram components """
canvas = self.diagram.diagram_canvas
parser = XDotParser()
for element in event.added:
logger.debug("Mapping new element [%s] to diagram node" % element)
for node_mapping in self.nodes:
ct = name[:-6] #strip '_items'
if node_mapping.containment_trait == ct:
dot_attrs = node_mapping.dot_node
dot = Dot()
graph_node = Node(str(id(element)))
self._style_node(graph_node, dot_attrs)
dot.add_node(graph_node)
xdot = graph_from_dot_data(dot.create(self.program,"xdot"))
diagram_nodes = parser.parse_nodes(xdot)#.get_node_list())
for dn in diagram_nodes:
if dn is not None:
dn.element = element
# Tools
for tool in node_mapping.tools:
dn.tools.append(tool(dn))
canvas.add(dn)
canvas.request_redraw()
for element in event.removed:
logger.debug("Unmapping element [%s] from diagram" % element)
for component in canvas.components:
if element == component.element:
canvas.remove(component)
canvas.request_redraw()
break |
def _process_glsl_template(template, colors):
"""Replace $color_i by color #i in the GLSL template."""
for i in range(len(colors) - 1, -1, -1):
color = colors[i]
assert len(color) == 4
vec4_color = 'vec4(%.3f, %.3f, %.3f, %.3f)' % tuple(color)
template = template.replace('$color_%d' % i, vec4_color)
    return template | Replace $color_i by color #i in the GLSL template. | Below is the instruction that describes the task:
### Input:
Replace $color_i by color #i in the GLSL template.
### Response:
def _process_glsl_template(template, colors):
"""Replace $color_i by color #i in the GLSL template."""
for i in range(len(colors) - 1, -1, -1):
color = colors[i]
assert len(color) == 4
vec4_color = 'vec4(%.3f, %.3f, %.3f, %.3f)' % tuple(color)
template = template.replace('$color_%d' % i, vec4_color)
return template |
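A small worked example of the substitution above; the template string and colors are made up for illustration:
>>> tpl = "gl_FragColor = mix($color_0, $color_1, t);"
>>> _process_glsl_template(tpl, [(1., 0., 0., 1.), (0., 0., 1., .5)])
'gl_FragColor = mix(vec4(1.000, 0.000, 0.000, 1.000), vec4(0.000, 0.000, 1.000, 0.500), t);'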
def _bumpUpWeakColumns(self):
"""
This method increases the permanence values of synapses of columns whose
activity level has been too low. Such columns are identified by having an
overlap duty cycle that drops too much below those of their peers. The
permanence values for such columns are increased.
"""
weakColumns = numpy.where(self._overlapDutyCycles
< self._minOverlapDutyCycles)[0]
for columnIndex in weakColumns:
perm = self._permanences[columnIndex].astype(realDType)
maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0]
perm[maskPotential] += self._synPermBelowStimulusInc
self._updatePermanencesForColumn(perm, columnIndex, raisePerm=False) | This method increases the permanence values of synapses of columns whose
activity level has been too low. Such columns are identified by having an
overlap duty cycle that drops too much below those of their peers. The
    permanence values for such columns are increased. | Below is the instruction that describes the task:
### Input:
This method increases the permanence values of synapses of columns whose
activity level has been too low. Such columns are identified by having an
overlap duty cycle that drops too much below those of their peers. The
permanence values for such columns are increased.
### Response:
def _bumpUpWeakColumns(self):
"""
This method increases the permanence values of synapses of columns whose
activity level has been too low. Such columns are identified by having an
overlap duty cycle that drops too much below those of their peers. The
permanence values for such columns are increased.
"""
weakColumns = numpy.where(self._overlapDutyCycles
< self._minOverlapDutyCycles)[0]
for columnIndex in weakColumns:
perm = self._permanences[columnIndex].astype(realDType)
maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0]
perm[maskPotential] += self._synPermBelowStimulusInc
self._updatePermanencesForColumn(perm, columnIndex, raisePerm=False) |
def set_state(key, value, namespace=None, table_name=None, environment=None,
layer=None, stage=None, shard_id=None, consistent=True,
serializer=json.dumps, wait_exponential_multiplier=500,
wait_exponential_max=5000, stop_max_delay=10000, ttl=None):
"""Set Lambda state value."""
if table_name is None:
table_name = _state_table_name(environment=environment, layer=layer,
stage=stage)
if not table_name:
msg = ("Can't produce state table name: unable to set state "
"item '{}'".format(key))
logger.error(msg)
raise StateTableError(msg)
return
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(table_name)
logger.info("Putting {} -> {} in DynamoDB table {}".format(key, value,
table_name))
if serializer:
try:
value = serializer(value)
except TypeError:
logger.error(
"Value for state key '{}' is not json-serializable".format(
key))
raise
if namespace:
key = "{}:{}".format(namespace, key)
if shard_id:
key = "{}:{}".format(shard_id, key)
item = {"id": key, "value": value}
if ttl:
item["ttl"] = {"N": str(int(time.time() + ttl))}
@retry(retry_on_exception=_is_critical_exception,
wait_exponential_multiplier=500,
wait_exponential_max=5000,
stop_max_delay=10000)
def put_item():
try:
return table.put_item(Item=item)
except Exception as err:
if _is_dynamodb_critical_exception(err):
raise CriticalError(err)
else:
raise
resp = put_item()
logger.info("Response from DynamoDB: '{}'".format(resp))
    return resp | Set Lambda state value. | Below is the instruction that describes the task:
### Input:
Set Lambda state value.
### Response:
def set_state(key, value, namespace=None, table_name=None, environment=None,
layer=None, stage=None, shard_id=None, consistent=True,
serializer=json.dumps, wait_exponential_multiplier=500,
wait_exponential_max=5000, stop_max_delay=10000, ttl=None):
"""Set Lambda state value."""
if table_name is None:
table_name = _state_table_name(environment=environment, layer=layer,
stage=stage)
if not table_name:
msg = ("Can't produce state table name: unable to set state "
"item '{}'".format(key))
logger.error(msg)
raise StateTableError(msg)
return
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(table_name)
logger.info("Putting {} -> {} in DynamoDB table {}".format(key, value,
table_name))
if serializer:
try:
value = serializer(value)
except TypeError:
logger.error(
"Value for state key '{}' is not json-serializable".format(
key))
raise
if namespace:
key = "{}:{}".format(namespace, key)
if shard_id:
key = "{}:{}".format(shard_id, key)
item = {"id": key, "value": value}
if ttl:
item["ttl"] = {"N": str(int(time.time() + ttl))}
@retry(retry_on_exception=_is_critical_exception,
wait_exponential_multiplier=500,
wait_exponential_max=5000,
stop_max_delay=10000)
def put_item():
try:
return table.put_item(Item=item)
except Exception as err:
if _is_dynamodb_critical_exception(err):
raise CriticalError(err)
else:
raise
resp = put_item()
logger.info("Response from DynamoDB: '{}'".format(resp))
return resp |
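A hypothetical call sketch for the helper above; the key, value, and table name are placeholders, and a reachable DynamoDB table plus AWS credentials are assumed:
>>> set_state("last_processed_ts", {"ts": 1514764800},
...           namespace="ingest", table_name="my-layer-state", ttl=3600)
>>> # the value goes through json.dumps before table.put_item is called,
>>> # and the item id is stored as "ingest:last_processed_ts"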
def add_instance(self, inst, index=None):
"""
Adds the specified instance to the dataset.
:param inst: the Instance to add
:type inst: Instance
:param index: the 0-based index where to add the Instance
:type index: int
"""
if index is None:
self.__append_instance(inst.jobject)
else:
self.__insert_instance(index, inst.jobject) | Adds the specified instance to the dataset.
:param inst: the Instance to add
:type inst: Instance
:param index: the 0-based index where to add the Instance
        :type index: int | Below is the instruction that describes the task:
### Input:
Adds the specified instance to the dataset.
:param inst: the Instance to add
:type inst: Instance
:param index: the 0-based index where to add the Instance
:type index: int
### Response:
def add_instance(self, inst, index=None):
"""
Adds the specified instance to the dataset.
:param inst: the Instance to add
:type inst: Instance
:param index: the 0-based index where to add the Instance
:type index: int
"""
if index is None:
self.__append_instance(inst.jobject)
else:
self.__insert_instance(index, inst.jobject) |
def add_mixl_specific_results_to_estimation_res(estimator, results_dict):
"""
Stores particular items in the results dictionary that are unique to mixed
logit-type models. In particular, this function calculates and adds
`sequence_probs` and `expanded_sequence_probs` to the results dictionary.
The `constrained_pos` object is also stored to the results_dict.
Parameters
----------
estimator : an instance of the MixedEstimator class.
Should contain a `choice_vector` attribute that is a 1D ndarray
representing the choices made for this model's dataset. Should also
contain a `rows_to_mixers` attribute that maps each row of the long
format data to a unit of observation that the mixing is being performed
over.
results_dict : dict.
This dictionary should be the dictionary returned from
scipy.optimize.minimize. In particular, it should have the following
`long_probs` key.
Returns
-------
results_dict.
"""
# Get the probability of each sequence of choices, given the draws
prob_res = mlc.calc_choice_sequence_probs(results_dict["long_probs"],
estimator.choice_vector,
estimator.rows_to_mixers,
return_type='all')
# Add the various items to the results_dict.
results_dict["simulated_sequence_probs"] = prob_res[0]
results_dict["expanded_sequence_probs"] = prob_res[1]
return results_dict | Stores particular items in the results dictionary that are unique to mixed
logit-type models. In particular, this function calculates and adds
`sequence_probs` and `expanded_sequence_probs` to the results dictionary.
The `constrained_pos` object is also stored to the results_dict.
Parameters
----------
estimator : an instance of the MixedEstimator class.
Should contain a `choice_vector` attribute that is a 1D ndarray
representing the choices made for this model's dataset. Should also
contain a `rows_to_mixers` attribute that maps each row of the long
format data to a unit of observation that the mixing is being performed
over.
results_dict : dict.
This dictionary should be the dictionary returned from
scipy.optimize.minimize. In particular, it should have the following
`long_probs` key.
Returns
-------
    results_dict. | Below is the instruction that describes the task:
### Input:
Stores particular items in the results dictionary that are unique to mixed
logit-type models. In particular, this function calculates and adds
`sequence_probs` and `expanded_sequence_probs` to the results dictionary.
The `constrained_pos` object is also stored to the results_dict.
Parameters
----------
estimator : an instance of the MixedEstimator class.
Should contain a `choice_vector` attribute that is a 1D ndarray
representing the choices made for this model's dataset. Should also
contain a `rows_to_mixers` attribute that maps each row of the long
format data to a unit of observation that the mixing is being performed
over.
results_dict : dict.
This dictionary should be the dictionary returned from
scipy.optimize.minimize. In particular, it should have the following
`long_probs` key.
Returns
-------
results_dict.
### Response:
def add_mixl_specific_results_to_estimation_res(estimator, results_dict):
"""
Stores particular items in the results dictionary that are unique to mixed
logit-type models. In particular, this function calculates and adds
`sequence_probs` and `expanded_sequence_probs` to the results dictionary.
The `constrained_pos` object is also stored to the results_dict.
Parameters
----------
estimator : an instance of the MixedEstimator class.
Should contain a `choice_vector` attribute that is a 1D ndarray
representing the choices made for this model's dataset. Should also
contain a `rows_to_mixers` attribute that maps each row of the long
format data to a unit of observation that the mixing is being performed
over.
results_dict : dict.
This dictionary should be the dictionary returned from
scipy.optimize.minimize. In particular, it should have the following
`long_probs` key.
Returns
-------
results_dict.
"""
# Get the probability of each sequence of choices, given the draws
prob_res = mlc.calc_choice_sequence_probs(results_dict["long_probs"],
estimator.choice_vector,
estimator.rows_to_mixers,
return_type='all')
# Add the various items to the results_dict.
results_dict["simulated_sequence_probs"] = prob_res[0]
results_dict["expanded_sequence_probs"] = prob_res[1]
return results_dict |
def measure_all(fbasename=None, log=None, ml_version=ml_version):
"""Measures mesh geometry, aabb and topology."""
ml_script1_file = 'TEMP3D_measure_gAndT.mlx'
if ml_version == '1.3.4BETA':
file_out = 'TEMP3D_aabb.xyz'
else:
file_out = None
ml_script1 = mlx.FilterScript(file_in=fbasename, file_out=file_out, ml_version=ml_version)
compute.measure_geometry(ml_script1)
compute.measure_topology(ml_script1)
ml_script1.save_to_file(ml_script1_file)
ml_script1.run_script(log=log, script_file=ml_script1_file)
geometry = ml_script1.geometry
topology = ml_script1.topology
if ml_version == '1.3.4BETA':
if log is not None:
log_file = open(log, 'a')
log_file.write(
'***Axis Aligned Bounding Results for file "%s":\n' %
fbasename)
log_file.close()
aabb = measure_aabb(file_out, log)
else:
aabb = geometry['aabb']
    return aabb, geometry, topology | Measures mesh geometry, aabb and topology. | Below is the instruction that describes the task:
### Input:
Measures mesh geometry, aabb and topology.
### Response:
def measure_all(fbasename=None, log=None, ml_version=ml_version):
"""Measures mesh geometry, aabb and topology."""
ml_script1_file = 'TEMP3D_measure_gAndT.mlx'
if ml_version == '1.3.4BETA':
file_out = 'TEMP3D_aabb.xyz'
else:
file_out = None
ml_script1 = mlx.FilterScript(file_in=fbasename, file_out=file_out, ml_version=ml_version)
compute.measure_geometry(ml_script1)
compute.measure_topology(ml_script1)
ml_script1.save_to_file(ml_script1_file)
ml_script1.run_script(log=log, script_file=ml_script1_file)
geometry = ml_script1.geometry
topology = ml_script1.topology
if ml_version == '1.3.4BETA':
if log is not None:
log_file = open(log, 'a')
log_file.write(
'***Axis Aligned Bounding Results for file "%s":\n' %
fbasename)
log_file.close()
aabb = measure_aabb(file_out, log)
else:
aabb = geometry['aabb']
return aabb, geometry, topology |
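A hypothetical invocation of the wrapper above; the filenames are placeholders and a working MeshLab/meshlabserver setup is assumed by the underlying FilterScript run:
>>> aabb, geometry, topology = measure_all('model.ply', log='measure.log')
>>> # geometry and topology are the dictionaries populated by the filter run;
>>> # on 1.3.4BETA the bounding box is re-read from the TEMP3D_aabb.xyz output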
def get_fqhostname():
'''
Returns the fully qualified hostname
'''
# try getaddrinfo()
fqdn = None
try:
addrinfo = socket.getaddrinfo(
socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM,
socket.SOL_TCP, socket.AI_CANONNAME
)
for info in addrinfo:
# info struct [family, socktype, proto, canonname, sockaddr]
# On Windows `canonname` can be an empty string
# This can cause the function to return `None`
if len(info) > 3 and info[3]:
fqdn = info[3]
break
except socket.gaierror:
pass # NOTE: this used to log.error() but it was later disabled
except socket.error as err:
log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err)
if fqdn is None:
fqdn = socket.getfqdn()
    return fqdn | Returns the fully qualified hostname | Below is the instruction that describes the task:
### Input:
Returns the fully qualified hostname
### Response:
def get_fqhostname():
'''
Returns the fully qualified hostname
'''
# try getaddrinfo()
fqdn = None
try:
addrinfo = socket.getaddrinfo(
socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM,
socket.SOL_TCP, socket.AI_CANONNAME
)
for info in addrinfo:
# info struct [family, socktype, proto, canonname, sockaddr]
# On Windows `canonname` can be an empty string
# This can cause the function to return `None`
if len(info) > 3 and info[3]:
fqdn = info[3]
break
except socket.gaierror:
pass # NOTE: this used to log.error() but it was later disabled
except socket.error as err:
log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err)
if fqdn is None:
fqdn = socket.getfqdn()
return fqdn |
def delete_events(
self,
project_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes all error events of a given project.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> response = client.delete_events(project_name)
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_events" not in self._inner_api_calls:
self._inner_api_calls[
"delete_events"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_events,
default_retry=self._method_configs["DeleteEvents"].retry,
default_timeout=self._method_configs["DeleteEvents"].timeout,
client_info=self._client_info,
)
request = error_stats_service_pb2.DeleteEventsRequest(project_name=project_name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("project_name", project_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["delete_events"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Deletes all error events of a given project.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> response = client.delete_events(project_name)
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid. | Below is the instruction that describes the task:
### Input:
Deletes all error events of a given project.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> response = client.delete_events(project_name)
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def delete_events(
self,
project_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes all error events of a given project.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> response = client.delete_events(project_name)
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_events" not in self._inner_api_calls:
self._inner_api_calls[
"delete_events"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_events,
default_retry=self._method_configs["DeleteEvents"].retry,
default_timeout=self._method_configs["DeleteEvents"].timeout,
client_info=self._client_info,
)
request = error_stats_service_pb2.DeleteEventsRequest(project_name=project_name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("project_name", project_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["delete_events"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
def size_of_varint(value):
""" Number of bytes needed to encode an integer in variable-length format.
"""
value = (value << 1) ^ (value >> 63)
if value <= 0x7f:
return 1
if value <= 0x3fff:
return 2
if value <= 0x1fffff:
return 3
if value <= 0xfffffff:
return 4
if value <= 0x7ffffffff:
return 5
if value <= 0x3ffffffffff:
return 6
if value <= 0x1ffffffffffff:
return 7
if value <= 0xffffffffffffff:
return 8
if value <= 0x7fffffffffffffff:
return 9
    return 10 | Number of bytes needed to encode an integer in variable-length format. | Below is the instruction that describes the task:
### Input:
Number of bytes needed to encode an integer in variable-length format.
### Response:
def size_of_varint(value):
""" Number of bytes needed to encode an integer in variable-length format.
"""
value = (value << 1) ^ (value >> 63)
if value <= 0x7f:
return 1
if value <= 0x3fff:
return 2
if value <= 0x1fffff:
return 3
if value <= 0xfffffff:
return 4
if value <= 0x7ffffffff:
return 5
if value <= 0x3ffffffffff:
return 6
if value <= 0x1ffffffffffff:
return 7
if value <= 0xffffffffffffff:
return 8
if value <= 0x7fffffffffffffff:
return 9
return 10 |
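A few worked values for the helper above: the first line zig-zag encodes the signed integer, so small negative numbers stay as short as small positive ones:
>>> size_of_varint(0), size_of_varint(-1), size_of_varint(63), size_of_varint(64)
(1, 1, 1, 2)
>>> size_of_varint(-2 ** 63)   # worst case still fits in ten bytes
10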
def next_chunk(self):
"""
Returns the chunk immediately following (and adjacent to) this one.
"""
raise NotImplementedError("%s not implemented for %s" % (self.next_chunk.__func__.__name__,
                                                                 self.__class__.__name__)) | Returns the chunk immediately following (and adjacent to) this one. | Below is the instruction that describes the task:
### Input:
Returns the chunk immediately following (and adjacent to) this one.
### Response:
def next_chunk(self):
"""
Returns the chunk immediately following (and adjacent to) this one.
"""
raise NotImplementedError("%s not implemented for %s" % (self.next_chunk.__func__.__name__,
self.__class__.__name__)) |
def get_cell_length(flow_model):
"""Get flow direction induced cell length dict.
Args:
flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
"""
assert flow_model.lower() in FlowModelConst.d8_lens
return FlowModelConst.d8_lens.get(flow_model.lower()) | Get flow direction induced cell length dict.
Args:
        flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported. | Below is the instruction that describes the task:
### Input:
Get flow direction induced cell length dict.
Args:
flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
### Response:
def get_cell_length(flow_model):
"""Get flow direction induced cell length dict.
Args:
flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
"""
assert flow_model.lower() in FlowModelConst.d8_lens
return FlowModelConst.d8_lens.get(flow_model.lower()) |
def turbulent_Nunner(Re, Pr, fd, fd_smooth):
r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [2]_ as shown in [1]_.
.. math::
Nu = \frac{RePr(f/8)}{1 + 1.5Re^{-1/8}Pr^{-1/6}[Pr(f/f_s)-1]}
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
fd : float
Darcy friction factor [-]
fd_smooth : float
Darcy friction factor of a smooth pipe [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Valid for Pr ≅ 0.7; bad results for Pr > 1.
Examples
--------
>>> turbulent_Nunner(Re=1E5, Pr=0.7, fd=0.0185, fd_smooth=0.005)
101.15841010919947
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
    .. [2] W. Nunner, "Wärmeübergang und Druckabfall in Rauhen Rohren,"
VDI-Forschungsheft 445, ser. B,(22): 5-39, 1956
'''
return Re*Pr*fd/8./(1 + 1.5*Re**-0.125*Pr**(-1/6.)*(Pr*fd/fd_smooth - 1.)) | r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [2]_ as shown in [1]_.
.. math::
Nu = \frac{RePr(f/8)}{1 + 1.5Re^{-1/8}Pr^{-1/6}[Pr(f/f_s)-1]}
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
fd : float
Darcy friction factor [-]
fd_smooth : float
Darcy friction factor of a smooth pipe [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Valid for Pr ≅ 0.7; bad results for Pr > 1.
Examples
--------
>>> turbulent_Nunner(Re=1E5, Pr=0.7, fd=0.0185, fd_smooth=0.005)
101.15841010919947
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
    .. [2] W. Nunner, "Wärmeübergang und Druckabfall in Rauhen Rohren,"
       VDI-Forschungsheft 445, ser. B,(22): 5-39, 1956 | Below is the instruction that describes the task:
### Input:
r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [2]_ as shown in [1]_.
.. math::
Nu = \frac{RePr(f/8)}{1 + 1.5Re^{-1/8}Pr^{-1/6}[Pr(f/f_s)-1]}
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
fd : float
Darcy friction factor [-]
fd_smooth : float
Darcy friction factor of a smooth pipe [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Valid for Pr ≅ 0.7; bad results for Pr > 1.
Examples
--------
>>> turbulent_Nunner(Re=1E5, Pr=0.7, fd=0.0185, fd_smooth=0.005)
101.15841010919947
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
    .. [2] W. Nunner, "Wärmeübergang und Druckabfall in Rauhen Rohren,"
VDI-Forschungsheft 445, ser. B,(22): 5-39, 1956
### Response:
def turbulent_Nunner(Re, Pr, fd, fd_smooth):
r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [2]_ as shown in [1]_.
.. math::
Nu = \frac{RePr(f/8)}{1 + 1.5Re^{-1/8}Pr^{-1/6}[Pr(f/f_s)-1]}
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
fd : float
Darcy friction factor [-]
fd_smooth : float
Darcy friction factor of a smooth pipe [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Valid for Pr ≅ 0.7; bad results for Pr > 1.
Examples
--------
>>> turbulent_Nunner(Re=1E5, Pr=0.7, fd=0.0185, fd_smooth=0.005)
101.15841010919947
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
    .. [2] W. Nunner, "Wärmeübergang und Druckabfall in Rauhen Rohren,"
VDI-Forschungsheft 445, ser. B,(22): 5-39, 1956
'''
return Re*Pr*fd/8./(1 + 1.5*Re**-0.125*Pr**(-1/6.)*(Pr*fd/fd_smooth - 1.)) |
def match_config(filters, device, kind, default):
"""
Matches devices against multiple :class:`DeviceFilter`s.
:param list filters: device filters
:param Device device: device to be mounted
:param str kind: value kind
:param default: default value
:returns: value of the first matching filter
"""
if device is None:
return default
matches = (f.value(kind, device)
for f in filters
if f.has_value(kind) and f.match(device))
return next(matches, default) | Matches devices against multiple :class:`DeviceFilter`s.
:param list filters: device filters
:param Device device: device to be mounted
:param str kind: value kind
:param default: default value
    :returns: value of the first matching filter | Below is the instruction that describes the task:
### Input:
Matches devices against multiple :class:`DeviceFilter`s.
:param list filters: device filters
:param Device device: device to be mounted
:param str kind: value kind
:param default: default value
:returns: value of the first matching filter
### Response:
def match_config(filters, device, kind, default):
"""
Matches devices against multiple :class:`DeviceFilter`s.
:param list filters: device filters
:param Device device: device to be mounted
:param str kind: value kind
:param default: default value
:returns: value of the first matching filter
"""
if device is None:
return default
matches = (f.value(kind, device)
for f in filters
if f.has_value(kind) and f.match(device))
return next(matches, default) |
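A small illustration of the lookup above, using a made-up stand-in for the filter objects (only the has_value/match/value methods the generator relies on are assumed):
>>> class AlwaysIgnore:
...     def has_value(self, kind): return kind == 'ignore'
...     def match(self, device): return True
...     def value(self, kind, device): return True
>>> match_config([AlwaysIgnore()], device=object(), kind='ignore', default=False)
True
>>> match_config([AlwaysIgnore()], device=None, kind='ignore', default=False)
False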
def set_lic_text(self, doc, text):
"""Sets license extracted text.
Raises SPDXValueError if text is not free form text.
Raises OrderError if no license ID defined.
"""
if self.has_extr_lic(doc):
if not self.extr_text_set:
self.extr_text_set = True
if validations.validate_is_free_form_text(text):
self.extr_lic(doc).text = str_from_text(text)
return True
else:
raise SPDXValueError('ExtractedLicense::text')
else:
raise CardinalityError('ExtractedLicense::text')
else:
raise OrderError('ExtractedLicense::text') | Sets license extracted text.
Raises SPDXValueError if text is not free form text.
        Raises OrderError if no license ID defined. | Below is the instruction that describes the task:
### Input:
Sets license extracted text.
Raises SPDXValueError if text is not free form text.
Raises OrderError if no license ID defined.
### Response:
def set_lic_text(self, doc, text):
"""Sets license extracted text.
Raises SPDXValueError if text is not free form text.
Raises OrderError if no license ID defined.
"""
if self.has_extr_lic(doc):
if not self.extr_text_set:
self.extr_text_set = True
if validations.validate_is_free_form_text(text):
self.extr_lic(doc).text = str_from_text(text)
return True
else:
raise SPDXValueError('ExtractedLicense::text')
else:
raise CardinalityError('ExtractedLicense::text')
else:
raise OrderError('ExtractedLicense::text') |
def show_inputs(client, workflow):
"""Show workflow inputs and exit."""
for input_ in workflow.inputs:
click.echo(
'{id}: {default}'.format(
id=input_.id,
default=_format_default(client, input_.default),
)
)
    sys.exit(0) | Show workflow inputs and exit. | Below is the instruction that describes the task:
### Input:
Show workflow inputs and exit.
### Response:
def show_inputs(client, workflow):
"""Show workflow inputs and exit."""
for input_ in workflow.inputs:
click.echo(
'{id}: {default}'.format(
id=input_.id,
default=_format_default(client, input_.default),
)
)
sys.exit(0) |
def get_folder_contents_iter(self, uri):
"""Return iterator for directory contents.
uri -- mediafire URI
Example:
for item in get_folder_contents_iter('mf:///Documents'):
print(item)
"""
resource = self.get_resource_by_uri(uri)
if not isinstance(resource, Folder):
raise NotAFolderError(uri)
folder_key = resource['folderkey']
for item in self._folder_get_content_iter(folder_key):
if 'filename' in item:
# Work around https://mediafire.mantishub.com/view.php?id=5
# TODO: remove in 1.0
if ".patch." in item['filename']:
continue
yield File(item)
elif 'name' in item:
yield Folder(item) | Return iterator for directory contents.
uri -- mediafire URI
Example:
for item in get_folder_contents_iter('mf:///Documents'):
            print(item) | Below is the instruction that describes the task:
### Input:
Return iterator for directory contents.
uri -- mediafire URI
Example:
for item in get_folder_contents_iter('mf:///Documents'):
print(item)
### Response:
def get_folder_contents_iter(self, uri):
"""Return iterator for directory contents.
uri -- mediafire URI
Example:
for item in get_folder_contents_iter('mf:///Documents'):
print(item)
"""
resource = self.get_resource_by_uri(uri)
if not isinstance(resource, Folder):
raise NotAFolderError(uri)
folder_key = resource['folderkey']
for item in self._folder_get_content_iter(folder_key):
if 'filename' in item:
# Work around https://mediafire.mantishub.com/view.php?id=5
# TODO: remove in 1.0
if ".patch." in item['filename']:
continue
yield File(item)
elif 'name' in item:
yield Folder(item) |
async def create_collection(db, model_class: MongoCollectionMixin):
'''
Creates a MongoDB collection and all the declared indices in the model's ``Meta`` class
:param db:
A database handle
:type db:
motor.motor_asyncio.AsyncIOMotorClient
:param model_class:
The model to create
:type model_class:
Subclass of ``Model`` mixed with ``MongoCollectionMixin``
'''
name = model_class.get_collection_name()
if name:
try:
# create collection
coll = await db.create_collection(name, **model_class._meta.creation_args)
except CollectionInvalid: # collection already exists
coll = db[name]
# create indices
if hasattr(model_class._meta, 'indices') and isinstance(model_class._meta.indices, list):
for index in model_class._meta.indices:
try:
index_kwargs = {
'name': index.get('name', '_'.join([x[0] for x in index['fields']])),
'unique': index.get('unique', False),
'sparse': index.get('sparse', False),
'expireAfterSeconds': index.get('expireAfterSeconds', None),
'background': True
}
if 'partialFilterExpression' in index:
index_kwargs['partialFilterExpression'] = index.get('partialFilterExpression', {})
await db[name].create_index(
index['fields'],
**index_kwargs
)
except OperationFailure as ex:
pass # index already exists ? TODO: do something with this
return coll
return None | Creates a MongoDB collection and all the declared indices in the model's ``Meta`` class
:param db:
A database handle
:type db:
motor.motor_asyncio.AsyncIOMotorClient
:param model_class:
The model to create
:type model_class:
        Subclass of ``Model`` mixed with ``MongoCollectionMixin`` | Below is the instruction that describes the task:
### Input:
Creates a MongoDB collection and all the declared indices in the model's ``Meta`` class
:param db:
A database handle
:type db:
motor.motor_asyncio.AsyncIOMotorClient
:param model_class:
The model to create
:type model_class:
Subclass of ``Model`` mixed with ``MongoCollectionMixin``
### Response:
async def create_collection(db, model_class: MongoCollectionMixin):
'''
Creates a MongoDB collection and all the declared indices in the model's ``Meta`` class
:param db:
A database handle
:type db:
motor.motor_asyncio.AsyncIOMotorClient
:param model_class:
The model to create
:type model_class:
Subclass of ``Model`` mixed with ``MongoCollectionMixin``
'''
name = model_class.get_collection_name()
if name:
try:
# create collection
coll = await db.create_collection(name, **model_class._meta.creation_args)
except CollectionInvalid: # collection already exists
coll = db[name]
# create indices
if hasattr(model_class._meta, 'indices') and isinstance(model_class._meta.indices, list):
for index in model_class._meta.indices:
try:
index_kwargs = {
'name': index.get('name', '_'.join([x[0] for x in index['fields']])),
'unique': index.get('unique', False),
'sparse': index.get('sparse', False),
'expireAfterSeconds': index.get('expireAfterSeconds', None),
'background': True
}
if 'partialFilterExpression' in index:
index_kwargs['partialFilterExpression'] = index.get('partialFilterExpression', {})
await db[name].create_index(
index['fields'],
**index_kwargs
)
except OperationFailure as ex:
pass # index already exists ? TODO: do something with this
return coll
return None |
def remove_sister(self, sister=None):
"""
Removes a sister node. It has the same effect as
**`TreeNode.up.remove_child(sister)`**
If a sister node is not supplied, the first sister will be deleted
and returned.
:argument sister: A node instance
:return: The node removed
"""
sisters = self.get_sisters()
if len(sisters) > 0:
if sister is None:
sister = sisters.pop(0)
return self.up.remove_child(sister) | Removes a sister node. It has the same effect as
**`TreeNode.up.remove_child(sister)`**
If a sister node is not supplied, the first sister will be deleted
and returned.
:argument sister: A node instance
        :return: The node removed | Below is the instruction that describes the task:
### Input:
Removes a sister node. It has the same effect as
**`TreeNode.up.remove_child(sister)`**
If a sister node is not supplied, the first sister will be deleted
and returned.
:argument sister: A node instance
:return: The node removed
### Response:
def remove_sister(self, sister=None):
"""
Removes a sister node. It has the same effect as
**`TreeNode.up.remove_child(sister)`**
If a sister node is not supplied, the first sister will be deleted
and returned.
:argument sister: A node instance
:return: The node removed
"""
sisters = self.get_sisters()
if len(sisters) > 0:
if sister is None:
sister = sisters.pop(0)
return self.up.remove_child(sister) |
def get_study_items(self):
"""Get all study items (e.g., geneids)."""
study_items = set()
for rec in self.goea_results:
study_items |= rec.study_items
    return study_items | Get all study items (e.g., geneids). | Below is the instruction that describes the task:
### Input:
Get all study items (e.g., geneids).
### Response:
def get_study_items(self):
"""Get all study items (e.g., geneids)."""
study_items = set()
for rec in self.goea_results:
study_items |= rec.study_items
return study_items |
def _save_message(self, stack, type_, message, context=None,
from_merge=False):
'Stores a message in the appropriate message stack.'
uid = uuid.uuid4().hex
message['uid'] = uid
# Get the context for the message (if there's a context available)
if context is not None:
if isinstance(context, tuple):
message['context'] = context
else:
message['context'] = (
context.get_context(line=message['line'],
column=message['column']))
else:
message['context'] = None
if self.package_stack:
if not isinstance(message['file'], list):
message['file'] = [message['file']]
message['file'] = self.package_stack + message['file']
# Test that if for_appversions is set that we're only applying to
# supported add-ons. THIS IS THE LAST FILTER BEFORE THE MESSAGE IS
# ADDED TO THE STACK!
if message['for_appversions']:
if not self.supports_version(message['for_appversions']):
if self.instant:
print '(Instant error discarded)'
self._print_message(type_, message, verbose=True)
return
elif self.version_requirements:
# If there was no for_appversions but there were version
# requirements detailed in the decorator, use the ones from the
# decorator.
message['for_appversions'] = self.version_requirements
# Save the message to the stack.
stack.append(message)
# Mark the tier that the error occurred at.
if message['tier'] is None:
message['tier'] = self.tier
# Build out the compatibility summary if possible.
if message['compatibility_type'] and not from_merge:
self.compat_summary['%ss' % message['compatibility_type']] += 1
# Build out the message tree entry.
if message['id']:
tree = self.message_tree
last_id = None
for eid in message['id']:
if last_id is not None:
tree = tree[last_id]
if eid not in tree:
tree[eid] = {'__errors': 0,
'__warnings': 0,
'__notices': 0,
'__messages': []}
tree[eid]['__%s' % type_] += 1
last_id = eid
tree[last_id]['__messages'].append(uid)
# If instant mode is turned on, output the message immediately.
if self.instant:
            self._print_message(type_, message, verbose=True) | Stores a message in the appropriate message stack. | Below is the instruction that describes the task:
### Input:
Stores a message in the appropriate message stack.
### Response:
def _save_message(self, stack, type_, message, context=None,
from_merge=False):
'Stores a message in the appropriate message stack.'
uid = uuid.uuid4().hex
message['uid'] = uid
# Get the context for the message (if there's a context available)
if context is not None:
if isinstance(context, tuple):
message['context'] = context
else:
message['context'] = (
context.get_context(line=message['line'],
column=message['column']))
else:
message['context'] = None
if self.package_stack:
if not isinstance(message['file'], list):
message['file'] = [message['file']]
message['file'] = self.package_stack + message['file']
# Test that if for_appversions is set that we're only applying to
# supported add-ons. THIS IS THE LAST FILTER BEFORE THE MESSAGE IS
# ADDED TO THE STACK!
if message['for_appversions']:
if not self.supports_version(message['for_appversions']):
if self.instant:
print '(Instant error discarded)'
self._print_message(type_, message, verbose=True)
return
elif self.version_requirements:
# If there was no for_appversions but there were version
# requirements detailed in the decorator, use the ones from the
# decorator.
message['for_appversions'] = self.version_requirements
# Save the message to the stack.
stack.append(message)
# Mark the tier that the error occurred at.
if message['tier'] is None:
message['tier'] = self.tier
# Build out the compatibility summary if possible.
if message['compatibility_type'] and not from_merge:
self.compat_summary['%ss' % message['compatibility_type']] += 1
# Build out the message tree entry.
if message['id']:
tree = self.message_tree
last_id = None
for eid in message['id']:
if last_id is not None:
tree = tree[last_id]
if eid not in tree:
tree[eid] = {'__errors': 0,
'__warnings': 0,
'__notices': 0,
'__messages': []}
tree[eid]['__%s' % type_] += 1
last_id = eid
tree[last_id]['__messages'].append(uid)
# If instant mode is turned on, output the message immediately.
if self.instant:
self._print_message(type_, message, verbose=True) |
async def receive_events(self, request: HttpRequest):
"""
Events received from Facebook
"""
body = await request.read()
s = self.settings()
try:
content = ujson.loads(body)
except ValueError:
return json_response({
'error': True,
'message': 'Cannot decode body'
}, status=400)
secret = s['app_secret']
actual_sig = request.headers['X-Hub-Signature']
expected_sig = sign_message(body, secret)
if not hmac.compare_digest(actual_sig, expected_sig):
return json_response({
'error': True,
'message': 'Invalid signature',
}, status=401)
for entry in content['entry']:
for raw_message in entry.get('messaging', []):
message = FacebookMessage(raw_message, self)
await self.handle_event(message)
return json_response({
'ok': True,
        }) | Events received from Facebook | Below is the instruction that describes the task:
### Input:
Events received from Facebook
### Response:
async def receive_events(self, request: HttpRequest):
"""
Events received from Facebook
"""
body = await request.read()
s = self.settings()
try:
content = ujson.loads(body)
except ValueError:
return json_response({
'error': True,
'message': 'Cannot decode body'
}, status=400)
secret = s['app_secret']
actual_sig = request.headers['X-Hub-Signature']
expected_sig = sign_message(body, secret)
if not hmac.compare_digest(actual_sig, expected_sig):
return json_response({
'error': True,
'message': 'Invalid signature',
}, status=401)
for entry in content['entry']:
for raw_message in entry.get('messaging', []):
message = FacebookMessage(raw_message, self)
await self.handle_event(message)
return json_response({
'ok': True,
}) |
def param(name, help=""):
"""Decorator that add a parameter to the wrapped command or function."""
def decorator(func):
params = getattr(func, "params", [])
_param = Param(name, help)
# Insert at the beginning so the apparent order is preserved
params.insert(0, _param)
func.params = params
return func
    return decorator | Decorator that adds a parameter to the wrapped command or function. | Below is the instruction that describes the task:
### Input:
Decorator that adds a parameter to the wrapped command or function.
### Response:
def param(name, help=""):
"""Decorator that add a parameter to the wrapped command or function."""
def decorator(func):
params = getattr(func, "params", [])
_param = Param(name, help)
# Insert at the beginning so the apparent order is preserved
params.insert(0, _param)
func.params = params
return func
return decorator |
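A usage sketch for the decorator above, assuming Param is a simple holder that keeps the name it was given; because each call does insert(0, ...), stacked decorators end up in top-to-bottom order:
>>> @param("path", help="input file")
... @param("verbose", help="chatty output")
... def run(path, verbose=False):
...     pass
>>> [p.name for p in run.params]
['path', 'verbose']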
def assemble_tlg_author_filepaths():
"""Reads TLG index and builds a list of absolute filepaths."""
plaintext_dir_rel = '~/cltk_data/greek/text/tlg/plaintext/'
plaintext_dir = os.path.expanduser(plaintext_dir_rel)
filepaths = [os.path.join(plaintext_dir, x + '.TXT') for x in TLG_INDEX]
    return filepaths | Reads TLG index and builds a list of absolute filepaths. | Below is the instruction that describes the task:
### Input:
Reads TLG index and builds a list of absolute filepaths.
### Response:
def assemble_tlg_author_filepaths():
"""Reads TLG index and builds a list of absolute filepaths."""
plaintext_dir_rel = '~/cltk_data/greek/text/tlg/plaintext/'
plaintext_dir = os.path.expanduser(plaintext_dir_rel)
filepaths = [os.path.join(plaintext_dir, x + '.TXT') for x in TLG_INDEX]
return filepaths |
def create_crop(self, name, file_obj,
x=None, x2=None, y=None, y2=None):
"""
Generate Version for an Image.
value has to be a serverpath relative to MEDIA_ROOT.
Returns the spec for the crop that was created.
"""
if name not in self._registry:
return
file_obj.seek(0)
im = Image.open(file_obj)
config = self._registry[name]
if x is not None and x2 and y is not None and y2 and not config.editable:
# You can't ask for something special
# for non editable images
return
im = config.rotate_by_exif(im)
crop_spec = config.get_crop_spec(im, x=x, x2=x2, y=y, y2=y2)
image = config.process_image(im, crop_spec=crop_spec)
if image:
crop_name = utils.get_size_filename(file_obj.name, name)
self._save_file(image, crop_name)
return crop_spec | Generate Version for an Image.
value has to be a serverpath relative to MEDIA_ROOT.
        Returns the spec for the crop that was created. | Below is the instruction that describes the task:
### Input:
Generate Version for an Image.
value has to be a serverpath relative to MEDIA_ROOT.
Returns the spec for the crop that was created.
### Response:
def create_crop(self, name, file_obj,
x=None, x2=None, y=None, y2=None):
"""
Generate Version for an Image.
value has to be a serverpath relative to MEDIA_ROOT.
Returns the spec for the crop that was created.
"""
if name not in self._registry:
return
file_obj.seek(0)
im = Image.open(file_obj)
config = self._registry[name]
if x is not None and x2 and y is not None and y2 and not config.editable:
# You can't ask for something special
# for non editable images
return
im = config.rotate_by_exif(im)
crop_spec = config.get_crop_spec(im, x=x, x2=x2, y=y, y2=y2)
image = config.process_image(im, crop_spec=crop_spec)
if image:
crop_name = utils.get_size_filename(file_obj.name, name)
self._save_file(image, crop_name)
return crop_spec |
def _swap_bytes(data):
"""swaps bytes for 16 bit, leaves remaining trailing bytes alone"""
a, b = data[1::2], data[::2]
data = bytearray().join(bytearray(x) for x in zip(a, b))
if len(b) > len(a):
data += b[-1:]
    return bytes(data) | swaps bytes for 16 bit, leaves remaining trailing bytes alone | Below is the instruction that describes the task:
### Input:
swaps bytes for 16 bit, leaves remaining trailing bytes alone
### Response:
def _swap_bytes(data):
"""swaps bytes for 16 bit, leaves remaining trailing bytes alone"""
a, b = data[1::2], data[::2]
data = bytearray().join(bytearray(x) for x in zip(a, b))
if len(b) > len(a):
data += b[-1:]
return bytes(data) |
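A worked example of the helper above; with an odd-length input the final byte has no partner and is carried over unchanged:
>>> _swap_bytes(b'\x01\x02\x03\x04\x05')
b'\x02\x01\x04\x03\x05'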
def walk(self, start, end):
"""
Walk from `start` node to `end` node.
Returns:
(upwards, common, downwards): `upwards` is a list of nodes to go upward to.
`common` top node. `downwards` is a list of nodes to go downward to.
Raises:
WalkError: on no common root node.
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()))
Node('/f')
|-- Node('/f/b')
| |-- Node('/f/b/a')
| +-- Node('/f/b/d')
| |-- Node('/f/b/d/c')
| +-- Node('/f/b/d/e')
+-- Node('/f/g')
+-- Node('/f/g/i')
+-- Node('/f/g/i/h')
Create a walker:
>>> w = Walker()
This class is made for walking:
>>> w.walk(f, f)
((), Node('/f'), ())
>>> w.walk(f, b)
((), Node('/f'), (Node('/f/b'),))
>>> w.walk(b, f)
((Node('/f/b'),), Node('/f'), ())
>>> w.walk(h, e)
((Node('/f/g/i/h'), Node('/f/g/i'), Node('/f/g')), Node('/f'), (Node('/f/b'), Node('/f/b/d'), Node('/f/b/d/e')))
>>> w.walk(d, e)
((), Node('/f/b/d'), (Node('/f/b/d/e'),))
For a proper walking the nodes need to be part of the same tree:
>>> w.walk(Node("a"), Node("b"))
Traceback (most recent call last):
...
anytree.walker.WalkError: Node('/a') and Node('/b') are not part of the same tree.
"""
s = start.path
e = end.path
if start.root != end.root:
msg = "%r and %r are not part of the same tree." % (start, end)
raise WalkError(msg)
# common
c = Walker.__calc_common(s, e)
assert c[0] is start.root
len_c = len(c)
# up
if start is c[-1]:
up = tuple()
else:
up = tuple(reversed(s[len_c:]))
# down
if end is c[-1]:
down = tuple()
else:
down = e[len_c:]
return up, c[-1], down | Walk from `start` node to `end` node.
Returns:
(upwards, common, downwards): `upwards` is a list of nodes to go upward to.
`common` top node. `downwards` is a list of nodes to go downward to.
Raises:
WalkError: on no common root node.
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()))
Node('/f')
|-- Node('/f/b')
| |-- Node('/f/b/a')
| +-- Node('/f/b/d')
| |-- Node('/f/b/d/c')
| +-- Node('/f/b/d/e')
+-- Node('/f/g')
+-- Node('/f/g/i')
+-- Node('/f/g/i/h')
Create a walker:
>>> w = Walker()
This class is made for walking:
>>> w.walk(f, f)
((), Node('/f'), ())
>>> w.walk(f, b)
((), Node('/f'), (Node('/f/b'),))
>>> w.walk(b, f)
((Node('/f/b'),), Node('/f'), ())
>>> w.walk(h, e)
((Node('/f/g/i/h'), Node('/f/g/i'), Node('/f/g')), Node('/f'), (Node('/f/b'), Node('/f/b/d'), Node('/f/b/d/e')))
>>> w.walk(d, e)
((), Node('/f/b/d'), (Node('/f/b/d/e'),))
For a proper walking the nodes need to be part of the same tree:
>>> w.walk(Node("a"), Node("b"))
Traceback (most recent call last):
...
        anytree.walker.WalkError: Node('/a') and Node('/b') are not part of the same tree. | Below is the instruction that describes the task:
### Input:
Walk from `start` node to `end` node.
Returns:
(upwards, common, downwards): `upwards` is a list of nodes to go upward to.
`common` top node. `downwards` is a list of nodes to go downward to.
Raises:
WalkError: on no common root node.
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()))
Node('/f')
|-- Node('/f/b')
| |-- Node('/f/b/a')
| +-- Node('/f/b/d')
| |-- Node('/f/b/d/c')
| +-- Node('/f/b/d/e')
+-- Node('/f/g')
+-- Node('/f/g/i')
+-- Node('/f/g/i/h')
Create a walker:
>>> w = Walker()
This class is made for walking:
>>> w.walk(f, f)
((), Node('/f'), ())
>>> w.walk(f, b)
((), Node('/f'), (Node('/f/b'),))
>>> w.walk(b, f)
((Node('/f/b'),), Node('/f'), ())
>>> w.walk(h, e)
((Node('/f/g/i/h'), Node('/f/g/i'), Node('/f/g')), Node('/f'), (Node('/f/b'), Node('/f/b/d'), Node('/f/b/d/e')))
>>> w.walk(d, e)
((), Node('/f/b/d'), (Node('/f/b/d/e'),))
For a proper walking the nodes need to be part of the same tree:
>>> w.walk(Node("a"), Node("b"))
Traceback (most recent call last):
...
anytree.walker.WalkError: Node('/a') and Node('/b') are not part of the same tree.
### Response:
def walk(self, start, end):
"""
Walk from `start` node to `end` node.
Returns:
(upwards, common, downwards): `upwards` is a list of nodes to go upward to.
`common` top node. `downwards` is a list of nodes to go downward to.
Raises:
WalkError: on no common root node.
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()))
Node('/f')
|-- Node('/f/b')
| |-- Node('/f/b/a')
| +-- Node('/f/b/d')
| |-- Node('/f/b/d/c')
| +-- Node('/f/b/d/e')
+-- Node('/f/g')
+-- Node('/f/g/i')
+-- Node('/f/g/i/h')
Create a walker:
>>> w = Walker()
This class is made for walking:
>>> w.walk(f, f)
((), Node('/f'), ())
>>> w.walk(f, b)
((), Node('/f'), (Node('/f/b'),))
>>> w.walk(b, f)
((Node('/f/b'),), Node('/f'), ())
>>> w.walk(h, e)
((Node('/f/g/i/h'), Node('/f/g/i'), Node('/f/g')), Node('/f'), (Node('/f/b'), Node('/f/b/d'), Node('/f/b/d/e')))
>>> w.walk(d, e)
((), Node('/f/b/d'), (Node('/f/b/d/e'),))
For a proper walking the nodes need to be part of the same tree:
>>> w.walk(Node("a"), Node("b"))
Traceback (most recent call last):
...
anytree.walker.WalkError: Node('/a') and Node('/b') are not part of the same tree.
"""
s = start.path
e = end.path
if start.root != end.root:
msg = "%r and %r are not part of the same tree." % (start, end)
raise WalkError(msg)
# common
c = Walker.__calc_common(s, e)
assert c[0] is start.root
len_c = len(c)
# up
if start is c[-1]:
up = tuple()
else:
up = tuple(reversed(s[len_c:]))
# down
if end is c[-1]:
down = tuple()
else:
down = e[len_c:]
return up, c[-1], down |
def input(self, data):
"""Set the input text data."""
self.data = data
self.lexer.input(data) | Set the input text data. | Below is the the instruction that describes the task:
### Input:
Set the input text data.
### Response:
def input(self, data):
"""Set the input text data."""
self.data = data
self.lexer.input(data) |
def subprocess_run(*popenargs, input=None, timeout=None, check=False, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.
The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.
If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.
If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.
There is an optional argument "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.
The other arguments are the same as for the Popen constructor.
If universal_newlines=True is passed, the "input" argument must be a
string and stdout/stderr in the returned object will be strings rather than
bytes.
"""
#pylint: disable=redefined-builtin
if input is not None:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = subprocess.PIPE
with subprocess.Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except subprocess.TimeoutExpired:
process.kill()
stdout, stderr = process.communicate()
raise subprocess.TimeoutExpired(process.args, timeout, output=stdout,
stderr=stderr)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if check and retcode:
raise subprocess.CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
return CompletedProcess(process.args, retcode, stdout, stderr) | Run command with arguments and return a CompletedProcess instance.
The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.
If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.
If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.
There is an optional argument "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.
The other arguments are the same as for the Popen constructor.
If universal_newlines=True is passed, the "input" argument must be a
string and stdout/stderr in the returned object will be strings rather than
bytes. | Below is the the instruction that describes the task:
### Input:
Run command with arguments and return a CompletedProcess instance.
The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.
If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.
If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.
There is an optional argument "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.
The other arguments are the same as for the Popen constructor.
If universal_newlines=True is passed, the "input" argument must be a
string and stdout/stderr in the returned object will be strings rather than
bytes.
### Response:
def subprocess_run(*popenargs, input=None, timeout=None, check=False, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.
The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.
If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.
If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.
There is an optional argument "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.
The other arguments are the same as for the Popen constructor.
If universal_newlines=True is passed, the "input" argument must be a
string and stdout/stderr in the returned object will be strings rather than
bytes.
"""
#pylint: disable=redefined-builtin
if input is not None:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = subprocess.PIPE
with subprocess.Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except subprocess.TimeoutExpired:
process.kill()
stdout, stderr = process.communicate()
raise subprocess.TimeoutExpired(process.args, timeout, output=stdout,
stderr=stderr)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if check and retcode:
raise subprocess.CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
return CompletedProcess(process.args, retcode, stdout, stderr) |
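A minimal usage sketch, assuming the backport above is importable in the current module; the command and captured output are only an illustration:
import subprocess

result = subprocess_run(["echo", "hello"], stdout=subprocess.PIPE, check=True)
print(result.returncode)  # 0 on success
print(result.stdout)      # b'hello\n' -- bytes unless universal_newlines=True is passed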
def join_event_view(request, id):
"""Join event page. If a POST request, actually add or remove the attendance of the current
user. Otherwise, display a page with confirmation.
id: event id
"""
event = get_object_or_404(Event, id=id)
if request.method == "POST":
if not event.show_attending:
return redirect("events")
if "attending" in request.POST:
attending = request.POST.get("attending")
attending = (attending == "true")
if attending:
event.attending.add(request.user)
else:
event.attending.remove(request.user)
return redirect("events")
context = {"event": event, "is_events_admin": request.user.has_admin_permission('events')}
return render(request, "events/join_event.html", context) | Join event page. If a POST request, actually add or remove the attendance of the current
user. Otherwise, display a page with confirmation.
id: event id | Below is the the instruction that describes the task:
### Input:
Join event page. If a POST request, actually add or remove the attendance of the current
user. Otherwise, display a page with confirmation.
id: event id
### Response:
def join_event_view(request, id):
"""Join event page. If a POST request, actually add or remove the attendance of the current
user. Otherwise, display a page with confirmation.
id: event id
"""
event = get_object_or_404(Event, id=id)
if request.method == "POST":
if not event.show_attending:
return redirect("events")
if "attending" in request.POST:
attending = request.POST.get("attending")
attending = (attending == "true")
if attending:
event.attending.add(request.user)
else:
event.attending.remove(request.user)
return redirect("events")
context = {"event": event, "is_events_admin": request.user.has_admin_permission('events')}
return render(request, "events/join_event.html", context) |
def OnSelectReader(self, reader):
"""Called when a reader is selected by clicking on the
reader tree control or toolbar."""
SimpleSCardAppEventObserver.OnSelectReader(self, reader)
self.feedbacktext.SetLabel('Selected reader: ' + repr(reader))
self.transmitbutton.Disable() | Called when a reader is selected by clicking on the
reader tree control or toolbar. | Below is the the instruction that describes the task:
### Input:
Called when a reader is selected by clicking on the
reader tree control or toolbar.
### Response:
def OnSelectReader(self, reader):
"""Called when a reader is selected by clicking on the
reader tree control or toolbar."""
SimpleSCardAppEventObserver.OnSelectReader(self, reader)
self.feedbacktext.SetLabel('Selected reader: ' + repr(reader))
self.transmitbutton.Disable() |
def fill_parameters(self, path, blocks, exclude_free_params=False, check_parameters=False):
"""
Load parameters from file to fill all blocks sequentially.
:type blocks: list of deepy.layers.Block
"""
if not os.path.exists(path):
raise Exception("model {} does not exist".format(path))
# Decide which parameters to load
normal_params = sum([nn.parameters for nn in blocks], [])
all_params = sum([nn.all_parameters for nn in blocks], [])
# Load parameters
if path.endswith(".gz"):
opener = gzip.open if path.lower().endswith('.gz') else open
handle = opener(path, 'rb')
saved_params = pickle.load(handle)
handle.close()
# Write parameters
if len(all_params) != len(saved_params):
logging.warning(
"parameters in the network: {}, parameters in the dumped model: {}".format(len(all_params),
len(saved_params)))
for target, source in zip(all_params, saved_params):
if not exclude_free_params or target not in normal_params:
target.set_value(source)
elif path.endswith(".npz"):
arrs = np.load(path)
# Write parameters
if len(all_params) != len(arrs.keys()):
logging.warning(
"parameters in the network: {}, parameters in the dumped model: {}".format(len(all_params),
len(arrs.keys())))
for target, idx in zip(all_params, range(len(arrs.keys()))):
if not exclude_free_params or target not in normal_params:
source = arrs['arr_%d' % idx]
target.set_value(source)
else:
raise Exception("File format of %s is not supported, use '.gz' or '.npz' or '.uncompressed.gz'" % path) | Load parameters from file to fill all blocks sequentially.
:type blocks: list of deepy.layers.Block | Below is the the instruction that describes the task:
### Input:
Load parameters from file to fill all blocks sequentially.
:type blocks: list of deepy.layers.Block
### Response:
def fill_parameters(self, path, blocks, exclude_free_params=False, check_parameters=False):
"""
Load parameters from file to fill all blocks sequentially.
:type blocks: list of deepy.layers.Block
"""
if not os.path.exists(path):
raise Exception("model {} does not exist".format(path))
# Decide which parameters to load
normal_params = sum([nn.parameters for nn in blocks], [])
all_params = sum([nn.all_parameters for nn in blocks], [])
# Load parameters
if path.endswith(".gz"):
opener = gzip.open if path.lower().endswith('.gz') else open
handle = opener(path, 'rb')
saved_params = pickle.load(handle)
handle.close()
# Write parameters
if len(all_params) != len(saved_params):
logging.warning(
"parameters in the network: {}, parameters in the dumped model: {}".format(len(all_params),
len(saved_params)))
for target, source in zip(all_params, saved_params):
if not exclude_free_params or target not in normal_params:
target.set_value(source)
elif path.endswith(".npz"):
arrs = np.load(path)
# Write parameters
if len(all_params) != len(arrs.keys()):
logging.warning(
"parameters in the network: {}, parameters in the dumped model: {}".format(len(all_params),
len(arrs.keys())))
for target, idx in zip(all_params, range(len(arrs.keys()))):
if not exclude_free_params or target not in normal_params:
source = arrs['arr_%d' % idx]
target.set_value(source)
else:
raise Exception("File format of %s is not supported, use '.gz' or '.npz' or '.uncompressed.gz'" % path) |
def crc16(cmd, use_byte=False):
"""
CRC16 checksum.
- If ``use_byte`` is enabled, the result is returned as ``bytes``.
:param cmd: command string without the CRC checksum
:type cmd:
:param use_byte: whether to return the result as ``bytes``
:type use_byte:
:return: the CRC value
:rtype:
"""
crc = 0xFFFF
# the CRC16 calculation works on bytes
if hasattr(cmd, 'encode'):
cmd = bytes.fromhex(cmd)
for _ in cmd:
c = _ & 0x00FF
crc ^= c
for i in range(8):
if crc & 0x0001 > 0:
crc >>= 1
crc ^= 0xA001
else:
crc >>= 1
# Modbus CRC16 is transmitted low byte first, so swap the high/low bytes
t = [(crc & 0x00FF), (crc >> 8 & 0xFF)]
crc = '%02X%02X' % (t[0], t[1])
if use_byte:
crc = bytes.fromhex(crc)
return crc | CRC16 checksum.
- If ``use_byte`` is enabled, the result is returned as ``bytes``.
:param cmd: command string without the CRC checksum
:type cmd:
:param use_byte: whether to return the result as ``bytes``
:type use_byte:
:return: the CRC value
:rtype: | Below is the the instruction that describes the task:
### Input:
CRC16 checksum.
- If ``use_byte`` is enabled, the result is returned as ``bytes``.
:param cmd: command string without the CRC checksum
:type cmd:
:param use_byte: whether to return the result as ``bytes``
:type use_byte:
:return: the CRC value
:rtype:
### Response:
def crc16(cmd, use_byte=False):
"""
CRC16 checksum.
- If ``use_byte`` is enabled, the result is returned as ``bytes``.
:param cmd: command string without the CRC checksum
:type cmd:
:param use_byte: whether to return the result as ``bytes``
:type use_byte:
:return: the CRC value
:rtype:
"""
crc = 0xFFFF
# the CRC16 calculation works on bytes
if hasattr(cmd, 'encode'):
cmd = bytes.fromhex(cmd)
for _ in cmd:
c = _ & 0x00FF
crc ^= c
for i in range(8):
if crc & 0x0001 > 0:
crc >>= 1
crc ^= 0xA001
else:
crc >>= 1
# Modbus CRC16 is transmitted low byte first, so swap the high/low bytes
t = [(crc & 0x00FF), (crc >> 8 & 0xFF)]
crc = '%02X%02X' % (t[0], t[1])
if use_byte:
crc = bytes.fromhex(crc)
return crc |
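A short usage sketch for the crc16 function above, using the classic Modbus read-holding-registers request 01 03 00 00 00 01 (its well-known checksum is 84 0A, low byte first):
cmd = "010300000001"                 # Modbus frame without the CRC
print(crc16(cmd))                    # '840A' -> full frame: 01 03 00 00 00 01 84 0A
print(crc16(cmd, use_byte=True))     # b'\x84\x0a'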
def entities(self, entity_ids):
'''Get the default data for entities.
@param entity_ids A list of entity ids either as strings or references.
'''
url = '%s/meta/any?include=id&' % self.url
for entity_id in entity_ids:
url += 'id=%s&' % _get_path(entity_id)
# Remove the trailing '&' from the URL.
url = url[:-1]
data = self._get(url)
return data.json() | Get the default data for entities.
@param entity_ids A list of entity ids either as strings or references. | Below is the the instruction that describes the task:
### Input:
Get the default data for entities.
@param entity_ids A list of entity ids either as strings or references.
### Response:
def entities(self, entity_ids):
'''Get the default data for entities.
@param entity_ids A list of entity ids either as strings or references.
'''
url = '%s/meta/any?include=id&' % self.url
for entity_id in entity_ids:
url += 'id=%s&' % _get_path(entity_id)
# Remove the trailing '&' from the URL.
url = url[:-1]
data = self._get(url)
return data.json() |
def remove_dups(head):
"""
Time Complexity: O(N)
Space Complexity: O(N)
"""
hashset = set()
prev = Node()
while head:
if head.val in hashset:
prev.next = head.next
else:
hashset.add(head.val)
prev = head
head = head.next | Time Complexity: O(N)
Space Complexity: O(N) | Below is the the instruction that describes the task:
### Input:
Time Complexity: O(N)
Space Complexity: O(N)
### Response:
def remove_dups(head):
"""
Time Complexity: O(N)
Space Complexity: O(N)
"""
hashset = set()
prev = Node()
while head:
if head.val in hashset:
prev.next = head.next
else:
hashset.add(head.val)
prev = head
head = head.next |
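A self-contained sketch; the Node class below is an assumed minimal definition (the snippet above does not include one) with the val/next attributes and the no-argument constructor that remove_dups relies on:
class Node:
    """Minimal singly linked list node assumed by remove_dups."""
    def __init__(self, val=None, next=None):
        self.val = val
        self.next = next

# Build 1 -> 2 -> 2 -> 3 -> 1 and drop the duplicates in place.
head = Node(1, Node(2, Node(2, Node(3, Node(1)))))
remove_dups(head)

values = []
node = head
while node:
    values.append(node.val)
    node = node.next
print(values)  # [1, 2, 3]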
def select(self, comp_name, options=None):
""" Select the components that will by played (with given options).
`options` will be passed to :func:`.Optionable.parse_options` if the
component is a subclass of :class:`Optionable`.
.. Warning:: this function also setup the options (if given) of the
selected component. Use :func:`clear_selections` to restore both
selection and component's options.
This method may be call at play 'time', before to call :func:`play` to
run all selected components.
:param name: name of the component to select
:type comp_name: str
:param options: options to set to the components
:type options: dict
"""
self._logger.info("select comp '%s' for block '%s' (options: %s)" % (comp_name, self._name, options))
if comp_name not in self._components:
raise ValueError("'%s' has no component '%s' (components are: %s)"\
% (self._name, comp_name, ", ".join(self.component_names())))
if options is None:
options = {}
# get the component
component = self._components[comp_name]
# check options make sense
if not isinstance(component, Optionable) and len(options):
raise ValueError("the component %s is not optionable you can't provide options..." % comp_name)
# add component as selected, aware of multiple
if comp_name not in self._selected:
if not self.multiple and len(self._selected):
assert len(self._selected) == 1
self._selected[0] = comp_name
else:
self._selected.append(comp_name)
else:
# TODO the component has already been selected
pass
# component might be a function or any callable
# only Optionable will get options
if isinstance(component, Optionable):
component.set_options_values(options, parse=True, strict=True) | Select the components that will be played (with given options).
`options` will be passed to :func:`.Optionable.parse_options` if the
component is a subclass of :class:`Optionable`.
.. Warning:: this function also sets up the options (if given) of the
selected component. Use :func:`clear_selections` to restore both
selection and component's options.
This method may be called at play 'time', before calling :func:`play` to
run all selected components.
:param comp_name: name of the component to select
:type comp_name: str
:param options: options to set to the components
:type options: dict | Below is the the instruction that describes the task:
### Input:
Select the components that will be played (with given options).
`options` will be passed to :func:`.Optionable.parse_options` if the
component is a subclass of :class:`Optionable`.
.. Warning:: this function also sets up the options (if given) of the
selected component. Use :func:`clear_selections` to restore both
selection and component's options.
This method may be called at play 'time', before calling :func:`play` to
run all selected components.
:param comp_name: name of the component to select
:type comp_name: str
:param options: options to set to the components
:type options: dict
### Response:
def select(self, comp_name, options=None):
""" Select the components that will by played (with given options).
`options` will be passed to :func:`.Optionable.parse_options` if the
component is a subclass of :class:`Optionable`.
.. Warning:: this function also setup the options (if given) of the
selected component. Use :func:`clear_selections` to restore both
selection and component's options.
This method may be call at play 'time', before to call :func:`play` to
run all selected components.
:param name: name of the component to select
:type comp_name: str
:param options: options to set to the components
:type options: dict
"""
self._logger.info("select comp '%s' for block '%s' (options: %s)" % (comp_name, self._name, options))
if comp_name not in self._components:
raise ValueError("'%s' has no component '%s' (components are: %s)"\
% (self._name, comp_name, ", ".join(self.component_names())))
if options is None:
options = {}
# get the component
component = self._components[comp_name]
# check options make sense
if not isinstance(component, Optionable) and len(options):
raise ValueError("the component %s is not optionable you can't provide options..." % comp_name)
# add component as selected, aware of multiple
if comp_name not in self._selected:
if not self.multiple and len(self._selected):
assert len(self._selected) == 1
self._selected[0] = comp_name
else:
self._selected.append(comp_name)
else:
# TODO the component has already been selected
pass
# component might be a function or any callable
# only Optionable will get options
if isinstance(component, Optionable):
component.set_options_values(options, parse=True, strict=True) |
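A hedged usage sketch; the block, the component name and the option key below are purely illustrative (they depend on how the engine was configured), and only the call pattern is the point:
# Assume `block` holds a registered, optionable component called "tfidf".
block.select("tfidf", options={"k": "10"})
# ...later, calling the block's play() runs whatever was selected with these options.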
def to_javascript_(self, table_name: str="data") -> str:
"""Convert the main dataframe to javascript code
:param table_name: javascript variable name, defaults to "data"
:type table_name: str, optional
:return: a javascript constant with the data
:rtype: str
:example: ``ds.to_javascript_("myconst")``
"""
try:
renderer = pytablewriter.JavaScriptTableWriter
data = self._build_export(renderer, table_name)
return data
except Exception as e:
self.err(e, "Can not convert data to javascript code") | Convert the main dataframe to javascript code
:param table_name: javascript variable name, defaults to "data"
:type table_name: str, optional
:return: a javascript constant with the data
:rtype: str
:example: ``ds.to_javascript_("myconst")`` | Below is the the instruction that describes the task:
### Input:
Convert the main dataframe to javascript code
:param table_name: javascript variable name, defaults to "data"
:type table_name: str, optional
:return: a javascript constant with the data
:rtype: str
:example: ``ds.to_javascript_("myconst")``
### Response:
def to_javascript_(self, table_name: str="data") -> str:
"""Convert the main dataframe to javascript code
:param table_name: javascript variable name, defaults to "data"
:type table_name: str, optional
:return: a javascript constant with the data
:rtype: str
:example: ``ds.to_javascript_("myconst")``
"""
try:
renderer = pytablewriter.JavaScriptTableWriter
data = self._build_export(renderer, table_name)
return data
except Exception as e:
self.err(e, "Can not convert data to javascript code") |
def rmtree (self, errors='warn'):
"""Recursively delete this directory and its contents. The *errors* keyword
specifies how errors are handled:
"warn" (the default)
Print a warning to standard error.
"ignore"
Ignore errors.
"""
import shutil
if errors == 'ignore':
ignore_errors = True
onerror = None
elif errors == 'warn':
ignore_errors = False
from .cli import warn
def onerror (func, path, exc_info):
warn ('couldn\'t rmtree %s: in %s of %s: %s', self, func.__name__,
path, exc_info[1])
else:
raise ValueError ('unexpected "errors" keyword %r' % (errors,))
shutil.rmtree (text_type (self), ignore_errors=ignore_errors, onerror=onerror)
return self | Recursively delete this directory and its contents. The *errors* keyword
specifies how errors are handled:
"warn" (the default)
Print a warning to standard error.
"ignore"
Ignore errors. | Below is the the instruction that describes the task:
### Input:
Recursively delete this directory and its contents. The *errors* keyword
specifies how errors are handled:
"warn" (the default)
Print a warning to standard error.
"ignore"
Ignore errors.
### Response:
def rmtree (self, errors='warn'):
"""Recursively delete this directory and its contents. The *errors* keyword
specifies how errors are handled:
"warn" (the default)
Print a warning to standard error.
"ignore"
Ignore errors.
"""
import shutil
if errors == 'ignore':
ignore_errors = True
onerror = None
elif errors == 'warn':
ignore_errors = False
from .cli import warn
def onerror (func, path, exc_info):
warn ('couldn\'t rmtree %s: in %s of %s: %s', self, func.__name__,
path, exc_info[1])
else:
raise ValueError ('unexpected "errors" keyword %r' % (errors,))
shutil.rmtree (text_type (self), ignore_errors=ignore_errors, onerror=onerror)
return self |
def _defer_to_worker(deliver, worker, work, *args, **kwargs):
"""
Run a task in a worker, delivering the result as a ``Deferred`` in the
reactor thread.
"""
deferred = Deferred()
def wrapped_work():
try:
result = work(*args, **kwargs)
except BaseException:
f = Failure()
deliver(lambda: deferred.errback(f))
else:
deliver(lambda: deferred.callback(result))
worker.do(wrapped_work)
return deferred | Run a task in a worker, delivering the result as a ``Deferred`` in the
reactor thread. | Below is the the instruction that describes the task:
### Input:
Run a task in a worker, delivering the result as a ``Deferred`` in the
reactor thread.
### Response:
def _defer_to_worker(deliver, worker, work, *args, **kwargs):
"""
Run a task in a worker, delivering the result as a ``Deferred`` in the
reactor thread.
"""
deferred = Deferred()
def wrapped_work():
try:
result = work(*args, **kwargs)
except BaseException:
f = Failure()
deliver(lambda: deferred.errback(f))
else:
deliver(lambda: deferred.callback(result))
worker.do(wrapped_work)
return deferred |
def ldap_server_host_use_vrf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf = ET.SubElement(host, "use-vrf")
use_vrf.text = kwargs.pop('use_vrf')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def ldap_server_host_use_vrf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf = ET.SubElement(host, "use-vrf")
use_vrf.text = kwargs.pop('use_vrf')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _is_sub_intrinsic(data):
"""
Is this input data a Fn::Sub intrinsic function
Parameters
----------
data
Data to check
Returns
-------
bool
True if the data Fn::Sub intrinsic function
"""
return isinstance(data, dict) and len(data) == 1 and LambdaUri._FN_SUB in data | Is this input data a Fn::Sub intrinsic function
Parameters
----------
data
Data to check
Returns
-------
bool
True if the data Fn::Sub intrinsic function | Below is the the instruction that describes the task:
### Input:
Is this input data a Fn::Sub intrinsic function
Parameters
----------
data
Data to check
Returns
-------
bool
True if the data Fn::Sub intrinsic function
### Response:
def _is_sub_intrinsic(data):
"""
Is this input data a Fn::Sub intrinsic function
Parameters
----------
data
Data to check
Returns
-------
bool
True if the data Fn::Sub intrinsic function
"""
return isinstance(data, dict) and len(data) == 1 and LambdaUri._FN_SUB in data |
def parse_date_range_arguments(options: dict, default_range='last_month') -> (datetime, datetime, list):
"""
:param options:
:param default_range: Default datetime range to return if no other selected
:return: begin, end, [(begin1,end1), (begin2,end2), ...]
"""
begin, end = get_date_range_by_name(default_range)
for range_name in TIME_RANGE_NAMES:
if options.get(range_name):
begin, end = get_date_range_by_name(range_name)
if options.get('begin'):
t = parse(options['begin'], default=datetime(2000, 1, 1))
begin = pytz.utc.localize(t)
end = now()
if options.get('end'):
end = pytz.utc.localize(parse(options['end'], default=datetime(2000, 1, 1)))
step_type = None
after_end = end
for step_name in TIME_STEP_NAMES:
if options.get(step_name):
step_type = getattr(rrule, step_name.upper())
if rrule.DAILY == step_type:
after_end += timedelta(days=1)
if rrule.WEEKLY == step_type:
after_end += timedelta(days=7)
if rrule.MONTHLY == step_type:
after_end += timedelta(days=31)
steps = None
if step_type:
begins = [t for t in rrule.rrule(step_type, dtstart=begin, until=after_end)]
steps = [(begins[i], begins[i+1]) for i in range(len(begins)-1)]
if steps is None:
steps = [(begin, end)]
return begin, end, steps | :param options:
:param default_range: Default datetime range to return if no other selected
:return: begin, end, [(begin1,end1), (begin2,end2), ...] | Below is the the instruction that describes the task:
### Input:
:param options:
:param default_range: Default datetime range to return if no other selected
:return: begin, end, [(begin1,end1), (begin2,end2), ...]
### Response:
def parse_date_range_arguments(options: dict, default_range='last_month') -> (datetime, datetime, list):
"""
:param options:
:param default_range: Default datetime range to return if no other selected
:return: begin, end, [(begin1,end1), (begin2,end2), ...]
"""
begin, end = get_date_range_by_name(default_range)
for range_name in TIME_RANGE_NAMES:
if options.get(range_name):
begin, end = get_date_range_by_name(range_name)
if options.get('begin'):
t = parse(options['begin'], default=datetime(2000, 1, 1))
begin = pytz.utc.localize(t)
end = now()
if options.get('end'):
end = pytz.utc.localize(parse(options['end'], default=datetime(2000, 1, 1)))
step_type = None
after_end = end
for step_name in TIME_STEP_NAMES:
if options.get(step_name):
step_type = getattr(rrule, step_name.upper())
if rrule.DAILY == step_type:
after_end += timedelta(days=1)
if rrule.WEEKLY == step_type:
after_end += timedelta(days=7)
if rrule.MONTHLY == step_type:
after_end += timedelta(days=31)
steps = None
if step_type:
begins = [t for t in rrule.rrule(step_type, dtstart=begin, until=after_end)]
steps = [(begins[i], begins[i+1]) for i in range(len(begins)-1)]
if steps is None:
steps = [(begin, end)]
return begin, end, steps |
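A minimal usage sketch that only uses the explicit 'begin'/'end' options handled in the code above (the named range and step options depend on module constants not shown here):
options = {"begin": "2024-01-01", "end": "2024-02-01"}
begin, end, steps = parse_date_range_arguments(options)
print(begin, end)   # UTC-localized datetimes parsed from the options
print(steps)        # [(begin, end)] since no step option was given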
def decode_value(stream):
"""Decode the contents of a value from a serialized stream.
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Decoded value
:rtype: bytes
"""
length = decode_length(stream)
(value,) = unpack_value(">{:d}s".format(length), stream)
return value | Decode the contents of a value from a serialized stream.
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Decoded value
:rtype: bytes | Below is the the instruction that describes the task:
### Input:
Decode the contents of a value from a serialized stream.
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Decoded value
:rtype: bytes
### Response:
def decode_value(stream):
"""Decode the contents of a value from a serialized stream.
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Decoded value
:rtype: bytes
"""
length = decode_length(stream)
(value,) = unpack_value(">{:d}s".format(length), stream)
return value |
def posterior_covariance_between_points(self, X1, X2):
"""
Computes the posterior covariance between points.
:param X1: some input observations
:param X2: other input observations
"""
return self.posterior.covariance_between_points(self.kern, self.X, X1, X2) | Computes the posterior covariance between points.
:param X1: some input observations
:param X2: other input observations | Below is the the instruction that describes the task:
### Input:
Computes the posterior covariance between points.
:param X1: some input observations
:param X2: other input observations
### Response:
def posterior_covariance_between_points(self, X1, X2):
"""
Computes the posterior covariance between points.
:param X1: some input observations
:param X2: other input observations
"""
return self.posterior.covariance_between_points(self.kern, self.X, X1, X2) |
def addView(self, viewType):
"""
Adds a new view of the inputted view type.
:param viewType | <subclass of XView>
:return <XView> || None
"""
if not viewType:
return None
view = viewType.createInstance(self, self.viewWidget())
self.addTab(view, view.windowTitle())
return view | Adds a new view of the inputted view type.
:param viewType | <subclass of XView>
:return <XView> || None | Below is the the instruction that describes the task:
### Input:
Adds a new view of the inputted view type.
:param viewType | <subclass of XView>
:return <XView> || None
### Response:
def addView(self, viewType):
"""
Adds a new view of the inputted view type.
:param viewType | <subclass of XView>
:return <XView> || None
"""
if not viewType:
return None
view = viewType.createInstance(self, self.viewWidget())
self.addTab(view, view.windowTitle())
return view |
def dump_hash_prefix_values(self):
"""Export all hash prefix values.
Returns a list of known hash prefix values
"""
q = '''SELECT distinct value from hash_prefix'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output | Export all hash prefix values.
Returns a list of known hash prefix values | Below is the the instruction that describes the task:
### Input:
Export all hash prefix values.
Returns a list of known hash prefix values
### Response:
def dump_hash_prefix_values(self):
"""Export all hash prefix values.
Returns a list of known hash prefix values
"""
q = '''SELECT distinct value from hash_prefix'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output |
def is_domain(value, **kwargs):
"""Indicate whether ``value`` is a valid domain.
.. caution::
This validator does not verify that ``value`` **exists** as a domain. It
merely verifies that its contents *might* exist as a domain.
.. note::
This validator checks to validate that ``value`` resembles a valid
domain name. It is - generally - compliant with
`RFC 1035 <https://tools.ietf.org/html/rfc1035>`_ and
`RFC 6761 <https://tools.ietf.org/html/rfc6761>`_, however it diverges
in a number of key ways:
* Including authentication (e.g. ``username:[email protected]``) will
fail validation.
* Including a path (e.g. ``domain.dev/path/to/file``) will fail validation.
* Including a port (e.g. ``domain.dev:8080``) will fail validation.
If you are hoping to validate a more complete URL, we recommend that you
see :func:`url <validator_collection.validators.url>`.
:param value: The value to evaluate.
:param allow_ips: If ``True``, will succeed when validating IP addresses,
If ``False``, will fail if ``value`` is an IP address. Defaults to ``False``.
:type allow_ips: :class:`bool <python:bool>`
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
value = validators.domain(value, **kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True | Indicate whether ``value`` is a valid domain.
.. caution::
This validator does not verify that ``value`` **exists** as a domain. It
merely verifies that its contents *might* exist as a domain.
.. note::
This validator checks to validate that ``value`` resembles a valid
domain name. It is - generally - compliant with
`RFC 1035 <https://tools.ietf.org/html/rfc1035>`_ and
`RFC 6761 <https://tools.ietf.org/html/rfc6761>`_, however it diverges
in a number of key ways:
* Including authentication (e.g. ``username:[email protected]``) will
fail validation.
* Including a path (e.g. ``domain.dev/path/to/file``) will fail validation.
* Including a port (e.g. ``domain.dev:8080``) will fail validation.
If you are hoping to validate a more complete URL, we recommend that you
see :func:`url <validator_collection.validators.url>`.
:param value: The value to evaluate.
:param allow_ips: If ``True``, will succeed when validating IP addresses,
If ``False``, will fail if ``value`` is an IP address. Defaults to ``False``.
:type allow_ips: :class:`bool <python:bool>`
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator | Below is the the instruction that describes the task:
### Input:
Indicate whether ``value`` is a valid domain.
.. caution::
This validator does not verify that ``value`` **exists** as a domain. It
merely verifies that its contents *might* exist as a domain.
.. note::
This validator checks to validate that ``value`` resembles a valid
domain name. It is - generally - compliant with
`RFC 1035 <https://tools.ietf.org/html/rfc1035>`_ and
`RFC 6761 <https://tools.ietf.org/html/rfc6761>`_, however it diverges
in a number of key ways:
* Including authentication (e.g. ``username:[email protected]``) will
fail validation.
* Including a path (e.g. ``domain.dev/path/to/file``) will fail validation.
* Including a port (e.g. ``domain.dev:8080``) will fail validation.
If you are hoping to validate a more complete URL, we recommend that you
see :func:`url <validator_collection.validators.url>`.
:param value: The value to evaluate.
:param allow_ips: If ``True``, will succeed when validating IP addresses,
If ``False``, will fail if ``value`` is an IP address. Defaults to ``False``.
:type allow_ips: :class:`bool <python:bool>`
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
### Response:
def is_domain(value, **kwargs):
"""Indicate whether ``value`` is a valid domain.
.. caution::
This validator does not verify that ``value`` **exists** as a domain. It
merely verifies that its contents *might* exist as a domain.
.. note::
This validator checks to validate that ``value`` resembles a valid
domain name. It is - generally - compliant with
`RFC 1035 <https://tools.ietf.org/html/rfc1035>`_ and
`RFC 6761 <https://tools.ietf.org/html/rfc6761>`_, however it diverges
in a number of key ways:
* Including authentication (e.g. ``username:[email protected]``) will
fail validation.
* Including a path (e.g. ``domain.dev/path/to/file``) will fail validation.
* Including a port (e.g. ``domain.dev:8080``) will fail validation.
If you are hoping to validate a more complete URL, we recommend that you
see :func:`url <validator_collection.validators.url>`.
:param value: The value to evaluate.
:param allow_ips: If ``True``, will succeed when validating IP addresses,
If ``False``, will fail if ``value`` is an IP address. Defaults to ``False``.
:type allow_ips: :class:`bool <python:bool>`
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
value = validators.domain(value, **kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True |
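A short usage sketch with the expected results, assuming the underlying validators module behaves as the docstring describes:
print(is_domain("example.com"))                  # True
print(is_domain("not a valid domain!"))          # False
print(is_domain("127.0.0.1"))                    # False -- IPs are rejected by default
print(is_domain("127.0.0.1", allow_ips=True))    # True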
def pipe_to_process(self, payload):
"""Send something to stdin of a specific process."""
message = payload['input']
key = payload['key']
if not self.process_handler.is_running(key):
return {'message': 'No running process for this key',
'status': 'error'}
self.process_handler.send_to_process(message, key)
return {'message': 'Message sent',
'status': 'success'} | Send something to stdin of a specific process. | Below is the the instruction that describes the task:
### Input:
Send something to stdin of a specific process.
### Response:
def pipe_to_process(self, payload):
"""Send something to stdin of a specific process."""
message = payload['input']
key = payload['key']
if not self.process_handler.is_running(key):
return {'message': 'No running process for this key',
'status': 'error'}
self.process_handler.send_to_process(message, key)
return {'message': 'Message sent',
'status': 'success'} |
def search(self, q=None, has_geo=False, callback=None, errback=None):
"""
Search within a zone for specific metadata. Zone must already be loaded.
"""
if not self.data:
raise ZoneException('zone not loaded')
return self._rest.search(self.zone, q, has_geo, callback, errback) | Search within a zone for specific metadata. Zone must already be loaded. | Below is the the instruction that describes the task:
### Input:
Search within a zone for specific metadata. Zone must already be loaded.
### Response:
def search(self, q=None, has_geo=False, callback=None, errback=None):
"""
Search within a zone for specific metadata. Zone must already be loaded.
"""
if not self.data:
raise ZoneException('zone not loaded')
return self._rest.search(self.zone, q, has_geo, callback, errback) |
def make_folium_polyline(edge, edge_color, edge_width, edge_opacity, popup_attribute=None):
"""
Turn a row from the gdf_edges GeoDataFrame into a folium PolyLine with
attributes.
Parameters
----------
edge : GeoSeries
a row from the gdf_edges GeoDataFrame
edge_color : string
color of the edge lines
edge_width : numeric
width of the edge lines
edge_opacity : numeric
opacity of the edge lines
popup_attribute : string
edge attribute to display in a pop-up when an edge is clicked, if None,
no popup
Returns
-------
pl : folium.PolyLine
"""
# check if we were able to import folium successfully
if not folium:
raise ImportError('The folium package must be installed to use this optional feature.')
# locations is a list of points for the polyline
# folium takes coords in lat,lon but geopandas provides them in lon,lat
# so we have to flip them around
locations = list([(lat, lon) for lon, lat in edge['geometry'].coords])
# if popup_attribute is None, then create no pop-up
if popup_attribute is None:
popup = None
else:
# folium doesn't interpret html in the html argument (weird), so can't
# do newlines without an iframe
popup_text = json.dumps(edge[popup_attribute])
popup = folium.Popup(html=popup_text)
# create a folium polyline with attributes
pl = folium.PolyLine(locations=locations, popup=popup,
color=edge_color, weight=edge_width, opacity=edge_opacity)
return pl | Turn a row from the gdf_edges GeoDataFrame into a folium PolyLine with
attributes.
Parameters
----------
edge : GeoSeries
a row from the gdf_edges GeoDataFrame
edge_color : string
color of the edge lines
edge_width : numeric
width of the edge lines
edge_opacity : numeric
opacity of the edge lines
popup_attribute : string
edge attribute to display in a pop-up when an edge is clicked, if None,
no popup
Returns
-------
pl : folium.PolyLine | Below is the the instruction that describes the task:
### Input:
Turn a row from the gdf_edges GeoDataFrame into a folium PolyLine with
attributes.
Parameters
----------
edge : GeoSeries
a row from the gdf_edges GeoDataFrame
edge_color : string
color of the edge lines
edge_width : numeric
width of the edge lines
edge_opacity : numeric
opacity of the edge lines
popup_attribute : string
edge attribute to display in a pop-up when an edge is clicked, if None,
no popup
Returns
-------
pl : folium.PolyLine
### Response:
def make_folium_polyline(edge, edge_color, edge_width, edge_opacity, popup_attribute=None):
"""
Turn a row from the gdf_edges GeoDataFrame into a folium PolyLine with
attributes.
Parameters
----------
edge : GeoSeries
a row from the gdf_edges GeoDataFrame
edge_color : string
color of the edge lines
edge_width : numeric
width of the edge lines
edge_opacity : numeric
opacity of the edge lines
popup_attribute : string
edge attribute to display in a pop-up when an edge is clicked, if None,
no popup
Returns
-------
pl : folium.PolyLine
"""
# check if we were able to import folium successfully
if not folium:
raise ImportError('The folium package must be installed to use this optional feature.')
# locations is a list of points for the polyline
# folium takes coords in lat,lon but geopandas provides them in lon,lat
# so we have to flip them around
locations = list([(lat, lon) for lon, lat in edge['geometry'].coords])
# if popup_attribute is None, then create no pop-up
if popup_attribute is None:
popup = None
else:
# folium doesn't interpret html in the html argument (weird), so can't
# do newlines without an iframe
popup_text = json.dumps(edge[popup_attribute])
popup = folium.Popup(html=popup_text)
# create a folium polyline with attributes
pl = folium.PolyLine(locations=locations, popup=popup,
color=edge_color, weight=edge_width, opacity=edge_opacity)
return pl |
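A hedged sketch of how such a helper is typically driven: iterate over the rows of an edges GeoDataFrame (for instance one produced by osmnx's graph_to_gdfs) and add each PolyLine to a folium map. The map centre and the gdf_edges variable are assumptions:
import folium

m = folium.Map(location=(52.52, 13.40), zoom_start=14)
for _, edge in gdf_edges.iterrows():  # gdf_edges assumed to exist with a 'geometry' column
    pl = make_folium_polyline(edge, edge_color="#3388ff", edge_width=2,
                              edge_opacity=1, popup_attribute=None)
    pl.add_to(m)
m.save("edges.html")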
def print_vessel_errors(retdict):
"""
<Purpose>
Prints out any errors that occurred while performing an action on vessels,
in a human readable way.
Errors will be printed out in the following format:
description [reason]
Affected vessels: nodelist
To define a new error, add the following entry to ERROR_RESPONSES in this
function:
'error_identifier': {
'error': 'description for the error',
'reason': 'reason for the error' (optional).
'error_identifier'
This is the substring of the error that can be used to identify it.
Longer identifiers will have a higher priority over shorter identifiers.
For example, authentication errors could be identified using the string
'Insufficient Permissions'.
'error'
This is where you put the description for the error to show to the user.
'reason' (optional)
This is where you put clarification for the error to ease the user.
Additionally, you may put things that they could do to fix the error here,
if applicable. If you don't want to show a reason, don't include this key
in the dictionary.
Examples when you would not put a reason is if you received a timeout,
since the user can't do anything to fix them.
<Arguments>
retdict:
A list of longnames mapped against a tuple (Success?, Message/Errortext).
<Side Effects>
Prints error messages onto the screen. See documentation for ERROR_RESPONSES
for more information.
<Exceptions>
Exception
<Return>
None
"""
ERROR_RESPONSES = {
"Node Manager error 'Insufficient Permissions'": {
'error': "You lack sufficient permissions to perform this action.",
'reason': "Did you release the resource(s) by accident?"},
'timed out': {
'error':'Connection timed out.'},
"file not found": {
'error': "The specified file(s) could not be found.",
'reason': "Please check the filename."},
"Node Manager error 'Programming language platform is not supported.'": {
'error': "Requested platform is not supported by the target vessel."},
}
# A dictionary mapping error identifiers to a list of vessels that share
# that error.
error_longnames = {}
for longname in retdict:
# if the first item is true, then there is no error.
if not retdict[longname][0]:
matches = []
# Loop until we find the response
for error_string in ERROR_RESPONSES:
if error_string.lower() in retdict[longname][1].lower():
# This is the first match
if not matches:
matches = [error_string]
else:
# This is a better match, forget about the previous matches
if len(error_string) > len(matches[0]):
matches = [error_string]
elif len(error_string) == len(matches[0]):
matches.append(error_string)
# If there isn't a match, use the error string as an error identifier
if not matches:
errorid = retdict[longname][1]
else:
# There should not be more than 1 match for any error.
# If there is, log the error to a file.
if len(matches) != 1:
errfile = open('seasherrors.txt', 'a')
errorstring = "Multiple matches with same priority:" + '\n'.join(matches)
errfile.write(errorstring)
errfile.close()
raise Exception(errorstring)
errorid = matches[0]
# Create the longname list if it doesn't already exist
if errorid not in error_longnames:
error_longnames[errorid] = []
error_longnames[errorid].append(longname)
# Print the errors out
for errorid in error_longnames:
# Represent the list of nodes in a human readable way.
nodestring = ''
for node in error_longnames[errorid]:
# This is the first node
if node == error_longnames[errorid][0]:
divider = ''
# This is a node in the middle
elif node != error_longnames[errorid][-1]:
divider = ', '
# This is the last node
else:
# We will always have at least 2 nodes at this point, since if there
# is only one node, it will be treated as the first node. Therefore,
# we only have two cases, where there are exactly 2 nodes, or more than
# 2 nodes.
# If we have two nodes, we want: "node_1 and node_2".
# Otherwise, we want: "node_1, node_2, ..., and node_n".
divider = " and "
if len(error_longnames[errorid]) > 2:
divider = ',' + divider
nodestring += divider + node
if errorid in ERROR_RESPONSES:
print ERROR_RESPONSES[errorid]['error'],
if 'reason' in ERROR_RESPONSES[errorid]:
print ERROR_RESPONSES[errorid]['reason']
else:
# Caret is still on the same line as the list of nodes
print
else:
# Unknown error.
print "An error occurred: " + errorid
print "Affected vessels:", nodestring + '.' | <Purpose>
Prints out any errors that occurred while performing an action on vessels,
in a human readable way.
Errors will be printed out in the following format:
description [reason]
Affected vessels: nodelist
To define a new error, add the following entry to ERROR_RESPONSES in this
function:
'error_identifier': {
'error': 'description for the error',
'reason': 'reason for the error' (optional).
'error_identifier'
This is the substring of the error that can be used to identify it.
Longer identifiers will have a higher priority over shorter identifiers.
For example, authentication errors could be identified using the string
'Insufficient Permissions'.
'error'
This is where you put the description for the error to show to the user.
'reason' (optional)
This is where you put clarification for the error to ease the user.
Additionally, you may put things that they could do to fix the error here,
if applicable. If you don't want to show a reason, don't include this key
in the dictionary.
Examples when you would not put a reason is if you received a timeout,
since the user can't do anything to fix them.
<Arguments>
retdict:
A list of longnames mapped against a tuple (Success?, Message/Errortext).
<Side Effects>
Prints error messages onto the screen. See documentation for ERROR_RESPONSES
for more information.
<Exceptions>
Exception
<Return>
None | Below is the the instruction that describes the task:
### Input:
<Purpose>
Prints out any errors that occurred while performing an action on vessels,
in a human readable way.
Errors will be printed out in the following format:
description [reason]
Affected vessels: nodelist
To define a new error, add the following entry to ERROR_RESPONSES in this
function:
'error_identifier': {
'error': 'description for the error',
'reason': 'reason for the error' (optional).
'error_identifier'
This is the substring of the error that can be used to identify it.
Longer identifiers will have a higher priority over shorter identifiers.
For example, authentication errors could be identified using the string
'Insufficient Permissions'.
'error'
This is where you put the description for the error to show to the user.
'reason' (optional)
This is where you put clarification for the error to ease the user.
Additionally, you may put things that they could do to fix the error here,
if applicable. If you don't want to show a reason, don't include this key
in the dictionary.
Examples when you would not put a reason is if you received a timeout,
since the user can't do anything to fix them.
<Arguments>
retdict:
A list of longnames mapped against a tuple (Success?, Message/Errortext).
<Side Effects>
Prints error messages onto the screen. See documentation for ERROR_RESPONSES
for more information.
<Exceptions>
Exception
<Return>
None
### Response:
def print_vessel_errors(retdict):
"""
<Purpose>
Prints out any errors that occurred while performing an action on vessels,
in a human readable way.
Errors will be printed out in the following format:
description [reason]
Affected vessels: nodelist
To define a new error, add the following entry to ERROR_RESPONSES in this
function:
'error_identifier': {
'error': 'description for the error',
'reason': 'reason for the error' (optional).
'error_identifier'
This is the substring of the error that can be used to identify it.
Longer identifiers will have a higher priority over shorter identifiers.
For example, authentication errors could be identified using the string
'Insufficient Permissions'.
'error'
This is where you put the description for the error to show to the user.
'reason' (optional)
This is where you put clarification for the error to ease the user.
Additionally, you may put things that they could do to fix the error here,
if applicable. If you don't want to show a reason, don't include this key
in the dictionary.
Examples when you would not put a reason is if you received a timeout,
since the user can't do anything to fix them.
<Arguments>
retdict:
A list of longnames mapped against a tuple (Success?, Message/Errortext).
<Side Effects>
Prints error messages onto the screen. See documentation for ERROR_RESPONSES
for more information.
<Exceptions>
Exception
<Return>
None
"""
ERROR_RESPONSES = {
"Node Manager error 'Insufficient Permissions'": {
'error': "You lack sufficient permissions to perform this action.",
'reason': "Did you release the resource(s) by accident?"},
'timed out': {
'error':'Connection timed out.'},
"file not found": {
'error': "The specified file(s) could not be found.",
'reason': "Please check the filename."},
"Node Manager error 'Programming language platform is not supported.'": {
'error': "Requested platform is not supported by the target vessel."},
}
# A dictionary mapping error identifiers to a list of vessels that share
# that error.
error_longnames = {}
for longname in retdict:
# if the first item is true, then there is no error.
if not retdict[longname][0]:
matches = []
# Loop until we find the response
for error_string in ERROR_RESPONSES:
if error_string.lower() in retdict[longname][1].lower():
# This is the first match
if not matches:
matches = [error_string]
else:
# This is a better match, forget about the previous matches
if len(error_string) > len(matches[0]):
matches = [error_string]
elif len(error_string) == len(matches[0]):
matches.append(error_string)
# If there isn't a match, use the error string as an error identifier
if not matches:
errorid = retdict[longname][1]
else:
# There should not be more than 1 match for any error.
# If there is, log the error to a file.
if len(matches) != 1:
errfile = open('seasherrors.txt', 'a')
errorstring = "Multiple matches with same priority:" + '\n'.join(matches)
errfile.write(errorstring)
errfile.close()
raise Exception(errorstring)
errorid = matches[0]
# Create the longname list if it doesn't already exist
if errorid not in error_longnames:
error_longnames[errorid] = []
error_longnames[errorid].append(longname)
# Print the errors out
for errorid in error_longnames:
# Represent the list of nodes in a human readable way.
nodestring = ''
for node in error_longnames[errorid]:
# This is the first node
if node == error_longnames[errorid][0]:
divider = ''
# This is a node in the middle
elif node != error_longnames[errorid][-1]:
divider = ', '
# This is the last node
else:
# We will always have at least 2 nodes at this point, since if there
# is only one node, it will be treated as the first node. Therefore,
# we only have two cases, where there are exactly 2 nodes, or more than
# 2 nodes.
# If we have two nodes, we want: "node_1 and node_2".
# Otherwise, we want: "node_1, node_2, ..., and node_n".
divider = " and "
if len(error_longnames[errorid]) > 2:
divider = ',' + divider
nodestring += divider + node
if errorid in ERROR_RESPONSES:
print ERROR_RESPONSES[errorid]['error'],
if 'reason' in ERROR_RESPONSES[errorid]:
print ERROR_RESPONSES[errorid]['reason']
else:
# Caret is still on the same line as the list of nodes
print
else:
# Unknown error.
print "An error occurred: " + errorid
print "Affected vessels:", nodestring + '.' |
def _get_tau_vector(self, tau_mean, tau_std, imt_list):
"""
Gets the vector of mean and variance of tau values corresponding to
the specific model and returns them as dictionaries
"""
self.magnitude_limits = MAG_LIMS_KEYS[self.tau_model]["mag"]
self.tau_keys = MAG_LIMS_KEYS[self.tau_model]["keys"]
t_bar = {}
t_std = {}
for imt in imt_list:
t_bar[imt] = []
t_std[imt] = []
for mag, key in zip(self.magnitude_limits, self.tau_keys):
t_bar[imt].append(
TAU_EXECUTION[self.tau_model](imt, mag, tau_mean))
t_std[imt].append(
TAU_EXECUTION[self.tau_model](imt, mag, tau_std))
return t_bar, t_std | Gets the vector of mean and variance of tau values corresponding to
the specific model and returns them as dictionaries | Below is the the instruction that describes the task:
### Input:
Gets the vector of mean and variance of tau values corresponding to
the specific model and returns them as dictionaries
### Response:
def _get_tau_vector(self, tau_mean, tau_std, imt_list):
"""
Gets the vector of mean and variance of tau values corresponding to
the specific model and returns them as dictionaries
"""
self.magnitude_limits = MAG_LIMS_KEYS[self.tau_model]["mag"]
self.tau_keys = MAG_LIMS_KEYS[self.tau_model]["keys"]
t_bar = {}
t_std = {}
for imt in imt_list:
t_bar[imt] = []
t_std[imt] = []
for mag, key in zip(self.magnitude_limits, self.tau_keys):
t_bar[imt].append(
TAU_EXECUTION[self.tau_model](imt, mag, tau_mean))
t_std[imt].append(
TAU_EXECUTION[self.tau_model](imt, mag, tau_std))
return t_bar, t_std |
def transform(src, dst, converter,
overwrite=False, stream=True, chunksize=1024**2, **kwargs):
"""
A file stream transform IO utility function.
:param src: original file path
:param dst: destination file path
:param converter: binary content converter function
:param overwrite: default False,
:param stream: default True, if True, use stream IO mode, chunksize has to
be specified.
:param chunksize: default 1MB
"""
if not overwrite: # pragma: no cover
if Path(dst).exists():
raise EnvironmentError("'%s' already exists!" % dst)
with open(src, "rb") as f_input:
with open(dst, "wb") as f_output:
if stream:
# fix chunksize to a reasonable range
if chunksize > 1024 ** 2 * 10:
chunksize = 1024 ** 2 * 10
elif chunksize < 1024 ** 2:
chunksize = 1024 ** 2
# write file
while 1:
content = f_input.read(chunksize)
if content:
f_output.write(converter(content, **kwargs))
else:
break
else: # pragma: no cover
f_output.write(converter(f_input.read(), **kwargs)) | A file stream transform IO utility function.
:param src: original file path
:param dst: destination file path
:param converter: binary content converter function
:param overwrite: default False,
:param stream: default True, if True, use stream IO mode, chunksize has to
be specified.
:param chunksize: default 1MB | Below is the the instruction that describes the task:
### Input:
A file stream transform IO utility function.
:param src: original file path
:param dst: destination file path
:param converter: binary content converter function
:param overwrite: default False,
:param stream: default True, if True, use stream IO mode, chunksize has to
be specified.
:param chunksize: default 1MB
### Response:
def transform(src, dst, converter,
overwrite=False, stream=True, chunksize=1024**2, **kwargs):
"""
A file stream transform IO utility function.
:param src: original file path
:param dst: destination file path
:param converter: binary content converter function
:param overwrite: default False,
:param stream: default True, if True, use stream IO mode, chunksize has to
be specified.
:param chunksize: default 1MB
"""
if not overwrite: # pragma: no cover
if Path(dst).exists():
raise EnvironmentError("'%s' already exists!" % dst)
with open(src, "rb") as f_input:
with open(dst, "wb") as f_output:
if stream:
# fix chunksize to a reasonable range
if chunksize > 1024 ** 2 * 10:
chunksize = 1024 ** 2 * 10
elif chunksize < 1024 ** 2:
chunksize = 1024 ** 2
# write file
while 1:
content = f_input.read(chunksize)
if content:
f_output.write(converter(content, **kwargs))
else:
break
else: # pragma: no cover
f_output.write(converter(f_input.read(), **kwargs)) |
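
As an illustrative aside on the `transform` row above: a minimal usage sketch, assuming `transform` is importable from that module; the file names and the uppercasing converter are made up for this example.

```python
# Hypothetical use of transform() from the row above: copy a file while
# uppercasing its bytes. The converter receives raw bytes chunks.
def upper_converter(chunk, **kwargs):
    return chunk.upper()

with open("input.txt", "wb") as f:          # example input file, created here
    f.write(b"stream io demo\n" * 4)

transform("input.txt", "output.txt", upper_converter, overwrite=True)
```
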
def assign_descriptors(mol):
"""
Throws:
RuntimeError: if minify_ring failed
"""
topology.recognize(mol)
descriptor.assign_valence(mol)
descriptor.assign_rotatable(mol)
topology.minify_ring(mol)
descriptor.assign_aromatic(mol) | Throws:
RuntimeError: if minify_ring failed | Below is the the instruction that describes the task:
### Input:
Throws:
RuntimeError: if minify_ring failed
### Response:
def assign_descriptors(mol):
"""
Throws:
RuntimeError: if minify_ring failed
"""
topology.recognize(mol)
descriptor.assign_valence(mol)
descriptor.assign_rotatable(mol)
topology.minify_ring(mol)
descriptor.assign_aromatic(mol) |
def get_members(self, selector):
"""
Returns the members that satisfy the given selector.
:param selector: (:class:`~hazelcast.core.MemberSelector`), Selector to be applied to the members.
:return: (List), List of members.
"""
members = []
for member in self.get_member_list():
if selector.select(member):
members.append(member)
return members | Returns the members that satisfy the given selector.
:param selector: (:class:`~hazelcast.core.MemberSelector`), Selector to be applied to the members.
:return: (List), List of members. | Below is the the instruction that describes the task:
### Input:
Returns the members that satisfy the given selector.
:param selector: (:class:`~hazelcast.core.MemberSelector`), Selector to be applied to the members.
:return: (List), List of members.
### Response:
def get_members(self, selector):
"""
Returns the members that satisfy the given selector.
:param selector: (:class:`~hazelcast.core.MemberSelector`), Selector to be applied to the members.
:return: (List), List of members.
"""
members = []
for member in self.get_member_list():
if selector.select(member):
members.append(member)
return members |
def get_private_key_from_wif(wif: str) -> bytes:
"""
This interface is used to decode a WIF encode ECDSA private key.
:param wif: a WIF encode private key.
:return: a ECDSA private key in the form of bytes.
"""
if wif is None or wif is "":
raise Exception("none wif")
data = base58.b58decode(wif)
if len(data) != 38 or data[0] != 0x80 or data[33] != 0x01:
raise Exception("wif wrong")
checksum = Digest.hash256(data[0:34])
for i in range(4):
if data[len(data) - 4 + i] != checksum[i]:
raise Exception("wif wrong")
return data[1:33] | This interface is used to decode a WIF encode ECDSA private key.
:param wif: a WIF encode private key.
:return: a ECDSA private key in the form of bytes. | Below is the the instruction that describes the task:
### Input:
This interface is used to decode a WIF encode ECDSA private key.
:param wif: a WIF encode private key.
:return: a ECDSA private key in the form of bytes.
### Response:
def get_private_key_from_wif(wif: str) -> bytes:
"""
This interface is used to decode a WIF encode ECDSA private key.
:param wif: a WIF encode private key.
:return: a ECDSA private key in the form of bytes.
"""
if wif is None or wif is "":
raise Exception("none wif")
data = base58.b58decode(wif)
if len(data) != 38 or data[0] != 0x80 or data[33] != 0x01:
raise Exception("wif wrong")
checksum = Digest.hash256(data[0:34])
for i in range(4):
if data[len(data) - 4 + i] != checksum[i]:
raise Exception("wif wrong")
return data[1:33] |
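
The byte layout validated in the WIF row above is: one 0x80 prefix byte, 32 private-key bytes, a 0x01 compression flag, then a 4-byte double-SHA256 checksum (38 bytes total). Below is a self-contained sketch of that checksum rule using only `hashlib` — the key bytes are invented and the real base58 decoding step is skipped, since the row's `base58`/`Digest` helpers are external:

```python
import hashlib

def hash256(data: bytes) -> bytes:
    """Double SHA-256, as used for the WIF checksum."""
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

# Build an example decoded-WIF payload (normally the output of base58.b58decode).
key = bytes(range(1, 33))                    # 32 illustrative key bytes
payload = b"\x80" + key + b"\x01"            # prefix + key + compression flag
wif_bytes = payload + hash256(payload)[:4]   # append 4-byte checksum

assert len(wif_bytes) == 38
assert wif_bytes[0] == 0x80 and wif_bytes[33] == 0x01
assert hash256(wif_bytes[0:34])[:4] == wif_bytes[34:38]
print(wif_bytes[1:33].hex())                 # the recovered private key
```
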
def filter_to_pass_and_reject(in_file, paired, out_dir=None):
"""Filter VCF to only those with a strict PASS/REJECT: somatic + germline.
Removes low quality calls filtered but also labeled with REJECT.
"""
from bcbio.heterogeneity import bubbletree
out_file = "%s-prfilter.vcf.gz" % utils.splitext_plus(in_file)[0]
if out_dir:
out_file = os.path.join(out_dir, os.path.basename(out_file))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
max_depth = bubbletree.max_normal_germline_depth(in_file, bubbletree.PARAMS, paired)
tx_out_plain = tx_out_file.replace(".vcf.gz", ".vcf")
with contextlib.closing(cyvcf2.VCF(in_file)) as reader:
reader = _add_db_to_header(reader)
with contextlib.closing(cyvcf2.Writer(tx_out_plain, reader)) as writer:
for rec in reader:
filters = rec.FILTER.split(";") if rec.FILTER else []
other_filters = [x for x in filters if x not in ["PASS", ".", "REJECT"]]
if len(other_filters) == 0 or bubbletree.is_info_germline(rec):
# Germline, check if we should include based on frequencies
if "REJECT" in filters or bubbletree.is_info_germline(rec):
stats = bubbletree._is_possible_loh(rec, reader, bubbletree.PARAMS, paired,
use_status=True, max_normal_depth=max_depth)
if stats:
rec.FILTER = "PASS"
rec.INFO["DB"] = True
writer.write_record(rec)
# Somatic, always include
else:
writer.write_record(rec)
vcfutils.bgzip_and_index(tx_out_plain, paired.tumor_data["config"])
return out_file | Filter VCF to only those with a strict PASS/REJECT: somatic + germline.
Removes low quality calls filtered but also labeled with REJECT. | Below is the the instruction that describes the task:
### Input:
Filter VCF to only those with a strict PASS/REJECT: somatic + germline.
Removes low quality calls filtered but also labeled with REJECT.
### Response:
def filter_to_pass_and_reject(in_file, paired, out_dir=None):
"""Filter VCF to only those with a strict PASS/REJECT: somatic + germline.
Removes low quality calls filtered but also labeled with REJECT.
"""
from bcbio.heterogeneity import bubbletree
out_file = "%s-prfilter.vcf.gz" % utils.splitext_plus(in_file)[0]
if out_dir:
out_file = os.path.join(out_dir, os.path.basename(out_file))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
max_depth = bubbletree.max_normal_germline_depth(in_file, bubbletree.PARAMS, paired)
tx_out_plain = tx_out_file.replace(".vcf.gz", ".vcf")
with contextlib.closing(cyvcf2.VCF(in_file)) as reader:
reader = _add_db_to_header(reader)
with contextlib.closing(cyvcf2.Writer(tx_out_plain, reader)) as writer:
for rec in reader:
filters = rec.FILTER.split(";") if rec.FILTER else []
other_filters = [x for x in filters if x not in ["PASS", ".", "REJECT"]]
if len(other_filters) == 0 or bubbletree.is_info_germline(rec):
# Germline, check if we should include based on frequencies
if "REJECT" in filters or bubbletree.is_info_germline(rec):
stats = bubbletree._is_possible_loh(rec, reader, bubbletree.PARAMS, paired,
use_status=True, max_normal_depth=max_depth)
if stats:
rec.FILTER = "PASS"
rec.INFO["DB"] = True
writer.write_record(rec)
# Somatic, always include
else:
writer.write_record(rec)
vcfutils.bgzip_and_index(tx_out_plain, paired.tumor_data["config"])
return out_file |
def sitetree_tree(parser, token):
"""Parses sitetree tag parameters.
Two notation types are possible:
1. Two arguments:
{% sitetree_tree from "mytree" %}
Used to render tree for "mytree" site tree.
2. Four arguments:
{% sitetree_tree from "mytree" template "sitetree/mytree.html" %}
Used to render tree for "mytree" site tree using specific
template "sitetree/mytree.html"
"""
tokens = token.split_contents()
use_template = detect_clause(parser, 'template', tokens)
tokens_num = len(tokens)
if tokens_num in (3, 5):
tree_alias = parser.compile_filter(tokens[2])
return sitetree_treeNode(tree_alias, use_template)
else:
raise template.TemplateSyntaxError(
'%r tag requires two arguments. E.g. {%% sitetree_tree from "mytree" %%}.' % tokens[0]) | Parses sitetree tag parameters.
Two notation types are possible:
1. Two arguments:
{% sitetree_tree from "mytree" %}
Used to render tree for "mytree" site tree.
2. Four arguments:
{% sitetree_tree from "mytree" template "sitetree/mytree.html" %}
Used to render tree for "mytree" site tree using specific
template "sitetree/mytree.html" | Below is the the instruction that describes the task:
### Input:
Parses sitetree tag parameters.
Two notation types are possible:
1. Two arguments:
{% sitetree_tree from "mytree" %}
Used to render tree for "mytree" site tree.
2. Four arguments:
{% sitetree_tree from "mytree" template "sitetree/mytree.html" %}
Used to render tree for "mytree" site tree using specific
template "sitetree/mytree.html"
### Response:
def sitetree_tree(parser, token):
"""Parses sitetree tag parameters.
Two notation types are possible:
1. Two arguments:
{% sitetree_tree from "mytree" %}
Used to render tree for "mytree" site tree.
2. Four arguments:
{% sitetree_tree from "mytree" template "sitetree/mytree.html" %}
Used to render tree for "mytree" site tree using specific
template "sitetree/mytree.html"
"""
tokens = token.split_contents()
use_template = detect_clause(parser, 'template', tokens)
tokens_num = len(tokens)
if tokens_num in (3, 5):
tree_alias = parser.compile_filter(tokens[2])
return sitetree_treeNode(tree_alias, use_template)
else:
raise template.TemplateSyntaxError(
'%r tag requires two arguments. E.g. {%% sitetree_tree from "mytree" %%}.' % tokens[0]) |
def compute_tls13_resumption_secret(self):
"""
self.handshake_messages should be ClientHello...ClientFinished.
"""
if self.connection_end == "server":
hkdf = self.prcs.hkdf
elif self.connection_end == "client":
hkdf = self.pwcs.hkdf
rs = hkdf.derive_secret(self.tls13_master_secret,
b"resumption master secret",
b"".join(self.handshake_messages))
self.tls13_derived_secrets["resumption_secret"] = rs | self.handshake_messages should be ClientHello...ClientFinished. | Below is the the instruction that describes the task:
### Input:
self.handshake_messages should be ClientHello...ClientFinished.
### Response:
def compute_tls13_resumption_secret(self):
"""
self.handshake_messages should be ClientHello...ClientFinished.
"""
if self.connection_end == "server":
hkdf = self.prcs.hkdf
elif self.connection_end == "client":
hkdf = self.pwcs.hkdf
rs = hkdf.derive_secret(self.tls13_master_secret,
b"resumption master secret",
b"".join(self.handshake_messages))
self.tls13_derived_secrets["resumption_secret"] = rs |
def init_app(self, app):
"""Flask application initialization."""
app.cli.add_command(upgrader_cmd)
app.extensions['invenio-upgrader'] = self | Flask application initialization. | Below is the the instruction that describes the task:
### Input:
Flask application initialization.
### Response:
def init_app(self, app):
"""Flask application initialization."""
app.cli.add_command(upgrader_cmd)
app.extensions['invenio-upgrader'] = self |
def express_route_connections(self):
"""Instance depends on the API version:
* 2018-08-01: :class:`ExpressRouteConnectionsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteConnectionsOperations>`
"""
api_version = self._get_api_version('express_route_connections')
if api_version == '2018-08-01':
from .v2018_08_01.operations import ExpressRouteConnectionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2018-08-01: :class:`ExpressRouteConnectionsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteConnectionsOperations>` | Below is the the instruction that describes the task:
### Input:
Instance depends on the API version:
* 2018-08-01: :class:`ExpressRouteConnectionsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteConnectionsOperations>`
### Response:
def express_route_connections(self):
"""Instance depends on the API version:
* 2018-08-01: :class:`ExpressRouteConnectionsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteConnectionsOperations>`
"""
api_version = self._get_api_version('express_route_connections')
if api_version == '2018-08-01':
from .v2018_08_01.operations import ExpressRouteConnectionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) |
def upcoming_viewings(self):
"""
Returns an array of upcoming viewings for a property.
:return:
"""
upcoming_viewings = []
try:
if self._data_from_search:
viewings = self._data_from_search.find_all(
'div', {'class': 'smi-onview-text'})
else:
viewings = []
except Exception as e:
if self._debug:
logging.error(
"Error getting upcoming_viewings. Error message: " + e.args[0])
return
for viewing in viewings:
upcoming_viewings.append(viewing.text.strip())
return upcoming_viewings | Returns an array of upcoming viewings for a property.
:return: | Below is the the instruction that describes the task:
### Input:
Returns an array of upcoming viewings for a property.
:return:
### Response:
def upcoming_viewings(self):
"""
Returns an array of upcoming viewings for a property.
:return:
"""
upcoming_viewings = []
try:
if self._data_from_search:
viewings = self._data_from_search.find_all(
'div', {'class': 'smi-onview-text'})
else:
viewings = []
except Exception as e:
if self._debug:
logging.error(
"Error getting upcoming_viewings. Error message: " + e.args[0])
return
for viewing in viewings:
upcoming_viewings.append(viewing.text.strip())
return upcoming_viewings |
def main(ylib: str = None, path: str = None,
scope: ValidationScope = ValidationScope.all,
ctype: ContentType = ContentType.config, set_id: bool = False,
tree: bool = False, no_types: bool = False,
digest: bool = False, validate: str = None) -> int:
"""Entry-point for a validation script.
Args:
ylib: Name of the file with YANG library
path: Colon-separated list of directories to search for YANG modules.
scope: Validation scope (syntax, semantics or all).
ctype: Content type of the data instance (config, nonconfig or all)
set_id: If `True`, print module set id.
tree: If `True`, print schema tree.
no_types: If `True`, don't print types in schema tree.
digest: If `True`, print schema digest.
validate: Name of file to validate against the schema.
Returns:
Numeric return code (0=no error, 2=YANG error, 1=other)
"""
if ylib is None:
parser = argparse.ArgumentParser(
prog="yangson",
description="Validate JSON data against a YANG data model.")
parser.add_argument(
"-V", "--version", action="version",
version=f"%(prog)s {pkg_resources.get_distribution('yangson').version}")
parser.add_argument(
"ylib", metavar="YLIB",
help=("name of the file with description of the data model"
" in JSON-encoded YANG library format [RFC 7895]"))
parser.add_argument(
"-p", "--path",
help=("colon-separated list of directories to search"
" for YANG modules"))
grp = parser.add_mutually_exclusive_group()
grp.add_argument(
"-i", "--id", action="store_true",
help="print module set id")
grp.add_argument(
"-t", "--tree", action="store_true",
help="print schema tree as ASCII art")
grp.add_argument(
"-d", "--digest", action="store_true",
help="print schema digest in JSON format")
grp.add_argument(
"-v", "--validate", metavar="INST",
help="name of the file with JSON-encoded instance data")
parser.add_argument(
"-s", "--scope", choices=["syntax", "semantics", "all"],
default="all", help="validation scope (default: %(default)s)")
parser.add_argument(
"-c", "--ctype", type=str, choices=["config", "nonconfig", "all"],
default="config",
help="content type of the data instance (default: %(default)s)")
parser.add_argument(
"-n", "--no-types", action="store_true",
help="suppress type info in tree output")
args = parser.parse_args()
ylib: str = args.ylib
path: Optional[str] = args.path
scope = ValidationScope[args.scope]
ctype = ContentType[args.ctype]
set_id: bool = args.id
tree: bool = args.tree
no_types = args.no_types
digest: bool = args.digest
validate: str = args.validate
try:
with open(ylib, encoding="utf-8") as infile:
yl = infile.read()
except (FileNotFoundError, PermissionError,
json.decoder.JSONDecodeError) as e:
print("YANG library:", str(e), file=sys.stderr)
return 1
sp = path if path else os.environ.get("YANG_MODPATH", ".")
try:
dm = DataModel(yl, tuple(sp.split(":")))
except BadYangLibraryData as e:
print("Invalid YANG library:", str(e), file=sys.stderr)
return 2
except FeaturePrerequisiteError as e:
print("Unsupported pre-requisite feature:", str(e), file=sys.stderr)
return 2
except MultipleImplementedRevisions as e:
print("Multiple implemented revisions:", str(e), file=sys.stderr)
return 2
except ModuleNotFound as e:
print("Module not found:", str(e), file=sys.stderr)
return 2
except ModuleNotRegistered as e:
print("Module not registered:", str(e), file=sys.stderr)
return 2
if set_id:
print(dm.module_set_id())
return 0
if tree:
print(dm.ascii_tree(no_types))
return 0
if digest:
print(dm.schema_digest())
return 0
if not validate:
return 0
try:
with open(validate, encoding="utf-8") as infile:
itxt = json.load(infile)
except (FileNotFoundError, PermissionError,
json.decoder.JSONDecodeError) as e:
print("Instance data:", str(e), file=sys.stderr)
return 1
try:
i = dm.from_raw(itxt)
except RawMemberError as e:
print("Illegal object member:", str(e), file=sys.stderr)
return 3
except RawTypeError as e:
print("Invalid type:", str(e), file=sys.stderr)
return 3
try:
i.validate(scope, ctype)
except SchemaError as e:
print("Schema error:", str(e), file=sys.stderr)
return 3
except SemanticError as e:
print("Semantic error:", str(e), file=sys.stderr)
return 3
except YangTypeError as e:
print("Invalid type:", str(e), file=sys.stderr)
return 3
return 0 | Entry-point for a validation script.
Args:
ylib: Name of the file with YANG library
path: Colon-separated list of directories to search for YANG modules.
scope: Validation scope (syntax, semantics or all).
ctype: Content type of the data instance (config, nonconfig or all)
set_id: If `True`, print module set id.
tree: If `True`, print schema tree.
no_types: If `True`, don't print types in schema tree.
digest: If `True`, print schema digest.
validate: Name of file to validate against the schema.
Returns:
Numeric return code (0=no error, 2=YANG error, 1=other) | Below is the the instruction that describes the task:
### Input:
Entry-point for a validation script.
Args:
ylib: Name of the file with YANG library
path: Colon-separated list of directories to search for YANG modules.
scope: Validation scope (syntax, semantics or all).
ctype: Content type of the data instance (config, nonconfig or all)
set_id: If `True`, print module set id.
tree: If `True`, print schema tree.
no_types: If `True`, don't print types in schema tree.
digest: If `True`, print schema digest.
validate: Name of file to validate against the schema.
Returns:
Numeric return code (0=no error, 2=YANG error, 1=other)
### Response:
def main(ylib: str = None, path: str = None,
scope: ValidationScope = ValidationScope.all,
ctype: ContentType = ContentType.config, set_id: bool = False,
tree: bool = False, no_types: bool = False,
digest: bool = False, validate: str = None) -> int:
"""Entry-point for a validation script.
Args:
ylib: Name of the file with YANG library
path: Colon-separated list of directories to search for YANG modules.
scope: Validation scope (syntax, semantics or all).
ctype: Content type of the data instance (config, nonconfig or all)
set_id: If `True`, print module set id.
tree: If `True`, print schema tree.
no_types: If `True`, don't print types in schema tree.
digest: If `True`, print schema digest.
validate: Name of file to validate against the schema.
Returns:
Numeric return code (0=no error, 2=YANG error, 1=other)
"""
if ylib is None:
parser = argparse.ArgumentParser(
prog="yangson",
description="Validate JSON data against a YANG data model.")
parser.add_argument(
"-V", "--version", action="version",
version=f"%(prog)s {pkg_resources.get_distribution('yangson').version}")
parser.add_argument(
"ylib", metavar="YLIB",
help=("name of the file with description of the data model"
" in JSON-encoded YANG library format [RFC 7895]"))
parser.add_argument(
"-p", "--path",
help=("colon-separated list of directories to search"
" for YANG modules"))
grp = parser.add_mutually_exclusive_group()
grp.add_argument(
"-i", "--id", action="store_true",
help="print module set id")
grp.add_argument(
"-t", "--tree", action="store_true",
help="print schema tree as ASCII art")
grp.add_argument(
"-d", "--digest", action="store_true",
help="print schema digest in JSON format")
grp.add_argument(
"-v", "--validate", metavar="INST",
help="name of the file with JSON-encoded instance data")
parser.add_argument(
"-s", "--scope", choices=["syntax", "semantics", "all"],
default="all", help="validation scope (default: %(default)s)")
parser.add_argument(
"-c", "--ctype", type=str, choices=["config", "nonconfig", "all"],
default="config",
help="content type of the data instance (default: %(default)s)")
parser.add_argument(
"-n", "--no-types", action="store_true",
help="suppress type info in tree output")
args = parser.parse_args()
ylib: str = args.ylib
path: Optional[str] = args.path
scope = ValidationScope[args.scope]
ctype = ContentType[args.ctype]
set_id: bool = args.id
tree: bool = args.tree
no_types = args.no_types
digest: bool = args.digest
validate: str = args.validate
try:
with open(ylib, encoding="utf-8") as infile:
yl = infile.read()
except (FileNotFoundError, PermissionError,
json.decoder.JSONDecodeError) as e:
print("YANG library:", str(e), file=sys.stderr)
return 1
sp = path if path else os.environ.get("YANG_MODPATH", ".")
try:
dm = DataModel(yl, tuple(sp.split(":")))
except BadYangLibraryData as e:
print("Invalid YANG library:", str(e), file=sys.stderr)
return 2
except FeaturePrerequisiteError as e:
print("Unsupported pre-requisite feature:", str(e), file=sys.stderr)
return 2
except MultipleImplementedRevisions as e:
print("Multiple implemented revisions:", str(e), file=sys.stderr)
return 2
except ModuleNotFound as e:
print("Module not found:", str(e), file=sys.stderr)
return 2
except ModuleNotRegistered as e:
print("Module not registered:", str(e), file=sys.stderr)
return 2
if set_id:
print(dm.module_set_id())
return 0
if tree:
print(dm.ascii_tree(no_types))
return 0
if digest:
print(dm.schema_digest())
return 0
if not validate:
return 0
try:
with open(validate, encoding="utf-8") as infile:
itxt = json.load(infile)
except (FileNotFoundError, PermissionError,
json.decoder.JSONDecodeError) as e:
print("Instance data:", str(e), file=sys.stderr)
return 1
try:
i = dm.from_raw(itxt)
except RawMemberError as e:
print("Illegal object member:", str(e), file=sys.stderr)
return 3
except RawTypeError as e:
print("Invalid type:", str(e), file=sys.stderr)
return 3
try:
i.validate(scope, ctype)
except SchemaError as e:
print("Schema error:", str(e), file=sys.stderr)
return 3
except SemanticError as e:
print("Semantic error:", str(e), file=sys.stderr)
return 3
except YangTypeError as e:
print("Invalid type:", str(e), file=sys.stderr)
return 3
return 0 |
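
A hedged usage sketch for the validator entry point above, assuming `main` is importable from that module; the file names are examples only, not real files.

```python
# Hypothetical call into the validator entry point from the row above.
rc = main(ylib="yang-library.json",   # example YANG library file
          path="yang-modules",        # example module search directory
          validate="config.json")     # example instance data file
print("exit code:", rc)               # 0 means the instance validated against the model
```
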
def queryProxy(self, query):
"""Override Qt method."""
# Query is a QNetworkProxyQuery
valid_proxies = []
query_scheme = query.url().scheme()
query_host = query.url().host()
query_scheme_host = '{0}://{1}'.format(query_scheme, query_host)
proxy_servers = process_proxy_servers(self.proxy_servers)
# print(proxy_servers)
if proxy_servers:
for key in proxy_servers:
proxy_settings = proxy_servers[key]
if key == 'http' and query_scheme == 'http':
proxy = self._create_proxy(proxy_settings)
valid_proxies.append(proxy)
elif key == 'https' and query_scheme == 'https':
proxy = self._create_proxy(proxy_settings)
valid_proxies.append(proxy)
if key == query_scheme_host:
proxy = self._create_proxy(proxy_settings)
valid_proxies.append(proxy)
else:
valid_proxies.append(QNetworkProxy(QNetworkProxy.DefaultProxy))
# print('factoy', query.url().toString())
# print(valid_proxies)
# for pr in valid_proxies:
# user = pr.user()
# password = pr.password()
# host = pr.hostName()
# port = pr.port()
# print(query.url(), user, password, host, port)
# print('\n')
return valid_proxies | Override Qt method. | Below is the the instruction that describes the task:
### Input:
Override Qt method.
### Response:
def queryProxy(self, query):
"""Override Qt method."""
# Query is a QNetworkProxyQuery
valid_proxies = []
query_scheme = query.url().scheme()
query_host = query.url().host()
query_scheme_host = '{0}://{1}'.format(query_scheme, query_host)
proxy_servers = process_proxy_servers(self.proxy_servers)
# print(proxy_servers)
if proxy_servers:
for key in proxy_servers:
proxy_settings = proxy_servers[key]
if key == 'http' and query_scheme == 'http':
proxy = self._create_proxy(proxy_settings)
valid_proxies.append(proxy)
elif key == 'https' and query_scheme == 'https':
proxy = self._create_proxy(proxy_settings)
valid_proxies.append(proxy)
if key == query_scheme_host:
proxy = self._create_proxy(proxy_settings)
valid_proxies.append(proxy)
else:
valid_proxies.append(QNetworkProxy(QNetworkProxy.DefaultProxy))
# print('factoy', query.url().toString())
# print(valid_proxies)
# for pr in valid_proxies:
# user = pr.user()
# password = pr.password()
# host = pr.hostName()
# port = pr.port()
# print(query.url(), user, password, host, port)
# print('\n')
return valid_proxies |
def get_channel(self, name):
"""
Details about an individual channel.
:param name: The channel name
:type name: str
"""
return self._api_get('/api/channels/{0}'.format(
urllib.parse.quote_plus(name)
)) | Details about an individual channel.
:param name: The channel name
:type name: str | Below is the the instruction that describes the task:
### Input:
Details about an individual channel.
:param name: The channel name
:type name: str
### Response:
def get_channel(self, name):
"""
Details about an individual channel.
:param name: The channel name
:type name: str
"""
return self._api_get('/api/channels/{0}'.format(
urllib.parse.quote_plus(name)
)) |
def generatorInit(self, U0):
""" Based on GeneratorInit.m from MatDyn by Stijn Cole, developed at
Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/
electa/teaching/matdyn/} for more information.
@rtype: tuple
@return: Initial generator conditions.
"""
j = 0 + 1j
generators = self.dyn_generators
Efd0 = zeros(len(generators))
Xgen0 = zeros((len(generators), 4))
typ1 = [g._i for g in generators if g.model == CLASSICAL]
typ2 = [g._i for g in generators if g.model == FOURTH_ORDER]
# Generator type 1: classical model
x_tr = array([g.x_tr for g in generators])
omega0 = ones(len(typ1)) * 2 * pi * self.freq
# Initial machine armature currents.
Sg = array([g.p + j * g.q for g in generators])
Ia0 = conj(Sg[typ1]) / conj(U0) / self.base_mva
# Initial Steady-state internal EMF.
Eq_tr0 = U0[typ1] + j * x_tr * Ia0
delta0 = angle(Eq_tr0)
Eq_tr0 = abs(Eq_tr0)
Xgen0[typ1, :] = c_[delta0, omega0, Eq_tr0]
# Generator type 2: 4th order model
xd = array([g.xd for g in generators])
xq = array([g.xq for g in generators])
xd_tr = array([g.xd_tr for g in generators])
xq_tr = array([g.xq_tr for g in generators])
omega0 = ones(len(typ2)) * 2 * pi * self.freq
# Initial machine armature currents.
Ia0 = conj(Sg[typ2]) / conj(U0[typ2]) / self.base_mva
phi0 = angle(Ia0)
# Initial Steady-state internal EMF.
Eq0 = U0[typ2] + j * xq * Ia0
delta0 = angle(Eq0)
# Machine currents in dq frame.
Id0 = -abs(Ia0) * sin(delta0 - phi0)
Iq0 = abs(Ia0) * cos(delta0 - phi0)
# Field voltage.
Efd0[typ2] = abs(Eq0) - (xd - xq) * Id0
# Initial Transient internal EMF.
Eq_tr0 = Efd0[typ2] + (xd - xd_tr) * Id0
Ed_tr0 = -(xq - xq_tr) * Iq0
Xgen0[typ2, :] = c_[delta0, omega0, Eq_tr0, Ed_tr0]
# Generator type 3:
# Generator type 4:
return Efd0, Xgen0 | Based on GeneratorInit.m from MatDyn by Stijn Cole, developed at
Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/
electa/teaching/matdyn/} for more information.
@rtype: tuple
@return: Initial generator conditions. | Below is the the instruction that describes the task:
### Input:
Based on GeneratorInit.m from MatDyn by Stijn Cole, developed at
Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/
electa/teaching/matdyn/} for more information.
@rtype: tuple
@return: Initial generator conditions.
### Response:
def generatorInit(self, U0):
""" Based on GeneratorInit.m from MatDyn by Stijn Cole, developed at
Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/
electa/teaching/matdyn/} for more information.
@rtype: tuple
@return: Initial generator conditions.
"""
j = 0 + 1j
generators = self.dyn_generators
Efd0 = zeros(len(generators))
Xgen0 = zeros((len(generators), 4))
typ1 = [g._i for g in generators if g.model == CLASSICAL]
typ2 = [g._i for g in generators if g.model == FOURTH_ORDER]
# Generator type 1: classical model
x_tr = array([g.x_tr for g in generators])
omega0 = ones(len(typ1)) * 2 * pi * self.freq
# Initial machine armature currents.
Sg = array([g.p + j * g.q for g in generators])
Ia0 = conj(Sg[typ1]) / conj(U0) / self.base_mva
# Initial Steady-state internal EMF.
Eq_tr0 = U0[typ1] + j * x_tr * Ia0
delta0 = angle(Eq_tr0)
Eq_tr0 = abs(Eq_tr0)
Xgen0[typ1, :] = c_[delta0, omega0, Eq_tr0]
# Generator type 2: 4th order model
xd = array([g.xd for g in generators])
xq = array([g.xq for g in generators])
xd_tr = array([g.xd_tr for g in generators])
xq_tr = array([g.xq_tr for g in generators])
omega0 = ones(len(typ2)) * 2 * pi * self.freq
# Initial machine armature currents.
Ia0 = conj(Sg[typ2]) / conj(U0[typ2]) / self.base_mva
phi0 = angle(Ia0)
# Initial Steady-state internal EMF.
Eq0 = U0[typ2] + j * xq * Ia0
delta0 = angle(Eq0)
# Machine currents in dq frame.
Id0 = -abs(Ia0) * sin(delta0 - phi0)
Iq0 = abs(Ia0) * cos(delta0 - phi0)
# Field voltage.
Efd0[typ2] = abs(Eq0) - (xd - xq) * Id0
# Initial Transient internal EMF.
Eq_tr0 = Efd0[typ2] + (xd - xd_tr) * Id0
Ed_tr0 = -(xq - xq_tr) * Iq0
Xgen0[typ2, :] = c_[delta0, omega0, Eq_tr0, Ed_tr0]
# Generator type 3:
# Generator type 4:
return Efd0, Xgen0 |
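
As a numeric aside on the classical-model branch above: the internal EMF is E' = U + j·x'·Ia with Ia = conj(S)/conj(U)/S_base. A tiny self-contained check with made-up per-unit values:

```python
import cmath

# Made-up per-unit values for one classical-model machine.
base_mva = 100.0
U0 = 1.02 * cmath.exp(1j * 0.05)      # terminal voltage (p.u.)
Sg = 80.0 + 30.0j                      # injected P + jQ (MVA)
x_tr = 0.25                            # transient reactance (p.u.)

Ia0 = (Sg / base_mva).conjugate() / U0.conjugate()   # armature current (p.u.)
Eq_tr0 = U0 + 1j * x_tr * Ia0                        # internal EMF behind x'
delta0 = cmath.phase(Eq_tr0)                         # rotor angle (rad)

print(abs(Eq_tr0), delta0)
```
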
def _aloadstr(ins):
''' Loads a string value from a memory address.
'''
output = _addr(ins.quad[2])
output.append('call __ILOADSTR')
output.append('push hl')
REQUIRES.add('loadstr.asm')
return output | Loads a string value from a memory address. | Below is the the instruction that describes the task:
### Input:
Loads a string value from a memory address.
### Response:
def _aloadstr(ins):
''' Loads a string value from a memory address.
'''
output = _addr(ins.quad[2])
output.append('call __ILOADSTR')
output.append('push hl')
REQUIRES.add('loadstr.asm')
return output |
def extract(self):
"""Extract a common dependency.
Returns:
A (_PackageScope, Requirement) tuple, containing the new scope copy
with the extraction, and the extracted package range. If no package
was extracted, then (self,None) is returned.
"""
if not self.package_request.conflict:
new_slice, package_request = self.variant_slice.extract()
if package_request:
assert(new_slice is not self.variant_slice)
scope = copy.copy(self)
scope.variant_slice = new_slice
if self.pr:
self.pr("extracted %s from %s", package_request, self)
return (scope, package_request)
return (self, None) | Extract a common dependency.
Returns:
A (_PackageScope, Requirement) tuple, containing the new scope copy
with the extraction, and the extracted package range. If no package
was extracted, then (self,None) is returned. | Below is the the instruction that describes the task:
### Input:
Extract a common dependency.
Returns:
A (_PackageScope, Requirement) tuple, containing the new scope copy
with the extraction, and the extracted package range. If no package
was extracted, then (self,None) is returned.
### Response:
def extract(self):
"""Extract a common dependency.
Returns:
A (_PackageScope, Requirement) tuple, containing the new scope copy
with the extraction, and the extracted package range. If no package
was extracted, then (self,None) is returned.
"""
if not self.package_request.conflict:
new_slice, package_request = self.variant_slice.extract()
if package_request:
assert(new_slice is not self.variant_slice)
scope = copy.copy(self)
scope.variant_slice = new_slice
if self.pr:
self.pr("extracted %s from %s", package_request, self)
return (scope, package_request)
return (self, None) |
def dotted(self):
"""Return just the tract number, excluding the state and county, in the dotted format"""
v = str(self.geoid.tract).zfill(6)
return v[0:4] + '.' + v[4:] | Return just the tract number, excluding the state and county, in the dotted format | Below is the the instruction that describes the task:
### Input:
Return just the tract number, excluding the state and county, in the dotted format
### Response:
def dotted(self):
"""Return just the tract number, excluding the state and county, in the dotted format"""
v = str(self.geoid.tract).zfill(6)
        return v[0:4] + '.' + v[4:] | Return just the tract number, excluding the state and county, in the dotted format | Below is the the instruction that describes the task:
### Input:
Return just the tract number, excluding the state and county, in the dotted format
### Response:
def dotted(self):
        """Return just the tract number, excluding the state and county, in the dotted format"""
        v = str(self.geoid.tract).zfill(6)
        return v[0:4] + '.' + v[4:]