code (string, 75–104k chars) | docstring (string, 1–46.9k chars) | text (string, 164–112k chars)
---|---|---|
def output(self, pin, value):
"""Set the specified pin the provided high/low value. Value should be
either HIGH/LOW or a boolean (true = high)."""
if pin < 0 or pin > 15:
raise ValueError('Pin must be between 0 and 15 (inclusive).')
self._output_pin(pin, value)
self.mpsse_write_gpio() | Set the specified pin to the provided high/low value. Value should be
either HIGH/LOW or a boolean (true = high). | Below is the instruction that describes the task:
### Input:
Set the specified pin to the provided high/low value. Value should be
either HIGH/LOW or a boolean (true = high).
### Response:
def output(self, pin, value):
"""Set the specified pin the provided high/low value. Value should be
either HIGH/LOW or a boolean (true = high)."""
if pin < 0 or pin > 15:
raise ValueError('Pin must be between 0 and 15 (inclusive).')
self._output_pin(pin, value)
self.mpsse_write_gpio() |
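The output() method above validates the pin index, updates an internal pin state via _output_pin(), and then flushes that state over MPSSE. As a rough illustration of how such a 16-bit output state can be tracked with bit masking, here is a minimal sketch; the GpioState class and the _gpio_state attribute are illustrative assumptions, not the library's actual internals.

```python
GPIO_HIGH, GPIO_LOW = True, False

class GpioState:
    """Illustrative 16-bit GPIO output state tracked with bit masking."""

    def __init__(self):
        self._gpio_state = 0  # bit i holds the commanded level of pin i (assumed name)

    def output(self, pin, value):
        if pin < 0 or pin > 15:
            raise ValueError('Pin must be between 0 and 15 (inclusive).')
        if value:
            self._gpio_state |= (1 << pin)   # set bit -> drive pin high
        else:
            self._gpio_state &= ~(1 << pin)  # clear bit -> drive pin low
        # A real driver would now serialize self._gpio_state to the device.

state = GpioState()
state.output(3, GPIO_HIGH)
assert state._gpio_state == 0b1000
```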
def group_and_sort_statements(stmt_list, ev_totals=None):
"""Group statements by type and arguments, and sort by prevalence.
Parameters
----------
stmt_list : list[Statement]
A list of INDRA statements.
ev_totals : dict{int: int}
A dictionary, keyed by statement hash (shallow) with counts of total
evidence as the values. Including this will allow statements to be
better sorted.
Returns
-------
sorted_groups : list[tuple]
A list of tuples containing a sort key, the statement type, and a list
of statements, also sorted by evidence count, for that key and type.
The sort key contains a count of statements with those arguments, the
arguments (normalized strings), the count of statements with those
arguments and type, and then the statement type.
"""
def _count(stmt):
if ev_totals is None:
return len(stmt.evidence)
else:
return ev_totals[stmt.get_hash()]
stmt_rows = defaultdict(list)
stmt_counts = defaultdict(lambda: 0)
arg_counts = defaultdict(lambda: 0)
for key, s in _get_keyed_stmts(stmt_list):
# Update the counts, and add key if needed.
stmt_rows[key].append(s)
# Keep track of the total evidence counts for this statement and the
# arguments.
stmt_counts[key] += _count(s)
# Add up the counts for the arguments, pairwise for Complexes and
# Conversions. This allows, for example, a complex between MEK, ERK,
# and something else to lend weight to the interactions between MEK
# and ERK.
if key[0] == 'Conversion':
subj = key[1]
for obj in key[2] + key[3]:
arg_counts[(subj, obj)] += _count(s)
else:
arg_counts[key[1:]] += _count(s)
# Sort the rows by count and agent names.
def process_rows(stmt_rows):
for key, stmts in stmt_rows.items():
verb = key[0]
inps = key[1:]
sub_count = stmt_counts[key]
arg_count = arg_counts[inps]
if verb == 'Complex' and sub_count == arg_count and len(inps) <= 2:
if all([len(set(ag.name for ag in s.agent_list())) > 2
for s in stmts]):
continue
new_key = (arg_count, inps, sub_count, verb)
stmts = sorted(stmts,
key=lambda s: _count(s) + 1/(1+len(s.agent_list())),
reverse=True)
yield new_key, verb, stmts
sorted_groups = sorted(process_rows(stmt_rows),
key=lambda tpl: tpl[0], reverse=True)
return sorted_groups | Group statements by type and arguments, and sort by prevalence.
Parameters
----------
stmt_list : list[Statement]
A list of INDRA statements.
ev_totals : dict{int: int}
A dictionary, keyed by statement hash (shallow) with counts of total
evidence as the values. Including this will allow statements to be
better sorted.
Returns
-------
sorted_groups : list[tuple]
A list of tuples containing a sort key, the statement type, and a list
of statements, also sorted by evidence count, for that key and type.
The sort key contains a count of statements with those arguments, the
arguments (normalized strings), the count of statements with those
arguments and type, and then the statement type. | Below is the instruction that describes the task:
### Input:
Group statements by type and arguments, and sort by prevalence.
Parameters
----------
stmt_list : list[Statement]
A list of INDRA statements.
ev_totals : dict{int: int}
A dictionary, keyed by statement hash (shallow) with counts of total
evidence as the values. Including this will allow statements to be
better sorted.
Returns
-------
sorted_groups : list[tuple]
A list of tuples containing a sort key, the statement type, and a list
of statements, also sorted by evidence count, for that key and type.
The sort key contains a count of statements with those arguments, the
arguments (normalized strings), the count of statements with those
arguments and type, and then the statement type.
### Response:
def group_and_sort_statements(stmt_list, ev_totals=None):
"""Group statements by type and arguments, and sort by prevalence.
Parameters
----------
stmt_list : list[Statement]
A list of INDRA statements.
ev_totals : dict{int: int}
A dictionary, keyed by statement hash (shallow) with counts of total
evidence as the values. Including this will allow statements to be
better sorted.
Returns
-------
sorted_groups : list[tuple]
A list of tuples containing a sort key, the statement type, and a list
of statements, also sorted by evidence count, for that key and type.
The sort key contains a count of statements with those arguments, the
arguments (normalized strings), the count of statements with those
arguments and type, and then the statement type.
"""
def _count(stmt):
if ev_totals is None:
return len(stmt.evidence)
else:
return ev_totals[stmt.get_hash()]
stmt_rows = defaultdict(list)
stmt_counts = defaultdict(lambda: 0)
arg_counts = defaultdict(lambda: 0)
for key, s in _get_keyed_stmts(stmt_list):
# Update the counts, and add key if needed.
stmt_rows[key].append(s)
# Keep track of the total evidence counts for this statement and the
# arguments.
stmt_counts[key] += _count(s)
# Add up the counts for the arguments, pairwise for Complexes and
# Conversions. This allows, for example, a complex between MEK, ERK,
# and something else to lend weight to the interactions between MEK
# and ERK.
if key[0] == 'Conversion':
subj = key[1]
for obj in key[2] + key[3]:
arg_counts[(subj, obj)] += _count(s)
else:
arg_counts[key[1:]] += _count(s)
# Sort the rows by count and agent names.
def process_rows(stmt_rows):
for key, stmts in stmt_rows.items():
verb = key[0]
inps = key[1:]
sub_count = stmt_counts[key]
arg_count = arg_counts[inps]
if verb == 'Complex' and sub_count == arg_count and len(inps) <= 2:
if all([len(set(ag.name for ag in s.agent_list())) > 2
for s in stmts]):
continue
new_key = (arg_count, inps, sub_count, verb)
stmts = sorted(stmts,
key=lambda s: _count(s) + 1/(1+len(s.agent_list())),
reverse=True)
yield new_key, verb, stmts
sorted_groups = sorted(process_rows(stmt_rows),
key=lambda tpl: tpl[0], reverse=True)
return sorted_groups |
def great_circle_distance(self, other):
"""
Return the great-circle distance, in meters, from these geographic
coordinates to the specified other point, i.e., the shortest distance
over the earth’s surface, ‘as-the-crow-flies’ distance between the
points, ignoring any natural elevations of the ground.
Haversine formula::
R = earth’s radius (mean radius = 6,371km)
Δlat = lat2 − lat1
Δlong = long2 − long1
a = sin²(Δlat / 2) + cos(lat1).cos(lat2).sin²(Δlong/2)
c = 2.atan2(√a, √(1−a))
d = R.c
@param other: a ``GeoPoint`` instance.
@return: the great-circle distance, in meters, between these geographic
coordinates and the specified other point.
"""
distance_latitude = math.radians(abs(self.latitude - other.latitude))
distance_longitude = math.radians(abs(self.longitude - other.longitude))
a = math.sin(distance_latitude / 2) * math.sin(distance_latitude / 2) \
+ math.cos(math.radians(self.latitude)) \
* math.cos(math.radians(other.latitude)) \
* math.sin(distance_longitude / 2) \
* math.sin(distance_longitude / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return GeoPoint.EARTH_RADIUS_METERS * c | Return the great-circle distance, in meters, from these geographic
coordinates to the specified other point, i.e., the shortest distance
over the earth’s surface, ‘as-the-crow-flies’ distance between the
points, ignoring any natural elevations of the ground.
Haversine formula::
R = earth’s radius (mean radius = 6,371km)
Δlat = lat2 − lat1
Δlong = long2 − long1
a = sin²(Δlat / 2) + cos(lat1).cos(lat2).sin²(Δlong/2)
c = 2.atan2(√a, √(1−a))
d = R.c
@param other: a ``GeoPoint`` instance.
@return: the great-circle distance, in meters, between these geographic
coordinates and the specified other point. | Below is the instruction that describes the task:
### Input:
Return the great-circle distance, in meters, from these geographic
coordinates to the specified other point, i.e., the shortest distance
over the earth’s surface, ‘as-the-crow-flies’ distance between the
points, ignoring any natural elevations of the ground.
Haversine formula::
R = earth’s radius (mean radius = 6,371km)
Δlat = lat2 − lat1
Δlong = long2 − long1
a = sin²(Δlat / 2) + cos(lat1).cos(lat2).sin²(Δlong/2)
c = 2.atan2(√a, √(1−a))
d = R.c
@param other: a ``GeoPoint`` instance.
@return: the great-circle distance, in meters, between these geographic
coordinates and the specified other point.
### Response:
def great_circle_distance(self, other):
"""
Return the great-circle distance, in meters, from these geographic
coordinates to the specified other point, i.e., the shortest distance
over the earth’s surface, ‘as-the-crow-flies’ distance between the
points, ignoring any natural elevations of the ground.
Haversine formula::
R = earth’s radius (mean radius = 6,371km)
Δlat = lat2 − lat1
Δlong = long2 − long1
a = sin²(Δlat / 2) + cos(lat1).cos(lat2).sin²(Δlong/2)
c = 2.atan2(√a, √(1−a))
d = R.c
@param other: a ``GeoPoint`` instance.
@return: the great-circle distance, in meters, between these geographic
coordinates and the specified other point.
"""
distance_latitude = math.radians(abs(self.latitude - other.latitude))
distance_longitude = math.radians(abs(self.longitude - other.longitude))
a = math.sin(distance_latitude / 2) * math.sin(distance_latitude / 2) \
+ math.cos(math.radians(self.latitude)) \
* math.cos(math.radians(other.latitude)) \
* math.sin(distance_longitude / 2) \
* math.sin(distance_longitude / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return GeoPoint.EARTH_RADIUS_METERS * c |
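As a standalone check of the Haversine formula quoted in the docstring, the sketch below computes the same quantity without the GeoPoint class. The 6,371 km mean Earth radius matches the docstring; the sample coordinates are purely illustrative.

```python
import math

EARTH_RADIUS_METERS = 6371000.0  # mean Earth radius, per the docstring above

def haversine(lat1, lon1, lat2, lon2):
    """Great-circle distance in meters between two (latitude, longitude) points in degrees."""
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    a = (math.sin(dlat / 2) ** 2
         + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2))
         * math.sin(dlon / 2) ** 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return EARTH_RADIUS_METERS * c

# Paris (48.8566, 2.3522) to London (51.5074, -0.1278): roughly 344 km.
print(round(haversine(48.8566, 2.3522, 51.5074, -0.1278) / 1000, 1))
```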
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params.vocab_size),
"approx_bleu_score": _convert_to_eval_metric(bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(rouge_l_fscore)(logits, labels),
}
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics | Return dictionary of model evaluation metrics. | Below is the instruction that describes the task:
### Input:
Return dictionary of model evaluation metrics.
### Response:
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params.vocab_size),
"approx_bleu_score": _convert_to_eval_metric(bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(rouge_l_fscore)(logits, labels),
}
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics |
def enbw(data):
r"""Computes the equivalent noise bandwidth
.. math:: ENBW = N \frac{\sum_{n=1}^{N} w_n^2}{\left(\sum_{n=1}^{N} w_n \right)^2}
.. doctest::
>>> from spectrum import create_window, enbw
>>> w = create_window(64, 'rectangular')
>>> enbw(w)
1.0
The following table contains the ENBW values for some of the
implemented windows in this module (with N=16384). They have been
double-checked against the literature (Source: [Harris]_, [Marple]_).
If not present, it means that it has not been checked.
=================== ============ =============
name ENBW litterature
=================== ============ =============
rectangular 1. 1.
triangle 1.3334 1.33
Hann 1.5001 1.5
Hamming 1.3629 1.36
blackman 1.7268 1.73
kaiser 1.7
blackmanharris,4 2.004 2.
riesz 1.2000 1.2
riemann 1.32 1.3
parzen 1.917 1.92
tukey 0.25 1.102 1.1
bohman 1.7858 1.79
poisson 2 1.3130 1.3
hanningpoisson 0.5 1.609 1.61
cauchy 1.489 1.48
lanczos 1.3
=================== ============ =============
"""
N = len(data)
return N * np.sum(data**2) / np.sum(data)**2 | r"""Computes the equivalent noise bandwidth
.. math:: ENBW = N \frac{\sum_{n=1}^{N} w_n^2}{\left(\sum_{n=1}^{N} w_n \right)^2}
.. doctest::
>>> from spectrum import create_window, enbw
>>> w = create_window(64, 'rectangular')
>>> enbw(w)
1.0
The following table contains the ENBW values for some of the
implemented windows in this module (with N=16384). They have been
double-checked against the literature (Source: [Harris]_, [Marple]_).
If not present, it means that it has not been checked.
=================== ============ =============
name ENBW litterature
=================== ============ =============
rectangular 1. 1.
triangle 1.3334 1.33
Hann 1.5001 1.5
Hamming 1.3629 1.36
blackman 1.7268 1.73
kaiser 1.7
blackmanharris,4 2.004 2.
riesz 1.2000 1.2
riemann 1.32 1.3
parzen 1.917 1.92
tukey 0.25 1.102 1.1
bohman 1.7858 1.79
poisson 2 1.3130 1.3
hanningpoisson 0.5 1.609 1.61
cauchy 1.489 1.48
lanczos 1.3
=================== ============ ============= | Below is the instruction that describes the task:
### Input:
r"""Computes the equivalent noise bandwidth
.. math:: ENBW = N \frac{\sum_{n=1}^{N} w_n^2}{\left(\sum_{n=1}^{N} w_n \right)^2}
.. doctest::
>>> from spectrum import create_window, enbw
>>> w = create_window(64, 'rectangular')
>>> enbw(w)
1.0
The following table contains the ENBW values for some of the
implemented windows in this module (with N=16384). They have been
double-checked against the literature (Source: [Harris]_, [Marple]_).
If not present, it means that it has not been checked.
=================== ============ =============
name ENBW litterature
=================== ============ =============
rectangular 1. 1.
triangle 1.3334 1.33
Hann 1.5001 1.5
Hamming 1.3629 1.36
blackman 1.7268 1.73
kaiser 1.7
blackmanharris,4 2.004 2.
riesz 1.2000 1.2
riemann 1.32 1.3
parzen 1.917 1.92
tukey 0.25 1.102 1.1
bohman 1.7858 1.79
poisson 2 1.3130 1.3
hanningpoisson 0.5 1.609 1.61
cauchy 1.489 1.48
lanczos 1.3
=================== ============ =============
### Response:
def enbw(data):
r"""Computes the equivalent noise bandwidth
.. math:: ENBW = N \frac{\sum_{n=1}^{N} w_n^2}{\left(\sum_{n=1}^{N} w_n \right)^2}
.. doctest::
>>> from spectrum import create_window, enbw
>>> w = create_window(64, 'rectangular')
>>> enbw(w)
1.0
The following table contains the ENBW values for some of the
implemented windows in this module (with N=16384). They have been
double-checked against the literature (Source: [Harris]_, [Marple]_).
If not present, it means that it has not been checked.
=================== ============ =============
name ENBW litterature
=================== ============ =============
rectangular 1. 1.
triangle 1.3334 1.33
Hann 1.5001 1.5
Hamming 1.3629 1.36
blackman 1.7268 1.73
kaiser 1.7
blackmanharris,4 2.004 2.
riesz 1.2000 1.2
riemann 1.32 1.3
parzen 1.917 1.92
tukey 0.25 1.102 1.1
bohman 1.7858 1.79
poisson 2 1.3130 1.3
hanningpoisson 0.5 1.609 1.61
cauchy 1.489 1.48
lanczos 1.3
=================== ============ =============
"""
N = len(data)
return N * np.sum(data**2) / np.sum(data)**2 |
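A quick standalone check of the ENBW expression above, using NumPy's built-in windows instead of spectrum.create_window (an assumption made to keep the sketch self-contained). The Hann window should land close to the 1.5 value quoted in the table.

```python
import numpy as np

def enbw(w):
    """Equivalent noise bandwidth: N * sum(w**2) / sum(w)**2."""
    w = np.asarray(w, dtype=float)
    return len(w) * np.sum(w ** 2) / np.sum(w) ** 2

print(enbw(np.ones(16384)))               # rectangular window -> 1.0
print(round(enbw(np.hanning(16384)), 4))  # Hann window -> approximately 1.5
```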
def spherical_histogram(data=None, radial_bins="numpy", theta_bins=16, phi_bins=16, transformed=False, *args, **kwargs):
"""Facade construction function for the SphericalHistogram.
"""
dropna = kwargs.pop("dropna", True)
data = _prepare_data(data, transformed=transformed, klass=SphericalHistogram, dropna=dropna)
if isinstance(theta_bins, int):
theta_range = (0, np.pi)
if "theta_range" in "kwargs":
theta_range = kwargs["theta_range"]
elif "range" in "kwargs":
theta_range = kwargs["range"][1]
theta_range = list(theta_range) + [theta_bins + 1]
theta_bins = np.linspace(*theta_range)
if isinstance(phi_bins, int):
phi_range = (0, 2 * np.pi)
if "phi_range" in "kwargs":
phi_range = kwargs["phi_range"]
elif "range" in "kwargs":
phi_range = kwargs["range"][2]
phi_range = list(phi_range) + [phi_bins + 1]
phi_bins = np.linspace(*phi_range)
bin_schemas = binnings.calculate_bins_nd(data, [radial_bins, theta_bins, phi_bins], *args,
check_nan=not dropna, **kwargs)
weights = kwargs.pop("weights", None)
frequencies, errors2, missed = histogram_nd.calculate_frequencies(data, ndim=3,
binnings=bin_schemas,
weights=weights)
return SphericalHistogram(binnings=bin_schemas, frequencies=frequencies, errors2=errors2, missed=missed) | Facade construction function for the SphericalHistogram. | Below is the instruction that describes the task:
### Input:
Facade construction function for the SphericalHistogram.
### Response:
def spherical_histogram(data=None, radial_bins="numpy", theta_bins=16, phi_bins=16, transformed=False, *args, **kwargs):
"""Facade construction function for the SphericalHistogram.
"""
dropna = kwargs.pop("dropna", True)
data = _prepare_data(data, transformed=transformed, klass=SphericalHistogram, dropna=dropna)
if isinstance(theta_bins, int):
theta_range = (0, np.pi)
if "theta_range" in "kwargs":
theta_range = kwargs["theta_range"]
elif "range" in "kwargs":
theta_range = kwargs["range"][1]
theta_range = list(theta_range) + [theta_bins + 1]
theta_bins = np.linspace(*theta_range)
if isinstance(phi_bins, int):
phi_range = (0, 2 * np.pi)
if "phi_range" in "kwargs":
phi_range = kwargs["phi_range"]
elif "range" in "kwargs":
phi_range = kwargs["range"][2]
phi_range = list(phi_range) + [phi_bins + 1]
phi_bins = np.linspace(*phi_range)
bin_schemas = binnings.calculate_bins_nd(data, [radial_bins, theta_bins, phi_bins], *args,
check_nan=not dropna, **kwargs)
weights = kwargs.pop("weights", None)
frequencies, errors2, missed = histogram_nd.calculate_frequencies(data, ndim=3,
binnings=bin_schemas,
weights=weights)
return SphericalHistogram(binnings=bin_schemas, frequencies=frequencies, errors2=errors2, missed=missed) |
def _convert_operator(self, node_name, op_name, attrs, inputs):
"""Convert from onnx operator to mxnet operator.
The converter must specify conversions explicitly for incompatible names, and
apply handlers to operator attributes.
Parameters
----------
:param node_name : str
name of the node to be translated.
:param op_name : str
Operator name, such as Convolution, FullyConnected
:param attrs : dict
Dict of operator attributes
:param inputs: list
list of inputs to the operator
Returns
-------
:return mxnet_sym
Converted mxnet symbol
"""
if op_name in convert_map:
op_name, new_attrs, inputs = convert_map[op_name](attrs, inputs, self)
else:
raise NotImplementedError("Operator {} not implemented.".format(op_name))
if isinstance(op_name, string_types):
new_op = getattr(symbol, op_name, None)
if not new_op:
raise RuntimeError("Unable to map op_name {} to sym".format(op_name))
if node_name is None:
mxnet_sym = new_op(*inputs, **new_attrs)
else:
mxnet_sym = new_op(name=node_name, *inputs, **new_attrs)
return mxnet_sym
return op_name | Convert from onnx operator to mxnet operator.
The converter must specify conversions explicitly for incompatible names, and
apply handlers to operator attributes.
Parameters
----------
:param node_name : str
name of the node to be translated.
:param op_name : str
Operator name, such as Convolution, FullyConnected
:param attrs : dict
Dict of operator attributes
:param inputs: list
list of inputs to the operator
Returns
-------
:return mxnet_sym
Converted mxnet symbol | Below is the instruction that describes the task:
### Input:
Convert from onnx operator to mxnet operator.
The converter must specify conversions explicitly for incompatible names, and
apply handlers to operator attributes.
Parameters
----------
:param node_name : str
name of the node to be translated.
:param op_name : str
Operator name, such as Convolution, FullyConnected
:param attrs : dict
Dict of operator attributes
:param inputs: list
list of inputs to the operator
Returns
-------
:return mxnet_sym
Converted mxnet symbol
### Response:
def _convert_operator(self, node_name, op_name, attrs, inputs):
"""Convert from onnx operator to mxnet operator.
The converter must specify conversions explicitly for incompatible names, and
apply handlers to operator attributes.
Parameters
----------
:param node_name : str
name of the node to be translated.
:param op_name : str
Operator name, such as Convolution, FullyConnected
:param attrs : dict
Dict of operator attributes
:param inputs: list
list of inputs to the operator
Returns
-------
:return mxnet_sym
Converted mxnet symbol
"""
if op_name in convert_map:
op_name, new_attrs, inputs = convert_map[op_name](attrs, inputs, self)
else:
raise NotImplementedError("Operator {} not implemented.".format(op_name))
if isinstance(op_name, string_types):
new_op = getattr(symbol, op_name, None)
if not new_op:
raise RuntimeError("Unable to map op_name {} to sym".format(op_name))
if node_name is None:
mxnet_sym = new_op(*inputs, **new_attrs)
else:
mxnet_sym = new_op(name=node_name, *inputs, **new_attrs)
return mxnet_sym
return op_name |
def default_software_reset_type(self, reset_type):
"""! @brief Modify the default software reset method.
@param self
@param reset_type Must be one of the software reset types: Target.ResetType.SW_SYSRESETREQ,
Target.ResetType.SW_VECTRESET, or Target.ResetType.SW_EMULATED.
"""
assert isinstance(reset_type, Target.ResetType)
assert reset_type in (Target.ResetType.SW_SYSRESETREQ, Target.ResetType.SW_VECTRESET,
Target.ResetType.SW_EMULATED)
self._default_software_reset_type = reset_type | ! @brief Modify the default software reset method.
@param self
@param reset_type Must be one of the software reset types: Target.ResetType.SW_SYSRESETREQ,
Target.ResetType.SW_VECTRESET, or Target.ResetType.SW_EMULATED. | Below is the instruction that describes the task:
### Input:
! @brief Modify the default software reset method.
@param self
@param reset_type Must be one of the software reset types: Target.ResetType.SW_SYSRESETREQ,
Target.ResetType.SW_VECTRESET, or Target.ResetType.SW_EMULATED.
### Response:
def default_software_reset_type(self, reset_type):
"""! @brief Modify the default software reset method.
@param self
@param reset_type Must be one of the software reset types: Target.ResetType.SW_SYSRESETREQ,
Target.ResetType.SW_VECTRESET, or Target.ResetType.SW_EMULATED.
"""
assert isinstance(reset_type, Target.ResetType)
assert reset_type in (Target.ResetType.SW_SYSRESETREQ, Target.ResetType.SW_VECTRESET,
Target.ResetType.SW_EMULATED)
self._default_software_reset_type = reset_type |
def get_choices_for(self, field):
"""
Get the choices for the given fields.
Args:
field (str): Name of field.
Returns:
List of tuples. [(name, value),...]
"""
choices = self._fields[field].choices
if isinstance(choices, six.string_types):
return [(d['value'], d['name']) for d in self._choices_manager.get_all(choices)]
else:
return choices | Get the choices for the given fields.
Args:
field (str): Name of field.
Returns:
List of tuples. [(name, value),...] | Below is the instruction that describes the task:
### Input:
Get the choices for the given fields.
Args:
field (str): Name of field.
Returns:
List of tuples. [(name, value),...]
### Response:
def get_choices_for(self, field):
"""
Get the choices for the given fields.
Args:
field (str): Name of field.
Returns:
List of tuples. [(name, value),...]
"""
choices = self._fields[field].choices
if isinstance(choices, six.string_types):
return [(d['value'], d['name']) for d in self._choices_manager.get_all(choices)]
else:
return choices |
def set_time(self, vfy_time):
"""
Set the time against which the certificates are verified.
Normally the current time is used.
.. note::
For example, you can determine if a certificate was valid at a given
time.
.. versionadded:: 17.0.0
:param datetime vfy_time: The verification time to set on this store.
:return: ``None`` if the verification time was successfully set.
"""
param = _lib.X509_VERIFY_PARAM_new()
param = _ffi.gc(param, _lib.X509_VERIFY_PARAM_free)
_lib.X509_VERIFY_PARAM_set_time(param, int(vfy_time.strftime('%s')))
_openssl_assert(_lib.X509_STORE_set1_param(self._store, param) != 0) | Set the time against which the certificates are verified.
Normally the current time is used.
.. note::
For example, you can determine if a certificate was valid at a given
time.
.. versionadded:: 17.0.0
:param datetime vfy_time: The verification time to set on this store.
:return: ``None`` if the verification time was successfully set. | Below is the instruction that describes the task:
### Input:
Set the time against which the certificates are verified.
Normally the current time is used.
.. note::
For example, you can determine if a certificate was valid at a given
time.
.. versionadded:: 17.0.0
:param datetime vfy_time: The verification time to set on this store.
:return: ``None`` if the verification time was successfully set.
### Response:
def set_time(self, vfy_time):
"""
Set the time against which the certificates are verified.
Normally the current time is used.
.. note::
For example, you can determine if a certificate was valid at a given
time.
.. versionadded:: 17.0.0
:param datetime vfy_time: The verification time to set on this store.
:return: ``None`` if the verification time was successfully set.
"""
param = _lib.X509_VERIFY_PARAM_new()
param = _ffi.gc(param, _lib.X509_VERIFY_PARAM_free)
_lib.X509_VERIFY_PARAM_set_time(param, int(vfy_time.strftime('%s')))
_openssl_assert(_lib.X509_STORE_set1_param(self._store, param) != 0) |
def default_username_algo(email):
"""Generate username for the Django user.
:arg str/unicode email: the email address to use to generate a username
:returns: str/unicode
"""
# bluntly stolen from django-browserid
# store the username as a base64 encoded sha1 of the email address
# this protects against data leakage because usernames are often
# treated as public identifiers (so we can't use the email address).
username = base64.urlsafe_b64encode(
hashlib.sha1(force_bytes(email)).digest()
).rstrip(b'=')
return smart_text(username) | Generate username for the Django user.
:arg str/unicode email: the email address to use to generate a username
:returns: str/unicode | Below is the instruction that describes the task:
### Input:
Generate username for the Django user.
:arg str/unicode email: the email address to use to generate a username
:returns: str/unicode
### Response:
def default_username_algo(email):
"""Generate username for the Django user.
:arg str/unicode email: the email address to use to generate a username
:returns: str/unicode
"""
# bluntly stolen from django-browserid
# store the username as a base64 encoded sha1 of the email address
# this protects against data leakage because usernames are often
# treated as public identifiers (so we can't use the email address).
username = base64.urlsafe_b64encode(
hashlib.sha1(force_bytes(email)).digest()
).rstrip(b'=')
return smart_text(username) |
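For reference, the same derivation can be reproduced without the Django helpers (force_bytes / smart_text); this sketch assumes UTF-8 encoding of the address and is not part of the package's API.

```python
import base64
import hashlib

def username_from_email(email):
    """URL-safe base64 of the SHA-1 digest of the email, with '=' padding stripped."""
    digest = hashlib.sha1(email.encode("utf-8")).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

print(username_from_email("jane@example.com"))  # deterministic 27-character username
```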
def push_new_themes(catalog, portal_url, apikey):
"""Toma un catálogo y escribe los temas de la taxonomía que no están
presentes.
Args:
catalog (DataJson): El catálogo de origen que contiene la
taxonomía.
portal_url (str): La URL del portal CKAN de destino.
apikey (str): La apikey de un usuario con los permisos que le
permitan crear o actualizar los temas.
Returns:
str: Los ids de los temas creados.
"""
ckan_portal = RemoteCKAN(portal_url, apikey=apikey)
existing_themes = ckan_portal.call_action('group_list')
new_themes = [theme['id'] for theme in catalog[
'themeTaxonomy'] if theme['id'] not in existing_themes]
pushed_names = []
for new_theme in new_themes:
name = push_theme_to_ckan(
catalog, portal_url, apikey, identifier=new_theme)
pushed_names.append(name)
return pushed_names | Takes a catalog and writes the taxonomy themes that are not
present.
Args:
catalog (DataJson): The source catalog that contains the
taxonomy.
portal_url (str): The URL of the target CKAN portal.
apikey (str): The API key of a user with permissions to create
or update the themes.
Returns:
str: The ids of the created themes. | Below is the instruction that describes the task:
### Input:
Takes a catalog and writes the taxonomy themes that are not
present.
Args:
catalog (DataJson): The source catalog that contains the
taxonomy.
portal_url (str): The URL of the target CKAN portal.
apikey (str): The API key of a user with permissions to create
or update the themes.
Returns:
str: The ids of the created themes.
### Response:
def push_new_themes(catalog, portal_url, apikey):
"""Toma un catálogo y escribe los temas de la taxonomía que no están
presentes.
Args:
catalog (DataJson): El catálogo de origen que contiene la
taxonomía.
portal_url (str): La URL del portal CKAN de destino.
apikey (str): La apikey de un usuario con los permisos que le
permitan crear o actualizar los temas.
Returns:
str: Los ids de los temas creados.
"""
ckan_portal = RemoteCKAN(portal_url, apikey=apikey)
existing_themes = ckan_portal.call_action('group_list')
new_themes = [theme['id'] for theme in catalog[
'themeTaxonomy'] if theme['id'] not in existing_themes]
pushed_names = []
for new_theme in new_themes:
name = push_theme_to_ckan(
catalog, portal_url, apikey, identifier=new_theme)
pushed_names.append(name)
return pushed_names |
def vecs_to_datmesh(x, y):
"""
Converts input arguments x and y to a 2d meshgrid,
suitable for calling Means, Covariances and Realizations.
"""
x, y = meshgrid(x, y)
out = zeros(x.shape + (2,), dtype=float)
out[:, :, 0] = x
out[:, :, 1] = y
return out | Converts input arguments x and y to a 2d meshgrid,
suitable for calling Means, Covariances and Realizations. | Below is the instruction that describes the task:
### Input:
Converts input arguments x and y to a 2d meshgrid,
suitable for calling Means, Covariances and Realizations.
### Response:
def vecs_to_datmesh(x, y):
"""
Converts input arguments x and y to a 2d meshgrid,
suitable for calling Means, Covariances and Realizations.
"""
x, y = meshgrid(x, y)
out = zeros(x.shape + (2,), dtype=float)
out[:, :, 0] = x
out[:, :, 1] = y
return out |
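A short usage sketch for the helper above, assuming meshgrid and zeros come from a NumPy star import as the snippet's style suggests; the explicit np namespace below is only for self-containment.

```python
import numpy as np

def vecs_to_datmesh(x, y):
    # Same logic as above, with an explicit numpy namespace.
    x, y = np.meshgrid(x, y)
    out = np.zeros(x.shape + (2,), dtype=float)
    out[:, :, 0] = x
    out[:, :, 1] = y
    return out

mesh = vecs_to_datmesh(np.linspace(0, 1, 3), np.linspace(0, 1, 4))
print(mesh.shape)  # (4, 3, 2): one (x, y) pair per grid node
print(mesh[0, 2])  # [1. 0.] -> x = 1.0, y = 0.0
```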
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.items()
if not k.startswith('oauth_')]) | Get any non-OAuth parameters. | Below is the instruction that describes the task:
### Input:
Get any non-OAuth parameters.
### Response:
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.items()
if not k.startswith('oauth_')]) |
def _row_to_str(self, row):
# type: (List[str]) -> str
"""Converts a list of strings to a correctly spaced and formatted
row string.
e.g.
['some', 'foo', 'bar'] --> '| some | foo | bar |'
:param row: list
:return: str
"""
_row_text = ''
for col, width in self.col_widths.items():
_row_text += self.COLUMN_SEP
l_pad, r_pad = self._split_int(width - len(row[col]))
_row_text += '{0}{1}{2}'.format(' ' * (l_pad + self.PADDING),
row[col],
' ' * (r_pad + self.PADDING))
_row_text += self.COLUMN_SEP + '\n'
return _row_text | Converts a list of strings to a correctly spaced and formatted
row string.
e.g.
['some', 'foo', 'bar'] --> '| some | foo | bar |'
:param row: list
:return: str | Below is the instruction that describes the task:
### Input:
Converts a list of strings to a correctly spaced and formatted
row string.
e.g.
['some', 'foo', 'bar'] --> '| some | foo | bar |'
:param row: list
:return: str
### Response:
def _row_to_str(self, row):
# type: (List[str]) -> str
"""Converts a list of strings to a correctly spaced and formatted
row string.
e.g.
['some', 'foo', 'bar'] --> '| some | foo | bar |'
:param row: list
:return: str
"""
_row_text = ''
for col, width in self.col_widths.items():
_row_text += self.COLUMN_SEP
l_pad, r_pad = self._split_int(width - len(row[col]))
_row_text += '{0}{1}{2}'.format(' ' * (l_pad + self.PADDING),
row[col],
' ' * (r_pad + self.PADDING))
_row_text += self.COLUMN_SEP + '\n'
return _row_text |
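The formatter above centres each cell inside a fixed column width, using _split_int to divide the leftover space. A standalone sketch of that behaviour follows; the column widths, separator, and one-space padding are illustrative assumptions, not the class's configuration.

```python
def row_to_str(row, col_widths, sep='|', padding=1):
    """Centre each cell in its column width and join cells with the separator."""
    text = ''
    for col, width in enumerate(col_widths):
        extra = width - len(row[col])
        l_pad, r_pad = extra // 2, extra - extra // 2  # split the leftover space
        text += sep + ' ' * (l_pad + padding) + row[col] + ' ' * (r_pad + padding)
    return text + sep

print(row_to_str(['some', 'foo', 'bar'], [6, 5, 5]))
# Each cell ends up centred, with one extra space of padding on both sides.
```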
def qos(self, prefetch_size=0, prefetch_count=0, is_global=False):
'''
Set QoS on this channel.
'''
args = Writer()
args.write_long(prefetch_size).\
write_short(prefetch_count).\
write_bit(is_global)
self.send_frame(MethodFrame(self.channel_id, 60, 10, args))
self.channel.add_synchronous_cb(self._recv_qos_ok) | Set QoS on this channel. | Below is the instruction that describes the task:
### Input:
Set QoS on this channel.
### Response:
def qos(self, prefetch_size=0, prefetch_count=0, is_global=False):
'''
Set QoS on this channel.
'''
args = Writer()
args.write_long(prefetch_size).\
write_short(prefetch_count).\
write_bit(is_global)
self.send_frame(MethodFrame(self.channel_id, 60, 10, args))
self.channel.add_synchronous_cb(self._recv_qos_ok) |
def kw_changelist_view(self, request: HttpRequest, extra_context=None, **kw):
"""
Changelist view which allows key-value arguments.
:param request: HttpRequest
:param extra_context: Extra context dict
:param kw: Key-value dict
:return: See changelist_view()
"""
return self.changelist_view(request, extra_context) | Changelist view which allows key-value arguments.
:param request: HttpRequest
:param extra_context: Extra context dict
:param kw: Key-value dict
:return: See changelist_view() | Below is the instruction that describes the task:
### Input:
Changelist view which allows key-value arguments.
:param request: HttpRequest
:param extra_context: Extra context dict
:param kw: Key-value dict
:return: See changelist_view()
### Response:
def kw_changelist_view(self, request: HttpRequest, extra_context=None, **kw):
"""
Changelist view which allows key-value arguments.
:param request: HttpRequest
:param extra_context: Extra context dict
:param kw: Key-value dict
:return: See changelist_view()
"""
return self.changelist_view(request, extra_context) |
def setCurrentProfile(self, prof):
"""
Sets the current profile for this toolbar to the inputted profile.
:param prof | <projexui.widgets.xviewwidget.XViewProfile> || <str>
"""
if prof is None:
self.clearActive()
return
# loop through the profiles looking for a match
profile = None
blocked = self.signalsBlocked()
self.blockSignals(True)
for act in self._profileGroup.actions():
if prof in (act.profile(), act.profile().name()):
act.setChecked(True)
profile = act.profile()
else:
act.setChecked(False)
self.blockSignals(blocked)
# update the current profile
if profile == self._currentProfile and not self._viewWidget.isEmpty():
return
self._currentProfile = profile
if self._viewWidget and profile and not blocked:
self._viewWidget.restoreProfile(profile)
if not blocked:
self.loadProfileFinished.emit(profile)
self.currentProfileChanged.emit(profile) | Sets the current profile for this toolbar to the inputted profile.
:param prof | <projexui.widgets.xviewwidget.XViewProfile> || <str> | Below is the instruction that describes the task:
### Input:
Sets the current profile for this toolbar to the inputted profile.
:param prof | <projexui.widgets.xviewwidget.XViewProfile> || <str>
### Response:
def setCurrentProfile(self, prof):
"""
Sets the current profile for this toolbar to the inputted profile.
:param prof | <projexui.widgets.xviewwidget.XViewProfile> || <str>
"""
if prof is None:
self.clearActive()
return
# loop through the profiles looking for a match
profile = None
blocked = self.signalsBlocked()
self.blockSignals(True)
for act in self._profileGroup.actions():
if prof in (act.profile(), act.profile().name()):
act.setChecked(True)
profile = act.profile()
else:
act.setChecked(False)
self.blockSignals(blocked)
# update the current profile
if profile == self._currentProfile and not self._viewWidget.isEmpty():
return
self._currentProfile = profile
if self._viewWidget and profile and not blocked:
self._viewWidget.restoreProfile(profile)
if not blocked:
self.loadProfileFinished.emit(profile)
self.currentProfileChanged.emit(profile) |
def clear_annotation_data(self):
"""Clear annotation data.
Parameters
----------
Returns
-------
None
"""
self.genes = set()
self.annotations = []
self.term_annotations = {}
self.gene_annotations = {} | Clear annotation data.
Parameters
----------
Returns
-------
None | Below is the instruction that describes the task:
### Input:
Clear annotation data.
Parameters
----------
Returns
-------
None
### Response:
def clear_annotation_data(self):
"""Clear annotation data.
Parameters
----------
Returns
-------
None
"""
self.genes = set()
self.annotations = []
self.term_annotations = {}
self.gene_annotations = {} |
def fast_maxwell_boltzmann(mass, file_name=None,
return_code=False):
r"""Return a function that returns values of a Maxwell-Boltzmann
distribution.
>>> from fast import Atom
>>> mass = Atom("Rb", 87).mass
>>> f = fast_maxwell_boltzmann(mass)
>>> print f(0, 273.15+20)
0.00238221482739
>>> import numpy as np
>>> v = np.linspace(-600, 600, 101)
>>> dist = f(v, 273.15+20)
>>> dv = v[1]-v[0]
>>> print sum(dist)*dv
0.999704711134
"""
# We get the mass of the atom.
code = ""
code = "def maxwell_boltzmann(v, T):\n"
code += ' r"""A fast calculation of the'
code += ' Maxwell-Boltzmann distribution."""\n'
code += " if hasattr(v, 'shape'):\n"
code += " d = 1\n"
code += " m = %s\n" % mass
code += " f = np.sqrt(m/2/np.pi/k_B_num/T)**d\n"
code += " f = f * np.exp(-m*v**2/2/k_B_num/T)\n"
code += " return f\n"
code += " elif hasattr(v, '__len__'):\n"
code += " d = len(v)\n"
code += " m = %s\n" % mass
code += " f = np.sqrt(m/2/np.pi/k_B_num/T)**d\n"
code += " vsquare = sum([v[i]**2 for i in range(d)])\n"
code += " f = f * np.exp(-m*vsquare/2/k_B_num/T)\n"
code += " return f\n"
code += " else:\n"
code += " d = 1\n"
code += " m = %s\n" % mass
code += " f = np.sqrt(m/2/np.pi/k_B_num/T)**d\n"
code += " f = f * np.exp(-m*v**2/2/k_B_num/T)\n"
code += " return f\n"
# We write the code to file if provided, and execute it.
if file_name is not None:
f = file(file_name+".py", "w")
f.write(code)
f.close()
maxwell_boltzmann = code
if not return_code:
exec maxwell_boltzmann
return maxwell_boltzmann | r"""Return a function that returns values of a Maxwell-Boltzmann
distribution.
>>> from fast import Atom
>>> mass = Atom("Rb", 87).mass
>>> f = fast_maxwell_boltzmann(mass)
>>> print f(0, 273.15+20)
0.00238221482739
>>> import numpy as np
>>> v = np.linspace(-600, 600, 101)
>>> dist = f(v, 273.15+20)
>>> dv = v[1]-v[0]
>>> print sum(dist)*dv
0.999704711134 | Below is the instruction that describes the task:
### Input:
r"""Return a function that returns values of a Maxwell-Boltzmann
distribution.
>>> from fast import Atom
>>> mass = Atom("Rb", 87).mass
>>> f = fast_maxwell_boltzmann(mass)
>>> print f(0, 273.15+20)
0.00238221482739
>>> import numpy as np
>>> v = np.linspace(-600, 600, 101)
>>> dist = f(v, 273.15+20)
>>> dv = v[1]-v[0]
>>> print sum(dist)*dv
0.999704711134
### Response:
def fast_maxwell_boltzmann(mass, file_name=None,
return_code=False):
r"""Return a function that returns values of a Maxwell-Boltzmann
distribution.
>>> from fast import Atom
>>> mass = Atom("Rb", 87).mass
>>> f = fast_maxwell_boltzmann(mass)
>>> print f(0, 273.15+20)
0.00238221482739
>>> import numpy as np
>>> v = np.linspace(-600, 600, 101)
>>> dist = f(v, 273.15+20)
>>> dv = v[1]-v[0]
>>> print sum(dist)*dv
0.999704711134
"""
# We get the mass of the atom.
code = ""
code = "def maxwell_boltzmann(v, T):\n"
code += ' r"""A fast calculation of the'
code += ' Maxwell-Boltzmann distribution."""\n'
code += " if hasattr(v, 'shape'):\n"
code += " d = 1\n"
code += " m = %s\n" % mass
code += " f = np.sqrt(m/2/np.pi/k_B_num/T)**d\n"
code += " f = f * np.exp(-m*v**2/2/k_B_num/T)\n"
code += " return f\n"
code += " elif hasattr(v, '__len__'):\n"
code += " d = len(v)\n"
code += " m = %s\n" % mass
code += " f = np.sqrt(m/2/np.pi/k_B_num/T)**d\n"
code += " vsquare = sum([v[i]**2 for i in range(d)])\n"
code += " f = f * np.exp(-m*vsquare/2/k_B_num/T)\n"
code += " return f\n"
code += " else:\n"
code += " d = 1\n"
code += " m = %s\n" % mass
code += " f = np.sqrt(m/2/np.pi/k_B_num/T)**d\n"
code += " f = f * np.exp(-m*v**2/2/k_B_num/T)\n"
code += " return f\n"
# We write the code to file if provided, and execute it.
if file_name is not None:
f = file(file_name+".py", "w")
f.write(code)
f.close()
maxwell_boltzmann = code
if not return_code:
exec maxwell_boltzmann
return maxwell_boltzmann |
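The generator above writes Python 2 source code and exec's it. For a quick numerical check, the one-dimensional distribution can also be written directly; the Boltzmann constant and the approximate 87Rb mass below are standard values, and the checks mirror the doctest.

```python
import numpy as np

K_B = 1.380649e-23                    # Boltzmann constant, J/K
M_RB87 = 86.909 * 1.66053906660e-27   # approximate mass of 87Rb, kg

def maxwell_boltzmann_1d(v, T, m=M_RB87):
    """One-dimensional Maxwell-Boltzmann velocity distribution f(v) at temperature T."""
    return np.sqrt(m / (2 * np.pi * K_B * T)) * np.exp(-m * v ** 2 / (2 * K_B * T))

v = np.linspace(-600, 600, 101)
dist = maxwell_boltzmann_1d(v, 273.15 + 20)
print(round(float(maxwell_boltzmann_1d(0.0, 273.15 + 20)), 6))  # close to 0.002382
print(round(float(dist.sum() * (v[1] - v[0])), 4))              # close to 1.0
```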
def deserialize(self, xml_input, *args, **kwargs):
"""
Convert XML to dict object
"""
return xmltodict.parse(xml_input, *args, **kwargs) | Convert XML to dict object | Below is the instruction that describes the task:
### Input:
Convert XML to dict object
### Response:
def deserialize(self, xml_input, *args, **kwargs):
"""
Convert XML to dict object
"""
return xmltodict.parse(xml_input, *args, **kwargs) |
def cmd_join(self, connection, sender, target, payload):
"""
Asks the bot to join a channel
"""
if payload:
connection.join(payload)
else:
raise ValueError("No channel given") | Asks the bot to join a channel | Below is the the instruction that describes the task:
### Input:
Asks the bot to join a channel
### Response:
def cmd_join(self, connection, sender, target, payload):
"""
Asks the bot to join a channel
"""
if payload:
connection.join(payload)
else:
raise ValueError("No channel given") |
def sqliteRowsToDicts(sqliteRows):
"""
Unpacks sqlite rows as returned by fetchall
into an array of simple dicts.
:param sqliteRows: array of rows returned from fetchall DB call
:return: array of dicts, keyed by the column names.
"""
return map(lambda r: dict(zip(r.keys(), r)), sqliteRows) | Unpacks sqlite rows as returned by fetchall
into an array of simple dicts.
:param sqliteRows: array of rows returned from fetchall DB call
:return: array of dicts, keyed by the column names. | Below is the instruction that describes the task:
### Input:
Unpacks sqlite rows as returned by fetchall
into an array of simple dicts.
:param sqliteRows: array of rows returned from fetchall DB call
:return: array of dicts, keyed by the column names.
### Response:
def sqliteRowsToDicts(sqliteRows):
"""
Unpacks sqlite rows as returned by fetchall
into an array of simple dicts.
:param sqliteRows: array of rows returned from fetchall DB call
:return: array of dicts, keyed by the column names.
"""
return map(lambda r: dict(zip(r.keys(), r)), sqliteRows) |
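Note that the helper relies on each row exposing .keys(), i.e. a sqlite3.Row row factory, and that map() returns a lazy iterator on Python 3. A self-contained sketch of the same idea:

```python
import sqlite3

def rows_to_dicts(rows):
    """Convert sqlite3.Row objects into plain dicts keyed by column name."""
    return [dict(zip(row.keys(), row)) for row in rows]

conn = sqlite3.connect(":memory:")
conn.row_factory = sqlite3.Row  # rows gain .keys() and positional access
conn.execute("CREATE TABLE t (id INTEGER, name TEXT)")
conn.execute("INSERT INTO t VALUES (1, 'a'), (2, 'b')")
print(rows_to_dicts(conn.execute("SELECT * FROM t").fetchall()))
# [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]
```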
def make_strain_from_inj_object(self, inj, delta_t, detector_name,
distance_scale=1):
"""Make a h(t) strain time-series from an injection object as read from
an hdf file.
Parameters
-----------
inj : injection object
The injection object to turn into a strain h(t).
delta_t : float
Sample rate to make injection at.
detector_name : string
Name of the detector used for projecting injections.
distance_scale: float, optional
Factor to scale the distance of an injection with. The default (=1)
is no scaling.
Returns
--------
signal : float
h(t) corresponding to the injection.
"""
detector = Detector(detector_name)
# compute the waveform time series
hp, hc = ringdown_td_approximants[inj['approximant']](
inj, delta_t=delta_t, **self.extra_args)
hp._epoch += inj['tc']
hc._epoch += inj['tc']
if distance_scale != 1:
hp /= distance_scale
hc /= distance_scale
# compute the detector response and add it to the strain
signal = detector.project_wave(hp, hc,
inj['ra'], inj['dec'], inj['polarization'])
return signal | Make a h(t) strain time-series from an injection object as read from
an hdf file.
Parameters
-----------
inj : injection object
The injection object to turn into a strain h(t).
delta_t : float
Sample rate to make injection at.
detector_name : string
Name of the detector used for projecting injections.
distance_scale: float, optional
Factor to scale the distance of an injection with. The default (=1)
is no scaling.
Returns
--------
signal : float
h(t) corresponding to the injection. | Below is the instruction that describes the task:
### Input:
Make a h(t) strain time-series from an injection object as read from
an hdf file.
Parameters
-----------
inj : injection object
The injection object to turn into a strain h(t).
delta_t : float
Sample rate to make injection at.
detector_name : string
Name of the detector used for projecting injections.
distance_scale: float, optional
Factor to scale the distance of an injection with. The default (=1)
is no scaling.
Returns
--------
signal : float
h(t) corresponding to the injection.
### Response:
def make_strain_from_inj_object(self, inj, delta_t, detector_name,
distance_scale=1):
"""Make a h(t) strain time-series from an injection object as read from
an hdf file.
Parameters
-----------
inj : injection object
The injection object to turn into a strain h(t).
delta_t : float
Sample rate to make injection at.
detector_name : string
Name of the detector used for projecting injections.
distance_scale: float, optional
Factor to scale the distance of an injection with. The default (=1)
is no scaling.
Returns
--------
signal : float
h(t) corresponding to the injection.
"""
detector = Detector(detector_name)
# compute the waveform time series
hp, hc = ringdown_td_approximants[inj['approximant']](
inj, delta_t=delta_t, **self.extra_args)
hp._epoch += inj['tc']
hc._epoch += inj['tc']
if distance_scale != 1:
hp /= distance_scale
hc /= distance_scale
# compute the detector response and add it to the strain
signal = detector.project_wave(hp, hc,
inj['ra'], inj['dec'], inj['polarization'])
return signal |
def acoustic_similarity_directories(directories, analysis_function, distance_function, stop_check=None, call_back=None, multiprocessing=True):
"""
Analyze many directories.
Parameters
----------
directories : list of str
List of fully specified paths to the directories to be analyzed
"""
files = []
if call_back is not None:
call_back('Mapping directories...')
call_back(0, len(directories))
cur = 0
for d in directories:
if not os.path.isdir(d):
continue
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 3 == 0:
call_back(cur)
files += [os.path.join(d, x) for x in os.listdir(d) if x.lower().endswith('.wav')]
if len(files) == 0:
raise (ConchError("The directories specified do not contain any wav files"))
if call_back is not None:
call_back('Mapping directories...')
call_back(0, len(files) * len(files))
cur = 0
path_mapping = list()
for x in files:
for y in files:
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 20 == 0:
call_back(cur)
if not x.lower().endswith('.wav'):
continue
if not y.lower().endswith('.wav'):
continue
if x == y:
continue
path_mapping.append((x, y))
result = acoustic_similarity_mapping(path_mapping, analysis_function, distance_function, stop_check, call_back, multiprocessing)
return result | Analyze many directories.
Parameters
----------
directories : list of str
List of fully specified paths to the directories to be analyzed | Below is the instruction that describes the task:
### Input:
Analyze many directories.
Parameters
----------
directories : list of str
List of fully specified paths to the directories to be analyzed
### Response:
def acoustic_similarity_directories(directories, analysis_function, distance_function, stop_check=None, call_back=None, multiprocessing=True):
"""
Analyze many directories.
Parameters
----------
directories : list of str
List of fully specified paths to the directories to be analyzed
"""
files = []
if call_back is not None:
call_back('Mapping directories...')
call_back(0, len(directories))
cur = 0
for d in directories:
if not os.path.isdir(d):
continue
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 3 == 0:
call_back(cur)
files += [os.path.join(d, x) for x in os.listdir(d) if x.lower().endswith('.wav')]
if len(files) == 0:
raise (ConchError("The directories specified do not contain any wav files"))
if call_back is not None:
call_back('Mapping directories...')
call_back(0, len(files) * len(files))
cur = 0
path_mapping = list()
for x in files:
for y in files:
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 20 == 0:
call_back(cur)
if not x.lower().endswith('.wav'):
continue
if not y.lower().endswith('.wav'):
continue
if x == y:
continue
path_mapping.append((x, y))
result = acoustic_similarity_mapping(path_mapping, analysis_function, distance_function, stop_check, call_back, multiprocessing)
return result |
def clear(self):
"""
convenience function to empty this fastrun container
"""
self.prop_dt_map = dict()
self.prop_data = dict()
self.rev_lookup = defaultdict(set) | convenience function to empty this fastrun container | Below is the instruction that describes the task:
### Input:
convenience function to empty this fastrun container
### Response:
def clear(self):
"""
convenience function to empty this fastrun container
"""
self.prop_dt_map = dict()
self.prop_data = dict()
self.rev_lookup = defaultdict(set) |
def flush_all(self, conn):
"""Its effect is to invalidate all existing items immediately"""
command = b'flush_all\r\n'
response = yield from self._execute_simple_command(
conn, command)
if const.OK != response:
raise ClientException('Memcached flush_all failed', response) | Its effect is to invalidate all existing items immediately | Below is the instruction that describes the task:
### Input:
Its effect is to invalidate all existing items immediately
### Response:
def flush_all(self, conn):
"""Its effect is to invalidate all existing items immediately"""
command = b'flush_all\r\n'
response = yield from self._execute_simple_command(
conn, command)
if const.OK != response:
raise ClientException('Memcached flush_all failed', response) |
def spearmanr(x, y):
"""
Michiel de Hoon's library (available in BioPython or standalone as
PyCluster) returns Spearman rsb which does include a tie correction.
>>> x = [5.05, 6.75, 3.21, 2.66]
>>> y = [1.65, 26.5, -5.93, 7.96]
>>> z = [1.65, 2.64, 2.64, 6.95]
>>> round(spearmanr(x, y), 4)
0.4
>>> round(spearmanr(x, z), 4)
-0.6325
"""
from scipy import stats
if not x or not y:
return 0
corr, pvalue = stats.spearmanr(x, y)
return corr | Michiel de Hoon's library (available in BioPython or standalone as
PyCluster) returns Spearman rsb which does include a tie correction.
>>> x = [5.05, 6.75, 3.21, 2.66]
>>> y = [1.65, 26.5, -5.93, 7.96]
>>> z = [1.65, 2.64, 2.64, 6.95]
>>> round(spearmanr(x, y), 4)
0.4
>>> round(spearmanr(x, z), 4)
-0.6325 | Below is the instruction that describes the task:
### Input:
Michiel de Hoon's library (available in BioPython or standalone as
PyCluster) returns Spearman rsb which does include a tie correction.
>>> x = [5.05, 6.75, 3.21, 2.66]
>>> y = [1.65, 26.5, -5.93, 7.96]
>>> z = [1.65, 2.64, 2.64, 6.95]
>>> round(spearmanr(x, y), 4)
0.4
>>> round(spearmanr(x, z), 4)
-0.6325
### Response:
def spearmanr(x, y):
"""
Michiel de Hoon's library (available in BioPython or standalone as
PyCluster) returns Spearman rsb which does include a tie correction.
>>> x = [5.05, 6.75, 3.21, 2.66]
>>> y = [1.65, 26.5, -5.93, 7.96]
>>> z = [1.65, 2.64, 2.64, 6.95]
>>> round(spearmanr(x, y), 4)
0.4
>>> round(spearmanr(x, z), 4)
-0.6325
"""
from scipy import stats
if not x or not y:
return 0
corr, pvalue = stats.spearmanr(x, y)
return corr |
def get_profiles(self):
"""Returns set of profile names referenced in this Feature
:returns: set of profile names
"""
out = set(x.profile for x in self.requires if x.profile)
out.update(x.profile for x in self.removes if x.profile)
return out | Returns set of profile names referenced in this Feature
:returns: set of profile names | Below is the instruction that describes the task:
### Input:
Returns set of profile names referenced in this Feature
:returns: set of profile names
### Response:
def get_profiles(self):
"""Returns set of profile names referenced in this Feature
:returns: set of profile names
"""
out = set(x.profile for x in self.requires if x.profile)
out.update(x.profile for x in self.removes if x.profile)
return out |
def group_dashboard(request, group_slug):
"""Dashboard for managing a TenantGroup."""
groups = get_user_groups(request.user)
group = get_object_or_404(groups, slug=group_slug)
tenants = get_user_tenants(request.user, group)
can_edit_group = request.user.has_perm('multitenancy.change_tenantgroup', group)
count = len(tenants)
if count == 1:
# Redirect to the detail page for this tenant
return redirect(tenants[0])
context = {
'group': group,
'tenants': tenants,
'count': count,
'can_edit_group': can_edit_group,
}
return render(request, 'multitenancy/group-detail.html', context) | Dashboard for managing a TenantGroup. | Below is the instruction that describes the task:
### Input:
Dashboard for managing a TenantGroup.
### Response:
def group_dashboard(request, group_slug):
"""Dashboard for managing a TenantGroup."""
groups = get_user_groups(request.user)
group = get_object_or_404(groups, slug=group_slug)
tenants = get_user_tenants(request.user, group)
can_edit_group = request.user.has_perm('multitenancy.change_tenantgroup', group)
count = len(tenants)
if count == 1:
# Redirect to the detail page for this tenant
return redirect(tenants[0])
context = {
'group': group,
'tenants': tenants,
'count': count,
'can_edit_group': can_edit_group,
}
return render(request, 'multitenancy/group-detail.html', context) |
def broadcast(self, event):
"""Broadcasts an event either to all users or clients, depending on
event flag"""
try:
if event.broadcasttype == "users":
if len(self._users) > 0:
self.log("Broadcasting to all users:",
event.content, lvl=network)
for useruuid in self._users.keys():
self.fireEvent(
send(useruuid, event.content, sendtype="user"))
# else:
# self.log("Not broadcasting, no users connected.",
# lvl=debug)
elif event.broadcasttype == "clients":
if len(self._clients) > 0:
self.log("Broadcasting to all clients: ",
event.content, lvl=network)
for client in self._clients.values():
self.fireEvent(write(client.sock, event.content),
"wsserver")
# else:
# self.log("Not broadcasting, no clients
# connected.",
# lvl=debug)
elif event.broadcasttype == "socks":
if len(self._sockets) > 0:
self.log("Emergency?! Broadcasting to all sockets: ",
event.content)
for sock in self._sockets:
self.fireEvent(write(sock, event.content), "wsserver")
# else:
# self.log("Not broadcasting, no sockets
# connected.",
# lvl=debug)
except Exception as e:
self.log("Error during broadcast: ", e, type(e), lvl=critical) | Broadcasts an event either to all users or clients, depending on
event flag | Below is the the instruction that describes the task:
### Input:
Broadcasts an event either to all users or clients, depending on
event flag
### Response:
def broadcast(self, event):
"""Broadcasts an event either to all users or clients, depending on
event flag"""
try:
if event.broadcasttype == "users":
if len(self._users) > 0:
self.log("Broadcasting to all users:",
event.content, lvl=network)
for useruuid in self._users.keys():
self.fireEvent(
send(useruuid, event.content, sendtype="user"))
# else:
# self.log("Not broadcasting, no users connected.",
# lvl=debug)
elif event.broadcasttype == "clients":
if len(self._clients) > 0:
self.log("Broadcasting to all clients: ",
event.content, lvl=network)
for client in self._clients.values():
self.fireEvent(write(client.sock, event.content),
"wsserver")
# else:
# self.log("Not broadcasting, no clients
# connected.",
# lvl=debug)
elif event.broadcasttype == "socks":
if len(self._sockets) > 0:
self.log("Emergency?! Broadcasting to all sockets: ",
event.content)
for sock in self._sockets:
self.fireEvent(write(sock, event.content), "wsserver")
# else:
# self.log("Not broadcasting, no sockets
# connected.",
# lvl=debug)
except Exception as e:
self.log("Error during broadcast: ", e, type(e), lvl=critical) |
def BTC(cpu, dest, src):
"""
Bit test and complement.
Selects the bit in a bit string (specified with the first operand, called
the bit base) at the bit-position designated by the bit offset operand
(second operand), stores the value of the bit in the CF flag, and complements
the selected bit in the bit string.
:param cpu: current CPU.
:param dest: bit base operand.
:param src: bit offset operand.
"""
if dest.type == 'register':
value = dest.read()
pos = src.read() % dest.size
cpu.CF = value & (1 << pos) == 1 << pos
dest.write(value ^ (1 << pos))
elif dest.type == 'memory':
addr, pos = cpu._getMemoryBit(dest, src)
base, size, ty = cpu.get_descriptor(cpu.DS)
addr += base
value = cpu.read_int(addr, 8)
cpu.CF = value & (1 << pos) == 1 << pos
value = value ^ (1 << pos)
cpu.write_int(addr, value, 8)
else:
raise NotImplementedError(f"Unknown operand for BTC: {dest.type}") | Bit test and complement.
Selects the bit in a bit string (specified with the first operand, called
the bit base) at the bit-position designated by the bit offset operand
(second operand), stores the value of the bit in the CF flag, and complements
the selected bit in the bit string.
:param cpu: current CPU.
:param dest: bit base operand.
        :param src: bit offset operand. | Below is the instruction that describes the task:
### Input:
Bit test and complement.
Selects the bit in a bit string (specified with the first operand, called
the bit base) at the bit-position designated by the bit offset operand
(second operand), stores the value of the bit in the CF flag, and complements
the selected bit in the bit string.
:param cpu: current CPU.
:param dest: bit base operand.
:param src: bit offset operand.
### Response:
def BTC(cpu, dest, src):
"""
Bit test and complement.
Selects the bit in a bit string (specified with the first operand, called
the bit base) at the bit-position designated by the bit offset operand
(second operand), stores the value of the bit in the CF flag, and complements
the selected bit in the bit string.
:param cpu: current CPU.
:param dest: bit base operand.
:param src: bit offset operand.
"""
if dest.type == 'register':
value = dest.read()
pos = src.read() % dest.size
cpu.CF = value & (1 << pos) == 1 << pos
dest.write(value ^ (1 << pos))
elif dest.type == 'memory':
addr, pos = cpu._getMemoryBit(dest, src)
base, size, ty = cpu.get_descriptor(cpu.DS)
addr += base
value = cpu.read_int(addr, 8)
cpu.CF = value & (1 << pos) == 1 << pos
value = value ^ (1 << pos)
cpu.write_int(addr, value, 8)
else:
raise NotImplementedError(f"Unknown operand for BTC: {dest.type}") |
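
A small, self-contained walk-through of the register branch above; the operand width and bit values are illustrative:

# Worked example of the register path: test bit (offset % size), store the
# old bit in CF, then flip it. Plain integers stand in for the operands.
size = 64                                   # assumed operand width in bits
value = 0b1010                              # register contents before BTC
offset = 65                                 # raw bit-offset operand
pos = offset % size                         # -> 1; the offset wraps at the width
cf = (value & (1 << pos)) == (1 << pos)     # CF receives the old bit (True here)
result = value ^ (1 << pos)                 # the selected bit is complemented
assert cf is True and result == 0b1000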
def get(self, date=datetime.date.today(), country=None):
"""
Get the CPI value for a specific time. Defaults to today. This uses
the closest method internally but sets limit to one day.
"""
if not country:
country = self.country
if country == "all":
raise ValueError("You need to specify a country")
if not isinstance(date, str) and not isinstance(date, int):
date = date.year
cpi = self.data.get(country.upper(), {}).get(str(date))
if not cpi:
raise ValueError("Missing CPI data for {} for {}".format(
country, date))
return CPIResult(date=date, value=cpi) | Get the CPI value for a specific time. Defaults to today. This uses
        the closest method internally but sets limit to one day. | Below is the instruction that describes the task:
### Input:
Get the CPI value for a specific time. Defaults to today. This uses
the closest method internally but sets limit to one day.
### Response:
def get(self, date=datetime.date.today(), country=None):
"""
Get the CPI value for a specific time. Defaults to today. This uses
the closest method internally but sets limit to one day.
"""
if not country:
country = self.country
if country == "all":
raise ValueError("You need to specify a country")
if not isinstance(date, str) and not isinstance(date, int):
date = date.year
cpi = self.data.get(country.upper(), {}).get(str(date))
if not cpi:
raise ValueError("Missing CPI data for {} for {}".format(
country, date))
return CPIResult(date=date, value=cpi) |
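
The lookup above reduces to indexing a preloaded country/year mapping; a minimal sketch with an illustrative (not real) CPI figure:

# Stand-in for the method's core lookup; the data value is made up.
import datetime

data = {"USA": {"2015": 237.0}}
date, country = datetime.date(2015, 6, 1), "usa"
value = data.get(country.upper(), {}).get(str(date.year))
assert value == 237.0

Note that the method's `date` default (`datetime.date.today()`) is bound once at definition time, so long-running callers should pass an explicit date rather than rely on it.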
def check(f):
"""
Wraps the function with a decorator that runs all of the
pre/post conditions.
"""
if hasattr(f, 'wrapped_fn'):
return f
else:
@wraps(f)
def decorated(*args, **kwargs):
return check_conditions(f, args, kwargs)
decorated.wrapped_fn = f
return decorated | Wraps the function with a decorator that runs all of the
    pre/post conditions. | Below is the instruction that describes the task:
### Input:
Wraps the function with a decorator that runs all of the
pre/post conditions.
### Response:
def check(f):
"""
Wraps the function with a decorator that runs all of the
pre/post conditions.
"""
if hasattr(f, 'wrapped_fn'):
return f
else:
@wraps(f)
def decorated(*args, **kwargs):
return check_conditions(f, args, kwargs)
decorated.wrapped_fn = f
return decorated |
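
A short sketch of using the decorator above; `check_conditions` (and the `wraps` import) come from the surrounding module and are only needed once the wrapped function is actually called:

# Applying the decorator marks the wrapper with `wrapped_fn`, so wrapping a
# second time is a no-op and returns the same object.
@check
def divide(a, b):
    return a / b

assert check(divide) is divide        # already wrapped: returned unchanged
assert divide.wrapped_fn(6, 3) == 2   # the raw, unchecked function stays reachable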
def upload(self, file_path, timeout=-1):
"""
Upload an SPP ISO image file or a hotfix file to the appliance.
The API supports upload of one hotfix at a time into the system.
For the successful upload of a hotfix, ensure its original name and extension are not altered.
Args:
file_path: Full path to firmware.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Information about the updated firmware bundle.
"""
return self._client.upload(file_path, timeout=timeout) | Upload an SPP ISO image file or a hotfix file to the appliance.
The API supports upload of one hotfix at a time into the system.
For the successful upload of a hotfix, ensure its original name and extension are not altered.
Args:
file_path: Full path to firmware.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
            dict: Information about the updated firmware bundle. | Below is the instruction that describes the task:
### Input:
Upload an SPP ISO image file or a hotfix file to the appliance.
The API supports upload of one hotfix at a time into the system.
For the successful upload of a hotfix, ensure its original name and extension are not altered.
Args:
file_path: Full path to firmware.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Information about the updated firmware bundle.
### Response:
def upload(self, file_path, timeout=-1):
"""
Upload an SPP ISO image file or a hotfix file to the appliance.
The API supports upload of one hotfix at a time into the system.
For the successful upload of a hotfix, ensure its original name and extension are not altered.
Args:
file_path: Full path to firmware.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Information about the updated firmware bundle.
"""
return self._client.upload(file_path, timeout=timeout) |
def _load_wm_map(exclude_auto=None):
"""Load an ontology map for world models.
exclude_auto : None or list[tuple]
A list of ontology mappings for which automated mappings should be
excluded, e.g. [(HUME, UN)] would result in not using mappings
from HUME to UN.
"""
exclude_auto = [] if not exclude_auto else exclude_auto
path_here = os.path.dirname(os.path.abspath(__file__))
ontomap_file = os.path.join(path_here, '../resources/wm_ontomap.tsv')
mappings = {}
def make_hume_prefix_map():
hume_ont = os.path.join(path_here, '../sources/hume/hume_ontology.rdf')
graph = rdflib.Graph()
graph.parse(os.path.abspath(hume_ont), format='nt')
entry_map = {}
for node in graph.all_nodes():
entry = node.split('#')[1]
# Handle "event" and other top-level entries
if '/' not in entry:
entry_map[entry] = None
continue
parts = entry.split('/')
prefix, real_entry = parts[0], '/'.join(parts[1:])
entry_map[real_entry] = prefix
return entry_map
hume_prefix_map = make_hume_prefix_map()
def add_hume_prefix(hume_entry):
"""We need to do this because the HUME prefixes are missing"""
prefix = hume_prefix_map[hume_entry]
return '%s/%s' % (prefix, hume_entry)
def map_entry(reader, entry):
"""Remap the readers and entries to match our internal standards."""
if reader == 'eidos':
namespace = 'UN'
entry = entry.replace(' ', '_')
entry_id = entry
elif reader == 'BBN':
namespace = 'HUME'
entry = entry.replace(' ', '_')
entry_id = add_hume_prefix(entry)
elif reader == 'sofia':
namespace = 'SOFIA'
# First chop off the Event/Entity prefix
parts = entry.split('/')[1:]
# Now we split each part by underscore and capitalize
# each piece of each part
parts = ['_'.join([p.capitalize() for p in part.split('_')])
for part in parts]
# Finally we stick the entry back together separated by slashes
entry_id = '/'.join(parts)
else:
return reader, entry
return namespace, entry_id
with open(ontomap_file, 'r') as fh:
for line in fh.readlines():
# Get each entry from the line
s, se, t, te, score = line.strip().split('\t')
score = float(score)
# Map the entries to our internal naming standards
s, se = map_entry(s, se)
t, te = map_entry(t, te)
# Skip automated mappings when they should be excluded
if (s, t) not in exclude_auto:
# We first do the forward mapping
if (s, se, t) in mappings:
if mappings[(s, se, t)][1] < score:
mappings[(s, se, t)] = ((t, te), score)
else:
mappings[(s, se, t)] = ((t, te), score)
# Then we add the reverse mapping
if (t, s) not in exclude_auto:
if (t, te, s) in mappings:
if mappings[(t, te, s)][1] < score:
mappings[(t, te, s)] = ((s, se), score)
else:
mappings[(t, te, s)] = ((s, se), score)
ontomap = []
for s, ts in mappings.items():
ontomap.append(((s[0], s[1]), ts[0], ts[1]))
# Now apply the Hume -> Eidos override
override_file = os.path.join(path_here, '../resources/wm_ontomap.bbn.tsv')
override_mappings = []
with open(override_file, 'r') as fh:
for row in fh.readlines():
if 'BBN' not in row:
continue
# Order is target first, source second
_, te, _, se = row.strip().split('\t')
# Map the entries to our internal naming standards
s = 'HUME'
t = 'UN'
se = se.replace(' ', '_')
te = te.replace(' ', '_')
if se.startswith('/'):
se = se[1:]
override_mappings.append((s, se, t, te))
for s, se, t, te in override_mappings:
found = False
for idx, ((so, seo), (eo, teo), score) in enumerate(ontomap):
if (s, se, t) == (so, seo, eo):
# Override when a match is found
ontomap[idx] = ((s, se), (t, te), 1.0)
found = True
if not found:
ontomap.append(((s, se), (t, te), 1.0))
return ontomap | Load an ontology map for world models.
exclude_auto : None or list[tuple]
A list of ontology mappings for which automated mappings should be
excluded, e.g. [(HUME, UN)] would result in not using mappings
        from HUME to UN. | Below is the instruction that describes the task:
### Input:
Load an ontology map for world models.
exclude_auto : None or list[tuple]
A list of ontology mappings for which automated mappings should be
excluded, e.g. [(HUME, UN)] would result in not using mappings
from HUME to UN.
### Response:
def _load_wm_map(exclude_auto=None):
"""Load an ontology map for world models.
exclude_auto : None or list[tuple]
A list of ontology mappings for which automated mappings should be
excluded, e.g. [(HUME, UN)] would result in not using mappings
from HUME to UN.
"""
exclude_auto = [] if not exclude_auto else exclude_auto
path_here = os.path.dirname(os.path.abspath(__file__))
ontomap_file = os.path.join(path_here, '../resources/wm_ontomap.tsv')
mappings = {}
def make_hume_prefix_map():
hume_ont = os.path.join(path_here, '../sources/hume/hume_ontology.rdf')
graph = rdflib.Graph()
graph.parse(os.path.abspath(hume_ont), format='nt')
entry_map = {}
for node in graph.all_nodes():
entry = node.split('#')[1]
# Handle "event" and other top-level entries
if '/' not in entry:
entry_map[entry] = None
continue
parts = entry.split('/')
prefix, real_entry = parts[0], '/'.join(parts[1:])
entry_map[real_entry] = prefix
return entry_map
hume_prefix_map = make_hume_prefix_map()
def add_hume_prefix(hume_entry):
"""We need to do this because the HUME prefixes are missing"""
prefix = hume_prefix_map[hume_entry]
return '%s/%s' % (prefix, hume_entry)
def map_entry(reader, entry):
"""Remap the readers and entries to match our internal standards."""
if reader == 'eidos':
namespace = 'UN'
entry = entry.replace(' ', '_')
entry_id = entry
elif reader == 'BBN':
namespace = 'HUME'
entry = entry.replace(' ', '_')
entry_id = add_hume_prefix(entry)
elif reader == 'sofia':
namespace = 'SOFIA'
# First chop off the Event/Entity prefix
parts = entry.split('/')[1:]
# Now we split each part by underscore and capitalize
# each piece of each part
parts = ['_'.join([p.capitalize() for p in part.split('_')])
for part in parts]
# Finally we stick the entry back together separated by slashes
entry_id = '/'.join(parts)
else:
return reader, entry
return namespace, entry_id
with open(ontomap_file, 'r') as fh:
for line in fh.readlines():
# Get each entry from the line
s, se, t, te, score = line.strip().split('\t')
score = float(score)
# Map the entries to our internal naming standards
s, se = map_entry(s, se)
t, te = map_entry(t, te)
# Skip automated mappings when they should be excluded
if (s, t) not in exclude_auto:
# We first do the forward mapping
if (s, se, t) in mappings:
if mappings[(s, se, t)][1] < score:
mappings[(s, se, t)] = ((t, te), score)
else:
mappings[(s, se, t)] = ((t, te), score)
# Then we add the reverse mapping
if (t, s) not in exclude_auto:
if (t, te, s) in mappings:
if mappings[(t, te, s)][1] < score:
mappings[(t, te, s)] = ((s, se), score)
else:
mappings[(t, te, s)] = ((s, se), score)
ontomap = []
for s, ts in mappings.items():
ontomap.append(((s[0], s[1]), ts[0], ts[1]))
# Now apply the Hume -> Eidos override
override_file = os.path.join(path_here, '../resources/wm_ontomap.bbn.tsv')
override_mappings = []
with open(override_file, 'r') as fh:
for row in fh.readlines():
if 'BBN' not in row:
continue
# Order is target first, source second
_, te, _, se = row.strip().split('\t')
# Map the entries to our internal naming standards
s = 'HUME'
t = 'UN'
se = se.replace(' ', '_')
te = te.replace(' ', '_')
if se.startswith('/'):
se = se[1:]
override_mappings.append((s, se, t, te))
for s, se, t, te in override_mappings:
found = False
for idx, ((so, seo), (eo, teo), score) in enumerate(ontomap):
if (s, se, t) == (so, seo, eo):
# Override when a match is found
ontomap[idx] = ((s, se), (t, te), 1.0)
found = True
if not found:
ontomap.append(((s, se), (t, te), 1.0))
return ontomap |
def unregister_transform(self, node_class, transform, predicate=None):
"""Unregister the given transform."""
        self.transforms[node_class].remove((transform, predicate)) | Unregister the given transform. | Below is the instruction that describes the task:
### Input:
Unregister the given transform.
### Response:
def unregister_transform(self, node_class, transform, predicate=None):
"""Unregister the given transform."""
self.transforms[node_class].remove((transform, predicate)) |
def cwd_filt2(depth):
"""Return the last depth elements of the current working directory.
$HOME is always replaced with '~'.
If depth==0, the full path is returned."""
full_cwd = os.getcwdu()
cwd = full_cwd.replace(HOME,"~").split(os.sep)
if '~' in cwd and len(cwd) == depth+1:
depth += 1
drivepart = ''
if sys.platform == 'win32' and len(cwd) > depth:
drivepart = os.path.splitdrive(full_cwd)[0]
out = drivepart + '/'.join(cwd[-depth:])
return out or os.sep | Return the last depth elements of the current working directory.
$HOME is always replaced with '~'.
    If depth==0, the full path is returned. | Below is the instruction that describes the task:
### Input:
Return the last depth elements of the current working directory.
$HOME is always replaced with '~'.
If depth==0, the full path is returned.
### Response:
def cwd_filt2(depth):
"""Return the last depth elements of the current working directory.
$HOME is always replaced with '~'.
If depth==0, the full path is returned."""
full_cwd = os.getcwdu()
cwd = full_cwd.replace(HOME,"~").split(os.sep)
if '~' in cwd and len(cwd) == depth+1:
depth += 1
drivepart = ''
if sys.platform == 'win32' and len(cwd) > depth:
drivepart = os.path.splitdrive(full_cwd)[0]
out = drivepart + '/'.join(cwd[-depth:])
return out or os.sep |
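
The trimming logic above, replayed on a hard-coded path (the real function reads `os.getcwdu()`, which implies a Python 2 runtime); the path is an example:

# Reproduce the core transformation on a fixed path instead of the live cwd.
HOME = "/home/alice"                                  # stand-in for the module constant
full_cwd = "/home/alice/projects/demo/src"
cwd = full_cwd.replace(HOME, "~").split("/")          # ['~', 'projects', 'demo', 'src']
assert "/".join(cwd[-2:]) == "demo/src"               # depth=2 keeps the last two parts
assert "/".join(cwd[-0:]) == "~/projects/demo/src"    # depth=0 falls back to the full path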
def cancel(self):
"""
Cancels an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.cancel()
This will send notifications to anyone who has not declined the meeting.
"""
if not self.id:
raise TypeError(u"You can't delete an event that hasn't been created yet.")
self.refresh_change_key()
self.service.send(soap_request.delete_event(self))
# TODO rsanders high - check return status to make sure it was actually sent
return None | Cancels an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.cancel()
        This will send notifications to anyone who has not declined the meeting. | Below is the instruction that describes the task:
### Input:
Cancels an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.cancel()
This will send notifications to anyone who has not declined the meeting.
### Response:
def cancel(self):
"""
Cancels an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.cancel()
This will send notifications to anyone who has not declined the meeting.
"""
if not self.id:
raise TypeError(u"You can't delete an event that hasn't been created yet.")
self.refresh_change_key()
self.service.send(soap_request.delete_event(self))
# TODO rsanders high - check return status to make sure it was actually sent
return None |
def recovery(self, using=None, **kwargs):
"""
The indices recovery API provides insight into on-going shard
recoveries for the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.recovery`` unchanged.
"""
return self._get_connection(using).indices.recovery(index=self._name, **kwargs) | The indices recovery API provides insight into on-going shard
recoveries for the index.
Any additional keyword arguments will be passed to
        ``Elasticsearch.indices.recovery`` unchanged. | Below is the instruction that describes the task:
### Input:
The indices recovery API provides insight into on-going shard
recoveries for the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.recovery`` unchanged.
### Response:
def recovery(self, using=None, **kwargs):
"""
The indices recovery API provides insight into on-going shard
recoveries for the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.recovery`` unchanged.
"""
return self._get_connection(using).indices.recovery(index=self._name, **kwargs) |
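
An assumed call pattern through elasticsearch_dsl's `Index` wrapper; it needs a configured default connection, and `active_only` is just one example of a kwarg forwarded to the low-level client:

# Sketch only: requires a default connection, e.g. created beforehand with
# elasticsearch_dsl.connections.create_connection(hosts=[...]).
from elasticsearch_dsl import Index

blog = Index('blog-2024')
shard_status = blog.recovery(active_only=True)   # kwargs pass straight through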
def getLabelByName(self, name):
        """Gets a label widget by its component name
:param name: name of the AbstractStimulusComponent which this label is named after
:type name: str
:returns: :class:`DragLabel<sparkle.gui.drag_label.DragLabel>`
"""
name = name.lower()
if name in self.stimLabels:
return self.stimLabels[name]
else:
            return None | Gets a label widget by its component name
:param name: name of the AbstractStimulusComponent which this label is named after
:type name: str
        :returns: :class:`DragLabel<sparkle.gui.drag_label.DragLabel>` | Below is the instruction that describes the task:
### Input:
Gets a label widget by its component name
:param name: name of the AbstractStimulusComponent which this label is named after
:type name: str
:returns: :class:`DragLabel<sparkle.gui.drag_label.DragLabel>`
### Response:
def getLabelByName(self, name):
        """Gets a label widget by its component name
:param name: name of the AbstractStimulusComponent which this label is named after
:type name: str
:returns: :class:`DragLabel<sparkle.gui.drag_label.DragLabel>`
"""
name = name.lower()
if name in self.stimLabels:
return self.stimLabels[name]
else:
return None |
def _gen_doc(cls, summary, full_name, identifier, example_call, doc_str,
show_examples):
"""Generate the documentation docstring for a PlotMethod"""
# leave out the first argument
example_call = ', '.join(map(str.strip, example_call.split(',')[1:]))
ret = docstrings.dedents("""
%s
This plotting method adds data arrays and plots them via
:class:`%s` plotters
To plot a variable in this dataset, type::
>>> ds.psy.plot.%s(%s)
%s""" % (summary, full_name, identifier, example_call, doc_str))
if show_examples:
ret += '\n\n' + cls._gen_examples(identifier)
        return ret | Generate the documentation docstring for a PlotMethod | Below is the instruction that describes the task:
### Input:
Generate the documentation docstring for a PlotMethod
### Response:
def _gen_doc(cls, summary, full_name, identifier, example_call, doc_str,
show_examples):
"""Generate the documentation docstring for a PlotMethod"""
# leave out the first argument
example_call = ', '.join(map(str.strip, example_call.split(',')[1:]))
ret = docstrings.dedents("""
%s
This plotting method adds data arrays and plots them via
:class:`%s` plotters
To plot a variable in this dataset, type::
>>> ds.psy.plot.%s(%s)
%s""" % (summary, full_name, identifier, example_call, doc_str))
if show_examples:
ret += '\n\n' + cls._gen_examples(identifier)
return ret |
def parse_name_altree(record):
"""Parse NAME structure assuming ALTREE dialect.
In ALTREE dialect maiden name (if present) is saved as SURN sub-record
and is also appended to family name in parens. Given name is saved in
GIVN sub-record. Few examples:
No maiden name:
1 NAME John /Smith/
2 GIVN John
With maiden name:
1 NAME Jane /Smith (Ivanova)/
2 GIVN Jane
2 SURN Ivanova
No maiden name
1 NAME Mers /Daimler (-Benz)/
2 GIVN Mers
Because family name can also contain parens it's not enough to parse
family name and guess maiden name from it, we also have to check for
SURN record.
ALTREE also replaces empty names with question mark, we undo that too.
:param record: NAME record
:return: tuple with 3 or 4 elements, first three elements of tuple are
the same as returned from :py:meth:`split_name` method, fourth element
(if present) denotes maiden name.
"""
name_tuple = split_name(record.value)
if name_tuple[1] == '?':
name_tuple = (name_tuple[0], '', name_tuple[2])
maiden = record.sub_tag_value("SURN")
if maiden:
# strip "(maiden)" from family name
ending = '(' + maiden + ')'
surname = name_tuple[1]
if surname.endswith(ending):
surname = surname[:-len(ending)].rstrip()
if surname == '?':
surname = ''
name_tuple = (name_tuple[0], surname, name_tuple[2], maiden)
return name_tuple | Parse NAME structure assuming ALTREE dialect.
In ALTREE dialect maiden name (if present) is saved as SURN sub-record
and is also appended to family name in parens. Given name is saved in
GIVN sub-record. Few examples:
No maiden name:
1 NAME John /Smith/
2 GIVN John
With maiden name:
1 NAME Jane /Smith (Ivanova)/
2 GIVN Jane
2 SURN Ivanova
No maiden name
1 NAME Mers /Daimler (-Benz)/
2 GIVN Mers
Because family name can also contain parens it's not enough to parse
family name and guess maiden name from it, we also have to check for
SURN record.
ALTREE also replaces empty names with question mark, we undo that too.
:param record: NAME record
:return: tuple with 3 or 4 elements, first three elements of tuple are
the same as returned from :py:meth:`split_name` method, fourth element
        (if present) denotes maiden name. | Below is the instruction that describes the task:
### Input:
Parse NAME structure assuming ALTREE dialect.
In ALTREE dialect maiden name (if present) is saved as SURN sub-record
and is also appended to family name in parens. Given name is saved in
GIVN sub-record. Few examples:
No maiden name:
1 NAME John /Smith/
2 GIVN John
With maiden name:
1 NAME Jane /Smith (Ivanova)/
2 GIVN Jane
2 SURN Ivanova
No maiden name
1 NAME Mers /Daimler (-Benz)/
2 GIVN Mers
Because family name can also contain parens it's not enough to parse
family name and guess maiden name from it, we also have to check for
SURN record.
ALTREE also replaces empty names with question mark, we undo that too.
:param record: NAME record
:return: tuple with 3 or 4 elements, first three elements of tuple are
the same as returned from :py:meth:`split_name` method, fourth element
(if present) denotes maiden name.
### Response:
def parse_name_altree(record):
"""Parse NAME structure assuming ALTREE dialect.
In ALTREE dialect maiden name (if present) is saved as SURN sub-record
and is also appended to family name in parens. Given name is saved in
GIVN sub-record. Few examples:
No maiden name:
1 NAME John /Smith/
2 GIVN John
With maiden name:
1 NAME Jane /Smith (Ivanova)/
2 GIVN Jane
2 SURN Ivanova
No maiden name
1 NAME Mers /Daimler (-Benz)/
2 GIVN Mers
Because family name can also contain parens it's not enough to parse
family name and guess maiden name from it, we also have to check for
SURN record.
ALTREE also replaces empty names with question mark, we undo that too.
:param record: NAME record
:return: tuple with 3 or 4 elements, first three elements of tuple are
the same as returned from :py:meth:`split_name` method, fourth element
(if present) denotes maiden name.
"""
name_tuple = split_name(record.value)
if name_tuple[1] == '?':
name_tuple = (name_tuple[0], '', name_tuple[2])
maiden = record.sub_tag_value("SURN")
if maiden:
# strip "(maiden)" from family name
ending = '(' + maiden + ')'
surname = name_tuple[1]
if surname.endswith(ending):
surname = surname[:-len(ending)].rstrip()
if surname == '?':
surname = ''
name_tuple = (name_tuple[0], surname, name_tuple[2], maiden)
return name_tuple |
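
A minimal stand-in for the GEDCOM NAME record consumed above, exercising the maiden-name branch; the expected result assumes `split_name` returns the usual (given, surname, suffix) triple:

# Only the two pieces of the record API the parser touches are stubbed.
class FakeNameRecord:
    def __init__(self, value, surn=None):
        self.value = value
        self._surn = surn

    def sub_tag_value(self, tag):
        return self._surn if tag == "SURN" else None

rec = FakeNameRecord("Jane /Smith (Ivanova)/", surn="Ivanova")
# parse_name_altree(rec) would give ('Jane', 'Smith', '', 'Ivanova'):
# '(Ivanova)' is stripped from the surname and returned as the maiden name.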
def _coerce_json_to_collection(self, json_repr):
"""Use to ensure that a JSON string (if found) is parsed to the equivalent dict in python.
If the incoming value is already parsed, do nothing. If a string fails to parse, return None."""
if isinstance(json_repr, dict):
collection = json_repr
else:
try:
collection = anyjson.loads(json_repr)
except:
_LOG.warn('> invalid JSON (failed anyjson parsing)')
return None
return collection | Use to ensure that a JSON string (if found) is parsed to the equivalent dict in python.
        If the incoming value is already parsed, do nothing. If a string fails to parse, return None. | Below is the instruction that describes the task:
### Input:
Use to ensure that a JSON string (if found) is parsed to the equivalent dict in python.
If the incoming value is already parsed, do nothing. If a string fails to parse, return None.
### Response:
def _coerce_json_to_collection(self, json_repr):
"""Use to ensure that a JSON string (if found) is parsed to the equivalent dict in python.
If the incoming value is already parsed, do nothing. If a string fails to parse, return None."""
if isinstance(json_repr, dict):
collection = json_repr
else:
try:
collection = anyjson.loads(json_repr)
except:
_LOG.warn('> invalid JSON (failed anyjson parsing)')
return None
return collection |
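
The same behaviour as the helper above, restated outside the class with the standard-library `json` module in place of `anyjson`:

import json

def coerce(json_repr):
    # Already a dict: hand it back; otherwise try to parse, else None.
    if isinstance(json_repr, dict):
        return json_repr
    try:
        return json.loads(json_repr)
    except (TypeError, ValueError):
        return None

assert coerce({"a": 1}) == {"a": 1}      # parsed input passes through
assert coerce('{"a": 1}') == {"a": 1}    # JSON text is decoded
assert coerce("not json") is None        # unparseable input collapses to None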
def getDirectory(*args):
"""
Normalizes the getDirectory method between the different Qt
wrappers.
:return (<str> filename, <bool> accepted)
"""
result = QtGui.QFileDialog.getDirectory(*args)
# PyQt4 returns just a string
if type(result) is not tuple:
return result, bool(result)
# PySide returns a tuple of str, bool
else:
return result | Normalizes the getDirectory method between the different Qt
wrappers.
    :return (<str> filename, <bool> accepted) | Below is the instruction that describes the task:
### Input:
Normalizes the getDirectory method between the different Qt
wrappers.
:return (<str> filename, <bool> accepted)
### Response:
def getDirectory(*args):
"""
Normalizes the getDirectory method between the different Qt
wrappers.
:return (<str> filename, <bool> accepted)
"""
result = QtGui.QFileDialog.getDirectory(*args)
# PyQt4 returns just a string
if type(result) is not tuple:
return result, bool(result)
# PySide returns a tuple of str, bool
else:
return result |
def get_configuration_set_by_id(self, id):
'''Finds a configuration set in the component by its ID.
@param id The ID of the configuration set to search for.
@return The ConfigurationSet object for the set, or None if it was not
found.
'''
for cs in self.configuration_sets:
if cs.id == id:
return cs
return None | Finds a configuration set in the component by its ID.
@param id The ID of the configuration set to search for.
@return The ConfigurationSet object for the set, or None if it was not
        found. | Below is the instruction that describes the task:
### Input:
Finds a configuration set in the component by its ID.
@param id The ID of the configuration set to search for.
@return The ConfigurationSet object for the set, or None if it was not
found.
### Response:
def get_configuration_set_by_id(self, id):
'''Finds a configuration set in the component by its ID.
@param id The ID of the configuration set to search for.
@return The ConfigurationSet object for the set, or None if it was not
found.
'''
for cs in self.configuration_sets:
if cs.id == id:
return cs
return None |
def onBatchRejected(self, ledger_id):
"""
A batch of requests has been rejected, if stateRoot is None, reject
the current batch.
:param ledger_id:
:param stateRoot: state root after the batch was created
:return:
"""
if ledger_id == POOL_LEDGER_ID:
if isinstance(self.poolManager, TxnPoolManager):
self.get_req_handler(POOL_LEDGER_ID).onBatchRejected()
elif self.get_req_handler(ledger_id):
self.get_req_handler(ledger_id).onBatchRejected()
else:
logger.debug('{} did not know how to handle for ledger {}'.format(self, ledger_id))
self.audit_handler.post_batch_rejected(ledger_id)
self.execute_hook(NodeHooks.POST_BATCH_REJECTED, ledger_id) | A batch of requests has been rejected, if stateRoot is None, reject
the current batch.
:param ledger_id:
:param stateRoot: state root after the batch was created
        :return: | Below is the instruction that describes the task:
### Input:
A batch of requests has been rejected, if stateRoot is None, reject
the current batch.
:param ledger_id:
:param stateRoot: state root after the batch was created
:return:
### Response:
def onBatchRejected(self, ledger_id):
"""
A batch of requests has been rejected, if stateRoot is None, reject
the current batch.
:param ledger_id:
:param stateRoot: state root after the batch was created
:return:
"""
if ledger_id == POOL_LEDGER_ID:
if isinstance(self.poolManager, TxnPoolManager):
self.get_req_handler(POOL_LEDGER_ID).onBatchRejected()
elif self.get_req_handler(ledger_id):
self.get_req_handler(ledger_id).onBatchRejected()
else:
logger.debug('{} did not know how to handle for ledger {}'.format(self, ledger_id))
self.audit_handler.post_batch_rejected(ledger_id)
self.execute_hook(NodeHooks.POST_BATCH_REJECTED, ledger_id) |
def getR(self, i=5, j=6):
""" return transport matrix element, indexed by i, j,
        by default, return dispersion value, i.e. getR(5,6) in [m]
        :param i: row index, with initial index of 1
        :param j: col index, with initial index of 1
:return: transport matrix element
"""
if self.refresh is True:
self.getMatrix()
return self.transM[i - 1, j - 1] | return transport matrix element, indexed by i, j,
        by default, return dispersion value, i.e. getR(5,6) in [m]
        :param i: row index, with initial index of 1
        :param j: col index, with initial index of 1
        :return: transport matrix element | Below is the instruction that describes the task:
### Input:
return transport matrix element, indexed by i, j,
by default, return dispersion value, i.e. getR(5,6) in [m]
:param i: row index, with initial index of 1
:param j: col index, with initial index of 1
:return: transport matrix element
### Response:
def getR(self, i=5, j=6):
""" return transport matrix element, indexed by i, j,
        by default, return dispersion value, i.e. getR(5,6) in [m]
        :param i: row index, with initial index of 1
        :param j: col index, with initial index of 1
:return: transport matrix element
"""
if self.refresh is True:
self.getMatrix()
return self.transM[i - 1, j - 1] |
def dynamic_content_item_variant_delete(self, item_id, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/dynamic_content#delete-variant"
api_path = "/api/v2/dynamic_content/items/{item_id}/variants/{id}.json"
api_path = api_path.format(item_id=item_id, id=id)
        return self.call(api_path, method="DELETE", **kwargs) | https://developer.zendesk.com/rest_api/docs/core/dynamic_content#delete-variant | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/dynamic_content#delete-variant
### Response:
def dynamic_content_item_variant_delete(self, item_id, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/dynamic_content#delete-variant"
api_path = "/api/v2/dynamic_content/items/{item_id}/variants/{id}.json"
api_path = api_path.format(item_id=item_id, id=id)
return self.call(api_path, method="DELETE", **kwargs) |
def _read_linguas_from_files(env, linguas_files=None):
""" Parse `LINGUAS` file and return list of extracted languages """
import SCons.Util
import SCons.Environment
global _re_comment
global _re_lang
if not SCons.Util.is_List(linguas_files) \
and not SCons.Util.is_String(linguas_files) \
and not isinstance(linguas_files, SCons.Node.FS.Base) \
and linguas_files:
# If, linguas_files==True or such, then read 'LINGUAS' file.
linguas_files = ['LINGUAS']
if linguas_files is None:
return []
fnodes = env.arg2nodes(linguas_files)
linguas = []
for fnode in fnodes:
contents = _re_comment.sub("", fnode.get_text_contents())
ls = [l for l in _re_lang.findall(contents) if l]
linguas.extend(ls)
    return linguas | Parse `LINGUAS` file and return list of extracted languages | Below is the instruction that describes the task:
### Input:
Parse `LINGUAS` file and return list of extracted languages
### Response:
def _read_linguas_from_files(env, linguas_files=None):
""" Parse `LINGUAS` file and return list of extracted languages """
import SCons.Util
import SCons.Environment
global _re_comment
global _re_lang
if not SCons.Util.is_List(linguas_files) \
and not SCons.Util.is_String(linguas_files) \
and not isinstance(linguas_files, SCons.Node.FS.Base) \
and linguas_files:
# If, linguas_files==True or such, then read 'LINGUAS' file.
linguas_files = ['LINGUAS']
if linguas_files is None:
return []
fnodes = env.arg2nodes(linguas_files)
linguas = []
for fnode in fnodes:
contents = _re_comment.sub("", fnode.get_text_contents())
ls = [l for l in _re_lang.findall(contents) if l]
linguas.extend(ls)
return linguas |
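
The shape of a LINGUAS file the reader expects, with a simplified comment-stripping pattern; the module's own `_re_comment`/`_re_lang` regexes may differ in detail:

import re

linguas_text = """\
# translations we ship
de fr
pt_BR
"""
without_comments = re.sub(r"#.*", "", linguas_text)
assert without_comments.split() == ['de', 'fr', 'pt_BR']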
def winsorize(x, axis=0, limits=0.01):
"""
`Winsorize <https://en.wikipedia.org/wiki/Winsorizing>`_ values based on limits
"""
# operate on copy
x = x.copy()
if isinstance(x, pd.DataFrame):
return x.apply(_winsorize_wrapper, axis=axis, args=(limits, ))
else:
return pd.Series(_winsorize_wrapper(x, limits).values,
                         index=x.index) | `Winsorize <https://en.wikipedia.org/wiki/Winsorizing>`_ values based on limits | Below is the instruction that describes the task:
### Input:
`Winsorize <https://en.wikipedia.org/wiki/Winsorizing>`_ values based on limits
### Response:
def winsorize(x, axis=0, limits=0.01):
"""
`Winsorize <https://en.wikipedia.org/wiki/Winsorizing>`_ values based on limits
"""
# operate on copy
x = x.copy()
if isinstance(x, pd.DataFrame):
return x.apply(_winsorize_wrapper, axis=axis, args=(limits, ))
else:
return pd.Series(_winsorize_wrapper(x, limits).values,
index=x.index) |
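
The column-wise work above is handed to `_winsorize_wrapper`, which presumably wraps scipy's masked-stats winsorize; the snippet below shows that underlying operation directly (scipy and numpy assumed available, values illustrative):

import numpy as np
from scipy.stats.mstats import winsorize as mstats_winsorize

x = np.array([1, 2, 3, 4, 100])
clipped = mstats_winsorize(x, limits=0.2)   # pull in the extreme 20% at each tail
print(clipped)                              # -> [2 2 3 4 4]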
def AFF4Path(self, client_urn):
"""Returns the AFF4 URN this pathspec will be stored under.
Args:
client_urn: A ClientURN.
Returns:
A urn that corresponds to this pathspec.
Raises:
ValueError: If pathspec is not of the correct type.
"""
    # If the first level is OS and the second level is TSK it's probably a mount
# point resolution. We map it into the tsk branch. For example if we get:
# path: \\\\.\\Volume{1234}\\
# pathtype: OS
# mount_point: /c:/
# nested_path {
# path: /windows/
# pathtype: TSK
# }
# We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/
if not self.HasField("pathtype"):
raise ValueError("Can't determine AFF4 path without a valid pathtype.")
first_component = self[0]
dev = first_component.path
if first_component.HasField("offset"):
# We divide here just to get prettier numbers in the GUI
dev += ":{}".format(first_component.offset // 512)
if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and
self[1].pathtype == PathSpec.PathType.TSK):
result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev]
# Skip the top level pathspec.
start = 1
else:
# For now just map the top level prefix based on the first pathtype
result = [self.AFF4_PREFIXES[first_component.pathtype]]
start = 0
for p in self[start]:
component = p.path
# The following encode different pathspec properties into the AFF4 path in
# such a way that unique files on the client are mapped to unique URNs in
# the AFF4 space. Note that this transformation does not need to be
# reversible since we always use the PathSpec when accessing files on the
# client.
if p.HasField("offset"):
component += ":{}".format(p.offset // 512)
# Support ADS names.
if p.HasField("stream_name"):
component += ":" + p.stream_name
result.append(component)
return client_urn.Add("/".join(result)) | Returns the AFF4 URN this pathspec will be stored under.
Args:
client_urn: A ClientURN.
Returns:
A urn that corresponds to this pathspec.
Raises:
      ValueError: If pathspec is not of the correct type. | Below is the instruction that describes the task:
### Input:
Returns the AFF4 URN this pathspec will be stored under.
Args:
client_urn: A ClientURN.
Returns:
A urn that corresponds to this pathspec.
Raises:
ValueError: If pathspec is not of the correct type.
### Response:
def AFF4Path(self, client_urn):
"""Returns the AFF4 URN this pathspec will be stored under.
Args:
client_urn: A ClientURN.
Returns:
A urn that corresponds to this pathspec.
Raises:
ValueError: If pathspec is not of the correct type.
"""
    # If the first level is OS and the second level is TSK it's probably a mount
# point resolution. We map it into the tsk branch. For example if we get:
# path: \\\\.\\Volume{1234}\\
# pathtype: OS
# mount_point: /c:/
# nested_path {
# path: /windows/
# pathtype: TSK
# }
# We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/
if not self.HasField("pathtype"):
raise ValueError("Can't determine AFF4 path without a valid pathtype.")
first_component = self[0]
dev = first_component.path
if first_component.HasField("offset"):
# We divide here just to get prettier numbers in the GUI
dev += ":{}".format(first_component.offset // 512)
if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and
self[1].pathtype == PathSpec.PathType.TSK):
result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev]
# Skip the top level pathspec.
start = 1
else:
# For now just map the top level prefix based on the first pathtype
result = [self.AFF4_PREFIXES[first_component.pathtype]]
start = 0
for p in self[start]:
component = p.path
# The following encode different pathspec properties into the AFF4 path in
# such a way that unique files on the client are mapped to unique URNs in
# the AFF4 space. Note that this transformation does not need to be
# reversible since we always use the PathSpec when accessing files on the
# client.
if p.HasField("offset"):
component += ":{}".format(p.offset // 512)
# Support ADS names.
if p.HasField("stream_name"):
component += ":" + p.stream_name
result.append(component)
return client_urn.Add("/".join(result)) |
def absent(
name,
force=False,
region=None,
key=None,
keyid=None,
profile=None,
remove_lc=False):
'''
Ensure the named autoscale group is deleted.
name
Name of the autoscale group.
force
Force deletion of autoscale group.
remove_lc
Delete the launch config as well.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile)
if asg is None:
ret['result'] = False
ret['comment'] = 'Failed to check autoscale group existence.'
elif asg:
if __opts__['test']:
ret['comment'] = 'Autoscale group set to be deleted.'
ret['result'] = None
if remove_lc:
msg = 'Launch configuration {0} is set to be deleted.'.format(asg['launch_config_name'])
ret['comment'] = ' '.join([ret['comment'], msg])
return ret
deleted = __salt__['boto_asg.delete'](name, force, region, key, keyid,
profile)
if deleted:
if remove_lc:
lc_deleted = __salt__['boto_asg.delete_launch_configuration'](asg['launch_config_name'],
region,
key,
keyid,
profile)
if lc_deleted:
if 'launch_config' not in ret['changes']:
ret['changes']['launch_config'] = {}
ret['changes']['launch_config']['deleted'] = asg['launch_config_name']
else:
ret['result'] = False
ret['comment'] = ' '.join([ret['comment'], 'Failed to delete launch configuration.'])
ret['changes']['old'] = asg
ret['changes']['new'] = None
ret['comment'] = 'Deleted autoscale group.'
else:
ret['result'] = False
ret['comment'] = 'Failed to delete autoscale group.'
else:
ret['comment'] = 'Autoscale group does not exist.'
return ret | Ensure the named autoscale group is deleted.
name
Name of the autoscale group.
force
Force deletion of autoscale group.
remove_lc
Delete the launch config as well.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
    that contains a dict with region, key and keyid. | Below is the instruction that describes the task:
### Input:
Ensure the named autoscale group is deleted.
name
Name of the autoscale group.
force
Force deletion of autoscale group.
remove_lc
Delete the launch config as well.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
### Response:
def absent(
name,
force=False,
region=None,
key=None,
keyid=None,
profile=None,
remove_lc=False):
'''
Ensure the named autoscale group is deleted.
name
Name of the autoscale group.
force
Force deletion of autoscale group.
remove_lc
Delete the launch config as well.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile)
if asg is None:
ret['result'] = False
ret['comment'] = 'Failed to check autoscale group existence.'
elif asg:
if __opts__['test']:
ret['comment'] = 'Autoscale group set to be deleted.'
ret['result'] = None
if remove_lc:
msg = 'Launch configuration {0} is set to be deleted.'.format(asg['launch_config_name'])
ret['comment'] = ' '.join([ret['comment'], msg])
return ret
deleted = __salt__['boto_asg.delete'](name, force, region, key, keyid,
profile)
if deleted:
if remove_lc:
lc_deleted = __salt__['boto_asg.delete_launch_configuration'](asg['launch_config_name'],
region,
key,
keyid,
profile)
if lc_deleted:
if 'launch_config' not in ret['changes']:
ret['changes']['launch_config'] = {}
ret['changes']['launch_config']['deleted'] = asg['launch_config_name']
else:
ret['result'] = False
ret['comment'] = ' '.join([ret['comment'], 'Failed to delete launch configuration.'])
ret['changes']['old'] = asg
ret['changes']['new'] = None
ret['comment'] = 'Deleted autoscale group.'
else:
ret['result'] = False
ret['comment'] = 'Failed to delete autoscale group.'
else:
ret['comment'] = 'Autoscale group does not exist.'
return ret |
def getBody(self, url, method='GET', headers={}, data=None, socket=None):
"""Make an HTTP request and return the body
"""
if not 'User-Agent' in headers:
headers['User-Agent'] = ['Tensor HTTP checker']
        return self.request(url, method, headers, data, socket) | Make an HTTP request and return the body | Below is the instruction that describes the task:
### Input:
Make an HTTP request and return the body
### Response:
def getBody(self, url, method='GET', headers={}, data=None, socket=None):
"""Make an HTTP request and return the body
"""
if not 'User-Agent' in headers:
headers['User-Agent'] = ['Tensor HTTP checker']
return self.request(url, method, headers, data, socket) |
def requiv_contact_min(b, component, solve_for=None, **kwargs):
"""
Create a constraint to determine the critical (at L1) value of
    requiv at which a contact will underflow. This will only be used
for contacts for requiv_min
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter str component: the label of the star in which this
constraint should be built
    :parameter str solve_for: if 'requiv_min' should not be the derived/constrained
parameter, provide which other parameter should be derived
:returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments
that were passed to this function)
"""
hier = b.get_hierarchy()
if not len(hier.get_value()):
# TODO: change to custom error type to catch in bundle.add_component
# TODO: check whether the problem is 0 hierarchies or more than 1
raise NotImplementedError("constraint for requiv_contact_min requires hierarchy")
component_ps = _get_system_ps(b, component)
parentorbit = hier.get_parent_of(component)
parentorbit_ps = _get_system_ps(b, parentorbit)
requiv_min = component_ps.get_parameter(qualifier='requiv_min')
q = parentorbit_ps.get_parameter(qualifier='q')
sma = parentorbit_ps.get_parameter(qualifier='sma')
if solve_for in [None, requiv_min]:
lhs = requiv_min
rhs = roche_requiv_contact_L1(q, sma, hier.get_primary_or_secondary(component, return_ind=True))
else:
raise NotImplementedError("requiv_contact_min can only be solved for requiv_min")
return lhs, rhs, {'component': component} | Create a constraint to determine the critical (at L1) value of
    requiv at which a contact will underflow. This will only be used
for contacts for requiv_min
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter str component: the label of the star in which this
constraint should be built
    :parameter str solve_for: if 'requiv_min' should not be the derived/constrained
parameter, provide which other parameter should be derived
:returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments
    that were passed to this function) | Below is the instruction that describes the task:
### Input:
Create a constraint to determine the critical (at L1) value of
requiv at which a contact will underflow. This will only be used
for contacts for requiv_min
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter str component: the label of the star in which this
constraint should be built
:parameter str solve_for: if 'requiv_min' should not be the derived/constrained
parameter, provide which other parameter should be derived
:returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments
that were passed to this function)
### Response:
def requiv_contact_min(b, component, solve_for=None, **kwargs):
"""
Create a constraint to determine the critical (at L1) value of
    requiv at which a contact will underflow. This will only be used
for contacts for requiv_min
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter str component: the label of the star in which this
constraint should be built
    :parameter str solve_for: if 'requiv_min' should not be the derived/constrained
parameter, provide which other parameter should be derived
:returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments
that were passed to this function)
"""
hier = b.get_hierarchy()
if not len(hier.get_value()):
# TODO: change to custom error type to catch in bundle.add_component
# TODO: check whether the problem is 0 hierarchies or more than 1
raise NotImplementedError("constraint for requiv_contact_min requires hierarchy")
component_ps = _get_system_ps(b, component)
parentorbit = hier.get_parent_of(component)
parentorbit_ps = _get_system_ps(b, parentorbit)
requiv_min = component_ps.get_parameter(qualifier='requiv_min')
q = parentorbit_ps.get_parameter(qualifier='q')
sma = parentorbit_ps.get_parameter(qualifier='sma')
if solve_for in [None, requiv_min]:
lhs = requiv_min
rhs = roche_requiv_contact_L1(q, sma, hier.get_primary_or_secondary(component, return_ind=True))
else:
raise NotImplementedError("requiv_contact_min can only be solved for requiv_min")
return lhs, rhs, {'component': component} |
def authenticate(self):
"""
Authenticate against the HP Cloud Identity Service. This is the first
step in any hpcloud.com session, although this method is automatically
called when accessing higher-level methods/attributes.
**Examples of Credentials Configuration**
- Bare minimum for authentication using HP API keys:
.. code-block:: yaml
deployer_credentials:
hpcloud:
auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
tenant_name: farley.mowat-tenant1
access_key_id: MZOFIE9S83FOS248FIE3
secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo
- With multiple *compute* availability zones activated, the region must
also be specified (due to current limitations in the OpenStack client
libraries):
.. code-block:: yaml
deployer_credentials:
hpcloud:
auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
tenant_name: farley.mowat-tenant1
access_key_id: MZOFIE9S83FOS248FIE3
secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo
region_name: az-1.region-a.geo-1
- Using ``username`` and ``password`` is also allowed, but
discouraged:
.. code-block:: yaml
deployer_credentials:
hpcloud:
auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
tenant_name: farley.mowat-tenant1
username: farley.mowat
password: NeverCryW0lf
When both API keys and ``username+password`` are specified, the API
keys are used.
"""
log.info("Authenticating to HP Cloud...")
creds = self.creds
access_key_id = creds.get('access_key_id', '')
secret_access_key = creds.get('secret_access_key', '')
# prefer api key + secret key, but fallback to username + password
if access_key_id and secret_access_key:
self.nova_client.client.os_access_key_id = access_key_id
self.nova_client.client.os_secret_key = secret_access_key
self.nova_client.authenticate() | Authenticate against the HP Cloud Identity Service. This is the first
step in any hpcloud.com session, although this method is automatically
called when accessing higher-level methods/attributes.
**Examples of Credentials Configuration**
- Bare minimum for authentication using HP API keys:
.. code-block:: yaml
deployer_credentials:
hpcloud:
auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
tenant_name: farley.mowat-tenant1
access_key_id: MZOFIE9S83FOS248FIE3
secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo
- With multiple *compute* availability zones activated, the region must
also be specified (due to current limitations in the OpenStack client
libraries):
.. code-block:: yaml
deployer_credentials:
hpcloud:
auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
tenant_name: farley.mowat-tenant1
access_key_id: MZOFIE9S83FOS248FIE3
secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo
region_name: az-1.region-a.geo-1
- Using ``username`` and ``password`` is also allowed, but
discouraged:
.. code-block:: yaml
deployer_credentials:
hpcloud:
auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
tenant_name: farley.mowat-tenant1
username: farley.mowat
password: NeverCryW0lf
When both API keys and ``username+password`` are specified, the API
        keys are used. | Below is the instruction that describes the task:
### Input:
Authenticate against the HP Cloud Identity Service. This is the first
step in any hpcloud.com session, although this method is automatically
called when accessing higher-level methods/attributes.
**Examples of Credentials Configuration**
- Bare minimum for authentication using HP API keys:
.. code-block:: yaml
deployer_credentials:
hpcloud:
auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
tenant_name: farley.mowat-tenant1
access_key_id: MZOFIE9S83FOS248FIE3
secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo
- With multiple *compute* availability zones activated, the region must
also be specified (due to current limitations in the OpenStack client
libraries):
.. code-block:: yaml
deployer_credentials:
hpcloud:
auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
tenant_name: farley.mowat-tenant1
access_key_id: MZOFIE9S83FOS248FIE3
secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo
region_name: az-1.region-a.geo-1
- Using ``username`` and ``password`` is also allowed, but
discouraged:
.. code-block:: yaml
deployer_credentials:
hpcloud:
auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
tenant_name: farley.mowat-tenant1
username: farley.mowat
password: NeverCryW0lf
When both API keys and ``username+password`` are specified, the API
keys are used.
### Response:
def authenticate(self):
"""
Authenticate against the HP Cloud Identity Service. This is the first
step in any hpcloud.com session, although this method is automatically
called when accessing higher-level methods/attributes.
**Examples of Credentials Configuration**
- Bare minimum for authentication using HP API keys:
.. code-block:: yaml
deployer_credentials:
hpcloud:
auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
tenant_name: farley.mowat-tenant1
access_key_id: MZOFIE9S83FOS248FIE3
secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo
- With multiple *compute* availability zones activated, the region must
also be specified (due to current limitations in the OpenStack client
libraries):
.. code-block:: yaml
deployer_credentials:
hpcloud:
auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
tenant_name: farley.mowat-tenant1
access_key_id: MZOFIE9S83FOS248FIE3
secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo
region_name: az-1.region-a.geo-1
- Using ``username`` and ``password`` is also allowed, but
discouraged:
.. code-block:: yaml
deployer_credentials:
hpcloud:
auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
tenant_name: farley.mowat-tenant1
username: farley.mowat
password: NeverCryW0lf
When both API keys and ``username+password`` are specified, the API
keys are used.
"""
log.info("Authenticating to HP Cloud...")
creds = self.creds
access_key_id = creds.get('access_key_id', '')
secret_access_key = creds.get('secret_access_key', '')
# prefer api key + secret key, but fallback to username + password
if access_key_id and secret_access_key:
self.nova_client.client.os_access_key_id = access_key_id
self.nova_client.client.os_secret_key = secret_access_key
self.nova_client.authenticate() |
def frombed(args):
"""
%prog frombed bedfile contigfasta readfasta
Convert read placement to contig format. This is useful before running BAMBUS.
"""
from jcvi.formats.fasta import Fasta
from jcvi.formats.bed import Bed
from jcvi.utils.cbook import fill
p = OptionParser(frombed.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
bedfile, contigfasta, readfasta = args
prefix = bedfile.rsplit(".", 1)[0]
contigfile = prefix + ".contig"
idsfile = prefix + ".ids"
contigfasta = Fasta(contigfasta)
readfasta = Fasta(readfasta)
bed = Bed(bedfile)
checksum = "00000000 checksum."
fw_ids = open(idsfile, "w")
fw = open(contigfile, "w")
for ctg, reads in bed.sub_beds():
ctgseq = contigfasta[ctg]
ctgline = "##{0} {1} {2} bases, {3}".format(\
ctg, len(reads), len(ctgseq), checksum)
print(ctg, file=fw_ids)
print(ctgline, file=fw)
print(fill(ctgseq.seq), file=fw)
for b in reads:
read = b.accn
strand = b.strand
readseq = readfasta[read]
rc = " [RC]" if strand == "-" else ""
readlen = len(readseq)
rstart, rend = 1, readlen
if strand == "-":
rstart, rend = rend, rstart
readrange = "{{{0} {1}}}".format(rstart, rend)
conrange = "<{0} {1}>".format(b.start, b.end)
readline = "#{0}(0){1} {2} bases, {3} {4} {5}".format(\
read, rc, readlen, checksum, readrange, conrange)
print(readline, file=fw)
print(fill(readseq.seq), file=fw)
logging.debug("Mapped contigs written to `{0}`.".format(contigfile))
logging.debug("Contig IDs written to `{0}`.".format(idsfile)) | %prog frombed bedfile contigfasta readfasta
    Convert read placement to contig format. This is useful before running BAMBUS. | Below is the instruction that describes the task:
### Input:
%prog frombed bedfile contigfasta readfasta
Convert read placement to contig format. This is useful before running BAMBUS.
### Response:
def frombed(args):
"""
%prog frombed bedfile contigfasta readfasta
Convert read placement to contig format. This is useful before running BAMBUS.
"""
from jcvi.formats.fasta import Fasta
from jcvi.formats.bed import Bed
from jcvi.utils.cbook import fill
p = OptionParser(frombed.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
bedfile, contigfasta, readfasta = args
prefix = bedfile.rsplit(".", 1)[0]
contigfile = prefix + ".contig"
idsfile = prefix + ".ids"
contigfasta = Fasta(contigfasta)
readfasta = Fasta(readfasta)
bed = Bed(bedfile)
checksum = "00000000 checksum."
fw_ids = open(idsfile, "w")
fw = open(contigfile, "w")
for ctg, reads in bed.sub_beds():
ctgseq = contigfasta[ctg]
ctgline = "##{0} {1} {2} bases, {3}".format(\
ctg, len(reads), len(ctgseq), checksum)
print(ctg, file=fw_ids)
print(ctgline, file=fw)
print(fill(ctgseq.seq), file=fw)
for b in reads:
read = b.accn
strand = b.strand
readseq = readfasta[read]
rc = " [RC]" if strand == "-" else ""
readlen = len(readseq)
rstart, rend = 1, readlen
if strand == "-":
rstart, rend = rend, rstart
readrange = "{{{0} {1}}}".format(rstart, rend)
conrange = "<{0} {1}>".format(b.start, b.end)
readline = "#{0}(0){1} {2} bases, {3} {4} {5}".format(\
read, rc, readlen, checksum, readrange, conrange)
print(readline, file=fw)
print(fill(readseq.seq), file=fw)
logging.debug("Mapped contigs written to `{0}`.".format(contigfile))
logging.debug("Contig IDs written to `{0}`.".format(idsfile)) |
def is_instance_of(self, some_class):
"""Asserts that val is an instance of the given class."""
try:
if not isinstance(self.val, some_class):
if hasattr(self.val, '__name__'):
t = self.val.__name__
elif hasattr(self.val, '__class__'):
t = self.val.__class__.__name__
else:
t = 'unknown'
self._err('Expected <%s:%s> to be instance of class <%s>, but was not.' % (self.val, t, some_class.__name__))
except TypeError:
raise TypeError('given arg must be a class')
return self | Asserts that val is an instance of the given class. | Below is the instruction that describes the task:
### Input:
Asserts that val is an instance of the given class.
### Response:
def is_instance_of(self, some_class):
"""Asserts that val is an instance of the given class."""
try:
if not isinstance(self.val, some_class):
if hasattr(self.val, '__name__'):
t = self.val.__name__
elif hasattr(self.val, '__class__'):
t = self.val.__class__.__name__
else:
t = 'unknown'
self._err('Expected <%s:%s> to be instance of class <%s>, but was not.' % (self.val, t, some_class.__name__))
except TypeError:
raise TypeError('given arg must be a class')
return self |
def _set_autobw_threshold_table_summary(self, v, load=False):
"""
Setter method for autobw_threshold_table_summary, mapped from YANG variable /mpls_state/autobw_threshold_table_summary (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_autobw_threshold_table_summary is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_autobw_threshold_table_summary() directly.
YANG Description: MPLS Auto Bandwidth Threshold TableSummary
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=autobw_threshold_table_summary.autobw_threshold_table_summary, is_container='container', presence=False, yang_name="autobw-threshold-table-summary", rest_name="autobw-threshold-table-summary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-autobw-threshold-table-summary', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """autobw_threshold_table_summary must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=autobw_threshold_table_summary.autobw_threshold_table_summary, is_container='container', presence=False, yang_name="autobw-threshold-table-summary", rest_name="autobw-threshold-table-summary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-autobw-threshold-table-summary', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__autobw_threshold_table_summary = t
if hasattr(self, '_set'):
self._set() | Setter method for autobw_threshold_table_summary, mapped from YANG variable /mpls_state/autobw_threshold_table_summary (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_autobw_threshold_table_summary is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_autobw_threshold_table_summary() directly.
YANG Description: MPLS Auto Bandwidth Threshold TableSummary | Below is the instruction that describes the task:
### Input:
Setter method for autobw_threshold_table_summary, mapped from YANG variable /mpls_state/autobw_threshold_table_summary (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_autobw_threshold_table_summary is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_autobw_threshold_table_summary() directly.
YANG Description: MPLS Auto Bandwidth Threshold TableSummary
### Response:
def _set_autobw_threshold_table_summary(self, v, load=False):
"""
Setter method for autobw_threshold_table_summary, mapped from YANG variable /mpls_state/autobw_threshold_table_summary (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_autobw_threshold_table_summary is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_autobw_threshold_table_summary() directly.
YANG Description: MPLS Auto Bandwidth Threshold TableSummary
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=autobw_threshold_table_summary.autobw_threshold_table_summary, is_container='container', presence=False, yang_name="autobw-threshold-table-summary", rest_name="autobw-threshold-table-summary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-autobw-threshold-table-summary', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """autobw_threshold_table_summary must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=autobw_threshold_table_summary.autobw_threshold_table_summary, is_container='container', presence=False, yang_name="autobw-threshold-table-summary", rest_name="autobw-threshold-table-summary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-autobw-threshold-table-summary', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__autobw_threshold_table_summary = t
if hasattr(self, '_set'):
self._set() |
def get_token(self, hash):
"""
Looks up a token by hash
Args:
hash (UInt160): The token to look up
Returns:
SmartContractEvent: A smart contract event with a contract that is an NEP5 Token
"""
tokens_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_TOKEN).snapshot()
try:
val = tokens_snapshot.get(hash.ToBytes())
if val:
event = SmartContractEvent.FromByteArray(val)
return event
except Exception as e:
logger.error("Smart contract event with contract hash %s not found: %s " % (hash.ToString(), e))
return None | Looks up a token by hash
Args:
hash (UInt160): The token to look up
Returns:
SmartContractEvent: A smart contract event with a contract that is an NEP5 Token | Below is the instruction that describes the task:
### Input:
Looks up a token by hash
Args:
hash (UInt160): The token to look up
Returns:
SmartContractEvent: A smart contract event with a contract that is an NEP5 Token
### Response:
def get_token(self, hash):
"""
Looks up a token by hash
Args:
hash (UInt160): The token to look up
Returns:
SmartContractEvent: A smart contract event with a contract that is an NEP5 Token
"""
tokens_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_TOKEN).snapshot()
try:
val = tokens_snapshot.get(hash.ToBytes())
if val:
event = SmartContractEvent.FromByteArray(val)
return event
except Exception as e:
logger.error("Smart contract event with contract hash %s not found: %s " % (hash.ToString(), e))
return None |
def _AddEvent(self, event):
"""Adds an event.
Args:
event (EventObject): event.
"""
if hasattr(event, 'event_data_row_identifier'):
event_data_identifier = identifiers.SQLTableIdentifier(
self._CONTAINER_TYPE_EVENT_DATA,
event.event_data_row_identifier)
lookup_key = event_data_identifier.CopyToString()
event_data_identifier = self._event_data_identifier_mappings[lookup_key]
event.SetEventDataIdentifier(event_data_identifier)
# TODO: add event identifier mappings for event tags.
self._storage_writer.AddEvent(event) | Adds an event.
Args:
event (EventObject): event. | Below is the instruction that describes the task:
### Input:
Adds an event.
Args:
event (EventObject): event.
### Response:
def _AddEvent(self, event):
"""Adds an event.
Args:
event (EventObject): event.
"""
if hasattr(event, 'event_data_row_identifier'):
event_data_identifier = identifiers.SQLTableIdentifier(
self._CONTAINER_TYPE_EVENT_DATA,
event.event_data_row_identifier)
lookup_key = event_data_identifier.CopyToString()
event_data_identifier = self._event_data_identifier_mappings[lookup_key]
event.SetEventDataIdentifier(event_data_identifier)
# TODO: add event identifier mappings for event tags.
self._storage_writer.AddEvent(event) |
async def traverse(self, func):
"""
Traverses an async function or generator, yielding each result.
This function is private. The class should be used as an iterator instead of using this method.
"""
# this allows the reference to be stolen
async_executor = self
if inspect.isasyncgenfunction(func):
async for result in func(*async_executor.args):
yield result
else:
yield await func(*async_executor.args) | Traverses an async function or generator, yielding each result.
This function is private. The class should be used as an iterator instead of using this method. | Below is the instruction that describes the task:
### Input:
Traverses an async function or generator, yielding each result.
This function is private. The class should be used as an iterator instead of using this method.
### Response:
async def traverse(self, func):
"""
Traverses an async function or generator, yielding each result.
This function is private. The class should be used as an iterator instead of using this method.
"""
# this allows the reference to be stolen
async_executor = self
if inspect.isasyncgenfunction(func):
async for result in func(*async_executor.args):
yield result
else:
yield await func(*async_executor.args) |
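The branching above hinges on inspect.isasyncgenfunction(), which is true for async generator functions but not for plain coroutine functions. A minimal self-contained sketch of that distinction (illustrative only, independent of the class above):

import asyncio
import inspect

async def one_shot(x):
    # plain coroutine function: a single awaitable result
    return x * 2

async def streaming(x):
    # async generator function: yields several results
    for i in range(x):
        yield i

async def consume(func, *args):
    results = []
    if inspect.isasyncgenfunction(func):
        async for item in func(*args):
            results.append(item)
    else:
        results.append(await func(*args))
    return results

print(asyncio.run(consume(one_shot, 3)))   # [6]
print(asyncio.run(consume(streaming, 3)))  # [0, 1, 2]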
def load_texture(self, texture_version):
'''
Expect a texture version number as an integer, load the texture version from /is/ps/shared/data/body/template/texture_coordinates/.
Currently there are versions [0, 1, 2, 3] available.
'''
import numpy as np
lowres_tex_template = 's3://bodylabs-korper-assets/is/ps/shared/data/body/template/texture_coordinates/textured_template_low_v%d.obj' % texture_version
highres_tex_template = 's3://bodylabs-korper-assets/is/ps/shared/data/body/template/texture_coordinates/textured_template_high_v%d.obj' % texture_version
from lace.mesh import Mesh
from lace.cache import sc
mesh_with_texture = Mesh(filename=sc(lowres_tex_template))
if not np.all(mesh_with_texture.f.shape == self.f.shape):
mesh_with_texture = Mesh(filename=sc(highres_tex_template))
self.transfer_texture(mesh_with_texture) | Expect a texture version number as an integer, load the texture version from /is/ps/shared/data/body/template/texture_coordinates/.
Currently there are versions [0, 1, 2, 3] available. | Below is the instruction that describes the task:
### Input:
Expect a texture version number as an integer, load the texture version from /is/ps/shared/data/body/template/texture_coordinates/.
Currently there are versions [0, 1, 2, 3] available.
### Response:
def load_texture(self, texture_version):
'''
Expect a texture version number as an integer, load the texture version from /is/ps/shared/data/body/template/texture_coordinates/.
Currently there are versions [0, 1, 2, 3] available.
'''
import numpy as np
lowres_tex_template = 's3://bodylabs-korper-assets/is/ps/shared/data/body/template/texture_coordinates/textured_template_low_v%d.obj' % texture_version
highres_tex_template = 's3://bodylabs-korper-assets/is/ps/shared/data/body/template/texture_coordinates/textured_template_high_v%d.obj' % texture_version
from lace.mesh import Mesh
from lace.cache import sc
mesh_with_texture = Mesh(filename=sc(lowres_tex_template))
if not np.all(mesh_with_texture.f.shape == self.f.shape):
mesh_with_texture = Mesh(filename=sc(highres_tex_template))
self.transfer_texture(mesh_with_texture) |
def list(args):
"""Lists the jobs in the given database."""
jm = setup(args)
jm.list(job_ids=get_ids(args.job_ids), print_array_jobs=args.print_array_jobs, print_dependencies=args.print_dependencies, status=args.status, long=args.long, print_times=args.print_times, ids_only=args.ids_only, names=args.names) | Lists the jobs in the given database. | Below is the instruction that describes the task:
### Input:
Lists the jobs in the given database.
### Response:
def list(args):
"""Lists the jobs in the given database."""
jm = setup(args)
jm.list(job_ids=get_ids(args.job_ids), print_array_jobs=args.print_array_jobs, print_dependencies=args.print_dependencies, status=args.status, long=args.long, print_times=args.print_times, ids_only=args.ids_only, names=args.names) |
def do_lzop_get(creds, url, path, decrypt, do_retry):
"""
Get and decompress a URL
This streams the content directly to lzop; the compressed version
is never stored on disk.
"""
assert url.endswith('.lzo'), 'Expect an lzop-compressed file'
with files.DeleteOnError(path) as decomp_out:
key = _uri_to_key(creds, url)
with get_download_pipeline(PIPE, decomp_out.f, decrypt) as pl:
g = gevent.spawn(write_and_return_error, key, pl.stdin)
exc = g.get()
if exc is not None:
raise exc
logger.info(
msg='completed download and decompression',
detail='Downloaded and decompressed "{url}" to "{path}"'
.format(url=url, path=path))
return True | Get and decompress a URL
This streams the content directly to lzop; the compressed version
is never stored on disk. | Below is the instruction that describes the task:
### Input:
Get and decompress a URL
This streams the content directly to lzop; the compressed version
is never stored on disk.
### Response:
def do_lzop_get(creds, url, path, decrypt, do_retry):
"""
Get and decompress a URL
This streams the content directly to lzop; the compressed version
is never stored on disk.
"""
assert url.endswith('.lzo'), 'Expect an lzop-compressed file'
with files.DeleteOnError(path) as decomp_out:
key = _uri_to_key(creds, url)
with get_download_pipeline(PIPE, decomp_out.f, decrypt) as pl:
g = gevent.spawn(write_and_return_error, key, pl.stdin)
exc = g.get()
if exc is not None:
raise exc
logger.info(
msg='completed download and decompression',
detail='Downloaded and decompressed "{url}" to "{path}"'
.format(url=url, path=path))
return True |
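As a rough stand-alone sketch of the same idea (not the wal-e implementation; the decompressor command and the chunk source are placeholders), a compressed stream can be piped straight into a decompressor's stdin so nothing compressed is staged on disk:

import subprocess

def decompress_stream(chunks, out_path, cmd=('gzip', '-d', '-c')):
    # feed an iterable of byte chunks through `cmd`, writing plain bytes to out_path
    with open(out_path, 'wb') as out:
        proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=out)
        for chunk in chunks:
            proc.stdin.write(chunk)
        proc.stdin.close()
        if proc.wait() != 0:
            raise RuntimeError('decompressor exited with %d' % proc.returncode)

# usage idea: decompress_stream(stream_from_storage(url), '/tmp/restore.out')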
def split_by(self, layer, sep=' '):
"""Split the text into multiple instances defined by elements of given layer.
The spans for layer elements are extracted and fed to :py:meth:`~estnltk.text.Text.split_given_spans`
method.
Parameters
----------
layer: str
String determining the layer that is used to define the start and end positions of resulting splits.
sep: str (default: ' ')
The separator to use to join texts of multilayer elements.
Returns
-------
list of Text
"""
if not self.is_tagged(layer):
self.tag(layer)
return self.split_given_spans(self.spans(layer), sep=sep) | Split the text into multiple instances defined by elements of given layer.
The spans for layer elements are extracted and fed to :py:meth:`~estnltk.text.Text.split_given_spans`
method.
Parameters
----------
layer: str
String determining the layer that is used to define the start and end positions of resulting splits.
sep: str (default: ' ')
The separator to use to join texts of multilayer elements.
Returns
-------
list of Text | Below is the instruction that describes the task:
### Input:
Split the text into multiple instances defined by elements of given layer.
The spans for layer elements are extracted and fed to :py:meth:`~estnltk.text.Text.split_given_spans`
method.
Parameters
----------
layer: str
String determining the layer that is used to define the start and end positions of resulting splits.
sep: str (default: ' ')
The separator to use to join texts of multilayer elements.
Returns
-------
list of Text
### Response:
def split_by(self, layer, sep=' '):
"""Split the text into multiple instances defined by elements of given layer.
The spans for layer elements are extracted and fed to :py:meth:`~estnltk.text.Text.split_given_spans`
method.
Parameters
----------
layer: str
String determining the layer that is used to define the start and end positions of resulting splits.
sep: str (default: ' ')
The separator to use to join texts of multilayer elements.
Returns
-------
list of Text
"""
if not self.is_tagged(layer):
self.tag(layer)
return self.split_given_spans(self.spans(layer), sep=sep) |
def locate_profile(profile='default'):
"""Find the path to the folder associated with a given profile.
I.e. find $IPYTHONDIR/profile_whatever.
"""
from IPython.core.profiledir import ProfileDir, ProfileDirError
try:
pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile)
except ProfileDirError:
# IOError makes more sense when people are expecting a path
raise IOError("Couldn't find profile %r" % profile)
return pd.location | Find the path to the folder associated with a given profile.
I.e. find $IPYTHONDIR/profile_whatever. | Below is the instruction that describes the task:
### Input:
Find the path to the folder associated with a given profile.
I.e. find $IPYTHONDIR/profile_whatever.
### Response:
def locate_profile(profile='default'):
"""Find the path to the folder associated with a given profile.
I.e. find $IPYTHONDIR/profile_whatever.
"""
from IPython.core.profiledir import ProfileDir, ProfileDirError
try:
pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile)
except ProfileDirError:
# IOError makes more sense when people are expecting a path
raise IOError("Couldn't find profile %r" % profile)
return pd.location |
def zone_helper(zone):
"""
Zone finder by name. If zone doesn't exist, create it and
return the href
:param str zone: name of zone (if href, will be returned as is)
:return str href: href of zone
"""
if zone is None:
return None
elif isinstance(zone, Zone):
return zone.href
elif zone.startswith('http'):
return zone
return Zone.get_or_create(name=zone).href | Zone finder by name. If zone doesn't exist, create it and
return the href
:param str zone: name of zone (if href, will be returned as is)
:return str href: href of zone | Below is the instruction that describes the task:
### Input:
Zone finder by name. If zone doesn't exist, create it and
return the href
:param str zone: name of zone (if href, will be returned as is)
:return str href: href of zone
### Response:
def zone_helper(zone):
"""
Zone finder by name. If zone doesn't exist, create it and
return the href
:param str zone: name of zone (if href, will be returned as is)
:return str href: href of zone
"""
if zone is None:
return None
elif isinstance(zone, Zone):
return zone.href
elif zone.startswith('http'):
return zone
return Zone.get_or_create(name=zone).href |
def stop_scan(self):
"""Stop to scan."""
try:
self.bable.stop_scan(sync=True)
except bable_interface.BaBLEException:
# If we errored out it is because we were not currently scanning
pass
self.scanning = False | Stop scanning. | Below is the instruction that describes the task:
### Input:
Stop scanning.
### Response:
def stop_scan(self):
"""Stop to scan."""
try:
self.bable.stop_scan(sync=True)
except bable_interface.BaBLEException:
# If we errored out it is because we were not currently scanning
pass
self.scanning = False |
def build_requirements(docs_path, package_name="yacms"):
"""
Updates the requirements file with yacms's version number.
"""
mezz_string = "yacms=="
project_path = os.path.join(docs_path, "..")
requirements_file = os.path.join(project_path, package_name,
"project_template", "requirements.txt")
with open(requirements_file, "r") as f:
requirements = f.readlines()
with open(requirements_file, "w") as f:
f.write("yacms==%s\n" % __version__)
for requirement in requirements:
if requirement.strip() and not requirement.startswith(mezz_string):
f.write(requirement) | Updates the requirements file with yacms's version number. | Below is the instruction that describes the task:
### Input:
Updates the requirements file with yacms's version number.
### Response:
def build_requirements(docs_path, package_name="yacms"):
"""
Updates the requirements file with yacms's version number.
"""
mezz_string = "yacms=="
project_path = os.path.join(docs_path, "..")
requirements_file = os.path.join(project_path, package_name,
"project_template", "requirements.txt")
with open(requirements_file, "r") as f:
requirements = f.readlines()
with open(requirements_file, "w") as f:
f.write("yacms==%s\n" % __version__)
for requirement in requirements:
if requirement.strip() and not requirement.startswith(mezz_string):
f.write(requirement) |
def forward(self, X):
"""Forward function.
:param X: The input (batch) of the model contains word sequences for lstm,
features and feature weights.
:type X: For word sequences: a list of torch.Tensor pair (word sequence
and word mask) of shape (batch_size, sequence_length).
For features: torch.Tensor of shape (batch_size, sparse_feature_size).
For feature weights: torch.Tensor of shape
(batch_size, sparse_feature_size).
:return: The output of LSTM layer.
:rtype: torch.Tensor of shape (batch_size, num_classes)
"""
s = X[:-2]
f = X[-2]
w = X[-1]
batch_size = len(f)
# Generate lstm weight indices
x_idx = self._cuda(
torch.as_tensor(np.arange(1, self.settings["lstm_dim"] + 1)).repeat(
batch_size, 1
)
)
outputs = self._cuda(torch.Tensor([]))
# Calculate textual features from LSTMs
for i in range(len(s)):
state_word = self.lstms[0].init_hidden(batch_size)
output = self.lstms[0].forward(s[i][0], s[i][1], state_word)
outputs = torch.cat((outputs, output), 1)
# Concatenate textual features with multi-modal features
features = torch.cat((x_idx, f), 1)
weights = torch.cat((outputs, w), 1)
return self.sparse_linear(features, weights)
:param X: The input (batch) of the model contains word sequences for lstm,
features and feature weights.
:type X: For word sequences: a list of torch.Tensor pair (word sequence
and word mask) of shape (batch_size, sequence_length).
For features: torch.Tensor of shape (batch_size, sparse_feature_size).
For feature weights: torch.Tensor of shape
(batch_size, sparse_feature_size).
:return: The output of LSTM layer.
:rtype: torch.Tensor of shape (batch_size, num_classes) | Below is the instruction that describes the task:
### Input:
Forward function.
:param X: The input (batch) of the model contains word sequences for lstm,
features and feature weights.
:type X: For word sequences: a list of torch.Tensor pair (word sequence
and word mask) of shape (batch_size, sequence_length).
For features: torch.Tensor of shape (batch_size, sparse_feature_size).
For feature weights: torch.Tensor of shape
(batch_size, sparse_feature_size).
:return: The output of LSTM layer.
:rtype: torch.Tensor of shape (batch_size, num_classes)
### Response:
def forward(self, X):
"""Forward function.
:param X: The input (batch) of the model contains word sequences for lstm,
features and feature weights.
:type X: For word sequences: a list of torch.Tensor pair (word sequence
and word mask) of shape (batch_size, sequence_length).
For features: torch.Tensor of shape (batch_size, sparse_feature_size).
For feature weights: torch.Tensor of shape
(batch_size, sparse_feature_size).
:return: The output of LSTM layer.
:rtype: torch.Tensor of shape (batch_size, num_classes)
"""
s = X[:-2]
f = X[-2]
w = X[-1]
batch_size = len(f)
# Generate lstm weight indices
x_idx = self._cuda(
torch.as_tensor(np.arange(1, self.settings["lstm_dim"] + 1)).repeat(
batch_size, 1
)
)
outputs = self._cuda(torch.Tensor([]))
# Calculate textual features from LSTMs
for i in range(len(s)):
state_word = self.lstms[0].init_hidden(batch_size)
output = self.lstms[0].forward(s[i][0], s[i][1], state_word)
outputs = torch.cat((outputs, output), 1)
# Concatenate textual features with multi-modal features
features = torch.cat((x_idx, f), 1)
weights = torch.cat((outputs, w), 1)
return self.sparse_linear(features, weights)
def info(self):
""" Print header information and other derived information. """
print("\n--- File Info ---")
for key, val in self.file_header.items():
if key == 'src_raj':
val = val.to_string(unit=u.hour, sep=':')
if key == 'src_dej':
val = val.to_string(unit=u.deg, sep=':')
print("%16s : %32s" % (key, val))
print("\n%16s : %32s" % ("Num ints in file", self.n_ints_in_file))
print("%16s : %32s" % ("File shape", self.file_shape))
print("--- Selection Info ---")
print("%16s : %32s" % ("Data selection shape", self.selection_shape))
print("%16s : %32s" % ("Minimum freq (MHz)", self.container.f_start))
print("%16s : %32s" % ("Maximum freq (MHz)", self.container.f_stop)) | Print header information and other derived information. | Below is the the instruction that describes the task:
### Input:
Print header information and other derived information.
### Response:
def info(self):
""" Print header information and other derived information. """
print("\n--- File Info ---")
for key, val in self.file_header.items():
if key == 'src_raj':
val = val.to_string(unit=u.hour, sep=':')
if key == 'src_dej':
val = val.to_string(unit=u.deg, sep=':')
print("%16s : %32s" % (key, val))
print("\n%16s : %32s" % ("Num ints in file", self.n_ints_in_file))
print("%16s : %32s" % ("File shape", self.file_shape))
print("--- Selection Info ---")
print("%16s : %32s" % ("Data selection shape", self.selection_shape))
print("%16s : %32s" % ("Minimum freq (MHz)", self.container.f_start))
print("%16s : %32s" % ("Maximum freq (MHz)", self.container.f_stop)) |
def get_page_of_iterator(iterator, page_size, page_number):
"""
Get a page from an iterator, handling invalid input from the page number
by defaulting to the first page.
"""
try:
page_number = validate_page_number(page_number)
except (PageNotAnInteger, EmptyPage):
page_number = 1
start = (page_number - 1) * page_size
# End 1 more than we need, so that we can see if there's another page
end = (page_number * page_size) + 1
skipped_items = list(islice(iterator, start))
items = list(islice(iterator, end))
if len(items) == 0 and page_number != 1:
items = skipped_items
page_number = 1
has_next = len(items) > page_size
items = items[:page_size]
return NoCountPage(items, page_number, page_size, has_next) | Get a page from an iterator, handling invalid input from the page number
by defaulting to the first page. | Below is the instruction that describes the task:
### Input:
Get a page from an iterator, handling invalid input from the page number
by defaulting to the first page.
### Response:
def get_page_of_iterator(iterator, page_size, page_number):
"""
Get a page from an iterator, handling invalid input from the page number
by defaulting to the first page.
"""
try:
page_number = validate_page_number(page_number)
except (PageNotAnInteger, EmptyPage):
page_number = 1
start = (page_number - 1) * page_size
# End 1 more than we need, so that we can see if there's another page
end = (page_number * page_size) + 1
skipped_items = list(islice(iterator, start))
items = list(islice(iterator, end))
if len(items) == 0 and page_number != 1:
items = skipped_items
page_number = 1
has_next = len(items) > page_size
items = items[:page_size]
return NoCountPage(items, page_number, page_size, has_next) |
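The has_next trick above amounts to fetching one item more than a page holds; a self-contained sketch makes the mechanics easy to test (the namedtuple below is a stand-in for the project's NoCountPage, not its real definition):

from collections import namedtuple
from itertools import islice

Page = namedtuple('Page', 'items number size has_next')

def page_of(iterator, page_size, page_number):
    start = (page_number - 1) * page_size
    list(islice(iterator, start))                   # skip earlier pages
    items = list(islice(iterator, page_size + 1))   # one extra to peek ahead
    return Page(items[:page_size], page_number, page_size, len(items) > page_size)

print(page_of(iter(range(23)), 10, 2))  # items 10..19, has_next=True
print(page_of(iter(range(23)), 10, 3))  # items 20..22, has_next=False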
def mm_top1(
n_items, data, initial_params=None, alpha=0.0,
max_iter=10000, tol=1e-8):
"""Compute the ML estimate of model parameters using the MM algorithm.
This function computes the maximum-likelihood (ML) estimate of model
parameters given top-1 data (see :ref:`data-top1`), using the
minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for
details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
return _mm(n_items, data, initial_params, alpha, max_iter, tol, _mm_top1) | Compute the ML estimate of model parameters using the MM algorithm.
This function computes the maximum-likelihood (ML) estimate of model
parameters given top-1 data (see :ref:`data-top1`), using the
minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for
details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters. | Below is the instruction that describes the task:
### Input:
Compute the ML estimate of model parameters using the MM algorithm.
This function computes the maximum-likelihood (ML) estimate of model
parameters given top-1 data (see :ref:`data-top1`), using the
minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for
details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
### Response:
def mm_top1(
n_items, data, initial_params=None, alpha=0.0,
max_iter=10000, tol=1e-8):
"""Compute the ML estimate of model parameters using the MM algorithm.
This function computes the maximum-likelihood (ML) estimate of model
parameters given top-1 data (see :ref:`data-top1`), using the
minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for
details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Top-1 data.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
return _mm(n_items, data, initial_params, alpha, max_iter, tol, _mm_top1) |
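The helpers _mm and _mm_top1 are not shown here; as an illustration of the minorization-maximization update the docstring cites (Hunter 2004), the following self-contained numpy sketch runs repeated MM steps for Plackett-Luce choice data. The (winner, alternatives) tuple layout and the rescaling are assumptions made for the example, not the library's exact conventions:

import numpy as np

def mm_step(params, data, alpha=0.0):
    # one MM update; each observation is (winner, list of alternatives containing the winner)
    wins = np.full(len(params), alpha)
    denom = np.full(len(params), alpha)
    for winner, alternatives in data:
        total = params[list(alternatives)].sum()
        wins[winner] += 1.0
        for item in alternatives:
            denom[item] += 1.0 / total
    new = wins / denom
    return new * (len(new) / new.sum())   # keep a fixed overall scale

params = np.ones(3)
data = [(0, [0, 1]), (0, [0, 2]), (1, [0, 1, 2])]
for _ in range(50):
    params = mm_step(params, data)
print(params)  # item 0, with two wins out of three choices, ends up with the largest strength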
def update_issue_remote_link_by_id(self, issue_key, link_id, url, title, global_id=None, relationship=None):
"""
Update existing Remote Link on Issue
:param issue_key: str
:param link_id: str
:param url: str
:param title: str
:param global_id: str, OPTIONAL:
:param relationship: str, Optional. Default by built-in method: 'Web Link'
"""
data = {'object': {'url': url, 'title': title}}
if global_id:
data['globalId'] = global_id
if relationship:
data['relationship'] = relationship
url = 'rest/api/2/issue/{issue_key}/remotelink/{link_id}'.format(issue_key=issue_key, link_id=link_id)
return self.put(url, data=data) | Update existing Remote Link on Issue
:param issue_key: str
:param link_id: str
:param url: str
:param title: str
:param global_id: str, OPTIONAL:
:param relationship: str, Optional. Default by built-in method: 'Web Link' | Below is the instruction that describes the task:
### Input:
Update existing Remote Link on Issue
:param issue_key: str
:param link_id: str
:param url: str
:param title: str
:param global_id: str, OPTIONAL:
:param relationship: str, Optional. Default by built-in method: 'Web Link'
### Response:
def update_issue_remote_link_by_id(self, issue_key, link_id, url, title, global_id=None, relationship=None):
"""
Update existing Remote Link on Issue
:param issue_key: str
:param link_id: str
:param url: str
:param title: str
:param global_id: str, OPTIONAL:
:param relationship: str, Optional. Default by built-in method: 'Web Link'
"""
data = {'object': {'url': url, 'title': title}}
if global_id:
data['globalId'] = global_id
if relationship:
data['relationship'] = relationship
url = 'rest/api/2/issue/{issue_key}/remotelink/{link_id}'.format(issue_key=issue_key, link_id=link_id)
return self.put(url, data=data) |
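The call above goes through the class's own self.put wrapper; as a rough stand-alone illustration with the requests library (the base URL, credentials, issue key and link id below are placeholders, and the payload mirrors the structure built above), the same update might look like:

import requests

base = 'https://jira.example.com'          # placeholder instance
issue_key, link_id = 'PROJ-123', '10001'   # placeholder identifiers

payload = {
    'object': {'url': 'https://docs.example.com/spec', 'title': 'Design spec'},
    'relationship': 'Web Link',            # 'globalId' can be added the same way
}

resp = requests.put(
    '{0}/rest/api/2/issue/{1}/remotelink/{2}'.format(base, issue_key, link_id),
    json=payload,
    auth=('user', 'api-token'),
)
resp.raise_for_status()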
def update_reach_number_data(self):
"""
Update the reach number data for the namelist based on input files.
.. warning:: You need to make sure you set *rapid_connect_file*
and *riv_bas_id_file* before running this function.
Example:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
)
rapid_manager.update_reach_number_data()
Example with forcing data:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
Qfor_file='../rapid-io/input/qfor_file.csv',
for_tot_id_file='../rapid-io/input/for_tot_id_file.csv',
for_use_id_file='../rapid-io/input/for_use_id_file.csv',
ZS_dtF=3*60*60,
BS_opt_for=True
)
rapid_manager.update_reach_number_data()
"""
if not self.rapid_connect_file:
log("Missing rapid_connect_file. "
"Please set before running this function ...",
"ERROR")
if not self.riv_bas_id_file:
log("Missing riv_bas_id_file. "
"Please set before running this function ...",
"ERROR")
# get rapid connect info
rapid_connect_table = np.loadtxt(self.rapid_connect_file,
ndmin=2, delimiter=",", dtype=int)
self.IS_riv_tot = int(rapid_connect_table.shape[0])
self.IS_max_up = int(rapid_connect_table[:, 2].max())
# get riv_bas_id info
riv_bas_id_table = np.loadtxt(self.riv_bas_id_file,
ndmin=1, delimiter=",",
usecols=(0,), dtype=int)
self.IS_riv_bas = int(riv_bas_id_table.size)
# add the forcing files
if not self.for_tot_id_file:
self.IS_for_tot = 0
log("Missing for_tot_id_file. Skipping ...",
"WARNING")
else:
# get riv_bas_id info
for_tot_id_table = np.loadtxt(self.for_tot_id_file,
ndmin=1, delimiter=",",
usecols=(0,), dtype=int)
self.IS_for_tot = int(for_tot_id_table.size)
if not self.for_use_id_file:
self.IS_for_use = 0
log("Missing for_use_id_file. Skipping ...",
"WARNING")
else:
# get riv_bas_id info
for_use_id_table = np.loadtxt(self.for_use_id_file,
ndmin=1, delimiter=",",
usecols=(0,), dtype=int)
self.IS_for_use = int(for_use_id_table.size) | Update the reach number data for the namelist based on input files.
.. warning:: You need to make sure you set *rapid_connect_file*
and *riv_bas_id_file* before running this function.
Example:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
)
rapid_manager.update_reach_number_data()
Example with forcing data:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
Qfor_file='../rapid-io/input/qfor_file.csv',
for_tot_id_file='../rapid-io/input/for_tot_id_file.csv',
for_use_id_file='../rapid-io/input/for_use_id_file.csv',
ZS_dtF=3*60*60,
BS_opt_for=True
)
rapid_manager.update_reach_number_data() | Below is the instruction that describes the task:
### Input:
Update the reach number data for the namelist based on input files.
.. warning:: You need to make sure you set *rapid_connect_file*
and *riv_bas_id_file* before running this function.
Example:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
)
rapid_manager.update_reach_number_data()
Example with forcing data:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
Qfor_file='../rapid-io/input/qfor_file.csv',
for_tot_id_file='../rapid-io/input/for_tot_id_file.csv',
for_use_id_file='../rapid-io/input/for_use_id_file.csv',
ZS_dtF=3*60*60,
BS_opt_for=True
)
rapid_manager.update_reach_number_data()
### Response:
def update_reach_number_data(self):
"""
Update the reach number data for the namelist based on input files.
.. warning:: You need to make sure you set *rapid_connect_file*
and *riv_bas_id_file* before running this function.
Example:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
)
rapid_manager.update_reach_number_data()
Example with forcing data:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
Qfor_file='../rapid-io/input/qfor_file.csv',
for_tot_id_file='../rapid-io/input/for_tot_id_file.csv',
for_use_id_file='../rapid-io/input/for_use_id_file.csv',
ZS_dtF=3*60*60,
BS_opt_for=True
)
rapid_manager.update_reach_number_data()
"""
if not self.rapid_connect_file:
log("Missing rapid_connect_file. "
"Please set before running this function ...",
"ERROR")
if not self.riv_bas_id_file:
log("Missing riv_bas_id_file. "
"Please set before running this function ...",
"ERROR")
# get rapid connect info
rapid_connect_table = np.loadtxt(self.rapid_connect_file,
ndmin=2, delimiter=",", dtype=int)
self.IS_riv_tot = int(rapid_connect_table.shape[0])
self.IS_max_up = int(rapid_connect_table[:, 2].max())
# get riv_bas_id info
riv_bas_id_table = np.loadtxt(self.riv_bas_id_file,
ndmin=1, delimiter=",",
usecols=(0,), dtype=int)
self.IS_riv_bas = int(riv_bas_id_table.size)
# add the forcing files
if not self.for_tot_id_file:
self.IS_for_tot = 0
log("Missing for_tot_id_file. Skipping ...",
"WARNING")
else:
# get riv_bas_id info
for_tot_id_table = np.loadtxt(self.for_tot_id_file,
ndmin=1, delimiter=",",
usecols=(0,), dtype=int)
self.IS_for_tot = int(for_tot_id_table.size)
if not self.for_use_id_file:
self.IS_for_use = 0
log("Missing for_use_id_file. Skipping ...",
"WARNING")
else:
# get riv_bas_id info
for_use_id_table = np.loadtxt(self.for_use_id_file,
ndmin=1, delimiter=",",
usecols=(0,), dtype=int)
self.IS_for_use = int(for_use_id_table.size) |
def expand(self, url):
"""Expand implementation for Adf.ly
Args:
url: the URL you want to expand
Returns:
A string containing the expanded URL
Raises:
BadAPIResponseException: If the data is malformed or we got a bad
status code on API response
ShorteningErrorException: If the API Returns an error as response
"""
url = self.clean_url(url)
expand_url = f'{self.api_url}v1/expand'
payload = {
'domain': getattr(self, 'domain', 'adf.ly'),
'advert_type': getattr(self, 'type', 'int'),
'group_id': getattr(self, 'group_id', None),
'key': self.api_key,
'user_id': self.user_id,
'url': url,
}
response = self._post(expand_url, data=payload)
if not response.ok:
raise BadAPIResponseException(response.content)
try:
data = response.json()
except json.decoder.JSONDecodeError:
raise BadAPIResponseException('API response could not be decoded')
if data.get('errors'):
errors = ','.join(i['msg'] for i in data['errors'])
raise ShorteningErrorException(errors)
if not data.get('data'):
raise BadAPIResponseException(response.content)
return data['data'][0]['url'] | Expand implementation for Adf.ly
Args:
url: the URL you want to expand
Returns:
A string containing the expanded URL
Raises:
BadAPIResponseException: If the data is malformed or we got a bad
status code on API response
ShorteningErrorException: If the API Returns an error as response | Below is the instruction that describes the task:
### Input:
Expand implementation for Adf.ly
Args:
url: the URL you want to expand
Returns:
A string containing the expanded URL
Raises:
BadAPIResponseException: If the data is malformed or we got a bad
status code on API response
ShorteningErrorException: If the API Returns an error as response
### Response:
def expand(self, url):
"""Expand implementation for Adf.ly
Args:
url: the URL you want to expand
Returns:
A string containing the expanded URL
Raises:
BadAPIResponseException: If the data is malformed or we got a bad
status code on API response
ShorteningErrorException: If the API Returns an error as response
"""
url = self.clean_url(url)
expand_url = f'{self.api_url}v1/expand'
payload = {
'domain': getattr(self, 'domain', 'adf.ly'),
'advert_type': getattr(self, 'type', 'int'),
'group_id': getattr(self, 'group_id', None),
'key': self.api_key,
'user_id': self.user_id,
'url': url,
}
response = self._post(expand_url, data=payload)
if not response.ok:
raise BadAPIResponseException(response.content)
try:
data = response.json()
except json.decoder.JSONDecodeError:
raise BadAPIResponseException('API response could not be decoded')
if data.get('errors'):
errors = ','.join(i['msg'] for i in data['errors'])
raise ShorteningErrorException(errors)
if not data.get('data'):
raise BadAPIResponseException(response.content)
return data['data'][0]['url'] |
def get_anchor_point(self, anchor_name):
"""Return an anchor point of the node, if it exists."""
if anchor_name in self._possible_anchors:
return TikZNodeAnchor(self.handle, anchor_name)
else:
try:
anchor = int(anchor_name.split('_')[1])
except (IndexError, ValueError):
anchor = None
if anchor is not None:
return TikZNodeAnchor(self.handle, str(anchor))
raise ValueError('Invalid anchor name: "{}"'.format(anchor_name)) | Return an anchor point of the node, if it exists. | Below is the instruction that describes the task:
### Input:
Return an anchor point of the node, if it exists.
### Response:
def get_anchor_point(self, anchor_name):
"""Return an anchor point of the node, if it exists."""
if anchor_name in self._possible_anchors:
return TikZNodeAnchor(self.handle, anchor_name)
else:
try:
anchor = int(anchor_name.split('_')[1])
except (IndexError, ValueError):
anchor = None
if anchor is not None:
return TikZNodeAnchor(self.handle, str(anchor))
raise ValueError('Invalid anchor name: "{}"'.format(anchor_name)) |
def correlation(T, obs1, obs2=None, times=(1), maxtime=None, k=None, ncv=None, return_times=False):
r"""Time-correlation for equilibrium experiment.
Parameters
----------
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : array-like of int (optional), default=(1)
List of times (in tau) at which to compute correlation
maxtime : int, optional, default=None
Maximum time step to use. Equivalent to . Alternative to times.
k : int (optional)
Number of eigenvalues and eigenvectors to use for computation
ncv : int (optional)
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
Returns
-------
correlations : ndarray
Correlation values at given times
times : ndarray, optional
time points at which the correlation was computed (if return_times=True)
References
----------
.. [1] Noe, F, S Doose, I Daidone, M Loellmann, M Sauer, J D
Chodera and J Smith. 2010. Dynamical fingerprints for probing
individual relaxation processes in biomolecular dynamics with
simulations and kinetic experiments. PNAS 108 (12): 4822-4827.
Notes
-----
**Auto-correlation**
The auto-correlation of an observable :math:`a(x)` for a system in
equilibrium is
.. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_x \mu(x) a(x, 0) a(x, t)
:math:`a(x,0)=a(x)` is the observable at time :math:`t=0`. It can
be propagated forward in time using the t-step transition matrix
:math:`p^{t}(x, y)`.
The propagated observable at time :math:`t` is :math:`a(x,
t)=\sum_y p^t(x, y)a(y, 0)`.
Using the eigenvalues and eigenvectors of the transition matrix
the autocorrelation can be written as
.. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_i \lambda_i^t \langle a, r_i\rangle_{\mu} \langle l_i, a \rangle.
**Cross-correlation**
The cross-correlation of two observables :math:`a(x)`,
:math:`b(x)` is similarly given
.. math:: \mathbb{E}_{\mu}[a(x,0)b(x,t)]=\sum_x \mu(x) a(x, 0) b(x, t)
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import correlation
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> a = np.array([1.0, 0.0, 0.0])
>>> times = np.array([1, 5, 10, 20])
>>> corr = correlation(T, a, times=times)
>>> corr
array([ 0.40909091, 0.34081364, 0.28585667, 0.23424263])
"""
# check if square matrix and remember size
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
n = T.shape[0]
obs1 = _types.ensure_ndarray(obs1, ndim=1, size=n, kind='numeric')
obs2 = _types.ensure_ndarray_or_None(obs2, ndim=1, size=n, kind='numeric')
times = _types.ensure_int_vector(times, require_order=True)
# check input
# go
if _issparse(T):
return sparse.fingerprints.correlation(T, obs1, obs2=obs2, times=times, k=k, ncv=ncv)
else:
return dense.fingerprints.correlation(T, obs1, obs2=obs2, times=times, k=k) | r"""Time-correlation for equilibrium experiment.
Parameters
----------
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : array-like of int (optional), default=(1)
List of times (in tau) at which to compute correlation
maxtime : int, optional, default=None
Maximum time step to use. Equivalent to . Alternative to times.
k : int (optional)
Number of eigenvalues and eigenvectors to use for computation
ncv : int (optional)
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
Returns
-------
correlations : ndarray
Correlation values at given times
times : ndarray, optional
time points at which the correlation was computed (if return_times=True)
References
----------
.. [1] Noe, F, S Doose, I Daidone, M Loellmann, M Sauer, J D
Chodera and J Smith. 2010. Dynamical fingerprints for probing
individual relaxation processes in biomolecular dynamics with
simulations and kinetic experiments. PNAS 108 (12): 4822-4827.
Notes
-----
**Auto-correlation**
The auto-correlation of an observable :math:`a(x)` for a system in
equilibrium is
.. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_x \mu(x) a(x, 0) a(x, t)
:math:`a(x,0)=a(x)` is the observable at time :math:`t=0`. It can
be propagated forward in time using the t-step transition matrix
:math:`p^{t}(x, y)`.
The propagated observable at time :math:`t` is :math:`a(x,
t)=\sum_y p^t(x, y)a(y, 0)`.
Using the eigenvalues and eigenvectors of the transition matrix
the autocorrelation can be written as
.. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_i \lambda_i^t \langle a, r_i\rangle_{\mu} \langle l_i, a \rangle.
**Cross-correlation**
The cross-correlation of two observables :math:`a(x)`,
:math:`b(x)` is similarly given
.. math:: \mathbb{E}_{\mu}[a(x,0)b(x,t)]=\sum_x \mu(x) a(x, 0) b(x, t)
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import correlation
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> a = np.array([1.0, 0.0, 0.0])
>>> times = np.array([1, 5, 10, 20])
>>> corr = correlation(T, a, times=times)
>>> corr
array([ 0.40909091,  0.34081364,  0.28585667,  0.23424263]) | Below is the instruction that describes the task:
### Input:
r"""Time-correlation for equilibrium experiment.
Parameters
----------
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : array-like of int (optional), default=(1)
List of times (in tau) at which to compute correlation
maxtime : int, optional, default=None
Maximum time step to use. Equivalent to . Alternative to times.
k : int (optional)
Number of eigenvalues and eigenvectors to use for computation
ncv : int (optional)
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
Returns
-------
correlations : ndarray
Correlation values at given times
times : ndarray, optional
time points at which the correlation was computed (if return_times=True)
References
----------
.. [1] Noe, F, S Doose, I Daidone, M Loellmann, M Sauer, J D
Chodera and J Smith. 2010. Dynamical fingerprints for probing
individual relaxation processes in biomolecular dynamics with
simulations and kinetic experiments. PNAS 108 (12): 4822-4827.
Notes
-----
**Auto-correlation**
The auto-correlation of an observable :math:`a(x)` for a system in
equilibrium is
.. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_x \mu(x) a(x, 0) a(x, t)
:math:`a(x,0)=a(x)` is the observable at time :math:`t=0`. It can
be propagated forward in time using the t-step transition matrix
:math:`p^{t}(x, y)`.
The propagated observable at time :math:`t` is :math:`a(x,
t)=\sum_y p^t(x, y)a(y, 0)`.
Using the eigenvalues and eigenvectors of the transition matrix
the autocorrelation can be written as
.. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_i \lambda_i^t \langle a, r_i\rangle_{\mu} \langle l_i, a \rangle.
**Cross-correlation**
The cross-correlation of two observables :math:`a(x)`,
:math:`b(x)` is similarly given
.. math:: \mathbb{E}_{\mu}[a(x,0)b(x,t)]=\sum_x \mu(x) a(x, 0) b(x, t)
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import correlation
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> a = np.array([1.0, 0.0, 0.0])
>>> times = np.array([1, 5, 10, 20])
>>> corr = correlation(T, a, times=times)
>>> corr
array([ 0.40909091, 0.34081364, 0.28585667, 0.23424263])
### Response:
def correlation(T, obs1, obs2=None, times=(1), maxtime=None, k=None, ncv=None, return_times=False):
r"""Time-correlation for equilibrium experiment.
Parameters
----------
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : array-like of int (optional), default=(1)
List of times (in tau) at which to compute correlation
maxtime : int, optional, default=None
Maximum time step to use. Equivalent to . Alternative to times.
k : int (optional)
Number of eigenvalues and eigenvectors to use for computation
ncv : int (optional)
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
Returns
-------
correlations : ndarray
Correlation values at given times
times : ndarray, optional
time points at which the correlation was computed (if return_times=True)
References
----------
.. [1] Noe, F, S Doose, I Daidone, M Loellmann, M Sauer, J D
Chodera and J Smith. 2010. Dynamical fingerprints for probing
individual relaxation processes in biomolecular dynamics with
simulations and kinetic experiments. PNAS 108 (12): 4822-4827.
Notes
-----
**Auto-correlation**
The auto-correlation of an observable :math:`a(x)` for a system in
equilibrium is
.. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_x \mu(x) a(x, 0) a(x, t)
:math:`a(x,0)=a(x)` is the observable at time :math:`t=0`. It can
be propagated forward in time using the t-step transition matrix
:math:`p^{t}(x, y)`.
The propagated observable at time :math:`t` is :math:`a(x,
t)=\sum_y p^t(x, y)a(y, 0)`.
Using the eigenvalues and eigenvectors of the transition matrix
the autocorrelation can be written as
.. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_i \lambda_i^t \langle a, r_i\rangle_{\mu} \langle l_i, a \rangle.
**Cross-correlation**
The cross-correlation of two observables :math:`a(x)`,
:math:`b(x)` is similarly given
.. math:: \mathbb{E}_{\mu}[a(x,0)b(x,t)]=\sum_x \mu(x) a(x, 0) b(x, t)
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import correlation
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> a = np.array([1.0, 0.0, 0.0])
>>> times = np.array([1, 5, 10, 20])
>>> corr = correlation(T, a, times=times)
>>> corr
array([ 0.40909091, 0.34081364, 0.28585667, 0.23424263])
"""
# check if square matrix and remember size
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
n = T.shape[0]
obs1 = _types.ensure_ndarray(obs1, ndim=1, size=n, kind='numeric')
obs2 = _types.ensure_ndarray_or_None(obs2, ndim=1, size=n, kind='numeric')
times = _types.ensure_int_vector(times, require_order=True)
# check input
# go
if _issparse(T):
return sparse.fingerprints.correlation(T, obs1, obs2=obs2, times=times, k=k, ncv=ncv)
else:
return dense.fingerprints.correlation(T, obs1, obs2=obs2, times=times, k=k) |
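As a quick cross-check of the docstring's formula, the autocorrelation can be evaluated directly as sum_x mu(x) a(x) (T^t a)(x) with plain numpy (independent of msmtools; the eigenvector-based stationary distribution below is just one way to obtain mu):

import numpy as np

T = np.array([[0.9, 0.1, 0.0],
              [0.5, 0.0, 0.5],
              [0.0, 0.1, 0.9]])
a = np.array([1.0, 0.0, 0.0])

# stationary distribution: left eigenvector of T for the eigenvalue closest to 1
evals, evecs = np.linalg.eig(T.T)
mu = np.real(evecs[:, np.argmin(np.abs(evals - 1.0))])
mu /= mu.sum()

def autocorr(t):
    return mu @ (a * (np.linalg.matrix_power(T, t) @ a))

print([round(autocorr(t), 8) for t in (1, 5, 10, 20)])
# should reproduce the docstring example values [0.40909091, 0.34081364, 0.28585667, 0.23424263]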
def setup(self, phase, entry_pressure='', pore_volume='', throat_volume=''):
r"""
Set up the required parameters for the algorithm
Parameters
----------
phase : OpenPNM Phase object
The phase to be injected into the Network. The Phase must have the
capillary entry pressure values for the system.
entry_pressure : string
The dictionary key to the capillary entry pressure. If none is
supplied then the current value is retained. The default is
'throat.capillary_pressure'.
pore_volume : string
The dictionary key to the pore volume. If none is supplied then
the current value is retained. The default is 'pore.volume'.
throat_volume : string
The dictionary key to the throat volume. If none is supplied then
the current value is retained. The default is 'throat.volume'.
"""
self.settings['phase'] = phase.name
if pore_volume:
self.settings['pore_volume'] = pore_volume
if throat_volume:
self.settings['throat_volume'] = throat_volume
if entry_pressure:
self.settings['entry_pressure'] = entry_pressure
# Setup arrays and info
self['throat.entry_pressure'] = phase[self.settings['entry_pressure']]
# Indices into t_entry giving a sorted list
self['throat.sorted'] = sp.argsort(self['throat.entry_pressure'], axis=0)
self['throat.order'] = 0
self['throat.order'][self['throat.sorted']] = sp.arange(0, self.Nt)
self['throat.invasion_sequence'] = -1
self['pore.invasion_sequence'] = -1
self._tcount = 0 | r"""
Set up the required parameters for the algorithm
Parameters
----------
phase : OpenPNM Phase object
The phase to be injected into the Network. The Phase must have the
capillary entry pressure values for the system.
entry_pressure : string
The dictionary key to the capillary entry pressure. If none is
supplied then the current value is retained. The default is
'throat.capillary_pressure'.
pore_volume : string
The dictionary key to the pore volume. If none is supplied then
the current value is retained. The default is 'pore.volume'.
throat_volume : string
The dictionary key to the throat volume. If none is supplied then
the current value is retained. The default is 'throat.volume'. | Below is the instruction that describes the task:
### Input:
r"""
Set up the required parameters for the algorithm
Parameters
----------
phase : OpenPNM Phase object
The phase to be injected into the Network. The Phase must have the
capillary entry pressure values for the system.
entry_pressure : string
The dictionary key to the capillary entry pressure. If none is
supplied then the current value is retained. The default is
'throat.capillary_pressure'.
pore_volume : string
The dictionary key to the pore volume. If none is supplied then
the current value is retained. The default is 'pore.volume'.
throat_volume : string
The dictionary key to the throat volume. If none is supplied then
the current value is retained. The default is 'throat.volume'.
### Response:
def setup(self, phase, entry_pressure='', pore_volume='', throat_volume=''):
r"""
Set up the required parameters for the algorithm
Parameters
----------
phase : OpenPNM Phase object
The phase to be injected into the Network. The Phase must have the
capillary entry pressure values for the system.
entry_pressure : string
The dictionary key to the capillary entry pressure. If none is
supplied then the current value is retained. The default is
'throat.capillary_pressure'.
pore_volume : string
The dictionary key to the pore volume. If none is supplied then
the current value is retained. The default is 'pore.volume'.
throat_volume : string
The dictionary key to the throat volume. If none is supplied then
the current value is retained. The default is 'throat.volume'.
"""
self.settings['phase'] = phase.name
if pore_volume:
self.settings['pore_volume'] = pore_volume
if throat_volume:
self.settings['throat_volume'] = throat_volume
if entry_pressure:
self.settings['entry_pressure'] = entry_pressure
# Setup arrays and info
self['throat.entry_pressure'] = phase[self.settings['entry_pressure']]
# Indices into t_entry giving a sorted list
self['throat.sorted'] = sp.argsort(self['throat.entry_pressure'], axis=0)
self['throat.order'] = 0
self['throat.order'][self['throat.sorted']] = sp.arange(0, self.Nt)
self['throat.invasion_sequence'] = -1
self['pore.invasion_sequence'] = -1
self._tcount = 0 |
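
A minimal, hedged usage sketch for the setup() call above. It assumes an OpenPNM 2.x workflow in which the Standard physics model populates 'throat.entry_pressure' on the phase; the network, geometry, and phase construction below are illustrative and not part of the original record.

# Hedged sketch, assuming OpenPNM 2.x is installed.
import openpnm as op

pn = op.network.Cubic(shape=[10, 10, 10], spacing=1e-4)
geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
water = op.phases.Water(network=pn)
phys = op.physics.Standard(network=pn, phase=water, geometry=geo)

ip = op.algorithms.InvasionPercolation(network=pn)
# Point entry_pressure at whatever key the physics model actually provides.
ip.setup(phase=water, entry_pressure='throat.entry_pressure')
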
def add_metadata(self, metadata_matrix, meta_index_store):
'''
    Returns a new corpus with the metadata matrix and index store integrated.
:param metadata_matrix: scipy.sparse matrix (# docs, # metadata)
:param meta_index_store: IndexStore of metadata values
:return: TermDocMatrixWithoutCategories
'''
assert isinstance(meta_index_store, IndexStore)
assert len(metadata_matrix.shape) == 2
assert metadata_matrix.shape[0] == self.get_num_docs()
return self._make_new_term_doc_matrix(new_X=self._X,
new_y=None,
new_category_idx_store=None,
new_y_mask=np.ones(self.get_num_docs()).astype(bool),
new_mX=metadata_matrix,
new_term_idx_store=self._term_idx_store,
                                          new_metadata_idx_store=meta_index_store) | Returns a new corpus with the metadata matrix and index store integrated.
:param metadata_matrix: scipy.sparse matrix (# docs, # metadata)
:param meta_index_store: IndexStore of metadata values
:return: TermDocMatrixWithoutCategories | Below is the the instruction that describes the task:
### Input:
Returns a new corpus with the metadata matrix and index store integrated.
:param metadata_matrix: scipy.sparse matrix (# docs, # metadata)
:param meta_index_store: IndexStore of metadata values
:return: TermDocMatrixWithoutCategories
### Response:
def add_metadata(self, metadata_matrix, meta_index_store):
'''
    Returns a new corpus with the metadata matrix and index store integrated.
:param metadata_matrix: scipy.sparse matrix (# docs, # metadata)
:param meta_index_store: IndexStore of metadata values
:return: TermDocMatrixWithoutCategories
'''
assert isinstance(meta_index_store, IndexStore)
assert len(metadata_matrix.shape) == 2
assert metadata_matrix.shape[0] == self.get_num_docs()
return self._make_new_term_doc_matrix(new_X=self._X,
new_y=None,
new_category_idx_store=None,
new_y_mask=np.ones(self.get_num_docs()).astype(bool),
new_mX=metadata_matrix,
new_term_idx_store=self._term_idx_store,
new_metadata_idx_store=meta_index_store) |
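
A hedged sketch of what add_metadata expects: one sparse column per registered metadata term, aligned with the document axis. The corpus object `corpus`, the per-document values, and the IndexStore import path are assumptions rather than content taken from the record.

# Sketch only; the IndexStore import path may differ between scattertext versions.
import numpy as np
from scipy.sparse import csr_matrix
from scattertext import IndexStore  # assumption: exported at package level

meta_idx = IndexStore()
meta_idx.getidx('is_long_document')                  # register one metadata term
n_docs = corpus.get_num_docs()
flags = (np.arange(n_docs) % 2).reshape(n_docs, 1)   # made-up per-document values
meta_matrix = csr_matrix(flags)                      # shape (# docs, # metadata)

corpus_with_meta = corpus.add_metadata(meta_matrix, meta_idx)
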
def permission_set(self, name, func=None):
"""Define a new permission set (directly, or as a decorator).
E.g.::
@authz.permission_set('HTTP')
def is_http_perm(perm):
return perm.startswith('http.')
"""
if func is None:
return functools.partial(self.predicate, name)
self.permission_sets[name] = func
return func | Define a new permission set (directly, or as a decorator).
E.g.::
@authz.permission_set('HTTP')
def is_http_perm(perm):
return perm.startswith('http.') | Below is the the instruction that describes the task:
### Input:
Define a new permission set (directly, or as a decorator).
E.g.::
@authz.permission_set('HTTP')
def is_http_perm(perm):
return perm.startswith('http.')
### Response:
def permission_set(self, name, func=None):
"""Define a new permission set (directly, or as a decorator).
E.g.::
@authz.permission_set('HTTP')
def is_http_perm(perm):
return perm.startswith('http.')
"""
if func is None:
return functools.partial(self.predicate, name)
self.permission_sets[name] = func
return func |
def clean(self):
""" Cleans the data and throws ValidationError on failure """
errors = {}
cleaned = {}
for name, validator in self.validate_schema.items():
val = getattr(self, name, None)
try:
cleaned[name] = validator.to_python(val)
except formencode.api.Invalid, err:
errors[name] = err
if errors:
raise ValidationError('Invalid data', errors)
return cleaned | Cleans the data and throws ValidationError on failure | Below is the the instruction that describes the task:
### Input:
Cleans the data and throws ValidationError on failure
### Response:
def clean(self):
""" Cleans the data and throws ValidationError on failure """
errors = {}
cleaned = {}
for name, validator in self.validate_schema.items():
val = getattr(self, name, None)
try:
cleaned[name] = validator.to_python(val)
except formencode.api.Invalid, err:
errors[name] = err
if errors:
raise ValidationError('Invalid data', errors)
return cleaned |
def folderitem(self, obj, item, index):
"""Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
"""
# ensure we have an object and not a brain
obj = api.get_object(obj)
uid = api.get_uid(obj)
url = api.get_url(obj)
title = api.get_title(obj)
# get the category
if self.show_categories_enabled():
category = obj.getCategoryTitle()
if category not in self.categories:
self.categories.append(category)
item["category"] = category
rr = self.referenceresults.get(uid, {})
item["Title"] = title
item["replace"]["Title"] = get_link(url, value=title)
item["allow_edit"] = self.get_editable_columns()
item["required"] = self.get_required_columns()
item["selected"] = rr and True or False
item["result"] = rr.get("result", "")
item["min"] = rr.get("min", "")
item["max"] = rr.get("max", "")
# Icons
after_icons = ""
if obj.getAccredited():
after_icons += get_image(
"accredited.png", title=_("Accredited"))
if obj.getAttachmentOption() == "r":
after_icons += get_image(
"attach_reqd.png", title=_("Attachment required"))
if obj.getAttachmentOption() == "n":
after_icons += get_image(
"attach_no.png", title=_("Attachment not permitted"))
if after_icons:
item["after"]["Title"] = after_icons
return item | Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item | Below is the the instruction that describes the task:
### Input:
Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
### Response:
def folderitem(self, obj, item, index):
"""Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
"""
# ensure we have an object and not a brain
obj = api.get_object(obj)
uid = api.get_uid(obj)
url = api.get_url(obj)
title = api.get_title(obj)
# get the category
if self.show_categories_enabled():
category = obj.getCategoryTitle()
if category not in self.categories:
self.categories.append(category)
item["category"] = category
rr = self.referenceresults.get(uid, {})
item["Title"] = title
item["replace"]["Title"] = get_link(url, value=title)
item["allow_edit"] = self.get_editable_columns()
item["required"] = self.get_required_columns()
item["selected"] = rr and True or False
item["result"] = rr.get("result", "")
item["min"] = rr.get("min", "")
item["max"] = rr.get("max", "")
# Icons
after_icons = ""
if obj.getAccredited():
after_icons += get_image(
"accredited.png", title=_("Accredited"))
if obj.getAttachmentOption() == "r":
after_icons += get_image(
"attach_reqd.png", title=_("Attachment required"))
if obj.getAttachmentOption() == "n":
after_icons += get_image(
"attach_no.png", title=_("Attachment not permitted"))
if after_icons:
item["after"]["Title"] = after_icons
return item |
def inplace_filter(func, sequence):
"""
Like Python's filter() builtin, but modifies the sequence in place.
Example:
>>> l = range(10)
>>> inplace_filter(lambda x: x > 5, l)
>>> l
[6, 7, 8, 9]
Performance considerations: the function iterates over the
sequence, shuffling surviving members down and deleting whatever
top part of the sequence is left empty at the end, so sequences
whose surviving members are predominantly at the bottom will be
processed faster.
"""
target = 0
for source in xrange(len(sequence)):
if func(sequence[source]):
sequence[target] = sequence[source]
target += 1
del sequence[target:] | Like Python's filter() builtin, but modifies the sequence in place.
Example:
>>> l = range(10)
>>> inplace_filter(lambda x: x > 5, l)
>>> l
[6, 7, 8, 9]
Performance considerations: the function iterates over the
sequence, shuffling surviving members down and deleting whatever
top part of the sequence is left empty at the end, so sequences
whose surviving members are predominantly at the bottom will be
processed faster. | Below is the the instruction that describes the task:
### Input:
Like Python's filter() builtin, but modifies the sequence in place.
Example:
>>> l = range(10)
>>> inplace_filter(lambda x: x > 5, l)
>>> l
[6, 7, 8, 9]
Performance considerations: the function iterates over the
sequence, shuffling surviving members down and deleting whatever
top part of the sequence is left empty at the end, so sequences
whose surviving members are predominantly at the bottom will be
processed faster.
### Response:
def inplace_filter(func, sequence):
"""
Like Python's filter() builtin, but modifies the sequence in place.
Example:
>>> l = range(10)
>>> inplace_filter(lambda x: x > 5, l)
>>> l
[6, 7, 8, 9]
Performance considerations: the function iterates over the
sequence, shuffling surviving members down and deleting whatever
top part of the sequence is left empty at the end, so sequences
whose surviving members are predominantly at the bottom will be
processed faster.
"""
target = 0
for source in xrange(len(sequence)):
if func(sequence[source]):
sequence[target] = sequence[source]
target += 1
del sequence[target:] |
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._BillingInvoice is not None:
return False
if self._DraftPayment is not None:
return False
if self._MasterCardAction is not None:
return False
if self._Payment is not None:
return False
if self._PaymentBatch is not None:
return False
if self._RequestResponse is not None:
return False
if self._ScheduleInstance is not None:
return False
if self._TabResultResponse is not None:
return False
if self._WhitelistResult is not None:
return False
return True | :rtype: bool | Below is the the instruction that describes the task:
### Input:
:rtype: bool
### Response:
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._BillingInvoice is not None:
return False
if self._DraftPayment is not None:
return False
if self._MasterCardAction is not None:
return False
if self._Payment is not None:
return False
if self._PaymentBatch is not None:
return False
if self._RequestResponse is not None:
return False
if self._ScheduleInstance is not None:
return False
if self._TabResultResponse is not None:
return False
if self._WhitelistResult is not None:
return False
return True |
def biclique(self, xmin, xmax, ymin, ymax):
"""Compute a maximum-sized complete bipartite graph contained in the
rectangle defined by ``xmin, xmax, ymin, ymax`` where each chain of
qubits is either a vertical line or a horizontal line.
INPUTS:
xmin,xmax,ymin,ymax: integers defining the bounds of a rectangle
where we look for unbroken chains. These ranges include both
endpoints.
OUTPUT:
(A_side, B_side): a tuple of two lists containing lists of qubits.
the lists found in ``A_side`` and ``B_side`` are chains of qubits.
These lists of qubits are arranged so that
>>> [zip(chain,chain[1:]) for chain in A_side]
and
>>> [zip(chain,chain[1:]) for chain in B_side]
are lists of valid couplers.
"""
Aside = sum((self.maximum_hline_bundle(y, xmin, xmax)
for y in range(ymin, ymax + 1)), [])
Bside = sum((self.maximum_vline_bundle(x, ymin, ymax)
for x in range(xmin, xmax + 1)), [])
return Aside, Bside | Compute a maximum-sized complete bipartite graph contained in the
rectangle defined by ``xmin, xmax, ymin, ymax`` where each chain of
qubits is either a vertical line or a horizontal line.
INPUTS:
xmin,xmax,ymin,ymax: integers defining the bounds of a rectangle
where we look for unbroken chains. These ranges include both
endpoints.
OUTPUT:
(A_side, B_side): a tuple of two lists containing lists of qubits.
the lists found in ``A_side`` and ``B_side`` are chains of qubits.
These lists of qubits are arranged so that
>>> [zip(chain,chain[1:]) for chain in A_side]
and
>>> [zip(chain,chain[1:]) for chain in B_side]
are lists of valid couplers. | Below is the the instruction that describes the task:
### Input:
Compute a maximum-sized complete bipartite graph contained in the
rectangle defined by ``xmin, xmax, ymin, ymax`` where each chain of
qubits is either a vertical line or a horizontal line.
INPUTS:
xmin,xmax,ymin,ymax: integers defining the bounds of a rectangle
where we look for unbroken chains. These ranges include both
endpoints.
OUTPUT:
(A_side, B_side): a tuple of two lists containing lists of qubits.
the lists found in ``A_side`` and ``B_side`` are chains of qubits.
These lists of qubits are arranged so that
>>> [zip(chain,chain[1:]) for chain in A_side]
and
>>> [zip(chain,chain[1:]) for chain in B_side]
are lists of valid couplers.
### Response:
def biclique(self, xmin, xmax, ymin, ymax):
"""Compute a maximum-sized complete bipartite graph contained in the
rectangle defined by ``xmin, xmax, ymin, ymax`` where each chain of
qubits is either a vertical line or a horizontal line.
INPUTS:
xmin,xmax,ymin,ymax: integers defining the bounds of a rectangle
where we look for unbroken chains. These ranges include both
endpoints.
OUTPUT:
(A_side, B_side): a tuple of two lists containing lists of qubits.
the lists found in ``A_side`` and ``B_side`` are chains of qubits.
These lists of qubits are arranged so that
>>> [zip(chain,chain[1:]) for chain in A_side]
and
>>> [zip(chain,chain[1:]) for chain in B_side]
are lists of valid couplers.
"""
Aside = sum((self.maximum_hline_bundle(y, xmin, xmax)
for y in range(ymin, ymax + 1)), [])
Bside = sum((self.maximum_vline_bundle(x, ymin, ymax)
for x in range(xmin, xmax + 1)), [])
return Aside, Bside |
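
To make the coupler construction described in that docstring concrete, a short hedged sketch; `proc` stands in for whatever object (for example a Chimera processor helper) actually provides biclique().

# Find a complete bipartite block in the 2x2 cell rectangle at the origin.
A_side, B_side = proc.biclique(xmin=0, xmax=1, ymin=0, ymax=1)

# Each chain is a list of qubits; consecutive pairs within a chain are valid couplers.
a_couplers = [list(zip(chain, chain[1:])) for chain in A_side]
b_couplers = [list(zip(chain, chain[1:])) for chain in B_side]
print(len(A_side), "horizontal chains;", len(B_side), "vertical chains")
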
def _make_cmap(colors, position=None, bit=False):
'''
_make_cmap takes a list of tuples which contain RGB values. The RGB
values may either be in 8-bit [0 to 255] (in which bit must be set to
True when called) or arithmetic [0 to 1] (default). _make_cmap returns
a cmap with equally spaced colors.
Arrange your tuples so that the first color is the lowest value for the
colorbar and the last is the highest.
position contains values from 0 to 1 to dictate the location of each color.
'''
bit_rgb = np.linspace(0,1,256)
if position == None:
position = np.linspace(0,1,len(colors))
else:
if len(position) != len(colors):
sys.exit("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
sys.exit("position must start with 0 and end with 1")
palette = [(i, (float(r), float(g), float(b), float(a))) for
i, (r, g, b, a) in enumerate(colors)]
cmap = Colormap(*palette)
return cmap | _make_cmap takes a list of tuples which contain RGB values. The RGB
values may either be in 8-bit [0 to 255] (in which bit must be set to
True when called) or arithmetic [0 to 1] (default). _make_cmap returns
a cmap with equally spaced colors.
Arrange your tuples so that the first color is the lowest value for the
colorbar and the last is the highest.
position contains values from 0 to 1 to dictate the location of each color. | Below is the the instruction that describes the task:
### Input:
_make_cmap takes a list of tuples which contain RGB values. The RGB
values may either be in 8-bit [0 to 255] (in which bit must be set to
True when called) or arithmetic [0 to 1] (default). _make_cmap returns
a cmap with equally spaced colors.
Arrange your tuples so that the first color is the lowest value for the
colorbar and the last is the highest.
position contains values from 0 to 1 to dictate the location of each color.
### Response:
def _make_cmap(colors, position=None, bit=False):
'''
_make_cmap takes a list of tuples which contain RGB values. The RGB
values may either be in 8-bit [0 to 255] (in which bit must be set to
True when called) or arithmetic [0 to 1] (default). _make_cmap returns
a cmap with equally spaced colors.
Arrange your tuples so that the first color is the lowest value for the
colorbar and the last is the highest.
position contains values from 0 to 1 to dictate the location of each color.
'''
bit_rgb = np.linspace(0,1,256)
if position == None:
position = np.linspace(0,1,len(colors))
else:
if len(position) != len(colors):
sys.exit("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
sys.exit("position must start with 0 and end with 1")
palette = [(i, (float(r), float(g), float(b), float(a))) for
i, (r, g, b, a) in enumerate(colors)]
cmap = Colormap(*palette)
return cmap |
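
A hedged call example for _make_cmap. Note that the body unpacks four components per tuple, so RGBA values in the arithmetic [0, 1] range are assumed here even though the docstring says RGB; the downstream Colormap class is assumed to accept the (index, rgba) stops built in the body.

# Three evenly spaced color stops, low to high.
colors = [
    (0.0, 0.0, 0.5, 1.0),   # dark blue at the low end
    (0.0, 1.0, 0.0, 1.0),   # green in the middle
    (1.0, 0.0, 0.0, 1.0),   # red at the high end
]
cmap = _make_cmap(colors)
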
def fromOPEndpointURL(cls, op_endpoint_url):
"""Construct an OP-Identifier OpenIDServiceEndpoint object for
a given OP Endpoint URL
@param op_endpoint_url: The URL of the endpoint
@rtype: OpenIDServiceEndpoint
"""
service = cls()
service.server_url = op_endpoint_url
service.type_uris = [OPENID_IDP_2_0_TYPE]
return service | Construct an OP-Identifier OpenIDServiceEndpoint object for
a given OP Endpoint URL
@param op_endpoint_url: The URL of the endpoint
@rtype: OpenIDServiceEndpoint | Below is the the instruction that describes the task:
### Input:
Construct an OP-Identifier OpenIDServiceEndpoint object for
a given OP Endpoint URL
@param op_endpoint_url: The URL of the endpoint
@rtype: OpenIDServiceEndpoint
### Response:
def fromOPEndpointURL(cls, op_endpoint_url):
"""Construct an OP-Identifier OpenIDServiceEndpoint object for
a given OP Endpoint URL
@param op_endpoint_url: The URL of the endpoint
@rtype: OpenIDServiceEndpoint
"""
service = cls()
service.server_url = op_endpoint_url
service.type_uris = [OPENID_IDP_2_0_TYPE]
return service |
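
A minimal hedged usage sketch; the endpoint URL is made up, and the import path assumes python-openid's consumer.discover module.

from openid.consumer.discover import OpenIDServiceEndpoint  # path is an assumption

endpoint = OpenIDServiceEndpoint.fromOPEndpointURL(
    "https://op.example.com/openid/endpoint")
print(endpoint.server_url)   # -> "https://op.example.com/openid/endpoint"
print(endpoint.type_uris)    # -> [OPENID_IDP_2_0_TYPE]
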
def get_field_mappings(self, field):
"""Converts ES field mappings to .kibana field mappings"""
retdict = {}
retdict['indexed'] = False
retdict['analyzed'] = False
for (key, val) in iteritems(field):
if key in self.mappings:
if (key == 'type' and
(val == "long" or
val == "integer" or
val == "double" or
val == "float")):
val = "number"
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
retdict[key] = val
if key == 'index' and val != "no":
retdict['indexed'] = True
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
if val == "analyzed":
retdict['analyzed'] = True
return retdict | Converts ES field mappings to .kibana field mappings | Below is the the instruction that describes the task:
### Input:
Converts ES field mappings to .kibana field mappings
### Response:
def get_field_mappings(self, field):
"""Converts ES field mappings to .kibana field mappings"""
retdict = {}
retdict['indexed'] = False
retdict['analyzed'] = False
for (key, val) in iteritems(field):
if key in self.mappings:
if (key == 'type' and
(val == "long" or
val == "integer" or
val == "double" or
val == "float")):
val = "number"
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
retdict[key] = val
if key == 'index' and val != "no":
retdict['indexed'] = True
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
if val == "analyzed":
retdict['analyzed'] = True
return retdict |
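
A small hedged sketch of the conversion this implies. It assumes 'type' and 'index' are listed in the object's self.mappings and that `exporter` is the instance providing the method; the field dict mimics a typical Elasticsearch 2.x mapping fragment.

es_field = {"type": "long", "index": "not_analyzed"}

converted = exporter.get_field_mappings(es_field)
# Expected shape under the assumptions above:
# {"indexed": True, "analyzed": False, "type": "number", "index": "not_analyzed"}
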
def bind(self, server, net=None, address=None):
"""Create a network adapter object and bind."""
if _debug: NetworkServiceAccessPoint._debug("bind %r net=%r address=%r", server, net, address)
# make sure this hasn't already been called with this network
if net in self.adapters:
raise RuntimeError("already bound")
# create an adapter object, add it to our map
adapter = NetworkAdapter(self, net)
self.adapters[net] = adapter
if _debug: NetworkServiceAccessPoint._debug(" - adapters[%r]: %r", net, adapter)
# if the address was given, make it the "local" one
if address and not self.local_address:
self.local_adapter = adapter
self.local_address = address
# bind to the server
bind(adapter, server) | Create a network adapter object and bind. | Below is the the instruction that describes the task:
### Input:
Create a network adapter object and bind.
### Response:
def bind(self, server, net=None, address=None):
"""Create a network adapter object and bind."""
if _debug: NetworkServiceAccessPoint._debug("bind %r net=%r address=%r", server, net, address)
# make sure this hasn't already been called with this network
if net in self.adapters:
raise RuntimeError("already bound")
# create an adapter object, add it to our map
adapter = NetworkAdapter(self, net)
self.adapters[net] = adapter
if _debug: NetworkServiceAccessPoint._debug(" - adapters[%r]: %r", net, adapter)
# if the address was given, make it the "local" one
if address and not self.local_address:
self.local_adapter = adapter
self.local_address = address
# bind to the server
bind(adapter, server) |
def extract_ast_species(ast):
"""Extract species from ast.species set of tuples (id, label)"""
species_id = "None"
species_label = "None"
species = [
(species_id, species_label) for (species_id, species_label) in ast.species if species_id
]
if len(species) == 1:
(species_id, species_label) = species[0]
if not species_id:
species_id = "None"
species_label = "None"
log.debug(f"AST Species: {ast.species} Species: {species} SpeciesID: {species_id}")
return (species_id, species_label) | Extract species from ast.species set of tuples (id, label) | Below is the the instruction that describes the task:
### Input:
Extract species from ast.species set of tuples (id, label)
### Response:
def extract_ast_species(ast):
"""Extract species from ast.species set of tuples (id, label)"""
species_id = "None"
species_label = "None"
species = [
(species_id, species_label) for (species_id, species_label) in ast.species if species_id
]
if len(species) == 1:
(species_id, species_label) = species[0]
if not species_id:
species_id = "None"
species_label = "None"
log.debug(f"AST Species: {ast.species} Species: {species} SpeciesID: {species_id}")
return (species_id, species_label) |
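
A minimal hedged illustration of the input shape: any object with a `species` set of (id, label) tuples works, so a SimpleNamespace stands in for the real BEL AST here (the module-level `log` used inside the function is assumed to be configured).

from types import SimpleNamespace

ast = SimpleNamespace(species={("TAX:9606", "human"), (None, None)})
species_id, species_label = extract_ast_species(ast)
# Tuples with a falsy id are filtered out first, so this yields:
# species_id == "TAX:9606", species_label == "human"
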
def accepts(*argtypes, **kwargtypes):
"""A function decorator to specify argument types of the function.
Types may be specified either in the order that they appear in the
function or via keyword arguments (just as if you were calling the
function).
Example usage:
| @accepts(Positive0)
| def square_root(x):
| ...
"""
theseargtypes = [T.TypeFactory(a) for a in argtypes]
thesekwargtypes = {k : T.TypeFactory(a) for k,a in kwargtypes.items()}
def _decorator(func):
# @accepts decorator
f = func.__wrapped__ if hasattr(func, "__wrapped__") else func
try:
argtypes = inspect.getcallargs(f, *theseargtypes, **thesekwargtypes)
argtypes = {k: v if issubclass(type(v), T.Type) else T.Constant(v)
for k,v in argtypes.items()}
except TypeError:
raise E.ArgumentTypeError("Invalid argument specification to @accepts in %s" % func.__qualname__)
# Support keyword arguments. Find the name of the **kwargs
# parameter (not necessarily "kwargs") and set it to be a
# dictionary of unspecified types.
kwargname = U.get_func_kwargs_name(func)
if kwargname in argtypes.keys():
argtypes[kwargname] = T.KeywordArguments()
# Support positional arguments. Find the name of the *args
# parameter (not necessarily "args") and set it to be an
# unspecified type.
posargname = U.get_func_posargs_name(func)
if posargname in argtypes.keys():
argtypes[posargname] = T.PositionalArguments() # TODO merge with actual argument names
if U.has_fun_prop(func, "argtypes"):
raise ValueError("Cannot set argument types twice")
U.set_fun_prop(func, "argtypes", argtypes)
return _wrap(func)
return _decorator | A function decorator to specify argument types of the function.
Types may be specified either in the order that they appear in the
function or via keyword arguments (just as if you were calling the
function).
Example usage:
| @accepts(Positive0)
| def square_root(x):
| ... | Below is the the instruction that describes the task:
### Input:
A function decorator to specify argument types of the function.
Types may be specified either in the order that they appear in the
function or via keyword arguments (just as if you were calling the
function).
Example usage:
| @accepts(Positive0)
| def square_root(x):
| ...
### Response:
def accepts(*argtypes, **kwargtypes):
"""A function decorator to specify argument types of the function.
Types may be specified either in the order that they appear in the
function or via keyword arguments (just as if you were calling the
function).
Example usage:
| @accepts(Positive0)
| def square_root(x):
| ...
"""
theseargtypes = [T.TypeFactory(a) for a in argtypes]
thesekwargtypes = {k : T.TypeFactory(a) for k,a in kwargtypes.items()}
def _decorator(func):
# @accepts decorator
f = func.__wrapped__ if hasattr(func, "__wrapped__") else func
try:
argtypes = inspect.getcallargs(f, *theseargtypes, **thesekwargtypes)
argtypes = {k: v if issubclass(type(v), T.Type) else T.Constant(v)
for k,v in argtypes.items()}
except TypeError:
raise E.ArgumentTypeError("Invalid argument specification to @accepts in %s" % func.__qualname__)
# Support keyword arguments. Find the name of the **kwargs
# parameter (not necessarily "kwargs") and set it to be a
# dictionary of unspecified types.
kwargname = U.get_func_kwargs_name(func)
if kwargname in argtypes.keys():
argtypes[kwargname] = T.KeywordArguments()
# Support positional arguments. Find the name of the *args
# parameter (not necessarily "args") and set it to be an
# unspecified type.
posargname = U.get_func_posargs_name(func)
if posargname in argtypes.keys():
argtypes[posargname] = T.PositionalArguments() # TODO merge with actual argument names
if U.has_fun_prop(func, "argtypes"):
raise ValueError("Cannot set argument types twice")
U.set_fun_prop(func, "argtypes", argtypes)
return _wrap(func)
return _decorator |
def add_at(self, moment: float, fn_process: Callable, *args: Any, **kwargs: Any) -> 'Process':
"""
Adds a process to the simulation, which is made to start at the given exact time on the simulated clock. Note
that times in the past when compared to the current moment on the simulated clock are forbidden.
See method add() for more details.
"""
delay = moment - self.now()
if delay < 0.0:
raise ValueError(
f"The given moment to start the process ({moment:f}) is in the past (now is {self.now():f})."
)
return self.add_in(delay, fn_process, *args, **kwargs) | Adds a process to the simulation, which is made to start at the given exact time on the simulated clock. Note
that times in the past when compared to the current moment on the simulated clock are forbidden.
See method add() for more details. | Below is the the instruction that describes the task:
### Input:
Adds a process to the simulation, which is made to start at the given exact time on the simulated clock. Note
that times in the past when compared to the current moment on the simulated clock are forbidden.
See method add() for more details.
### Response:
def add_at(self, moment: float, fn_process: Callable, *args: Any, **kwargs: Any) -> 'Process':
"""
Adds a process to the simulation, which is made to start at the given exact time on the simulated clock. Note
that times in the past when compared to the current moment on the simulated clock are forbidden.
See method add() for more details.
"""
delay = moment - self.now()
if delay < 0.0:
raise ValueError(
f"The given moment to start the process ({moment:f}) is in the past (now is {self.now():f})."
)
return self.add_in(delay, fn_process, *args, **kwargs) |
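
A hedged usage sketch; `sim` is assumed to be the simulation object exposing add_at()/now(), and `customer` any process function acceptable to its add() contract. The arguments are illustrative.

# Schedule `customer` to start exactly at t=10.0 on the simulated clock.
proc = sim.add_at(10.0, customer, "queue-1", service_time=2.5)

# Scheduling in the past raises ValueError; e.g. if sim.now() is already 12.0,
# sim.add_at(10.0, customer) fails with the message shown in the code above.
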
def m2i(self, pkt, s):
"""
The good thing about safedec is that it may still decode ASN1
even if there is a mismatch between the expected tag (self.ASN1_tag)
and the actual tag; the decoded ASN1 object will simply be put
into an ASN1_BADTAG object. However, safedec prevents the raising of
exceptions needed for ASN1F_optional processing.
Thus we use 'flexible_tag', which should be False with ASN1F_optional.
Regarding other fields, we might need to know whether encoding went
as expected or not. Noticeably, input methods from cert.py expect
certain exceptions to be raised. Hence default flexible_tag is False.
"""
diff_tag, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag,
implicit_tag=self.implicit_tag,
explicit_tag=self.explicit_tag,
safe=self.flexible_tag)
if diff_tag is not None:
# this implies that flexible_tag was True
if self.implicit_tag is not None:
self.implicit_tag = diff_tag
elif self.explicit_tag is not None:
self.explicit_tag = diff_tag
codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
if self.flexible_tag:
return codec.safedec(s, context=self.context)
else:
return codec.dec(s, context=self.context) | The good thing about safedec is that it may still decode ASN1
even if there is a mismatch between the expected tag (self.ASN1_tag)
and the actual tag; the decoded ASN1 object will simply be put
into an ASN1_BADTAG object. However, safedec prevents the raising of
exceptions needed for ASN1F_optional processing.
Thus we use 'flexible_tag', which should be False with ASN1F_optional.
Regarding other fields, we might need to know whether encoding went
as expected or not. Noticeably, input methods from cert.py expect
certain exceptions to be raised. Hence default flexible_tag is False. | Below is the the instruction that describes the task:
### Input:
The good thing about safedec is that it may still decode ASN1
even if there is a mismatch between the expected tag (self.ASN1_tag)
and the actual tag; the decoded ASN1 object will simply be put
into an ASN1_BADTAG object. However, safedec prevents the raising of
exceptions needed for ASN1F_optional processing.
Thus we use 'flexible_tag', which should be False with ASN1F_optional.
Regarding other fields, we might need to know whether encoding went
as expected or not. Noticeably, input methods from cert.py expect
certain exceptions to be raised. Hence default flexible_tag is False.
### Response:
def m2i(self, pkt, s):
"""
The good thing about safedec is that it may still decode ASN1
even if there is a mismatch between the expected tag (self.ASN1_tag)
and the actual tag; the decoded ASN1 object will simply be put
into an ASN1_BADTAG object. However, safedec prevents the raising of
exceptions needed for ASN1F_optional processing.
Thus we use 'flexible_tag', which should be False with ASN1F_optional.
Regarding other fields, we might need to know whether encoding went
as expected or not. Noticeably, input methods from cert.py expect
certain exceptions to be raised. Hence default flexible_tag is False.
"""
diff_tag, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag,
implicit_tag=self.implicit_tag,
explicit_tag=self.explicit_tag,
safe=self.flexible_tag)
if diff_tag is not None:
# this implies that flexible_tag was True
if self.implicit_tag is not None:
self.implicit_tag = diff_tag
elif self.explicit_tag is not None:
self.explicit_tag = diff_tag
codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
if self.flexible_tag:
return codec.safedec(s, context=self.context)
else:
return codec.dec(s, context=self.context) |
def regex(pattern, flags: int = 0):
"""Filter messages that match a given RegEx pattern.
Args:
pattern (``str``):
The RegEx pattern as string, it will be applied to the text of a message. When a pattern matches,
all the `Match Objects <https://docs.python.org/3/library/re.html#match-objects>`_
are stored in the *matches* field of the :class:`Message <pyrogram.Message>` itself.
flags (``int``, *optional*):
RegEx flags.
"""
def f(_, m):
m.matches = [i for i in _.p.finditer(m.text or m.caption or "")]
return bool(m.matches)
return create("Regex", f, p=re.compile(pattern, flags)) | Filter messages that match a given RegEx pattern.
Args:
pattern (``str``):
The RegEx pattern as string, it will be applied to the text of a message. When a pattern matches,
all the `Match Objects <https://docs.python.org/3/library/re.html#match-objects>`_
are stored in the *matches* field of the :class:`Message <pyrogram.Message>` itself.
flags (``int``, *optional*):
RegEx flags. | Below is the the instruction that describes the task:
### Input:
Filter messages that match a given RegEx pattern.
Args:
pattern (``str``):
The RegEx pattern as string, it will be applied to the text of a message. When a pattern matches,
all the `Match Objects <https://docs.python.org/3/library/re.html#match-objects>`_
are stored in the *matches* field of the :class:`Message <pyrogram.Message>` itself.
flags (``int``, *optional*):
RegEx flags.
### Response:
def regex(pattern, flags: int = 0):
"""Filter messages that match a given RegEx pattern.
Args:
pattern (``str``):
The RegEx pattern as string, it will be applied to the text of a message. When a pattern matches,
all the `Match Objects <https://docs.python.org/3/library/re.html#match-objects>`_
are stored in the *matches* field of the :class:`Message <pyrogram.Message>` itself.
flags (``int``, *optional*):
RegEx flags.
"""
def f(_, m):
m.matches = [i for i in _.p.finditer(m.text or m.caption or "")]
return bool(m.matches)
return create("Regex", f, p=re.compile(pattern, flags)) |
def create_aggregator(self, subordinates):
"""Creates an aggregator event source, collecting events from multiple sources.
This way a single listener can listen for events coming from multiple sources,
using a single blocking :py:func:`get_event` on the returned aggregator.
in subordinates of type :class:`IEventSource`
Subordinate event source this one aggregates.
return result of type :class:`IEventSource`
Event source aggregating passed sources.
"""
if not isinstance(subordinates, list):
raise TypeError("subordinates can only be an instance of type list")
for a in subordinates[:10]:
if not isinstance(a, IEventSource):
raise TypeError(
"array can only contain objects of type IEventSource")
result = self._call("createAggregator",
in_p=[subordinates])
result = IEventSource(result)
return result | Creates an aggregator event source, collecting events from multiple sources.
This way a single listener can listen for events coming from multiple sources,
using a single blocking :py:func:`get_event` on the returned aggregator.
in subordinates of type :class:`IEventSource`
Subordinate event source this one aggregates.
return result of type :class:`IEventSource`
Event source aggregating passed sources. | Below is the the instruction that describes the task:
### Input:
Creates an aggregator event source, collecting events from multiple sources.
This way a single listener can listen for events coming from multiple sources,
using a single blocking :py:func:`get_event` on the returned aggregator.
in subordinates of type :class:`IEventSource`
Subordinate event source this one aggregates.
return result of type :class:`IEventSource`
Event source aggregating passed sources.
### Response:
def create_aggregator(self, subordinates):
"""Creates an aggregator event source, collecting events from multiple sources.
This way a single listener can listen for events coming from multiple sources,
using a single blocking :py:func:`get_event` on the returned aggregator.
in subordinates of type :class:`IEventSource`
Subordinate event source this one aggregates.
return result of type :class:`IEventSource`
Event source aggregating passed sources.
"""
if not isinstance(subordinates, list):
raise TypeError("subordinates can only be an instance of type list")
for a in subordinates[:10]:
if not isinstance(a, IEventSource):
raise TypeError(
"array can only contain objects of type IEventSource")
result = self._call("createAggregator",
in_p=[subordinates])
result = IEventSource(result)
return result |
def write_json(json_obj, filename, mode="w", print_pretty=True):
'''write_json will (optionally,pretty print) a json object to file
Parameters
==========
json_obj: the dict to print to json
filename: the output file to write to
pretty_print: if True, will use nicer formatting
'''
with open(filename, mode) as filey:
if print_pretty:
filey.writelines(print_json(json_obj))
else:
filey.writelines(json.dumps(json_obj))
return filename | write_json will (optionally,pretty print) a json object to file
Parameters
==========
json_obj: the dict to print to json
filename: the output file to write to
pretty_print: if True, will use nicer formatting | Below is the the instruction that describes the task:
### Input:
write_json will (optionally,pretty print) a json object to file
Parameters
==========
json_obj: the dict to print to json
filename: the output file to write to
pretty_print: if True, will use nicer formatting
### Response:
def write_json(json_obj, filename, mode="w", print_pretty=True):
'''write_json will (optionally,pretty print) a json object to file
Parameters
==========
json_obj: the dict to print to json
filename: the output file to write to
pretty_print: if True, will use nicer formatting
'''
with open(filename, mode) as filey:
if print_pretty:
filey.writelines(print_json(json_obj))
else:
filey.writelines(json.dumps(json_obj))
return filename |
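
A quick hedged usage sketch of write_json; print_json is assumed to be the module's own pretty-printer referenced in the body, and the file names are illustrative.

record = {"name": "example", "values": [1, 2, 3]}

write_json(record, "record.json")                                 # pretty-printed
write_json(record, "records.log", mode="a", print_pretty=False)   # compact, appended
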
def make_mujoco_env(env_id, seed, reward_scale=1.0):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
rank = MPI.COMM_WORLD.Get_rank()
myseed = seed + 1000 * rank if seed is not None else None
set_global_seeds(myseed)
env = gym.make(env_id)
logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))
env = Monitor(env, logger_path, allow_early_resets=True)
env.seed(seed)
if reward_scale != 1.0:
from baselines.common.retro_wrappers import RewardScaler
env = RewardScaler(env, reward_scale)
return env | Create a wrapped, monitored gym.Env for MuJoCo. | Below is the the instruction that describes the task:
### Input:
Create a wrapped, monitored gym.Env for MuJoCo.
### Response:
def make_mujoco_env(env_id, seed, reward_scale=1.0):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
rank = MPI.COMM_WORLD.Get_rank()
myseed = seed + 1000 * rank if seed is not None else None
set_global_seeds(myseed)
env = gym.make(env_id)
logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))
env = Monitor(env, logger_path, allow_early_resets=True)
env.seed(seed)
if reward_scale != 1.0:
from baselines.common.retro_wrappers import RewardScaler
env = RewardScaler(env, reward_scale)
return env |
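
A hedged usage sketch; it assumes MuJoCo, mpi4py, and OpenAI Baselines are installed and that the logger has been configured so Monitor has a directory to write episode stats to. The environment id is illustrative.

from baselines import logger

logger.configure()  # gives Monitor a place to write episode stats
env = make_mujoco_env("HalfCheetah-v2", seed=0, reward_scale=0.1)

obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
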
def is_finished(self):
"""Returns whether all trials have finished running."""
if self._total_time > self._global_time_limit:
logger.warning("Exceeded global time limit {} / {}".format(
self._total_time, self._global_time_limit))
return True
trials_done = all(trial.is_finished() for trial in self._trials)
return trials_done and self._search_alg.is_finished() | Returns whether all trials have finished running. | Below is the the instruction that describes the task:
### Input:
Returns whether all trials have finished running.
### Response:
def is_finished(self):
"""Returns whether all trials have finished running."""
if self._total_time > self._global_time_limit:
logger.warning("Exceeded global time limit {} / {}".format(
self._total_time, self._global_time_limit))
return True
trials_done = all(trial.is_finished() for trial in self._trials)
return trials_done and self._search_alg.is_finished() |