code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k)
---|---|---
def find(self, key):
'''Get a shared variable for a parameter by name.
Parameters
----------
key : str or int
The name of the parameter to look up, or the index of the parameter
in our parameter list. These are both dependent on the
implementation of the layer.
Returns
-------
param : shared variable
A shared variable containing values for the given parameter.
Raises
------
KeyError
If a param with the given name does not exist.
'''
name = self._fmt(str(key))
for i, p in enumerate(self._params):
if key == i or name == p.name:
return p
raise KeyError(key) | Get a shared variable for a parameter by name.
Parameters
----------
key : str or int
The name of the parameter to look up, or the index of the parameter
in our parameter list. These are both dependent on the
implementation of the layer.
Returns
-------
param : shared variable
A shared variable containing values for the given parameter.
Raises
------
KeyError
If a param with the given name does not exist. | Below is the instruction that describes the task:
### Input:
Get a shared variable for a parameter by name.
Parameters
----------
key : str or int
The name of the parameter to look up, or the index of the parameter
in our parameter list. These are both dependent on the
implementation of the layer.
Returns
-------
param : shared variable
A shared variable containing values for the given parameter.
Raises
------
KeyError
If a param with the given name does not exist.
### Response:
def find(self, key):
'''Get a shared variable for a parameter by name.
Parameters
----------
key : str or int
The name of the parameter to look up, or the index of the parameter
in our parameter list. These are both dependent on the
implementation of the layer.
Returns
-------
param : shared variable
A shared variable containing values for the given parameter.
Raises
------
KeyError
If a param with the given name does not exist.
'''
name = self._fmt(str(key))
for i, p in enumerate(self._params):
if key == i or name == p.name:
return p
raise KeyError(key) |
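A minimal, self-contained sketch of the same lookup-by-name-or-index pattern (the container class, parameter objects, and the `_fmt` naming scheme below are stand-ins, not the original implementation):

```python
class ParamStore:
    """Toy parameter container; each entry only needs a .name attribute."""
    def __init__(self, params, layer_name="layer1"):
        self._params = list(params)
        self._layer_name = layer_name

    def _fmt(self, name):
        # assumed naming scheme: "<layer>.<param>"
        return name if "." in name else "%s.%s" % (self._layer_name, name)

    def find(self, key):
        name = self._fmt(str(key))
        for i, p in enumerate(self._params):
            if key == i or name == p.name:
                return p
        raise KeyError(key)


class P:
    def __init__(self, name):
        self.name = name


store = ParamStore([P("layer1.w"), P("layer1.b")])
print(store.find("w").name)  # lookup by short name -> layer1.w
print(store.find(1).name)    # lookup by index      -> layer1.b
```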
def expected_value(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
"""
Determine the joint maximum likelihood estimate
"""
# Use the first feature by default
if feature is None:
feature = self.emission_name[0]
# Will default to marginal pstate if pstate is unknown or None
pstate_idx = self.e[pstate]
if pstate is not None and pstate_prob is not None:
raise Exception('Must provide either pstate or pstate_prob but not both')
if hstate is not None and hstate_prob is not None:
raise Exception('Must provide either hstate or hstate_prob but not both')
# Marginalize pstate using the steady state probas
if pstate_prob is None:
pstate_prob = self.pstate_steadyprob
# Marginalize hstate using the steady state probas
if hstate_prob is None:
hstate_prob = self.steadyprob[pstate_idx]
if pstate is None and hstate is None:
# Marginalize both pstate and hstate
w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(w * expected_lognormal(self.emission[feature]['logsigma'].flatten(),
self.emission[feature]['logmu'].flatten()))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(w * expected_normal(self.emission[feature]['mu'].flatten(),
self.emission[feature]['sigma'].flatten()))
elif hstate is None:
# Marginalize hstate
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(hstate_prob * expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, :],
self.emission[feature]['logmu'][pstate_idx, :]))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(hstate_prob * expected_normal(self.emission[feature]['mu'][pstate_idx, :],
self.emission[feature]['sigma'][pstate_idx, :]))
elif pstate is None:
# Marginalize pstate
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(pstate_prob * expected_lognormal(self.emission[feature]['logsigma'][:, hstate],
self.emission[feature]['logmu'][:, hstate]))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(pstate_prob * expected_normal(self.emission[feature]['mu'][:, hstate],
self.emission[feature]['sigma'][:, hstate]))
else:
if self.emission_name_distr[feature] == 'lognormal':
return expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, hstate],
self.emission[feature]['logmu'][pstate_idx, hstate])
elif self.emission_name_distr[feature] == 'normal':
return expected_normal(self.emission[feature]['mu'][pstate_idx, hstate],
self.emission[feature]['sigma'][pstate_idx, hstate])
return | Determine the joint maximum likelihood estimate | Below is the instruction that describes the task:
### Input:
Determine the joint maximum likelihood estimate
### Response:
def expected_value(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
"""
Determine the joint maximum likelihood estimate
"""
# Use the first feature by default
if feature is None:
feature = self.emission_name[0]
# Will default to marginal pstate if pstate is unknown or None
pstate_idx = self.e[pstate]
if pstate is not None and pstate_prob is not None:
raise Exception('Must provide either pstate or pstate_prob but not both')
if hstate is not None and hstate_prob is not None:
raise Exception('Must provide either hstate or hstate_prob but not both')
# Marginalize pstate using the steady state probas
if pstate_prob is None:
pstate_prob = self.pstate_steadyprob
# Marginalize hstate using the steady state probas
if hstate_prob is None:
hstate_prob = self.steadyprob[pstate_idx]
if pstate is None and hstate is None:
# Marginalize both pstate and hstate
w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(w * expected_lognormal(self.emission[feature]['logsigma'].flatten(),
self.emission[feature]['logmu'].flatten()))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(w * expected_normal(self.emission[feature]['mu'].flatten(),
self.emission[feature]['sigma'].flatten()))
elif hstate is None:
# Marginalize hstate
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(hstate_prob * expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, :],
self.emission[feature]['logmu'][pstate_idx, :]))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(hstate_prob * expected_normal(self.emission[feature]['mu'][pstate_idx, :],
self.emission[feature]['sigma'][pstate_idx, :]))
elif pstate is None:
# Marginalize pstate
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(pstate_prob * expected_lognormal(self.emission[feature]['logsigma'][:, hstate],
self.emission[feature]['logmu'][:, hstate]))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(pstate_prob * expected_normal(self.emission[feature]['mu'][:, hstate],
self.emission[feature]['sigma'][:, hstate]))
else:
if self.emission_name_distr[feature] == 'lognormal':
return expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, hstate],
self.emission[feature]['logmu'][pstate_idx, hstate])
elif self.emission_name_distr[feature] == 'normal':
return expected_normal(self.emission[feature]['mu'][pstate_idx, hstate],
self.emission[feature]['sigma'][pstate_idx, hstate])
return |
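The helpers `expected_normal` and `expected_lognormal` are defined elsewhere; a hedged sketch of what they plausibly compute, using the standard closed-form means and the same argument order as the calls above, plus the probability-weighted marginalization the method relies on:

```python
import numpy as np

def expected_normal(mu, sigma):
    # mean of a normal distribution is mu (sigma is unused for the mean)
    return np.asarray(mu, dtype=float)

def expected_lognormal(logsigma, logmu):
    # mean of a lognormal: exp(mu + sigma**2 / 2); the (logsigma, logmu) argument
    # order matches the calls in expected_value above
    return np.exp(np.asarray(logmu, dtype=float) + np.asarray(logsigma, dtype=float) ** 2 / 2.0)

# Marginalizing over hidden states is a probability-weighted sum of per-state means.
hstate_prob = np.array([0.25, 0.75])
mu = np.array([1.0, 3.0])
sigma = np.array([0.5, 0.5])
print(np.sum(hstate_prob * expected_normal(mu, sigma)))  # -> 2.5
```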
def _pys2col_widths(self, line):
"""Updates col_widths in code_array"""
# Split with maxsplit 3
split_line = self._split_tidy(line)
key = col, tab = self._get_key(*split_line[:2])
width = float(split_line[2])
shape = self.code_array.shape
try:
if col < shape[1] and tab < shape[2]:
self.code_array.col_widths[key] = width
except ValueError:
pass | Updates col_widths in code_array | Below is the instruction that describes the task:
### Input:
Updates col_widths in code_array
### Response:
def _pys2col_widths(self, line):
"""Updates col_widths in code_array"""
# Split with maxsplit 3
split_line = self._split_tidy(line)
key = col, tab = self._get_key(*split_line[:2])
width = float(split_line[2])
shape = self.code_array.shape
try:
if col < shape[1] and tab < shape[2]:
self.code_array.col_widths[key] = width
except ValueError:
pass |
def _do_to_py_ast(ctx: GeneratorContext, node: Do) -> GeneratedPyAST:
"""Return a Python AST Node for a `do` expression."""
assert node.op == NodeOp.DO
assert not node.is_body
body_ast = GeneratedPyAST.reduce(
*map(partial(gen_py_ast, ctx), chain(node.statements, [node.ret]))
)
fn_body_ast: List[ast.AST] = []
do_result_name = genname(_DO_PREFIX)
fn_body_ast.extend(map(statementize, body_ast.dependencies))
fn_body_ast.append(
ast.Assign(
targets=[ast.Name(id=do_result_name, ctx=ast.Store())], value=body_ast.node
)
)
return GeneratedPyAST(
node=ast.Name(id=do_result_name, ctx=ast.Load()), dependencies=fn_body_ast
) | Return a Python AST Node for a `do` expression. | Below is the instruction that describes the task:
### Input:
Return a Python AST Node for a `do` expression.
### Response:
def _do_to_py_ast(ctx: GeneratorContext, node: Do) -> GeneratedPyAST:
"""Return a Python AST Node for a `do` expression."""
assert node.op == NodeOp.DO
assert not node.is_body
body_ast = GeneratedPyAST.reduce(
*map(partial(gen_py_ast, ctx), chain(node.statements, [node.ret]))
)
fn_body_ast: List[ast.AST] = []
do_result_name = genname(_DO_PREFIX)
fn_body_ast.extend(map(statementize, body_ast.dependencies))
fn_body_ast.append(
ast.Assign(
targets=[ast.Name(id=do_result_name, ctx=ast.Store())], value=body_ast.node
)
)
return GeneratedPyAST(
node=ast.Name(id=do_result_name, ctx=ast.Load()), dependencies=fn_body_ast
) |
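To see why the generated `Assign`/`Name` pair works, here is a small stdlib `ast` example, independent of the generator above: the statement dependencies run first, then the value of the final expression is captured by assigning it to a generated name that later code loads.

```python
import ast

do_result_name = "do_result_1"  # stand-in for genname(_DO_PREFIX)

body = [
    # a statement dependency (side effect only)
    ast.Expr(value=ast.Call(func=ast.Name(id="print", ctx=ast.Load()),
                            args=[ast.Constant(value="side effect")], keywords=[])),
    # the capturing assignment produced for the final expression
    ast.Assign(targets=[ast.Name(id=do_result_name, ctx=ast.Store())],
               value=ast.Constant(value=42)),
]
module = ast.fix_missing_locations(ast.Module(body=body, type_ignores=[]))

ns = {}
exec(compile(module, "<do>", "exec"), ns)
print(ns[do_result_name])  # -> 42, read back through the generated name
```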
def filter(self, twig=None, check_visible=True, check_default=True, **kwargs):
"""
Filter the ParameterSet based on the meta-tags of the Parameters
and return another ParameterSet.
Because another ParameterSet is returned, these filter calls are
chainable.
>>> b.filter(context='component').filter(component='starA')
:parameter str twig: (optional) the search twig - essentially a single
string with any delimiter (ie '@') that will be parsed
into any of the meta-tags. Example: instead of
b.filter(context='component', component='starA'), you
could do b.filter('starA@component').
:parameter bool check_visible: whether to hide invisible
parameters. These are usually parameters that do not
play a role unless the value of another parameter meets
some condition.
:parameter bool check_default: whether to exclude parameters which
have a _default tag (these are parameters which solely exist
to provide defaults for when new parameters or datasets are
added and the parameter needs to be copied appropriately).
Defaults to True.
:parameter **kwargs: meta-tags to search (ie. 'context', 'component',
'model', etc). See :func:`meta` for all possible options.
:return: the resulting :class:`ParameterSet`
"""
kwargs['check_visible'] = check_visible
kwargs['check_default'] = check_default
kwargs['force_ps'] = True
return self.filter_or_get(twig=twig, **kwargs) | Filter the ParameterSet based on the meta-tags of the Parameters
and return another ParameterSet.
Because another ParameterSet is returned, these filter calls are
chainable.
>>> b.filter(context='component').filter(component='starA')
:parameter str twig: (optional) the search twig - essentially a single
string with any delimiter (ie '@') that will be parsed
into any of the meta-tags. Example: instead of
b.filter(context='component', component='starA'), you
could do b.filter('starA@component').
:parameter bool check_visible: whether to hide invisible
parameters. These are usually parameters that do not
play a role unless the value of another parameter meets
some condition.
:parameter bool check_default: whether to exclude parameters which
have a _default tag (these are parameters which solely exist
to provide defaults for when new parameters or datasets are
added and the parameter needs to be copied appropriately).
Defaults to True.
:parameter **kwargs: meta-tags to search (ie. 'context', 'component',
'model', etc). See :func:`meta` for all possible options.
:return: the resulting :class:`ParameterSet` | Below is the instruction that describes the task:
### Input:
Filter the ParameterSet based on the meta-tags of the Parameters
and return another ParameterSet.
Because another ParameterSet is returned, these filter calls are
chainable.
>>> b.filter(context='component').filter(component='starA')
:parameter str twig: (optional) the search twig - essentially a single
string with any delimiter (ie '@') that will be parsed
into any of the meta-tags. Example: instead of
b.filter(context='component', component='starA'), you
could do b.filter('starA@component').
:parameter bool check_visible: whether to hide invisible
parameters. These are usually parameters that do not
play a role unless the value of another parameter meets
some condition.
:parameter bool check_default: whether to exclude parameters which
have a _default tag (these are parameters which solely exist
to provide defaults for when new parameters or datasets are
added and the parameter needs to be copied appropriately).
Defaults to True.
:parameter **kwargs: meta-tags to search (ie. 'context', 'component',
'model', etc). See :func:`meta` for all possible options.
:return: the resulting :class:`ParameterSet`
### Response:
def filter(self, twig=None, check_visible=True, check_default=True, **kwargs):
"""
Filter the ParameterSet based on the meta-tags of the Parameters
and return another ParameterSet.
Because another ParameterSet is returned, these filter calls are
chainable.
>>> b.filter(context='component').filter(component='starA')
:parameter str twig: (optional) the search twig - essentially a single
string with any delimiter (ie '@') that will be parsed
into any of the meta-tags. Example: instead of
b.filter(context='component', component='starA'), you
could do b.filter('starA@component').
:parameter bool check_visible: whether to hide invisible
parameters. These are usually parameters that do not
play a role unless the value of another parameter meets
some condition.
:parameter bool check_default: whether to exclude parameters which
have a _default tag (these are parameters which solely exist
to provide defaults for when new parameters or datasets are
added and the parameter needs to be copied appropriately).
Defaults to True.
:parameter **kwargs: meta-tags to search (ie. 'context', 'component',
'model', etc). See :func:`meta` for all possible options.
:return: the resulting :class:`ParameterSet`
"""
kwargs['check_visible'] = check_visible
kwargs['check_default'] = check_default
kwargs['force_ps'] = True
return self.filter_or_get(twig=twig, **kwargs) |
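The key design point is that `filter` returns another ParameterSet, which is what makes the calls chainable. A toy, self-contained illustration of that pattern (not PHOEBE's actual implementation, which also handles twigs, visibility checks, and defaults):

```python
class TagSet:
    def __init__(self, items):
        self.items = list(items)

    def filter(self, **tags):
        keep = [it for it in self.items
                if all(it.get(k) == v for k, v in tags.items())]
        return TagSet(keep)  # returning the same type is what makes chaining work


ps = TagSet([
    {'context': 'component', 'component': 'starA', 'qualifier': 'teff'},
    {'context': 'component', 'component': 'starB', 'qualifier': 'teff'},
])
print(ps.filter(context='component').filter(component='starA').items)
# -> [{'context': 'component', 'component': 'starA', 'qualifier': 'teff'}]
```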
def histogram(x,
edges,
axis=None,
extend_lower_interval=False,
extend_upper_interval=False,
dtype=None,
name=None):
"""Count how often `x` falls in intervals defined by `edges`.
Given `edges = [c0, ..., cK]`, defining intervals
`I0 = [c0, c1)`, `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`,
This function counts how often `x` falls into each interval.
Values of `x` outside of the intervals cause errors. Consider using
`extend_lower_interval`, `extend_upper_interval` to deal with this.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not
`None`, must have statically known number of dimensions. The
`axis` kwarg determines which dimensions index iid samples.
Other dimensions of `x` index "events" for which we will compute different
histograms.
edges: `Tensor` of same `dtype` as `x`. The first dimension indexes edges
of intervals. Must either be `1-D` or have `edges.shape[1:]` the same
as the dimensions of `x` excluding `axis`.
If `rank(edges) > 1`, `edges[k]` designates a shape `edges.shape[1:]`
`Tensor` of interval edges for the corresponding dimensions of `x`.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant
values. The axis in `x` that index iid samples.
`Default value:` `None` (treat every dimension as sample dimension).
extend_lower_interval: Python `bool`. If `True`, extend the lowest
interval `I0` to `(-inf, c1]`.
extend_upper_interval: Python `bool`. If `True`, extend the upper
interval `I_{K-1}` to `[c_{K-1}, +inf)`.
dtype: The output type (`int32` or `int64`). `Default value:` `x.dtype`.
name: A Python string name to prepend to created ops.
`Default value:` 'histogram'
Returns:
counts: `Tensor` of type `dtype` and, with
`~axis = [i for i in range(arr.ndim) if i not in axis]`,
`counts.shape = [edges.shape[0]] + x.shape[~axis]`.
With `I` a multi-index into `~axis`, `counts[k][I]` is the number of times
event(s) fell into the `kth` interval of `edges`.
#### Examples
```python
# x.shape = [1000, 2]
# x[:, 0] ~ Uniform(0, 1), x[:, 1] ~ Uniform(1, 2).
x = tf.stack([tf.random_uniform([1000]), 1 + tf.random_uniform([1000])],
axis=-1)
# edges ==> bins [0, 0.5), [0.5, 1.0), [1.0, 1.5), [1.5, 2.0].
edges = [0., 0.5, 1.0, 1.5, 2.0]
tfp.stats.histogram(x, edges)
==> approximately [500, 500, 500, 500]
tfp.stats.histogram(x, edges, axis=0)
==> approximately [[500, 500, 0, 0], [0, 0, 500, 500]]
```
"""
with tf.compat.v1.name_scope(name, 'histogram', values=[x, edges, axis]):
# Tensor conversions.
in_dtype = dtype_util.common_dtype([x, edges], preferred_dtype=tf.float32)
x = tf.convert_to_tensor(value=x, name='x', dtype=in_dtype)
edges = tf.convert_to_tensor(value=edges, name='edges', dtype=in_dtype)
# Move dims in axis to the left end as one flattened dim.
# After this, x.shape = [n_samples] + E.
if axis is None:
x = tf.reshape(x, shape=[-1])
else:
x_ndims = _get_static_ndims(
x, expect_static=True, expect_ndims_at_least=1)
axis = _make_static_axis_non_negative_list(axis, x_ndims)
if not axis:
raise ValueError('`axis` cannot be empty. Found: {}'.format(axis))
x = _move_dims_to_flat_end(x, axis, x_ndims, right_end=False)
# bins.shape = x.shape = [n_samples] + E,
# and bins[i] is a shape E Tensor of the bins that sample `i` fell into.
# E is the "event shape", which is [] if axis is None.
bins = find_bins(
x,
edges=edges,
# If not extending intervals, then values outside the edges will return
# -1, which gives an error when fed to bincount.
extend_lower_interval=extend_lower_interval,
extend_upper_interval=extend_upper_interval,
dtype=tf.int32)
# TODO(b/124015136) Use standard tf.math.bincount once it supports `axis`.
counts = count_integers(
bins,
# Ensure we get correct output, even if x did not fall into every bin
minlength=tf.shape(input=edges)[0] - 1,
maxlength=tf.shape(input=edges)[0] - 1,
axis=0,
dtype=dtype or in_dtype)
n_edges = tf.compat.dimension_value(edges.shape[0])
if n_edges is not None:
counts.set_shape(
tf.TensorShape([n_edges - 1]).concatenate(counts.shape[1:]))
return counts | Count how often `x` falls in intervals defined by `edges`.
Given `edges = [c0, ..., cK]`, defining intervals
`I0 = [c0, c1)`, `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`,
This function counts how often `x` falls into each interval.
Values of `x` outside of the intervals cause errors. Consider using
`extend_lower_interval`, `extend_upper_interval` to deal with this.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not
`None`, must have statically known number of dimensions. The
`axis` kwarg determines which dimensions index iid samples.
Other dimensions of `x` index "events" for which we will compute different
histograms.
edges: `Tensor` of same `dtype` as `x`. The first dimension indexes edges
of intervals. Must either be `1-D` or have `edges.shape[1:]` the same
as the dimensions of `x` excluding `axis`.
If `rank(edges) > 1`, `edges[k]` designates a shape `edges.shape[1:]`
`Tensor` of interval edges for the corresponding dimensions of `x`.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant
values. The axis in `x` that index iid samples.
`Default value:` `None` (treat every dimension as sample dimension).
extend_lower_interval: Python `bool`. If `True`, extend the lowest
interval `I0` to `(-inf, c1]`.
extend_upper_interval: Python `bool`. If `True`, extend the upper
interval `I_{K-1}` to `[c_{K-1}, +inf)`.
dtype: The output type (`int32` or `int64`). `Default value:` `x.dtype`.
name: A Python string name to prepend to created ops.
`Default value:` 'histogram'
Returns:
counts: `Tensor` of type `dtype` and, with
`~axis = [i for i in range(arr.ndim) if i not in axis]`,
`counts.shape = [edges.shape[0]] + x.shape[~axis]`.
With `I` a multi-index into `~axis`, `counts[k][I]` is the number of times
event(s) fell into the `kth` interval of `edges`.
#### Examples
```python
# x.shape = [1000, 2]
# x[:, 0] ~ Uniform(0, 1), x[:, 1] ~ Uniform(1, 2).
x = tf.stack([tf.random_uniform([1000]), 1 + tf.random_uniform([1000])],
axis=-1)
# edges ==> bins [0, 0.5), [0.5, 1.0), [1.0, 1.5), [1.5, 2.0].
edges = [0., 0.5, 1.0, 1.5, 2.0]
tfp.stats.histogram(x, edges)
==> approximately [500, 500, 500, 500]
tfp.stats.histogram(x, edges, axis=0)
==> approximately [[500, 500, 0, 0], [0, 0, 500, 500]]
``` | Below is the instruction that describes the task:
### Input:
Count how often `x` falls in intervals defined by `edges`.
Given `edges = [c0, ..., cK]`, defining intervals
`I0 = [c0, c1)`, `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`,
This function counts how often `x` falls into each interval.
Values of `x` outside of the intervals cause errors. Consider using
`extend_lower_interval`, `extend_upper_interval` to deal with this.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not
`None`, must have statically known number of dimensions. The
`axis` kwarg determines which dimensions index iid samples.
Other dimensions of `x` index "events" for which we will compute different
histograms.
edges: `Tensor` of same `dtype` as `x`. The first dimension indexes edges
of intervals. Must either be `1-D` or have `edges.shape[1:]` the same
as the dimensions of `x` excluding `axis`.
If `rank(edges) > 1`, `edges[k]` designates a shape `edges.shape[1:]`
`Tensor` of interval edges for the corresponding dimensions of `x`.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant
values. The axis in `x` that index iid samples.
`Default value:` `None` (treat every dimension as sample dimension).
extend_lower_interval: Python `bool`. If `True`, extend the lowest
interval `I0` to `(-inf, c1]`.
extend_upper_interval: Python `bool`. If `True`, extend the upper
interval `I_{K-1}` to `[c_{K-1}, +inf)`.
dtype: The output type (`int32` or `int64`). `Default value:` `x.dtype`.
name: A Python string name to prepend to created ops.
`Default value:` 'histogram'
Returns:
counts: `Tensor` of type `dtype` and, with
`~axis = [i for i in range(arr.ndim) if i not in axis]`,
`counts.shape = [edges.shape[0]] + x.shape[~axis]`.
With `I` a multi-index into `~axis`, `counts[k][I]` is the number of times
event(s) fell into the `kth` interval of `edges`.
#### Examples
```python
# x.shape = [1000, 2]
# x[:, 0] ~ Uniform(0, 1), x[:, 1] ~ Uniform(1, 2).
x = tf.stack([tf.random_uniform([1000]), 1 + tf.random_uniform([1000])],
axis=-1)
# edges ==> bins [0, 0.5), [0.5, 1.0), [1.0, 1.5), [1.5, 2.0].
edges = [0., 0.5, 1.0, 1.5, 2.0]
tfp.stats.histogram(x, edges)
==> approximately [500, 500, 500, 500]
tfp.stats.histogram(x, edges, axis=0)
==> approximately [[500, 500, 0, 0], [0, 0, 500, 500]]
```
### Response:
def histogram(x,
edges,
axis=None,
extend_lower_interval=False,
extend_upper_interval=False,
dtype=None,
name=None):
"""Count how often `x` falls in intervals defined by `edges`.
Given `edges = [c0, ..., cK]`, defining intervals
`I0 = [c0, c1)`, `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`,
This function counts how often `x` falls into each interval.
Values of `x` outside of the intervals cause errors. Consider using
`extend_lower_interval`, `extend_upper_interval` to deal with this.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not
`None`, must have statically known number of dimensions. The
`axis` kwarg determines which dimensions index iid samples.
Other dimensions of `x` index "events" for which we will compute different
histograms.
edges: `Tensor` of same `dtype` as `x`. The first dimension indexes edges
of intervals. Must either be `1-D` or have `edges.shape[1:]` the same
as the dimensions of `x` excluding `axis`.
If `rank(edges) > 1`, `edges[k]` designates a shape `edges.shape[1:]`
`Tensor` of interval edges for the corresponding dimensions of `x`.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant
values. The axis in `x` that index iid samples.
`Default value:` `None` (treat every dimension as sample dimension).
extend_lower_interval: Python `bool`. If `True`, extend the lowest
interval `I0` to `(-inf, c1]`.
extend_upper_interval: Python `bool`. If `True`, extend the upper
interval `I_{K-1}` to `[c_{K-1}, +inf)`.
dtype: The output type (`int32` or `int64`). `Default value:` `x.dtype`.
name: A Python string name to prepend to created ops.
`Default value:` 'histogram'
Returns:
counts: `Tensor` of type `dtype` and, with
`~axis = [i for i in range(arr.ndim) if i not in axis]`,
`counts.shape = [edges.shape[0]] + x.shape[~axis]`.
With `I` a multi-index into `~axis`, `counts[k][I]` is the number of times
event(s) fell into the `kth` interval of `edges`.
#### Examples
```python
# x.shape = [1000, 2]
# x[:, 0] ~ Uniform(0, 1), x[:, 1] ~ Uniform(1, 2).
x = tf.stack([tf.random_uniform([1000]), 1 + tf.random_uniform([1000])],
axis=-1)
# edges ==> bins [0, 0.5), [0.5, 1.0), [1.0, 1.5), [1.5, 2.0].
edges = [0., 0.5, 1.0, 1.5, 2.0]
tfp.stats.histogram(x, edges)
==> approximately [500, 500, 500, 500]
tfp.stats.histogram(x, edges, axis=0)
==> approximately [[500, 500, 0, 0], [0, 0, 500, 500]]
```
"""
with tf.compat.v1.name_scope(name, 'histogram', values=[x, edges, axis]):
# Tensor conversions.
in_dtype = dtype_util.common_dtype([x, edges], preferred_dtype=tf.float32)
x = tf.convert_to_tensor(value=x, name='x', dtype=in_dtype)
edges = tf.convert_to_tensor(value=edges, name='edges', dtype=in_dtype)
# Move dims in axis to the left end as one flattened dim.
# After this, x.shape = [n_samples] + E.
if axis is None:
x = tf.reshape(x, shape=[-1])
else:
x_ndims = _get_static_ndims(
x, expect_static=True, expect_ndims_at_least=1)
axis = _make_static_axis_non_negative_list(axis, x_ndims)
if not axis:
raise ValueError('`axis` cannot be empty. Found: {}'.format(axis))
x = _move_dims_to_flat_end(x, axis, x_ndims, right_end=False)
# bins.shape = x.shape = [n_samples] + E,
# and bins[i] is a shape E Tensor of the bins that sample `i` fell into.
# E is the "event shape", which is [] if axis is None.
bins = find_bins(
x,
edges=edges,
# If not extending intervals, then values outside the edges will return
# -1, which gives an error when fed to bincount.
extend_lower_interval=extend_lower_interval,
extend_upper_interval=extend_upper_interval,
dtype=tf.int32)
# TODO(b/124015136) Use standard tf.math.bincount once it supports `axis`.
counts = count_integers(
bins,
# Ensure we get correct output, even if x did not fall into every bin
minlength=tf.shape(input=edges)[0] - 1,
maxlength=tf.shape(input=edges)[0] - 1,
axis=0,
dtype=dtype or in_dtype)
n_edges = tf.compat.dimension_value(edges.shape[0])
if n_edges is not None:
counts.set_shape(
tf.TensorShape([n_edges - 1]).concatenate(counts.shape[1:]))
return counts |
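For the fully pooled case (`axis=None`), the counts should roughly agree with a plain NumPy histogram over the flattened data; a small cross-check that assumes only NumPy:

```python
import numpy as np

rng = np.random.default_rng(0)
x = np.stack([rng.uniform(0.0, 1.0, 1000), 1.0 + rng.uniform(0.0, 1.0, 1000)], axis=-1)
edges = [0.0, 0.5, 1.0, 1.5, 2.0]

# axis=None pools every dimension, so it matches a histogram of the flattened array.
counts, _ = np.histogram(x.reshape(-1), bins=edges)
print(counts)  # approximately [500 500 500 500]
```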
def get_all_versions(self, headers=None, **params):
"""
A lower-level, version-aware method for listing contents of a bucket.
This closely models the actual S3 API and requires you to manually
handle the paging of results. For a higher-level method
that handles the details of paging for you, you can use the list method.
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
:type key_marker: string
:param key_marker: The "marker" of where you are in the result set
with respect to keys.
:type version_id_marker: string
:param version_id_marker: The "marker" of where you are in the result
set with respect to version-id's.
:type delimiter: string
:param delimiter: If this optional, Unicode string parameter
is included with your request, then keys that
contain the same string between the prefix and
the first occurrence of the delimiter will be
rolled up into a single result element in the
CommonPrefixes collection. These rolled-up keys
are not returned elsewhere in the response.
:rtype: ResultSet
:return: The result from S3 listing the keys requested
"""
return self._get_all([('Version', self.key_class),
('CommonPrefixes', Prefix),
('DeleteMarker', DeleteMarker)],
'versions', headers, **params) | A lower-level, version-aware method for listing contents of a bucket.
This closely models the actual S3 API and requires you to manually
handle the paging of results. For a higher-level method
that handles the details of paging for you, you can use the list method.
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
:type key_marker: string
:param key_marker: The "marker" of where you are in the result set
with respect to keys.
:type version_id_marker: string
:param version_id_marker: The "marker" of where you are in the result
set with respect to version-id's.
:type delimiter: string
:param delimiter: If this optional, Unicode string parameter
is included with your request, then keys that
contain the same string between the prefix and
the first occurrence of the delimiter will be
rolled up into a single result element in the
CommonPrefixes collection. These rolled-up keys
are not returned elsewhere in the response.
:rtype: ResultSet
:return: The result from S3 listing the keys requested | Below is the instruction that describes the task:
### Input:
A lower-level, version-aware method for listing contents of a bucket.
This closely models the actual S3 API and requires you to manually
handle the paging of results. For a higher-level method
that handles the details of paging for you, you can use the list method.
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
:type key_marker: string
:param key_marker: The "marker" of where you are in the result set
with respect to keys.
:type version_id_marker: string
:param version_id_marker: The "marker" of where you are in the result
set with respect to version-id's.
:type delimiter: string
:param delimiter: If this optional, Unicode string parameter
is included with your request, then keys that
contain the same string between the prefix and
the first occurrence of the delimiter will be
rolled up into a single result element in the
CommonPrefixes collection. These rolled-up keys
are not returned elsewhere in the response.
:rtype: ResultSet
:return: The result from S3 listing the keys requested
### Response:
def get_all_versions(self, headers=None, **params):
"""
A lower-level, version-aware method for listing contents of a bucket.
This closely models the actual S3 API and requires you to manually
handle the paging of results. For a higher-level method
that handles the details of paging for you, you can use the list method.
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
:type key_marker: string
:param key_marker: The "marker" of where you are in the result set
with respect to keys.
:type version_id_marker: string
:param version_id_marker: The "marker" of where you are in the result
set with respect to version-id's.
:type delimiter: string
:param delimiter: If this optional, Unicode string parameter
is included with your request, then keys that
contain the same string between the prefix and
the first occurrence of the delimiter will be
rolled up into a single result element in the
CommonPrefixes collection. These rolled-up keys
are not returned elsewhere in the response.
:rtype: ResultSet
:return: The result from S3 listing the keys requested
"""
return self._get_all([('Version', self.key_class),
('CommonPrefixes', Prefix),
('DeleteMarker', DeleteMarker)],
'versions', headers, **params) |
def strip_dots(value):
"""
Remove dots (if any) that mark calculated aesthetics
Parameters
----------
value : object
Aesthetic value. In most cases this will be a string
but other types will pass through unmodified.
Return
------
out : object
Aesthetic value with the dots removed.
"""
with suppress(TypeError):
value = DOTS_RE.sub(r'\1', value)
return value | Remove dots (if any) that mark calculated aesthetics
Parameters
----------
value : object
Aesthetic value. In most cases this will be a string
but other types will pass through unmodified.
Return
------
out : object
Aesthetic value with the dots removed. | Below is the instruction that describes the task:
### Input:
Remove dots (if any) that mark calculated aesthetics
Parameters
----------
value : object
Aesthetic value. In most cases this will be a string
but other types will pass through unmodified.
Return
------
out : object
Aesthetic value with the dots removed.
### Response:
def strip_dots(value):
"""
Remove dots (if any) that mark calculated aesthetics
Parameters
----------
value : object
Aesthetic value. In most cases this will be a string
but other types will pass through unmodified.
Return
------
out : object
Aesthetic value with the dots removed.
"""
with suppress(TypeError):
value = DOTS_RE.sub(r'\1', value)
return value |
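`DOTS_RE` is defined elsewhere in the module; a self-contained sketch using an assumed pattern for calculated aesthetics such as `'..count..'` (the real regex may differ):

```python
import re
from contextlib import suppress

DOTS_RE = re.compile(r'\.\.([a-zA-Z._]+)\.\.')  # assumed pattern, not the package's own

def strip_dots(value):
    with suppress(TypeError):
        value = DOTS_RE.sub(r'\1', value)
    return value

print(strip_dots('..count..'))  # -> count
print(strip_dots(3.14))         # non-strings pass through unmodified -> 3.14
```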
def set_p0(self, samples_file=None, prior=None):
"""Sets the initial position of the walkers.
Parameters
----------
samples_file : InferenceFile, optional
If provided, use the last iteration in the given file for the
starting positions.
prior : JointDistribution, optional
Use the given prior to set the initial positions rather than
``model``'s prior.
Returns
-------
p0 : dict
A dictionary mapping sampling params to the starting positions.
"""
# if samples are given then use those as initial positions
if samples_file is not None:
with self.io(samples_file, 'r') as fp:
samples = fp.read_samples(self.variable_params,
iteration=-1, flatten=False)
# remove the (length 1) niterations dimension
samples = samples[..., 0]
# make sure we have the same shape
assert samples.shape == self.base_shape, (
"samples in file {} have shape {}, but I have shape {}".
format(samples_file, samples.shape, self.base_shape))
# transform to sampling parameter space
if self.model.sampling_transforms is not None:
samples = self.model.sampling_transforms.apply(samples)
# draw random samples if samples are not provided
else:
nsamples = numpy.prod(self.base_shape)
samples = self.model.prior_rvs(size=nsamples, prior=prior).reshape(
self.base_shape)
# store as ND array with shape [base_shape] x nparams
ndim = len(self.variable_params)
p0 = numpy.ones(list(self.base_shape)+[ndim])
for i, param in enumerate(self.sampling_params):
p0[..., i] = samples[param]
self._p0 = p0
return self.p0 | Sets the initial position of the walkers.
Parameters
----------
samples_file : InferenceFile, optional
If provided, use the last iteration in the given file for the
starting positions.
prior : JointDistribution, optional
Use the given prior to set the initial positions rather than
``model``'s prior.
Returns
-------
p0 : dict
A dictionary mapping sampling params to the starting positions. | Below is the instruction that describes the task:
### Input:
Sets the initial position of the walkers.
Parameters
----------
samples_file : InferenceFile, optional
If provided, use the last iteration in the given file for the
starting positions.
prior : JointDistribution, optional
Use the given prior to set the initial positions rather than
``model``'s prior.
Returns
-------
p0 : dict
A dictionary mapping sampling params to the starting positions.
### Response:
def set_p0(self, samples_file=None, prior=None):
"""Sets the initial position of the walkers.
Parameters
----------
samples_file : InferenceFile, optional
If provided, use the last iteration in the given file for the
starting positions.
prior : JointDistribution, optional
Use the given prior to set the initial positions rather than
``model``'s prior.
Returns
-------
p0 : dict
A dictionary mapping sampling params to the starting positions.
"""
# if samples are given then use those as initial positions
if samples_file is not None:
with self.io(samples_file, 'r') as fp:
samples = fp.read_samples(self.variable_params,
iteration=-1, flatten=False)
# remove the (length 1) niterations dimension
samples = samples[..., 0]
# make sure we have the same shape
assert samples.shape == self.base_shape, (
"samples in file {} have shape {}, but I have shape {}".
format(samples_file, samples.shape, self.base_shape))
# transform to sampling parameter space
if self.model.sampling_transforms is not None:
samples = self.model.sampling_transforms.apply(samples)
# draw random samples if samples are not provided
else:
nsamples = numpy.prod(self.base_shape)
samples = self.model.prior_rvs(size=nsamples, prior=prior).reshape(
self.base_shape)
# store as ND array with shape [base_shape] x nparams
ndim = len(self.variable_params)
p0 = numpy.ones(list(self.base_shape)+[ndim])
for i, param in enumerate(self.sampling_params):
p0[..., i] = samples[param]
self._p0 = p0
return self.p0 |
def toggle(self, event):
"""Toggles state to next bitmap"""
if self.state < len(self.bitmap_list) - 1:
self.state += 1
else:
self.state = 0
self.SetBitmapLabel(self.bitmap_list[self.state])
try:
event.Skip()
except AttributeError:
pass
"""For compatibility with toggle buttons"""
setattr(self, "GetToolState", lambda x: self.state) | Toggles state to next bitmap | Below is the instruction that describes the task:
### Input:
Toggles state to next bitmap
### Response:
def toggle(self, event):
"""Toggles state to next bitmap"""
if self.state < len(self.bitmap_list) - 1:
self.state += 1
else:
self.state = 0
self.SetBitmapLabel(self.bitmap_list[self.state])
try:
event.Skip()
except AttributeError:
pass
"""For compatibility with toggle buttons"""
setattr(self, "GetToolState", lambda x: self.state) |
def main():
"""
Main method
"""
# Options
global args
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--min_word_length", type=int,
help="Minimum length for each word", default=3)
parser.add_argument("-x", "--max_word_length", type=int,
help="Maximum length for each word", default=8)
parser.add_argument("-i", "--max_int_value", type=int,
help="Maximum value for the integer", default=1000)
parser.add_argument("-e", "--number_of_elements", type=int,
help="Number of elements in the password (ie. 4 = 3 words + 1 integer)", default=4)
parser.add_argument("-s", "--no_special_characters",
action='store_true', help="Do not use special characters")
args = parser.parse_args()
# Print a password
print(pw(min_word_length=args.min_word_length,
max_word_length=args.max_word_length,
max_int_value=args.max_int_value,
number_of_elements=args.number_of_elements,
no_special_characters=args.no_special_characters)) | Main method | Below is the instruction that describes the task:
### Input:
Main method
### Response:
def main():
"""
Main method
"""
# Options
global args
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--min_word_length", type=int,
help="Minimum length for each word", default=3)
parser.add_argument("-x", "--max_word_length", type=int,
help="Maximum length for each word", default=8)
parser.add_argument("-i", "--max_int_value", type=int,
help="Maximum value for the integer", default=1000)
parser.add_argument("-e", "--number_of_elements", type=int,
help="Number of elements in the password (ie. 4 = 3 words + 1 integer)", default=4)
parser.add_argument("-s", "--no_special_characters",
action='store_true', help="Do not use special characters")
args = parser.parse_args()
# Print a password
print(pw(min_word_length=args.min_word_length,
max_word_length=args.max_word_length,
max_int_value=args.max_int_value,
number_of_elements=args.number_of_elements,
no_special_characters=args.no_special_characters)) |
def _canonicalize_query(self, query):
"""
Transform the query dictionary to replace e.g. documents with __ref__ fields.
"""
def transform_query(q):
if isinstance(q, dict):
nq = {}
for key,value in q.items():
nq[key] = transform_query(value)
return nq
elif isinstance(q, (list,QuerySet,tuple)):
return [transform_query(x) for x in q]
elif isinstance(q,Document):
collection = self.get_collection_for_obj(q)
ref = "%s:%s" % (collection,q.pk)
return ref
else:
return q
return transform_query(query) | Transform the query dictionary to replace e.g. documents with __ref__ fields. | Below is the instruction that describes the task:
### Input:
Transform the query dictionary to replace e.g. documents with __ref__ fields.
### Response:
def _canonicalize_query(self, query):
"""
Transform the query dictionary to replace e.g. documents with __ref__ fields.
"""
def transform_query(q):
if isinstance(q, dict):
nq = {}
for key,value in q.items():
nq[key] = transform_query(value)
return nq
elif isinstance(q, (list,QuerySet,tuple)):
return [transform_query(x) for x in q]
elif isinstance(q,Document):
collection = self.get_collection_for_obj(q)
ref = "%s:%s" % (collection,q.pk)
return ref
else:
return q
return transform_query(query) |
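A stripped-down, self-contained version of the same recursion with a stub `Document` class (the real method also resolves the collection name via `self.get_collection_for_obj` and handles `QuerySet` instances):

```python
class Document:
    def __init__(self, pk, collection):
        self.pk, self.collection = pk, collection

def transform_query(q):
    if isinstance(q, dict):
        return {k: transform_query(v) for k, v in q.items()}
    if isinstance(q, (list, tuple)):
        return [transform_query(x) for x in q]
    if isinstance(q, Document):
        return "%s:%s" % (q.collection, q.pk)  # replace a document with its __ref__ string
    return q

print(transform_query({'author': Document(7, 'users'), 'tags': {'$in': ['a', 'b']}}))
# -> {'author': 'users:7', 'tags': {'$in': ['a', 'b']}}
```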
def fetch_items(self, path, payload):
"""Return the items from GitLab API using links pagination"""
page = 0 # current page
last_page = None # last page
url_next = urijoin(self.base_url, GitLabClient.PROJECTS, self.owner + '%2F' + self.repository, path)
logger.debug("Get GitLab paginated items from " + url_next)
response = self.fetch(url_next, payload=payload)
items = response.text
page += 1
if 'last' in response.links:
last_url = response.links['last']['url']
last_page = last_url.split('&page=')[1].split('&')[0]
last_page = int(last_page)
logger.debug("Page: %i/%i" % (page, last_page))
while items:
yield items
items = None
if 'next' in response.links:
url_next = response.links['next']['url'] # Loving requests :)
response = self.fetch(url_next, payload=payload)
page += 1
items = response.text
logger.debug("Page: %i/%i" % (page, last_page)) | Return the items from GitLab API using links pagination | Below is the instruction that describes the task:
### Input:
Return the items from GitLab API using links pagination
### Response:
def fetch_items(self, path, payload):
"""Return the items from GitLab API using links pagination"""
page = 0 # current page
last_page = None # last page
url_next = urijoin(self.base_url, GitLabClient.PROJECTS, self.owner + '%2F' + self.repository, path)
logger.debug("Get GitLab paginated items from " + url_next)
response = self.fetch(url_next, payload=payload)
items = response.text
page += 1
if 'last' in response.links:
last_url = response.links['last']['url']
last_page = last_url.split('&page=')[1].split('&')[0]
last_page = int(last_page)
logger.debug("Page: %i/%i" % (page, last_page))
while items:
yield items
items = None
if 'next' in response.links:
url_next = response.links['next']['url'] # Loving requests :)
response = self.fetch(url_next, payload=payload)
page += 1
items = response.text
logger.debug("Page: %i/%i" % (page, last_page)) |
def rdfs_properties(rdf):
"""Perform RDFS subproperty inference.
Add superproperties where subproperties have been used."""
# find out the subproperty mappings
superprops = {} # key: property val: set([superprop1, superprop2..])
for s, o in rdf.subject_objects(RDFS.subPropertyOf):
superprops.setdefault(s, set())
for sp in rdf.transitive_objects(s, RDFS.subPropertyOf):
if sp != s:
superprops[s].add(sp)
# add the superproperty relationships
for p, sps in superprops.items():
logging.debug("setting superproperties: %s -> %s", p, str(sps))
for s, o in rdf.subject_objects(p):
for sp in sps:
rdf.add((s, sp, o)) | Perform RDFS subproperty inference.
Add superproperties where subproperties have been used. | Below is the instruction that describes the task:
### Input:
Perform RDFS subproperty inference.
Add superproperties where subproperties have been used.
### Response:
def rdfs_properties(rdf):
"""Perform RDFS subproperty inference.
Add superproperties where subproperties have been used."""
# find out the subproperty mappings
superprops = {} # key: property val: set([superprop1, superprop2..])
for s, o in rdf.subject_objects(RDFS.subPropertyOf):
superprops.setdefault(s, set())
for sp in rdf.transitive_objects(s, RDFS.subPropertyOf):
if sp != s:
superprops[s].add(sp)
# add the superproperty relationships
for p, sps in superprops.items():
logging.debug("setting superproperties: %s -> %s", p, str(sps))
for s, o in rdf.subject_objects(p):
for sp in sps:
rdf.add((s, sp, o)) |
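A small usage sketch with rdflib (assuming rdflib is installed and `rdfs_properties` is the function above): after inference, a triple stated with a subproperty is also present under its superproperty.

```python
from rdflib import Graph, Namespace
from rdflib.namespace import RDFS

EX = Namespace('http://example.org/')
g = Graph()
g.add((EX.hasMother, RDFS.subPropertyOf, EX.hasParent))
g.add((EX.alice, EX.hasMother, EX.carol))

rdfs_properties(g)
print((EX.alice, EX.hasParent, EX.carol) in g)  # -> True
```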
def is_suspended(order_book_id, count=1):
"""
Determine whether a given stock is suspended for the entire trading day.
:param str order_book_id: the stock's code or symbol; a single stock's order_book_id or symbol may be passed in
:param int count: the number of data points to look back over. Defaults to the most recent data currently available
:return: `bool` when count is 1; `pandas.DataFrame` when count > 1
"""
dt = Environment.get_instance().calendar_dt.date()
order_book_id = assure_stock_order_book_id(order_book_id)
return Environment.get_instance().data_proxy.is_suspended(order_book_id, dt, count) | Determine whether a given stock is suspended for the entire trading day.
:param str order_book_id: the stock's code or symbol; a single stock's order_book_id or symbol may be passed in
:param int count: the number of data points to look back over. Defaults to the most recent data currently available
:return: `bool` when count is 1; `pandas.DataFrame` when count > 1 | Below is the instruction that describes the task:
### Input:
Determine whether a given stock is suspended for the entire trading day.
:param str order_book_id: the stock's code or symbol; a single stock's order_book_id or symbol may be passed in
:param int count: the number of data points to look back over. Defaults to the most recent data currently available
:return: `bool` when count is 1; `pandas.DataFrame` when count > 1
### Response:
def is_suspended(order_book_id, count=1):
"""
Determine whether a given stock is suspended for the entire trading day.
:param str order_book_id: the stock's code or symbol; a single stock's order_book_id or symbol may be passed in
:param int count: the number of data points to look back over. Defaults to the most recent data currently available
:return: `bool` when count is 1; `pandas.DataFrame` when count > 1
"""
dt = Environment.get_instance().calendar_dt.date()
order_book_id = assure_stock_order_book_id(order_book_id)
return Environment.get_instance().data_proxy.is_suspended(order_book_id, dt, count) |
def vertical_line(self,
x: Union[int, float],
y1: Union[int, float],
y2: Union[int, float],
emphasize: bool = False
) -> None:
"""Adds a line from (x, y1) to (x, y2)."""
y1, y2 = sorted([y1, y2])
self.vertical_lines.append(_VerticalLine(x, y1, y2, emphasize)) | Adds a line from (x, y1) to (x, y2). | Below is the instruction that describes the task:
### Input:
Adds a line from (x, y1) to (x, y2).
### Response:
def vertical_line(self,
x: Union[int, float],
y1: Union[int, float],
y2: Union[int, float],
emphasize: bool = False
) -> None:
"""Adds a line from (x, y1) to (x, y2)."""
y1, y2 = sorted([y1, y2])
self.vertical_lines.append(_VerticalLine(x, y1, y2, emphasize)) |
def runNetwork(network, writer):
"""Run the network and write output to writer.
:param network: a Network instance to run
:param writer: a csv.writer instance to write output to
"""
identityRegion = network.regions["identityRegion"]
for i in xrange(_NUM_RECORDS):
# Run the network for a single iteration
network.run(1)
# Write out the record number and encoding
encoding = identityRegion.getOutputData("out")
writer.writerow((i, encoding)) | Run the network and write output to writer.
:param network: a Network instance to run
:param writer: a csv.writer instance to write output to | Below is the instruction that describes the task:
### Input:
Run the network and write output to writer.
:param network: a Network instance to run
:param writer: a csv.writer instance to write output to
### Response:
def runNetwork(network, writer):
"""Run the network and write output to writer.
:param network: a Network instance to run
:param writer: a csv.writer instance to write output to
"""
identityRegion = network.regions["identityRegion"]
for i in xrange(_NUM_RECORDS):
# Run the network for a single iteration
network.run(1)
# Write out the record number and encoding
encoding = identityRegion.getOutputData("out")
writer.writerow((i, encoding)) |
def analyze(self, M_c, T, X_L, X_D, seed, kernel_list=(), n_steps=1, c=(),
r=(),
max_iterations=-1, max_time=-1, do_diagnostics=False,
diagnostics_every_N=1,
ROW_CRP_ALPHA_GRID=(),
COLUMN_CRP_ALPHA_GRID=(),
S_GRID=(), MU_GRID=(),
N_GRID=31,
do_timing=False,
CT_KERNEL=0,
progress=None,
):
"""Evolve the latent state by running MCMC transition kernels.
:param seed: The random seed
:type seed: int
:param M_c: The column metadata
:type M_c: dict
:param T: The data table in mapped representation (all floats, generated
by data_utils.read_data_objects)
:param X_L: the latent variables associated with the latent state
:type X_L: dict
:param X_D: the particular cluster assignments of each row in each view
:type X_D: list of lists
:param kernel_list: names of the MCMC transition kernels to run
:type kernel_list: list of strings
:param n_steps: the number of times to run each MCMC transition kernel
:type n_steps: int
:param c: the (global) column indices to run MCMC transition kernels on
:type c: list of ints
:param r: the (global) row indices to run MCMC transition kernels on
:type r: list of ints
:param max_iterations: the maximum number of times to run each MCMC
transition kernel. Applicable only if max_time != -1.
:type max_iterations: int
:param max_time: the maximum amount of time (seconds) to run MCMC
transition kernels for before stopping to return progress
:type max_time: float
:param progress: a function accepting
(n_steps, max_time, step_idx, elapsed_secs, end=None) where
`n_steps` is the total number of transition steps, `max_time` is the
timeout in seconds, `step_idx` is the number of transitions so far,
`elapsed_secs` is the amount of time so far, and `end=None` is an
optional kwarg for indicating the analysis has been completed.
For example, `progress` may be used to print a progress bar
to standard out.
:type progress: function pointer.
:returns: X_L, X_D -- the evolved latent state
"""
if n_steps <= 0:
raise ValueError("You must do at least one analyze step.")
if CT_KERNEL not in [0, 1]:
raise ValueError("CT_KERNEL must be 0 (Gibbs) or 1 (MH)")
if do_timing:
# Diagnostics and timing are exclusive.
do_diagnostics = False
diagnostic_func_dict, reprocess_diagnostics_func = \
do_diagnostics_to_func_dict(do_diagnostics)
X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L, X_D)
arg_tuples = self.get_analyze_arg_tuples(
M_c,
T,
X_L_list,
X_D_list,
kernel_list,
n_steps,
c,
r,
max_iterations,
max_time,
diagnostic_func_dict,
diagnostics_every_N,
ROW_CRP_ALPHA_GRID,
COLUMN_CRP_ALPHA_GRID,
S_GRID,
MU_GRID,
N_GRID,
do_timing,
CT_KERNEL,
progress,
make_get_next_seed(seed))
chain_tuples = self.mapper(self.do_analyze, arg_tuples)
X_L_list, X_D_list, diagnostics_dict_list = zip(*chain_tuples)
if do_timing:
timing_list = diagnostics_dict_list
if not was_multistate:
X_L_list, X_D_list = X_L_list[0], X_D_list[0]
ret_tuple = X_L_list, X_D_list
if diagnostic_func_dict is not None:
diagnostics_dict = munge_diagnostics(diagnostics_dict_list)
if reprocess_diagnostics_func is not None:
diagnostics_dict = reprocess_diagnostics_func(diagnostics_dict)
ret_tuple = ret_tuple + (diagnostics_dict, )
if do_timing:
ret_tuple = ret_tuple + (timing_list, )
return ret_tuple | Evolve the latent state by running MCMC transition kernels.
:param seed: The random seed
:type seed: int
:param M_c: The column metadata
:type M_c: dict
:param T: The data table in mapped representation (all floats, generated
by data_utils.read_data_objects)
:param X_L: the latent variables associated with the latent state
:type X_L: dict
:param X_D: the particular cluster assignments of each row in each view
:type X_D: list of lists
:param kernel_list: names of the MCMC transition kernels to run
:type kernel_list: list of strings
:param n_steps: the number of times to run each MCMC transition kernel
:type n_steps: int
:param c: the (global) column indices to run MCMC transition kernels on
:type c: list of ints
:param r: the (global) row indices to run MCMC transition kernels on
:type r: list of ints
:param max_iterations: the maximum number of times to run each MCMC
transition kernel. Applicable only if max_time != -1.
:type max_iterations: int
:param max_time: the maximum amount of time (seconds) to run MCMC
transition kernels for before stopping to return progress
:type max_time: float
:param progress: a function accepting
(n_steps, max_time, step_idx, elapsed_secs, end=None) where
`n_steps` is the total number of transition steps, `max_time` is the
timeout in seconds, `step_idx` is the number of transitions so far,
`elapsed_secs` is the amount of time so far, and `end=None` is an
optional kwarg for indicating the analysis has been completed.
For example, `progress` may be used to print a progress bar
to standard out.
:type progress: function pointer.
:returns: X_L, X_D -- the evolved latent state | Below is the instruction that describes the task:
### Input:
Evolve the latent state by running MCMC transition kernels.
:param seed: The random seed
:type seed: int
:param M_c: The column metadata
:type M_c: dict
:param T: The data table in mapped representation (all floats, generated
by data_utils.read_data_objects)
:param X_L: the latent variables associated with the latent state
:type X_L: dict
:param X_D: the particular cluster assignments of each row in each view
:type X_D: list of lists
:param kernel_list: names of the MCMC transition kernels to run
:type kernel_list: list of strings
:param n_steps: the number of times to run each MCMC transition kernel
:type n_steps: int
:param c: the (global) column indices to run MCMC transition kernels on
:type c: list of ints
:param r: the (global) row indices to run MCMC transition kernels on
:type r: list of ints
:param max_iterations: the maximum number of times to run each MCMC
transition kernel. Applicable only if max_time != -1.
:type max_iterations: int
:param max_time: the maximum amount of time (seconds) to run MCMC
transition kernels for before stopping to return progress
:type max_time: float
:param progress: a function accepting
(n_steps, max_time, step_idx, elapsed_secs, end=None) where
`n_steps` is the total number of transition steps, `max_time` is the
timeout in seconds, `step_idx` is the number of transitions so far,
`elapsed_secs` is the amount of time so far, and `end=None` is an
optional kwarg for indicating the analysis has been completed.
For example, `progress` may be used to print a progress bar
to standard out.
:type progress: function pointer.
:returns: X_L, X_D -- the evolved latent state
### Response:
def analyze(self, M_c, T, X_L, X_D, seed, kernel_list=(), n_steps=1, c=(),
r=(),
max_iterations=-1, max_time=-1, do_diagnostics=False,
diagnostics_every_N=1,
ROW_CRP_ALPHA_GRID=(),
COLUMN_CRP_ALPHA_GRID=(),
S_GRID=(), MU_GRID=(),
N_GRID=31,
do_timing=False,
CT_KERNEL=0,
progress=None,
):
"""Evolve the latent state by running MCMC transition kernels.
:param seed: The random seed
:type seed: int
:param M_c: The column metadata
:type M_c: dict
:param T: The data table in mapped representation (all floats, generated
by data_utils.read_data_objects)
:param X_L: the latent variables associated with the latent state
:type X_L: dict
:param X_D: the particular cluster assignments of each row in each view
:type X_D: list of lists
:param kernel_list: names of the MCMC transition kernels to run
:type kernel_list: list of strings
:param n_steps: the number of times to run each MCMC transition kernel
:type n_steps: int
:param c: the (global) column indices to run MCMC transition kernels on
:type c: list of ints
:param r: the (global) row indices to run MCMC transition kernels on
:type r: list of ints
:param max_iterations: the maximum number of times to run each MCMC
transition kernel. Applicable only if max_time != -1.
:type max_iterations: int
:param max_time: the maximum amount of time (seconds) to run MCMC
transition kernels for before stopping to return progress
:type max_time: float
:param progress: a function accepting
(n_steps, max_time, step_idx, elapsed_secs, end=None) where
`n_steps` is the total number of transition steps, `max_time` is the
timeout in seconds, `step_idx` is the number of transitions so far,
`elapsed_secs` is the amount of time so far, and `end=None` is an
optional kwarg for indicating the analysis has been completed.
For example, `progress` may be used to print a progress bar
to standard out.
:type progress: function pointer.
:returns: X_L, X_D -- the evolved latent state
"""
if n_steps <= 0:
raise ValueError("You must do at least one analyze step.")
if CT_KERNEL not in [0, 1]:
raise ValueError("CT_KERNEL must be 0 (Gibbs) or 1 (MH)")
if do_timing:
# Diagnostics and timing are exclusive.
do_diagnostics = False
diagnostic_func_dict, reprocess_diagnostics_func = \
do_diagnostics_to_func_dict(do_diagnostics)
X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L, X_D)
arg_tuples = self.get_analyze_arg_tuples(
M_c,
T,
X_L_list,
X_D_list,
kernel_list,
n_steps,
c,
r,
max_iterations,
max_time,
diagnostic_func_dict,
diagnostics_every_N,
ROW_CRP_ALPHA_GRID,
COLUMN_CRP_ALPHA_GRID,
S_GRID,
MU_GRID,
N_GRID,
do_timing,
CT_KERNEL,
progress,
make_get_next_seed(seed))
chain_tuples = self.mapper(self.do_analyze, arg_tuples)
X_L_list, X_D_list, diagnostics_dict_list = zip(*chain_tuples)
if do_timing:
timing_list = diagnostics_dict_list
if not was_multistate:
X_L_list, X_D_list = X_L_list[0], X_D_list[0]
ret_tuple = X_L_list, X_D_list
if diagnostic_func_dict is not None:
diagnostics_dict = munge_diagnostics(diagnostics_dict_list)
if reprocess_diagnostics_func is not None:
diagnostics_dict = reprocess_diagnostics_func(diagnostics_dict)
ret_tuple = ret_tuple + (diagnostics_dict, )
if do_timing:
ret_tuple = ret_tuple + (timing_list, )
return ret_tuple |
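A minimal sketch of a progress callback matching the signature documented above; only the (n_steps, max_time, step_idx, elapsed_secs, end=None) signature comes from the docstring, the bar rendering itself is illustrative:
import sys
def print_progress(n_steps, max_time, step_idx, elapsed_secs, end=None):
    # Prefer elapsed-time progress when a timeout is set, otherwise use the step count.
    if max_time is not None and max_time > 0:
        frac = min(elapsed_secs / float(max_time), 1.0)
    else:
        frac = min(step_idx / float(n_steps), 1.0)
    if end is not None:
        frac = 1.0
    bar = ('#' * int(20 * frac)).ljust(20)
    sys.stdout.write('\r[%s] %3d%%' % (bar, int(100 * frac)))
    sys.stdout.flush()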
def filtered_image(self, im):
"""Returns a filtered image after applying the Fourier-space filters"""
q = np.fft.fftn(im)
for k,v in self.filters:
q[k] -= v
return np.real(np.fft.ifftn(q)) | Returns a filtered image after applying the Fourier-space filters | Below is the the instruction that describes the task:
### Input:
Returns a filtered image after applying the Fourier-space filters
### Response:
def filtered_image(self, im):
"""Returns a filtered image after applying the Fourier-space filters"""
q = np.fft.fftn(im)
for k,v in self.filters:
q[k] -= v
return np.real(np.fft.ifftn(q)) |
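A self-contained sketch of the same idea with plain numpy and no class state; here `filters` pairs an index into the FFT array with the complex amplitude to subtract:
import numpy as np
def filtered(im, filters):
    q = np.fft.fftn(im)
    for k, v in filters:
        q[k] -= v                      # remove one Fourier component
    return np.real(np.fft.ifftn(q))
im = np.random.rand(8, 8)
q = np.fft.fftn(im)
out = filtered(im, [((0, 0), q[0, 0])])   # subtracting the DC term yields a zero-mean image
assert abs(out.mean()) < 1e-9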
def install(self, io_handler, module_name):
"""
Installs the bundle with the given module name
"""
bundle = self._context.install_bundle(module_name)
io_handler.write_line("Bundle ID: {0}", bundle.get_bundle_id())
return bundle.get_bundle_id() | Installs the bundle with the given module name | Below is the the instruction that describes the task:
### Input:
Installs the bundle with the given module name
### Response:
def install(self, io_handler, module_name):
"""
Installs the bundle with the given module name
"""
bundle = self._context.install_bundle(module_name)
io_handler.write_line("Bundle ID: {0}", bundle.get_bundle_id())
return bundle.get_bundle_id() |
def tunnel(self, local_port, remote_port):
"""
Creates an SSH tunnel.
"""
r = self.local_renderer
r.env.tunnel_local_port = local_port
r.env.tunnel_remote_port = remote_port
r.local(' ssh -i {key_filename} -L {tunnel_local_port}:localhost:{tunnel_remote_port} {user}@{host_string} -N') | Creates an SSH tunnel. | Below is the the instruction that describes the task:
### Input:
Creates an SSH tunnel.
### Response:
def tunnel(self, local_port, remote_port):
"""
Creates an SSH tunnel.
"""
r = self.local_renderer
r.env.tunnel_local_port = local_port
r.env.tunnel_remote_port = remote_port
r.local(' ssh -i {key_filename} -L {tunnel_local_port}:localhost:{tunnel_remote_port} {user}@{host_string} -N') |
def dumpRule(serviceCls, rule, prefix):
"""
Create an in-between representation of the rule, so we can eventually convert it to OpenAPIPathItem with OpenAPIOperation(s)
"""
rulePath = prefix + rule.rule
rulePath = re.sub('/{2,}', '/', rulePath)
cor = ConvertedRule(
rulePath=rulePath,
operationId=rule.endpoint
)
# look for methods
for meth in sorted(rule.methods or []):
cor.methods.append(meth)
# edit _branch operationId to provide the true method name
origEP = cor.operationId
if origEP.endswith('_branch'):
origEP = origEP[:-7]
cor.branch = True
cor.operationId = '%s.%s' % (serviceCls.__name__, origEP)
# get the actual method so we can inspect it for extension attributes
meth = getattr(serviceCls, origEP)
if hasattr(meth, '_subKleinQname'):
cor.subKlein = meth._subKleinQname
cor.doco = OpenAPIExtendedDocumentation.fromObject(meth, decode=True)
return cor | Create an in-between representation of the rule, so we can eventually convert it to OpenAPIPathItem with OpenAPIOperation(s) | Below is the the instruction that describes the task:
### Input:
Create an in-between representation of the rule, so we can eventually convert it to OpenAPIPathItem with OpenAPIOperation(s)
### Response:
def dumpRule(serviceCls, rule, prefix):
"""
Create an in-between representation of the rule, so we can eventually convert it to OpenAPIPathItem with OpenAPIOperation(s)
"""
rulePath = prefix + rule.rule
rulePath = re.sub('/{2,}', '/', rulePath)
cor = ConvertedRule(
rulePath=rulePath,
operationId=rule.endpoint
)
# look for methods
for meth in sorted(rule.methods or []):
cor.methods.append(meth)
# edit _branch operationId to provide the true method name
origEP = cor.operationId
if origEP.endswith('_branch'):
origEP = origEP[:-7]
cor.branch = True
cor.operationId = '%s.%s' % (serviceCls.__name__, origEP)
# get the actual method so we can inspect it for extension attributes
meth = getattr(serviceCls, origEP)
if hasattr(meth, '_subKleinQname'):
cor.subKlein = meth._subKleinQname
cor.doco = OpenAPIExtendedDocumentation.fromObject(meth, decode=True)
return cor |
def list_set(seq):
"""Similar to `list(set(seq))`, but maintains the order of `seq` while eliminating duplicates
In general list(set(L)) will not have the same order as the original list.
This is a list(set(L)) function that will preserve the order of L.
Arguments:
seq (iterable): list, tuple, or other iterable to be used to produce an ordered `set()`
Returns:
iterable: A copy of `seq` but with duplicates removed, and distinct elements in the same order as in `seq`
Examples:
>>> list_set([2.7,3,2,2,2,1,1,2,3,4,3,2,42,1])
[2.7, 3, 2, 1, 4, 42]
>>> list_set(['Zzz','abc', ('what.', 'ever.'), 0, 0.0, 'Zzz', 0.00, 'ABC'])
['Zzz', 'abc', ('what.', 'ever.'), 0, 'ABC']
"""
new_list = []
for i in seq:
if i not in new_list:
new_list += [i]
return type(seq)(new_list) | Similar to `list(set(seq))`, but maintains the order of `seq` while eliminating duplicates
In general list(set(L)) will not have the same order as the original list.
This is a list(set(L)) function that will preserve the order of L.
Arguments:
seq (iterable): list, tuple, or other iterable to be used to produce an ordered `set()`
Returns:
iterable: A copy of `seq` but with duplicates removed, and distinct elements in the same order as in `seq`
Examples:
>>> list_set([2.7,3,2,2,2,1,1,2,3,4,3,2,42,1])
[2.7, 3, 2, 1, 4, 42]
>>> list_set(['Zzz','abc', ('what.', 'ever.'), 0, 0.0, 'Zzz', 0.00, 'ABC'])
['Zzz', 'abc', ('what.', 'ever.'), 0, 'ABC'] | Below is the the instruction that describes the task:
### Input:
Similar to `list(set(seq))`, but maintains the order of `seq` while eliminating duplicates
In general list(set(L)) will not have the same order as the original list.
This is a list(set(L)) function that will preserve the order of L.
Arguments:
seq (iterable): list, tuple, or other iterable to be used to produce an ordered `set()`
Returns:
iterable: A copy of `seq` but with duplicates removed, and distinct elements in the same order as in `seq`
Examples:
>>> list_set([2.7,3,2,2,2,1,1,2,3,4,3,2,42,1])
[2.7, 3, 2, 1, 4, 42]
>>> list_set(['Zzz','abc', ('what.', 'ever.'), 0, 0.0, 'Zzz', 0.00, 'ABC'])
['Zzz', 'abc', ('what.', 'ever.'), 0, 'ABC']
### Response:
def list_set(seq):
"""Similar to `list(set(seq))`, but maintains the order of `seq` while eliminating duplicates
In general list(set(L)) will not have the same order as the original list.
This is a list(set(L)) function that will preserve the order of L.
Arguments:
seq (iterable): list, tuple, or other iterable to be used to produce an ordered `set()`
Returns:
iterable: A copy of `seq` but with duplicates removed, and distinct elements in the same order as in `seq`
Examples:
>>> list_set([2.7,3,2,2,2,1,1,2,3,4,3,2,42,1])
[2.7, 3, 2, 1, 4, 42]
>>> list_set(['Zzz','abc', ('what.', 'ever.'), 0, 0.0, 'Zzz', 0.00, 'ABC'])
['Zzz', 'abc', ('what.', 'ever.'), 0, 'ABC']
"""
new_list = []
for i in seq:
if i not in new_list:
new_list += [i]
return type(seq)(new_list) |
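For hashable elements the same first-appearance order can be kept in O(n) with a seen-set; list_set above trades that speed for also handling unhashable items such as lists:
def list_set_hashable(seq):
    seen = set()
    out = [x for x in seq if not (x in seen or seen.add(x))]
    return type(seq)(out)
assert list_set_hashable([2.7, 3, 2, 2, 1, 3, 42]) == [2.7, 3, 2, 1, 42]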
def send_raw_transaction(self, tx: Transaction, is_full: bool = False) -> str:
"""
This interface is used to send the transaction into the network.
:param tx: Transaction object in ontology Python SDK.
:param is_full:
:return: a hexadecimal transaction hash value.
"""
tx_data = tx.serialize(is_hex=True)
payload = self.generate_json_rpc_payload(RpcMethod.SEND_TRANSACTION, [tx_data])
response = self.__post(self.__url, payload)
if is_full:
return response
return response['result'] | This interface is used to send the transaction into the network.
:param tx: Transaction object in ontology Python SDK.
:param is_full:
:return: a hexadecimal transaction hash value. | Below is the the instruction that describes the task:
### Input:
This interface is used to send the transaction into the network.
:param tx: Transaction object in ontology Python SDK.
:param is_full:
:return: a hexadecimal transaction hash value.
### Response:
def send_raw_transaction(self, tx: Transaction, is_full: bool = False) -> str:
"""
This interface is used to send the transaction into the network.
:param tx: Transaction object in ontology Python SDK.
:param is_full:
:return: a hexadecimal transaction hash value.
"""
tx_data = tx.serialize(is_hex=True)
payload = self.generate_json_rpc_payload(RpcMethod.SEND_TRANSACTION, [tx_data])
response = self.__post(self.__url, payload)
if is_full:
return response
return response['result'] |
def provStacks(self, offs, size):
'''
Returns a stream of provenance stacks at the given offset
'''
for _, iden in self.provseq.slice(offs, size):
stack = self.getProvStack(iden)
if stack is None:
continue
yield (iden, stack) | Returns a stream of provenance stacks at the given offset | Below is the the instruction that describes the task:
### Input:
Returns a stream of provenance stacks at the given offset
### Response:
def provStacks(self, offs, size):
'''
Returns a stream of provenance stacks at the given offset
'''
for _, iden in self.provseq.slice(offs, size):
stack = self.getProvStack(iden)
if stack is None:
continue
yield (iden, stack) |
def rm(*components, **kwargs):
"""
Remove a file or directory.
If path is a directory, this recursively removes the directory and
any contents. Non-existent paths are silently ignored.
Supports Unix style globbing by default (disable using
glob=False). For details on globbing pattern expansion, see:
https://docs.python.org/2/library/glob.html
Arguments:
*components (string[]): path to the file or directory to remove. May be
absolute or relative. May contain unix glob
**kwargs: if "glob" is True, perform Unix style pattern expansion of
paths (default: True).
"""
_path = path(*components)
glob = kwargs.get("glob", True)
paths = iglob(_path) if glob else [_path]
for file in paths:
if isfile(file):
os.remove(file)
elif exists(file):
shutil.rmtree(file, ignore_errors=False) | Remove a file or directory.
If path is a directory, this recursively removes the directory and
any contents. Non-existent paths are silently ignored.
Supports Unix style globbing by default (disable using
glob=False). For details on globbing pattern expansion, see:
https://docs.python.org/2/library/glob.html
Arguments:
*components (string[]): path to the file or directory to remove. May be
absolute or relative. May contain unix glob
**kwargs: if "glob" is True, perform Unix style pattern expansion of
paths (default: True). | Below is the the instruction that describes the task:
### Input:
Remove a file or directory.
If path is a directory, this recursively removes the directory and
any contents. Non-existent paths are silently ignored.
Supports Unix style globbing by default (disable using
glob=False). For details on globbing pattern expansion, see:
https://docs.python.org/2/library/glob.html
Arguments:
*components (string[]): path to the file or directory to remove. May be
absolute or relative. May contain unix glob
**kwargs: if "glob" is True, perform Unix style pattern expansion of
paths (default: True).
### Response:
def rm(*components, **kwargs):
"""
Remove a file or directory.
If path is a directory, this recursively removes the directory and
any contents. Non-existent paths are silently ignored.
Supports Unix style globbing by default (disable using
glob=False). For details on globbing pattern expansion, see:
https://docs.python.org/2/library/glob.html
Arguments:
*components (string[]): path to the file or directory to remove. May be
absolute or relative. May contain unix glob
**kwargs: if "glob" is True, perform Unix style pattern expansion of
paths (default: True).
"""
_path = path(*components)
glob = kwargs.get("glob", True)
paths = iglob(_path) if glob else [_path]
for file in paths:
if isfile(file):
os.remove(file)
elif exists(file):
shutil.rmtree(file, ignore_errors=False) |
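A minimal standalone equivalent using the stdlib directly; the rm() above additionally joins *components with its path() helper before expanding the pattern:
import os
import shutil
from glob import iglob
from os.path import exists, isfile
def rm_glob(pattern):
    for p in iglob(pattern):
        if isfile(p):
            os.remove(p)
        elif exists(p):
            shutil.rmtree(p, ignore_errors=False)
rm_glob('/tmp/myproject/build/*.o')   # hypothetical path; silently a no-op when nothing matches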
def start_worker(queues, config, *, name=None, celery_args=None, check_datastore=True):
""" Start a worker process.
Args:
queues (list): List of queue names this worker accepts jobs from.
config (Config): Reference to the configuration object from which the
settings for the worker are retrieved.
name (string): Unique name for the worker. The hostname template variables from
Celery can be used. If not given, a unique name is created.
celery_args (list): List of additional Celery worker command line arguments.
Please note that this depends on the version of Celery used and might change.
Use with caution.
check_datastore (bool): Set to True to check whether the data store is available
prior to starting the worker.
"""
celery_app = create_app(config)
if check_datastore:
with DataStore(**config.data_store,
auto_connect=True, handle_reconnect=False) as ds:
celery_app.user_options['datastore_info'] = ds.server_info
argv = [
'worker',
'-n={}'.format(uuid4() if name is None else name),
'--queues={}'.format(','.join(queues))
]
argv.extend(celery_args or [])
celery_app.steps['consumer'].add(WorkerLifecycle)
celery_app.user_options['config'] = config
celery_app.worker_main(argv) | Start a worker process.
Args:
queues (list): List of queue names this worker accepts jobs from.
config (Config): Reference to the configuration object from which the
settings for the worker are retrieved.
name (string): Unique name for the worker. The hostname template variables from
Celery can be used. If not given, a unique name is created.
celery_args (list): List of additional Celery worker command line arguments.
Please note that this depends on the version of Celery used and might change.
Use with caution.
check_datastore (bool): Set to True to check whether the data store is available
prior to starting the worker. | Below is the the instruction that describes the task:
### Input:
Start a worker process.
Args:
queues (list): List of queue names this worker accepts jobs from.
config (Config): Reference to the configuration object from which the
settings for the worker are retrieved.
name (string): Unique name for the worker. The hostname template variables from
Celery can be used. If not given, a unique name is created.
celery_args (list): List of additional Celery worker command line arguments.
Please note that this depends on the version of Celery used and might change.
Use with caution.
check_datastore (bool): Set to True to check whether the data store is available
prior to starting the worker.
### Response:
def start_worker(queues, config, *, name=None, celery_args=None, check_datastore=True):
""" Start a worker process.
Args:
queues (list): List of queue names this worker accepts jobs from.
config (Config): Reference to the configuration object from which the
settings for the worker are retrieved.
name (string): Unique name for the worker. The hostname template variables from
Celery can be used. If not given, a unique name is created.
celery_args (list): List of additional Celery worker command line arguments.
Please note that this depends on the version of Celery used and might change.
Use with caution.
check_datastore (bool): Set to True to check whether the data store is available
prior to starting the worker.
"""
celery_app = create_app(config)
if check_datastore:
with DataStore(**config.data_store,
auto_connect=True, handle_reconnect=False) as ds:
celery_app.user_options['datastore_info'] = ds.server_info
argv = [
'worker',
'-n={}'.format(uuid4() if name is None else name),
'--queues={}'.format(','.join(queues))
]
argv.extend(celery_args or [])
celery_app.steps['consumer'].add(WorkerLifecycle)
celery_app.user_options['config'] = config
celery_app.worker_main(argv) |
def _errmsg(self, error: "Err", tb: bool=False, i: int=None,
msgformat: str="terminal") -> str:
"""
Get the error message
"""
if msgformat == "terminal":
msg = self._headline(error, i)
if error.ex is not None:
msg += "\n" + "line " + colors.bold(str(error.line))
msg += ": " + colors.yellow(error.code)
msg += "\n" + str(error.file)
if self.errs_traceback is True or tb is True:
if error.tb is not None:
msg += "\n" + error.tb
elif msgformat == "csv":
sep = ","
msg = error.msg + sep
msg += str(error.line) + sep + error.code + sep
msg += str(error.file)
elif msgformat == "text":
sep = ","
msg = error.msg
if error.ex is not None:
msg += sep + str(error.line) + sep + error.code + sep
msg += str(error.file) + sep
if self.errs_traceback is True or tb is True:
if error.tb is not None:
msg += sep + error.tb
elif msgformat == "dict":
msg = {"date": datetime.now()}
if error.ex is not None:
msg["msg"] = error.msg
msg["line"] = error.line
msg["code"] = error.code
msg["file"] = error.file
if self.errs_traceback is True or tb is True:
if error.tb is not None:
msg["traceback"] = error.tb
return msg | Get the error message | Below is the the instruction that describes the task:
### Input:
Get the error message
### Response:
def _errmsg(self, error: "Err", tb: bool=False, i: int=None,
msgformat: str="terminal") -> str:
"""
Get the error message
"""
if msgformat == "terminal":
msg = self._headline(error, i)
if error.ex is not None:
msg += "\n" + "line " + colors.bold(str(error.line))
msg += ": " + colors.yellow(error.code)
msg += "\n" + str(error.file)
if self.errs_traceback is True or tb is True:
if error.tb is not None:
msg += "\n" + error.tb
elif msgformat == "csv":
sep = ","
msg = error.msg + sep
msg += str(error.line) + sep + error.code + sep
msg += str(error.file)
elif msgformat == "text":
sep = ","
msg = error.msg
if error.ex is not None:
msg += sep + str(error.line) + sep + error.code + sep
msg += str(error.file) + sep
if self.errs_traceback is True or tb is True:
if error.tb is not None:
msg += sep + error.tb
elif msgformat == "dict":
msg = {"date": datetime.now()}
if error.ex is not None:
msg["msg"] = error.msg
msg["line"] = error.line
msg["code"] = error.code
msg["file"] = error.file
if self.errs_traceback is True or tb is True:
if error.tb is not None:
msg["traceback"] = error.tb
return msg |
def TakeWhile(self: dict, f):
"""
[
{
'self': [1, 2, 3, 4, 5],
'f': lambda x: x < 4,
'assert': lambda ret: list(ret) == [1, 2, 3]
}
]
"""
if is_to_destruct(f):
f = destruct_func(f)
for e in self.items():
if not f(e):
break
yield e | [
{
'self': [1, 2, 3, 4, 5],
'f': lambda x: x < 4,
'assert': lambda ret: list(ret) == [1, 2, 3]
}
] | Below is the the instruction that describes the task:
### Input:
[
{
'self': [1, 2, 3, 4, 5],
'f': lambda x: x < 4,
'assert': lambda ret: list(ret) == [1, 2, 3]
}
]
### Response:
def TakeWhile(self: dict, f):
"""
[
{
'self': [1, 2, 3, 4, 5],
'f': lambda x: x < 4,
'assert': lambda ret: list(ret) == [1, 2, 3]
}
]
"""
if is_to_destruct(f):
f = destruct_func(f)
for e in self.items():
if not f(e):
break
yield e |
def connect(self):
"""Connects to the lutron controller."""
if self._connected or self.is_alive():
raise ConnectionExistsError("Already connected")
# After starting the thread we wait for it to post us
# an event signifying that connection is established. This
# ensures that the caller only resumes when we are fully connected.
self.start()
with self._lock:
self._connect_cond.wait_for(lambda: self._connected) | Connects to the lutron controller. | Below is the the instruction that describes the task:
### Input:
Connects to the lutron controller.
### Response:
def connect(self):
"""Connects to the lutron controller."""
if self._connected or self.is_alive():
raise ConnectionExistsError("Already connected")
# After starting the thread we wait for it to post us
# an event signifying that connection is established. This
# ensures that the caller only resumes when we are fully connected.
self.start()
with self._lock:
self._connect_cond.wait_for(lambda: self._connected) |
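A standalone sketch of the handshake pattern used above: the caller blocks on a threading.Condition until a worker thread flips a flag and notifies:
import threading
import time
lock = threading.Lock()
cond = threading.Condition(lock)
connected = False
def worker():
    global connected
    time.sleep(0.1)                    # stand-in for establishing the connection
    with lock:
        connected = True
        cond.notify_all()
threading.Thread(target=worker).start()
with lock:
    cond.wait_for(lambda: connected)   # resumes only once the worker has signalled
print('connected')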
def wrap(self, value):
''' Validate ``value`` and then use the document's class to wrap the
value'''
self.validate_wrap(value)
return self.type.wrap(value) | Validate ``value`` and then use the document's class to wrap the
value | Below is the the instruction that describes the task:
### Input:
Validate ``value`` and then use the document's class to wrap the
value
### Response:
def wrap(self, value):
''' Validate ``value`` and then use the document's class to wrap the
value'''
self.validate_wrap(value)
return self.type.wrap(value) |
def FileFinderOSFromClient(args):
"""This function expands paths from the args and returns related stat entries.
Args:
args: An `rdf_file_finder.FileFinderArgs` object.
Yields:
`rdf_paths.PathSpec` instances.
"""
stat_cache = filesystem.StatCache()
opts = args.action.stat
for path in GetExpandedPaths(args):
try:
content_conditions = conditions.ContentCondition.Parse(args.conditions)
for content_condition in content_conditions:
with io.open(path, "rb") as fd:
result = list(content_condition.Search(fd))
if not result:
raise _SkipFileException()
# TODO: `opts.resolve_links` has type `RDFBool`, not `bool`.
stat = stat_cache.Get(path, follow_symlink=bool(opts.resolve_links))
stat_entry = client_utils.StatEntryFromStatPathSpec(
stat, ext_attrs=opts.collect_ext_attrs)
yield stat_entry
except _SkipFileException:
pass | This function expands paths from the args and returns related stat entries.
Args:
args: An `rdf_file_finder.FileFinderArgs` object.
Yields:
`rdf_paths.PathSpec` instances. | Below is the the instruction that describes the task:
### Input:
This function expands paths from the args and returns related stat entries.
Args:
args: An `rdf_file_finder.FileFinderArgs` object.
Yields:
`rdf_paths.PathSpec` instances.
### Response:
def FileFinderOSFromClient(args):
"""This function expands paths from the args and returns related stat entries.
Args:
args: An `rdf_file_finder.FileFinderArgs` object.
Yields:
`rdf_paths.PathSpec` instances.
"""
stat_cache = filesystem.StatCache()
opts = args.action.stat
for path in GetExpandedPaths(args):
try:
content_conditions = conditions.ContentCondition.Parse(args.conditions)
for content_condition in content_conditions:
with io.open(path, "rb") as fd:
result = list(content_condition.Search(fd))
if not result:
raise _SkipFileException()
# TODO: `opts.resolve_links` has type `RDFBool`, not `bool`.
stat = stat_cache.Get(path, follow_symlink=bool(opts.resolve_links))
stat_entry = client_utils.StatEntryFromStatPathSpec(
stat, ext_attrs=opts.collect_ext_attrs)
yield stat_entry
except _SkipFileException:
pass |
def _append_message(self, text, char_format):
"""
Parses text and executes parsed operations.
"""
self._cursor = self._text_edit.textCursor()
operations = self._parser.parse_text(FormattedText(text, char_format))
for i, operation in enumerate(operations):
try:
func = getattr(self, '_%s' % operation.command)
except AttributeError:
print('command not implemented: %r - %r' % (
operation.command, operation.data))
else:
try:
func(operation.data)
except Exception:
_logger().exception('exception while running %r', operation)
# uncomment next line for debugging commands
self._text_edit.repaint() | Parses text and executes parsed operations. | Below is the the instruction that describes the task:
### Input:
Parses text and executes parsed operations.
### Response:
def _append_message(self, text, char_format):
"""
Parses text and executes parsed operations.
"""
self._cursor = self._text_edit.textCursor()
operations = self._parser.parse_text(FormattedText(text, char_format))
for i, operation in enumerate(operations):
try:
func = getattr(self, '_%s' % operation.command)
except AttributeError:
print('command not implemented: %r - %r' % (
operation.command, operation.data))
else:
try:
func(operation.data)
except Exception:
_logger().exception('exception while running %r', operation)
# uncomment next line for debugging commands
self._text_edit.repaint() |
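A standalone sketch of the command-dispatch pattern used above, where each parsed command name maps to a '_<command>' method looked up with getattr():
class Dispatcher:
    def _move_cursor(self, data):
        print('moving cursor to', data)
    def run(self, operations):
        for command, data in operations:
            try:
                func = getattr(self, '_%s' % command)
            except AttributeError:
                print('command not implemented: %r - %r' % (command, data))
            else:
                func(data)
Dispatcher().run([('move_cursor', (0, 4)), ('ring_bell', None)])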
def array(self, dtype=None):
"""An implementation of __array__()"""
t = self._t
# timestamp (12) through last enum (76)
if 11 <= t < 77:
dtype = dtypeof(self)
a = numpy.empty(len(self), dtype)
k2a(a, self)
return a
# table (98)
if t == 98:
if dtype is None:
dtype = list(zip(self.cols, (dtypeof(c) for c in self.flip.value)))
dtype = numpy.dtype(dtype)
a = numpy.empty(int(self.count), dtype)
for c in dtype.fields:
k2a(a[c], self[c])
return a
return numpy.array(list(self), dtype) | An implementation of __array__() | Below is the the instruction that describes the task:
### Input:
An implementation of __array__()
### Response:
def array(self, dtype=None):
"""An implementation of __array__()"""
t = self._t
# timestamp (12) through last enum (76)
if 11 <= t < 77:
dtype = dtypeof(self)
a = numpy.empty(len(self), dtype)
k2a(a, self)
return a
# table (98)
if t == 98:
if dtype is None:
dtype = list(zip(self.cols, (dtypeof(c) for c in self.flip.value)))
dtype = numpy.dtype(dtype)
a = numpy.empty(int(self.count), dtype)
for c in dtype.fields:
k2a(a[c], self[c])
return a
return numpy.array(list(self), dtype) |
def clear(self):
'''
Reset the current HyperLogLog to empty.
'''
self.reg = np.zeros((self.m,), dtype=np.int8) | Reset the current HyperLogLog to empty. | Below is the the instruction that describes the task:
### Input:
Reset the current HyperLogLog to empty.
### Response:
def clear(self):
'''
Reset the current HyperLogLog to empty.
'''
self.reg = np.zeros((self.m,), dtype=np.int8) |
def get_ht_mcs(mcs):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n591.
Positional arguments:
mcs -- bytearray.
Returns:
Dict.
"""
answers = dict()
max_rx_supp_data_rate = (mcs[10] | ((mcs[11] & 0x3) << 8))
tx_mcs_set_defined = not not (mcs[12] & (1 << 0))
tx_mcs_set_equal = not (mcs[12] & (1 << 1))
tx_max_num_spatial_streams = ((mcs[12] >> 2) & 3) + 1
tx_unequal_modulation = not not (mcs[12] & (1 << 4))
if max_rx_supp_data_rate:
answers['HT Max RX data rate (Mbps)'] = max_rx_supp_data_rate
if tx_mcs_set_defined and tx_mcs_set_equal:
answers['HT TX/RX MCS rate indexes supported'] = get_mcs_index(mcs)
elif tx_mcs_set_defined:
answers['HT RX MCS rate indexes supported'] = get_mcs_index(mcs)
answers['TX unequal modulation supported'] = bool(tx_unequal_modulation)
answers['HT TX Max spatial streams'] = tx_max_num_spatial_streams
else:
answers['HT RX MCS rate indexes supported'] = get_mcs_index(mcs)
return answers | http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n591.
Positional arguments:
mcs -- bytearray.
Returns:
Dict. | Below is the the instruction that describes the task:
### Input:
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n591.
Positional arguments:
mcs -- bytearray.
Returns:
Dict.
### Response:
def get_ht_mcs(mcs):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n591.
Positional arguments:
mcs -- bytearray.
Returns:
Dict.
"""
answers = dict()
max_rx_supp_data_rate = (mcs[10] | ((mcs[11] & 0x3) << 8))
tx_mcs_set_defined = not not (mcs[12] & (1 << 0))
tx_mcs_set_equal = not (mcs[12] & (1 << 1))
tx_max_num_spatial_streams = ((mcs[12] >> 2) & 3) + 1
tx_unequal_modulation = not not (mcs[12] & (1 << 4))
if max_rx_supp_data_rate:
answers['HT Max RX data rate (Mbps)'] = max_rx_supp_data_rate
if tx_mcs_set_defined and tx_mcs_set_equal:
answers['HT TX/RX MCS rate indexes supported'] = get_mcs_index(mcs)
elif tx_mcs_set_defined:
answers['HT RX MCS rate indexes supported'] = get_mcs_index(mcs)
answers['TX unequal modulation supported'] = bool(tx_unequal_modulation)
answers['HT TX Max spatial streams'] = tx_max_num_spatial_streams
else:
answers['HT RX MCS rate indexes supported'] = get_mcs_index(mcs)
return answers |
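A standalone sketch of the bit fields read above from the 16-byte HT MCS set; the example byte values are made up:
mcs = bytearray(16)
mcs[10] = 0x6e          # low 8 bits of the highest supported RX data rate
mcs[11] = 0x01          # bits 8-9 of that rate sit in the low 2 bits here
mcs[12] = 0b00010111    # TX set defined, TX != RX, 2 spatial streams, unequal modulation
max_rx_rate = mcs[10] | ((mcs[11] & 0x3) << 8)   # 366 Mbps
tx_defined = bool(mcs[12] & (1 << 0))            # True
tx_equal = not (mcs[12] & (1 << 1))              # False
tx_streams = ((mcs[12] >> 2) & 3) + 1            # 2
tx_unequal_mod = bool(mcs[12] & (1 << 4))        # True
assert (max_rx_rate, tx_defined, tx_equal, tx_streams, tx_unequal_mod) == (366, True, False, 2, True)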
def transformed_value(self, output_name=DEFAULT_OUTPUT):
'''Returns transformed value either for DEFAULT_OUTPUT or for the output
given as output_name. Returns None if execution result isn't a success.
Reconstructs the pipeline context to materialize value.
'''
check.str_param(output_name, 'output_name')
if not self.solid.definition.has_output(output_name):
raise DagsterInvariantViolationError(
'{output_name} not defined in solid {solid}'.format(
output_name=output_name, solid=self.solid.name
)
)
if self.success:
for result in self.transforms:
if (
result.is_successful_output
and result.step_output_data.output_name == output_name
):
with self.reconstruct_context() as context:
value = self._get_value(context, result.step_output_data)
return value
raise DagsterInvariantViolationError(
(
'Did not find result {output_name} in solid {self.solid.name} '
'execution result'
).format(output_name=output_name, self=self)
)
else:
return None | Returns transformed value either for DEFAULT_OUTPUT or for the output
given as output_name. Returns None if execution result isn't a success.
Reconstructs the pipeline context to materialize value. | Below is the the instruction that describes the task:
### Input:
Returns transformed value either for DEFAULT_OUTPUT or for the output
given as output_name. Returns None if execution result isn't a success.
Reconstructs the pipeline context to materialize value.
### Response:
def transformed_value(self, output_name=DEFAULT_OUTPUT):
'''Returns transformed value either for DEFAULT_OUTPUT or for the output
given as output_name. Returns None if execution result isn't a success.
Reconstructs the pipeline context to materialize value.
'''
check.str_param(output_name, 'output_name')
if not self.solid.definition.has_output(output_name):
raise DagsterInvariantViolationError(
'{output_name} not defined in solid {solid}'.format(
output_name=output_name, solid=self.solid.name
)
)
if self.success:
for result in self.transforms:
if (
result.is_successful_output
and result.step_output_data.output_name == output_name
):
with self.reconstruct_context() as context:
value = self._get_value(context, result.step_output_data)
return value
raise DagsterInvariantViolationError(
(
'Did not find result {output_name} in solid {self.solid.name} '
'execution result'
).format(output_name=output_name, self=self)
)
else:
return None |
def carry_in(value, carry, base):
"""
Add a carry digit to a number represented by ``value``.
:param value: the value
:type value: list of int
:param int carry: the carry digit (>= 0)
:param int base: the base (>= 2)
:returns: carry-out and result
:rtype: tuple of int * (list of int)
Complexity: O(len(value))
"""
if base < 2:
raise BasesValueError(base, "base", "must be at least 2")
if any(x < 0 or x >= base for x in value):
raise BasesValueError(
value,
"value",
"elements must be at least 0 and less than %s" % base
)
if carry < 0 or carry >= base:
raise BasesValueError(
carry,
"carry",
"carry must be less than %s" % base
)
result = []
for val in reversed(value):
(carry, new_val) = divmod(val + carry, base)
result.append(new_val)
return (carry, list(reversed(result))) | Add a carry digit to a number represented by ``value``.
:param value: the value
:type value: list of int
:param int carry: the carry digit (>= 0)
:param int base: the base (>= 2)
:returns: carry-out and result
:rtype: tuple of int * (list of int)
Complexity: O(len(value)) | Below is the the instruction that describes the task:
### Input:
Add a carry digit to a number represented by ``value``.
:param value: the value
:type value: list of int
:param int carry: the carry digit (>= 0)
:param int base: the base (>= 2)
:returns: carry-out and result
:rtype: tuple of int * (list of int)
Complexity: O(len(value))
### Response:
def carry_in(value, carry, base):
"""
Add a carry digit to a number represented by ``value``.
:param value: the value
:type value: list of int
:param int carry: the carry digit (>= 0)
:param int base: the base (>= 2)
:returns: carry-out and result
:rtype: tuple of int * (list of int)
Complexity: O(len(value))
"""
if base < 2:
raise BasesValueError(base, "base", "must be at least 2")
if any(x < 0 or x >= base for x in value):
raise BasesValueError(
value,
"value",
"elements must be at least 0 and less than %s" % base
)
if carry < 0 or carry >= base:
raise BasesValueError(
carry,
"carry",
"carry must be less than %s" % base
)
result = []
for val in reversed(value):
(carry, new_val) = divmod(val + carry, base)
result.append(new_val)
return (carry, list(reversed(result))) |
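A small usage sketch, assuming carry_in is importable from this module; digits are most-significant first:
carry, digits = carry_in([1, 9, 9], 1, 10)   # 199 + 1
assert (carry, digits) == (0, [2, 0, 0])
carry, digits = carry_in([9, 9], 1, 10)      # 99 + 1 overflows both digits
assert (carry, digits) == (1, [0, 0])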
def screenshot(self, filename, scale=1.0, quality=100):
'''take screenshot.'''
result = self.server.screenshot(filename, scale, quality)
if result:
return result
device_file = self.server.jsonrpc.takeScreenshot("screenshot.png",
scale, quality)
if not device_file:
return None
p = self.server.adb.cmd("pull", device_file, filename)
p.wait()
self.server.adb.cmd("shell", "rm", device_file).wait()
return filename if p.returncode == 0 else None | take screenshot. | Below is the the instruction that describes the task:
### Input:
take screenshot.
### Response:
def screenshot(self, filename, scale=1.0, quality=100):
'''take screenshot.'''
result = self.server.screenshot(filename, scale, quality)
if result:
return result
device_file = self.server.jsonrpc.takeScreenshot("screenshot.png",
scale, quality)
if not device_file:
return None
p = self.server.adb.cmd("pull", device_file, filename)
p.wait()
self.server.adb.cmd("shell", "rm", device_file).wait()
return filename if p.returncode == 0 else None
def eulerian_tour_directed(graph):
"""Eulerian tour on a directed graph
:param graph: directed graph in listlist format, cannot be listdict
:assumes: graph is eulerian
:returns: eulerian cycle as a vertex list
:complexity: `O(|V|+|E|)`
"""
P = []
Q = [0]
R = []
succ = [0] * len(graph)
while Q:
node = Q.pop()
P.append(node)
while succ[node] < len(graph[node]):
neighbor = graph[node][succ[node]]
succ[node] += 1
R.append(neighbor)
node = neighbor
while R:
Q.append(R.pop())
return P | Eulerian tour on a directed graph
:param graph: directed graph in listlist format, cannot be listdict
:assumes: graph is eulerian
:returns: eulerian cycle as a vertex list
:complexity: `O(|V|+|E|)` | Below is the the instruction that describes the task:
### Input:
Eulerian tour on a directed graph
:param graph: directed graph in listlist format, cannot be listdict
:assumes: graph is eulerian
:returns: eulerian cycle as a vertex list
:complexity: `O(|V|+|E|)`
### Response:
def eulerian_tour_directed(graph):
"""Eulerian tour on a directed graph
:param graph: directed graph in listlist format, cannot be listdict
:assumes: graph is eulerian
:returns: eulerian cycle as a vertex list
:complexity: `O(|V|+|E|)`
"""
P = []
Q = [0]
R = []
succ = [0] * len(graph)
while Q:
node = Q.pop()
P.append(node)
while succ[node] < len(graph[node]):
neighbor = graph[node][succ[node]]
succ[node] += 1
R.append(neighbor)
node = neighbor
while R:
Q.append(R.pop())
return P |
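A small usage sketch, assuming eulerian_tour_directed is importable; each directed edge is used exactly once and the tour returns to the start vertex:
triangle = [[1], [2], [0]]            # 0 -> 1 -> 2 -> 0, listlist format
assert eulerian_tour_directed(triangle) == [0, 1, 2, 0]
bowtie = [[1, 2], [0], [0]]           # two cycles sharing vertex 0
assert eulerian_tour_directed(bowtie) == [0, 1, 0, 2, 0]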
def With(self, context_manager, *body, **kwargs):
"""
**With**
def With(context_manager, *body):
**Arguments**
* **context_manager**: a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) object or valid expression from the DSL that returns a context manager.
* ***body**: any valid expression of the DSL to be evaluated inside the context. `*body` is interpreted as a tuple so all expressions contained are composed.
As with normal Python programs you sometimes might want to create a context for a block of code. You normally give a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) to the [with](https://docs.python.org/2/reference/compound_stmts.html#the-with-statement) statement; in Phi you use `P.With` or `phi.With`.
**Context**
Python's `with` statement returns a context object through the `as` keyword; in the DSL this object can be obtained using the `P.Context` method or the `phi.Context` function.
### Examples
from phi import P, Obj, Context, With, Pipe
text = Pipe(
"text.txt",
With( open, Context,
Obj.read()
)
)
The previous is equivalent to
with open("text.txt") as f:
text = f.read()
"""
context_f = _parse(context_manager)._f
body_f = E.Seq(*body)._f
def g(x, state):
context, state = context_f(x, state)
with context as scope:
with _WithContextManager(scope):
return body_f(x, state)
return self.__then__(g, **kwargs) | **With**
def With(context_manager, *body):
**Arguments**
* **context_manager**: a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) object or valid expression from the DSL that returns a context manager.
* ***body**: any valid expression of the DSL to be evaluated inside the context. `*body` is interpreted as a tuple so all expressions contained are composed.
As with normal Python programs you sometimes might want to create a context for a block of code. You normally give a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) to the [with](https://docs.python.org/2/reference/compound_stmts.html#the-with-statement) statement; in Phi you use `P.With` or `phi.With`.
**Context**
Python's `with` statement returns a context object through the `as` keyword; in the DSL this object can be obtained using the `P.Context` method or the `phi.Context` function.
### Examples
from phi import P, Obj, Context, With, Pipe
text = Pipe(
"text.txt",
With( open, Context,
Obj.read()
)
)
The previous is equivalent to
with open("text.txt") as f:
text = f.read() | Below is the the instruction that describes the task:
### Input:
**With**
def With(context_manager, *body):
**Arguments**
* **context_manager**: a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) object or valid expression from the DSL that returns a context manager.
* ***body**: any valid expression of the DSL to be evaluated inside the context. `*body` is interpreted as a tuple so all expressions contained are composed.
As with normal Python programs you sometimes might want to create a context for a block of code. You normally give a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) to the [with](https://docs.python.org/2/reference/compound_stmts.html#the-with-statement) statement; in Phi you use `P.With` or `phi.With`.
**Context**
Python's `with` statement returns a context object through the `as` keyword; in the DSL this object can be obtained using the `P.Context` method or the `phi.Context` function.
### Examples
from phi import P, Obj, Context, With, Pipe
text = Pipe(
"text.txt",
With( open, Context,
Obj.read()
)
)
The previous is equivalent to
with open("text.txt") as f:
text = f.read()
### Response:
def With(self, context_manager, *body, **kwargs):
"""
**With**
def With(context_manager, *body):
**Arguments**
* **context_manager**: a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) object or valid expression from the DSL that returns a context manager.
* ***body**: any valid expression of the DSL to be evaluated inside the context. `*body` is interpreted as a tuple so all expressions contained are composed.
As with normal Python programs you sometimes might want to create a context for a block of code. You normally give a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) to the [with](https://docs.python.org/2/reference/compound_stmts.html#the-with-statement) statement; in Phi you use `P.With` or `phi.With`.
**Context**
Python's `with` statement returns a context object through the `as` keyword; in the DSL this object can be obtained using the `P.Context` method or the `phi.Context` function.
### Examples
from phi import P, Obj, Context, With, Pipe
text = Pipe(
"text.txt",
With( open, Context,
Obj.read()
)
)
The previous is equivalent to
with open("text.txt") as f:
text = f.read()
"""
context_f = _parse(context_manager)._f
body_f = E.Seq(*body)._f
def g(x, state):
context, state = context_f(x, state)
with context as scope:
with _WithContextManager(scope):
return body_f(x, state)
return self.__then__(g, **kwargs) |
def allState(self, *args, **kwargs):
"""
List out the entire internal state
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["allState"], *args, **kwargs) | List out the entire internal state
This method is only for debugging the ec2-manager
This method is ``experimental`` | Below is the the instruction that describes the task:
### Input:
List out the entire internal state
This method is only for debugging the ec2-manager
This method is ``experimental``
### Response:
def allState(self, *args, **kwargs):
"""
List out the entire internal state
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["allState"], *args, **kwargs) |
def to_exceptions(cls, errors):
""" Convert the validation errors into ValidationFailure exc's
Transform native schematics validation errors into a
goldman ValidationFailure exception.
:param errors:
dict of errors in schematics format
:return:
list of ValidationFailure exception objects
"""
ret = []
for key, val in errors.items():
if key in cls.relationships:
attr = '/data/relationships/%s' % key
else:
attr = '/data/attributes/%s' % key
for error in val:
ret.append(ValidationFailure(attr, detail=error))
return ret | Convert the validation errors into ValidationFailure exc's
Transform native schematics validation errors into a
goldman ValidationFailure exception.
:param errors:
dict of errors in schematics format
:return:
list of ValidationFailure exception objects | Below is the the instruction that describes the task:
### Input:
Convert the validation errors into ValidationFailure exc's
Transform native schematics validation errors into a
goldman ValidationFailure exception.
:param errors:
dict of errors in schematics format
:return:
list of ValidationFailure exception objects
### Response:
def to_exceptions(cls, errors):
""" Convert the validation errors into ValidationFailure exc's
Transform native schematics validation errors into a
goldman ValidationFailure exception.
:param errors:
dict of errors in schematics format
:return:
list of ValidationFailure exception objects
"""
ret = []
for key, val in errors.items():
if key in cls.relationships:
attr = '/data/relationships/%s' % key
else:
attr = '/data/attributes/%s' % key
for error in val:
ret.append(ValidationFailure(attr, detail=error))
return ret |
def incomplete_alg(alg_str, input_color, position):
"""
Converts a string written in short algebraic form into an incomplete move.
These incomplete moves do not have the initial location specified and
therefore cannot be used to update the board. In order to fully utilize an
incomplete move, it must be run through ``make_legal()`` with
the corresponding position. It is recommended to use
``short_alg()`` instead of this method because it returns a complete
move.
Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q
:type: alg_str: str
:type: input_color: Color
"""
edge_rank = 0 \
if input_color == color.white \
else 7
if alg_str is None or len(alg_str) <= 1:
raise ValueError("algebraic string {} is invalid".format(alg_str))
# King-side castle
if alg_str in ["00", "oo", "OO", "0-0", "o-o", "O-O"]:
return Move(end_loc=Location(edge_rank, 6),
piece=King(input_color, Location(edge_rank, 4)),
status=notation_const.KING_SIDE_CASTLE,
start_loc=Location(edge_rank, 4))
# Queen-side castle
if alg_str in ["000", "ooo", "OOO", "0-0-0", "o-o-o", "O-O-O"]:
return Move(end_loc=Location(edge_rank, 2),
piece=King(input_color, Location(edge_rank, 4)),
status=notation_const.QUEEN_SIDE_CASTLE,
start_loc=Location(edge_rank, 4))
try:
end_location = Location.from_string(alg_str[-2:])
except ValueError:
end_location = Location.from_string(alg_str[-4:-2])
# Pawn movement
if len(alg_str) == 2:
possible_pawn = position.piece_at_square(end_location.shift_back(input_color))
if type(possible_pawn) is Pawn and \
possible_pawn.color == input_color:
start_location = end_location.shift_back(input_color)
else:
start_location = end_location.shift_back(input_color, times=2)
return Move(end_loc=end_location,
piece=position.piece_at_square(start_location),
status=notation_const.MOVEMENT,
start_loc=start_location)
# Non-pawn Piece movement
if len(alg_str) == 3:
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 0),
position)
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.MOVEMENT,
start_loc=start_location)
# Multiple options (Capture or Piece movement with file specified)
if len(alg_str) == 4:
# Capture
if alg_str[1].upper() == "X":
# Pawn capture
if not alg_str[0].isupper():
pawn_location = Location(end_location.rank, ord(alg_str[0]) - 97).shift_back(input_color)
possible_pawn = position.piece_at_square(pawn_location)
if type(possible_pawn) is Pawn and \
possible_pawn.color == input_color:
en_passant_pawn = position.piece_at_square(end_location.shift_back(input_color))
if type(en_passant_pawn) is Pawn and \
en_passant_pawn.color != input_color and \
position.is_square_empty(end_location):
return Move(end_loc=end_location,
piece=position.piece_at_square(pawn_location),
status=notation_const.EN_PASSANT,
start_loc=pawn_location)
else:
return Move(end_loc=end_location,
piece=position.piece_at_square(pawn_location),
status=notation_const.CAPTURE,
start_loc=pawn_location)
# Piece capture
elif alg_str[0].isupper():
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 0),
position)
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.CAPTURE,
start_loc=start_location)
# Pawn Promotion
elif alg_str[2] == "=":
promote_end_loc = Location.from_string(alg_str[:2])
if promote_end_loc.rank != 0 and promote_end_loc.rank != 7:
raise ValueError("Promotion {} must be on the last rank".format(alg_str))
return Move(end_loc=promote_end_loc,
piece=Pawn(input_color, promote_end_loc),
status=notation_const.PROMOTE,
promoted_to_piece=_get_piece(alg_str, 3),
start_loc=promote_end_loc.shift_back(input_color))
# Non-pawn Piece movement with file specified (aRb7)
elif alg_str[1].isupper() and not alg_str[0].isdigit():
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 1),
position,
start_file=alg_str[0])
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.MOVEMENT,
start_loc=start_location)
# (alt) Non-pawn Piece movement with file specified (Rab7)
elif alg_str[0].isupper() and not alg_str[1].isdigit():
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 0),
position,
start_file=alg_str[1])
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.MOVEMENT,
start_loc=start_location)
# Non-pawn Piece movement with rank specified (R1b7)
elif alg_str[0].isupper() and alg_str[1].isdigit():
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 0),
position,
start_rank=alg_str[1])
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.MOVEMENT,
start_loc=start_location)
# Multiple options
if len(alg_str) == 5:
# Non-pawn Piece movement with rank and file specified (a2Ra1)
if not alg_str[0].isdigit() and \
alg_str[1].isdigit() and \
alg_str[2].isupper() and \
not alg_str[3].isdigit() and \
alg_str[4].isdigit():
start_loc = Location.from_string(alg_str[:2])
return Move(end_loc=end_location,
piece=_get_piece(alg_str, 2)(input_color, end_location),
status=notation_const.MOVEMENT,
start_loc=start_loc)
# Multiple Piece capture options
if alg_str[2].upper() == "X":
# Piece capture with rank specified (R1xa1)
if alg_str[1].isdigit():
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 0),
position,
start_rank=alg_str[1])
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.CAPTURE,
start_loc=start_location)
# Piece capture with file specified (Rdxd7)
else:
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 0),
position,
start_file=alg_str[1])
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.CAPTURE,
start_loc=start_location)
# Pawn promotion with capture
if len(alg_str) == 6 and alg_str[4] == "=":
start_file = ord(alg_str[0]) - 97
promote_capture_end_loc = Location.from_string(alg_str[2:4])
return Move(end_loc=promote_capture_end_loc,
piece=Pawn(input_color, promote_capture_end_loc),
status=notation_const.CAPTURE_AND_PROMOTE,
promoted_to_piece=_get_piece(alg_str, 5),
start_loc=Location(end_location.shift_back(input_color).rank, start_file))
raise ValueError("algebraic string {} is invalid in \n{}".format(alg_str, position)) | Converts a string written in short algebraic form into an incomplete move.
These incomplete moves do not have the initial location specified and
therefore cannot be used to update the board. In order to fully utilize an
incomplete move, it must be run through ``make_legal()`` with
the corresponding position. It is recommended to use
``short_alg()`` instead of this method because it returns a complete
move.
Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q
:type: alg_str: str
:type: input_color: Color | Below is the the instruction that describes the task:
### Input:
Converts a string written in short algebraic form into an incomplete move.
These incomplete moves do not have the initial location specified and
therefore cannot be used to update the board. In order to fully utilize an
incomplete move, it must be run through ``make_legal()`` with
the corresponding position. It is recommended to use
``short_alg()`` instead of this method because it returns a complete
move.
Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q
:type: alg_str: str
:type: input_color: Color
### Response:
def incomplete_alg(alg_str, input_color, position):
"""
Converts a string written in short algebraic form into an incomplete move.
These incomplete moves do not have the initial location specified and
therefore cannot be used to update the board. In order to fully utilize an
incomplete move, it must be run through ``make_legal()`` with
the corresponding position. It is recommended to use
``short_alg()`` instead of this method because it returns a complete
move.
Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q
:type: alg_str: str
:type: input_color: Color
"""
edge_rank = 0 \
if input_color == color.white \
else 7
if alg_str is None or len(alg_str) <= 1:
raise ValueError("algebraic string {} is invalid".format(alg_str))
# King-side castle
if alg_str in ["00", "oo", "OO", "0-0", "o-o", "O-O"]:
return Move(end_loc=Location(edge_rank, 6),
piece=King(input_color, Location(edge_rank, 4)),
status=notation_const.KING_SIDE_CASTLE,
start_loc=Location(edge_rank, 4))
# Queen-side castle
if alg_str in ["000", "ooo", "OOO", "0-0-0", "o-o-o", "O-O-O"]:
return Move(end_loc=Location(edge_rank, 2),
piece=King(input_color, Location(edge_rank, 4)),
status=notation_const.QUEEN_SIDE_CASTLE,
start_loc=Location(edge_rank, 4))
try:
end_location = Location.from_string(alg_str[-2:])
except ValueError:
end_location = Location.from_string(alg_str[-4:-2])
# Pawn movement
if len(alg_str) == 2:
possible_pawn = position.piece_at_square(end_location.shift_back(input_color))
if type(possible_pawn) is Pawn and \
possible_pawn.color == input_color:
start_location = end_location.shift_back(input_color)
else:
start_location = end_location.shift_back(input_color, times=2)
return Move(end_loc=end_location,
piece=position.piece_at_square(start_location),
status=notation_const.MOVEMENT,
start_loc=start_location)
# Non-pawn Piece movement
if len(alg_str) == 3:
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 0),
position)
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.MOVEMENT,
start_loc=start_location)
# Multiple options (Capture or Piece movement with file specified)
if len(alg_str) == 4:
# Capture
if alg_str[1].upper() == "X":
# Pawn capture
if not alg_str[0].isupper():
pawn_location = Location(end_location.rank, ord(alg_str[0]) - 97).shift_back(input_color)
possible_pawn = position.piece_at_square(pawn_location)
if type(possible_pawn) is Pawn and \
possible_pawn.color == input_color:
en_passant_pawn = position.piece_at_square(end_location.shift_back(input_color))
if type(en_passant_pawn) is Pawn and \
en_passant_pawn.color != input_color and \
position.is_square_empty(end_location):
return Move(end_loc=end_location,
piece=position.piece_at_square(pawn_location),
status=notation_const.EN_PASSANT,
start_loc=pawn_location)
else:
return Move(end_loc=end_location,
piece=position.piece_at_square(pawn_location),
status=notation_const.CAPTURE,
start_loc=pawn_location)
# Piece capture
elif alg_str[0].isupper():
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 0),
position)
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.CAPTURE,
start_loc=start_location)
# Pawn Promotion
elif alg_str[2] == "=":
promote_end_loc = Location.from_string(alg_str[:2])
if promote_end_loc.rank != 0 and promote_end_loc.rank != 7:
raise ValueError("Promotion {} must be on the last rank".format(alg_str))
return Move(end_loc=promote_end_loc,
piece=Pawn(input_color, promote_end_loc),
status=notation_const.PROMOTE,
promoted_to_piece=_get_piece(alg_str, 3),
start_loc=promote_end_loc.shift_back(input_color))
# Non-pawn Piece movement with file specified (aRb7)
elif alg_str[1].isupper() and not alg_str[0].isdigit():
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 1),
position,
start_file=alg_str[0])
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.MOVEMENT,
start_loc=start_location)
# (alt) Non-pawn Piece movement with file specified (Rab7)
elif alg_str[0].isupper() and not alg_str[1].isdigit():
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 0),
position,
start_file=alg_str[1])
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.MOVEMENT,
start_loc=start_location)
# Non-pawn Piece movement with rank specified (R1b7)
elif alg_str[0].isupper() and alg_str[1].isdigit():
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 0),
position,
start_rank=alg_str[1])
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.MOVEMENT,
start_loc=start_location)
# Multiple options
if len(alg_str) == 5:
# Non-pawn Piece movement with rank and file specified (a2Ra1)
if not alg_str[0].isdigit() and \
alg_str[1].isdigit() and \
alg_str[2].isupper() and \
not alg_str[3].isdigit() and \
alg_str[4].isdigit():
start_loc = Location.from_string(alg_str[:2])
return Move(end_loc=end_location,
piece=_get_piece(alg_str, 2)(input_color, end_location),
status=notation_const.MOVEMENT,
start_loc=start_loc)
# Multiple Piece capture options
if alg_str[2].upper() == "X":
# Piece capture with rank specified (R1xa1)
if alg_str[1].isdigit():
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 0),
position,
start_rank=alg_str[1])
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.CAPTURE,
start_loc=start_location)
# Piece capture with file specified (Rdxd7)
else:
possible_piece, start_location = _get_piece_start_location(end_location,
input_color,
_get_piece(alg_str, 0),
position,
start_file=alg_str[1])
return Move(end_loc=end_location,
piece=possible_piece,
status=notation_const.CAPTURE,
start_loc=start_location)
# Pawn promotion with capture
if len(alg_str) == 6 and alg_str[4] == "=":
start_file = ord(alg_str[0]) - 97
promote_capture_end_loc = Location.from_string(alg_str[2:4])
return Move(end_loc=promote_capture_end_loc,
piece=Pawn(input_color, promote_capture_end_loc),
status=notation_const.CAPTURE_AND_PROMOTE,
promoted_to_piece=_get_piece(alg_str, 5),
start_loc=Location(end_location.shift_back(input_color).rank, start_file))
raise ValueError("algebraic string {} is invalid in \n{}".format(alg_str, position)) |
def get_transfer_operation(self, operation_name):
"""
Gets a transfer operation in Google Storage Transfer Service.
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:return: transfer operation
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/Operation
:rtype: dict
"""
return (
self.get_conn()
.transferOperations()
.get(name=operation_name)
.execute(num_retries=self.num_retries)
) | Gets a transfer operation in Google Storage Transfer Service.
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:return: transfer operation
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/Operation
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Gets a transfer operation in Google Storage Transfer Service.
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:return: transfer operation
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/Operation
:rtype: dict
### Response:
def get_transfer_operation(self, operation_name):
"""
        Gets a transfer operation in Google Storage Transfer Service.
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:return: transfer operation
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/Operation
:rtype: dict
"""
return (
self.get_conn()
.transferOperations()
.get(name=operation_name)
.execute(num_retries=self.num_retries)
) |
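A minimal usage sketch for get_transfer_operation above, assuming it lives on a GCP transfer-service hook object; the hook class name and the operation name are illustrative, not taken from the record:
# Hypothetical hook instance exposing get_transfer_operation(); names are illustrative.
hook = CloudStorageTransferHook()
operation = hook.get_transfer_operation(operation_name="transferOperations/123456789")  # placeholder name
print(operation)  # dict shaped like the Operation resource linked in the docstring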
def write(self,output):
"""Writes the data to be output to the device buffer
:param output: data to output
:type output: numpy.ndarray
"""
w = c_int32()
self.WriteAnalogF64(self.npoints, 0, 10.0, DAQmx_Val_GroupByChannel,
output, w, None); | Writes the data to be output to the device buffer
:param output: data to output
:type output: numpy.ndarray | Below is the the instruction that describes the task:
### Input:
Writes the data to be output to the device buffer
:param output: data to output
:type output: numpy.ndarray
### Response:
def write(self,output):
"""Writes the data to be output to the device buffer
:param output: data to output
:type output: numpy.ndarray
"""
w = c_int32()
self.WriteAnalogF64(self.npoints, 0, 10.0, DAQmx_Val_GroupByChannel,
output, w, None); |
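A short usage sketch for write() above, assuming the surrounding class wraps a PyDAQmx analog-output task and exposes an npoints attribute; the wrapper class name and its constructor arguments are assumptions for illustration:
import numpy as np

# Hypothetical wrapper instance; only .npoints and .write() come from the record.
task = AOTask(channel="Dev1/ao0", npoints=1000)
signal = np.sin(2 * np.pi * 10 * np.linspace(0, 1, task.npoints))  # 10 Hz test tone
task.write(signal)  # copies the samples into the device output buffer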
def process_hv_plots(widgets, plots):
"""
Temporary fix to patch HoloViews plot comms
"""
bokeh_plots = []
for plot in plots:
if hasattr(plot, '_update_callbacks'):
for subplot in plot.traverse(lambda x: x):
subplot.comm = widgets.server_comm
for cb in subplot.callbacks:
for c in cb.callbacks:
c.code = c.code.replace(plot.id, widgets.plot_id)
plot = plot.state
bokeh_plots.append(plot)
return bokeh_plots | Temporary fix to patch HoloViews plot comms | Below is the the instruction that describes the task:
### Input:
Temporary fix to patch HoloViews plot comms
### Response:
def process_hv_plots(widgets, plots):
"""
Temporary fix to patch HoloViews plot comms
"""
bokeh_plots = []
for plot in plots:
if hasattr(plot, '_update_callbacks'):
for subplot in plot.traverse(lambda x: x):
subplot.comm = widgets.server_comm
for cb in subplot.callbacks:
for c in cb.callbacks:
c.code = c.code.replace(plot.id, widgets.plot_id)
plot = plot.state
bokeh_plots.append(plot)
return bokeh_plots |
def _normalize(self, string):
''' Returns a sanitized string. '''
string = super(VerbixFr, self)._normalize(string)
string = string.replace('il; elle', 'il/elle')
string = string.replace('ils; elles', 'ils/elles')
string = string.strip()
return string | Returns a sanitized string. | Below is the the instruction that describes the task:
### Input:
Returns a sanitized string.
### Response:
def _normalize(self, string):
''' Returns a sanitized string. '''
string = super(VerbixFr, self)._normalize(string)
string = string.replace('il; elle', 'il/elle')
string = string.replace('ils; elles', 'ils/elles')
string = string.strip()
return string |
def get_branch(self, auth, username, repo_name, branch_name):
"""
Returns the branch with name ``branch_name`` in the repository with name ``repo_name``
owned by the user with username ``username``.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository containing the branch
:param str repo_name: name of the repository with the branch
:param str branch_name: name of the branch to return
:return: a branch
:rtype: GogsBranch
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
path = "/repos/{u}/{r}/branches/{b}".format(u=username, r=repo_name, b=branch_name)
response = self.get(path, auth=auth)
return GogsBranch.from_json(response.json()) | Returns the branch with name ``branch_name`` in the repository with name ``repo_name``
owned by the user with username ``username``.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository containing the branch
:param str repo_name: name of the repository with the branch
:param str branch_name: name of the branch to return
:return: a branch
:rtype: GogsBranch
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced | Below is the the instruction that describes the task:
### Input:
Returns the branch with name ``branch_name`` in the repository with name ``repo_name``
owned by the user with username ``username``.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository containing the branch
:param str repo_name: name of the repository with the branch
:param str branch_name: name of the branch to return
:return: a branch
:rtype: GogsBranch
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
### Response:
def get_branch(self, auth, username, repo_name, branch_name):
"""
Returns the branch with name ``branch_name`` in the repository with name ``repo_name``
owned by the user with username ``username``.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository containing the branch
:param str repo_name: name of the repository with the branch
:param str branch_name: name of the branch to return
:return: a branch
:rtype: GogsBranch
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
path = "/repos/{u}/{r}/branches/{b}".format(u=username, r=repo_name, b=branch_name)
response = self.get(path, auth=auth)
return GogsBranch.from_json(response.json()) |
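A hedged usage sketch, assuming a gogs_client-style API object that provides this method along with token authentication; the server URL, token, repository names, and the branch attribute accessed at the end are illustrative assumptions:
# Illustrative only: the client construction below is an assumption, not part of the record.
api = GogsApi("https://try.gogs.io")
auth = Token("my-secret-token")
branch = api.get_branch(auth, "octocat", "hello-world", "master")
print(branch.name)  # attribute assumed from the GogsBranch return type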
def _drop_indices(self):
"""Drops the database indices relating to n-grams."""
self._logger.info('Dropping database indices')
self._conn.execute(constants.DROP_TEXTNGRAM_INDEX_SQL)
self._logger.info('Finished dropping database indices') | Drops the database indices relating to n-grams. | Below is the the instruction that describes the task:
### Input:
Drops the database indices relating to n-grams.
### Response:
def _drop_indices(self):
"""Drops the database indices relating to n-grams."""
self._logger.info('Dropping database indices')
self._conn.execute(constants.DROP_TEXTNGRAM_INDEX_SQL)
self._logger.info('Finished dropping database indices') |
def is_sub_to_any_kind(self, *super_entity_kinds):
"""
Find all entities that have super_entities of any of the specified kinds
"""
if super_entity_kinds:
# get the pks of the desired subs from the relationships table
if len(super_entity_kinds) == 1:
entity_pks = EntityRelationship.objects.filter(
super_entity__entity_kind=super_entity_kinds[0]
).select_related('entity_kind', 'sub_entity').values_list('sub_entity', flat=True)
else:
entity_pks = EntityRelationship.objects.filter(
super_entity__entity_kind__in=super_entity_kinds
).select_related('entity_kind', 'sub_entity').values_list('sub_entity', flat=True)
# return a queryset limited to only those pks
return self.filter(pk__in=entity_pks)
else:
return self | Find all entities that have super_entities of any of the specified kinds | Below is the the instruction that describes the task:
### Input:
Find all entities that have super_entities of any of the specified kinds
### Response:
def is_sub_to_any_kind(self, *super_entity_kinds):
"""
Find all entities that have super_entities of any of the specified kinds
"""
if super_entity_kinds:
# get the pks of the desired subs from the relationships table
if len(super_entity_kinds) == 1:
entity_pks = EntityRelationship.objects.filter(
super_entity__entity_kind=super_entity_kinds[0]
).select_related('entity_kind', 'sub_entity').values_list('sub_entity', flat=True)
else:
entity_pks = EntityRelationship.objects.filter(
super_entity__entity_kind__in=super_entity_kinds
).select_related('entity_kind', 'sub_entity').values_list('sub_entity', flat=True)
# return a queryset limited to only those pks
return self.filter(pk__in=entity_pks)
else:
return self |
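A brief usage sketch for the queryset method above, assuming a Django Entity model whose manager returns this queryset; the EntityKind model, its name field, and the kind names are assumptions:
# Illustrative Django usage; EntityKind lookups are assumptions.
team_kind = EntityKind.objects.get(name="team")
org_kind = EntityKind.objects.get(name="organization")

# Entities that are sub-entities of at least one team or organization.
members = Entity.objects.is_sub_to_any_kind(team_kind, org_kind)

# Passing no kinds returns the unfiltered queryset, per the else branch above.
everyone = Entity.objects.is_sub_to_any_kind()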
def percentile_doy(arr, window=5, per=.1):
"""Percentile value for each day of the year
Return the climatological percentile over a moving window around each day of the year.
Parameters
----------
arr : xarray.DataArray
Input data.
window : int
Number of days around each day of the year to include in the calculation.
per : float
Percentile between [0,1]
Returns
-------
xarray.DataArray
The percentiles indexed by the day of the year.
"""
# TODO: Support percentile array, store percentile in coordinates.
# This is supported by DataArray.quantile, but not by groupby.reduce.
rr = arr.rolling(min_periods=1, center=True, time=window).construct('window')
# Create empty percentile array
g = rr.groupby('time.dayofyear')
p = g.reduce(np.nanpercentile, dim=('time', 'window'), q=per * 100)
# The percentile for the 366th day has a sample size of 1/4 of the other days.
# To have the same sample size, we interpolate the percentile from 1-365 doy range to 1-366
if p.dayofyear.max() == 366:
p = adjust_doy_calendar(p.loc[p.dayofyear < 366], arr)
p.attrs.update(arr.attrs.copy())
return p | Percentile value for each day of the year
Return the climatological percentile over a moving window around each day of the year.
Parameters
----------
arr : xarray.DataArray
Input data.
window : int
Number of days around each day of the year to include in the calculation.
per : float
Percentile between [0,1]
Returns
-------
xarray.DataArray
The percentiles indexed by the day of the year. | Below is the the instruction that describes the task:
### Input:
Percentile value for each day of the year
Return the climatological percentile over a moving window around each day of the year.
Parameters
----------
arr : xarray.DataArray
Input data.
window : int
Number of days around each day of the year to include in the calculation.
per : float
Percentile between [0,1]
Returns
-------
xarray.DataArray
The percentiles indexed by the day of the year.
### Response:
def percentile_doy(arr, window=5, per=.1):
"""Percentile value for each day of the year
Return the climatological percentile over a moving window around each day of the year.
Parameters
----------
arr : xarray.DataArray
Input data.
window : int
Number of days around each day of the year to include in the calculation.
per : float
Percentile between [0,1]
Returns
-------
xarray.DataArray
The percentiles indexed by the day of the year.
"""
# TODO: Support percentile array, store percentile in coordinates.
# This is supported by DataArray.quantile, but not by groupby.reduce.
rr = arr.rolling(min_periods=1, center=True, time=window).construct('window')
# Create empty percentile array
g = rr.groupby('time.dayofyear')
p = g.reduce(np.nanpercentile, dim=('time', 'window'), q=per * 100)
# The percentile for the 366th day has a sample size of 1/4 of the other days.
# To have the same sample size, we interpolate the percentile from 1-365 doy range to 1-366
if p.dayofyear.max() == 366:
p = adjust_doy_calendar(p.loc[p.dayofyear < 366], arr)
p.attrs.update(arr.attrs.copy())
return p |
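A short usage sketch for percentile_doy above, assuming a daily minimum-temperature DataArray loaded with xarray; the file name and variable name are placeholders:
import xarray as xr

# Illustrative input: any daily time series with a 'time' coordinate should work.
tasmin = xr.open_dataset("tasmin_daily.nc")["tasmin"]

# 10th percentile of each calendar day, computed over a 5-day moving window.
t10 = percentile_doy(tasmin, window=5, per=0.1)
print(t10.sel(dayofyear=200).values)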
def _getFieldsInDB(self, tablename):
"""get all the fields from a specific table"""
SQL = 'SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.Columns where TABLE_NAME="%s"' % tablename
array_data = self.execQuery(SQL)
return [x[0] for x in array_data] | get all the fields from a specific table | Below is the the instruction that describes the task:
### Input:
get all the fields from a specific table
### Response:
def _getFieldsInDB(self, tablename):
"""get all the fields from a specific table"""
SQL = 'SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.Columns where TABLE_NAME="%s"' % tablename
array_data = self.execQuery(SQL)
return [x[0] for x in array_data] |
def main():
"""Script entrypoint."""
# Parse the arguments
parser = argparse.ArgumentParser(
description='Convert MSBuild XML to JSON format')
parser.add_argument(
'-t', '--toolchain', help='The name of the toolchain', required=True)
parser.add_argument(
'-o', '--output', help='The output directory', default='')
parser.add_argument(
'-r',
'--overwrite',
help='Whether previously output should be overwritten',
dest='overwrite',
action='store_true')
parser.set_defaults(overwrite=False)
parser.add_argument(
'-d',
'--debug',
help="Debug tool output",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.WARNING)
parser.add_argument(
'-v',
'--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO)
parser.add_argument('input', help='The input files', nargs='+')
args = parser.parse_args()
toolchain = args.toolchain
logging.basicConfig(level=args.loglevel)
logging.info('Creating %s toolchain files', toolchain)
values = {}
# Iterate through the inputs
for input in args.input:
input = __get_path(input)
read_msbuild_xml(input, values)
# Determine if the output directory needs to be created
output_dir = __get_path(args.output)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
logging.info('Created output directory %s', output_dir)
for key, value in values.items():
output_path = __output_path(toolchain, key, output_dir)
if os.path.exists(output_path) and not args.overwrite:
logging.info('Comparing previous output to current')
__merge_json_values(value, read_msbuild_json(output_path))
else:
logging.info('Original output will be overwritten')
logging.info('Writing MS Build JSON file at %s', output_path)
__write_json_file(output_path, value) | Script entrypoint. | Below is the the instruction that describes the task:
### Input:
Script entrypoint.
### Response:
def main():
"""Script entrypoint."""
# Parse the arguments
parser = argparse.ArgumentParser(
description='Convert MSBuild XML to JSON format')
parser.add_argument(
'-t', '--toolchain', help='The name of the toolchain', required=True)
parser.add_argument(
'-o', '--output', help='The output directory', default='')
parser.add_argument(
'-r',
'--overwrite',
help='Whether previously output should be overwritten',
dest='overwrite',
action='store_true')
parser.set_defaults(overwrite=False)
parser.add_argument(
'-d',
'--debug',
help="Debug tool output",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.WARNING)
parser.add_argument(
'-v',
'--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO)
parser.add_argument('input', help='The input files', nargs='+')
args = parser.parse_args()
toolchain = args.toolchain
logging.basicConfig(level=args.loglevel)
logging.info('Creating %s toolchain files', toolchain)
values = {}
# Iterate through the inputs
for input in args.input:
input = __get_path(input)
read_msbuild_xml(input, values)
# Determine if the output directory needs to be created
output_dir = __get_path(args.output)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
logging.info('Created output directory %s', output_dir)
for key, value in values.items():
output_path = __output_path(toolchain, key, output_dir)
if os.path.exists(output_path) and not args.overwrite:
logging.info('Comparing previous output to current')
__merge_json_values(value, read_msbuild_json(output_path))
else:
logging.info('Original output will be overwritten')
logging.info('Writing MS Build JSON file at %s', output_path)
__write_json_file(output_path, value) |
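An illustrative invocation of the script above; the script file name, toolchain name, and input file names are assumptions, while the flags come from the argparse definition in the record:
python msbuild_to_json.py --toolchain v141 --output out --overwrite -v cl.xml link.xml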
def make_slack_blueprint(
client_id=None,
client_secret=None,
scope=None,
redirect_url=None,
redirect_to=None,
login_url=None,
authorized_url=None,
session_class=None,
storage=None,
):
"""
Make a blueprint for authenticating with Slack using OAuth 2. This requires
a client ID and client secret from Slack. You should either pass them to
this constructor, or make sure that your Flask application config defines
them, using the variables :envvar:`SLACK_OAUTH_CLIENT_ID` and
:envvar:`SLACK_OAUTH_CLIENT_SECRET`.
Args:
client_id (str): The client ID for your application on Slack.
client_secret (str): The client secret for your application on Slack
scope (str, optional): comma-separated list of scopes for the OAuth token
redirect_url (str): the URL to redirect to after the authentication
dance is complete
redirect_to (str): if ``redirect_url`` is not defined, the name of the
view to redirect to after the authentication dance is complete.
The actual URL will be determined by :func:`flask.url_for`
login_url (str, optional): the URL path for the ``login`` view.
Defaults to ``/slack``
authorized_url (str, optional): the URL path for the ``authorized`` view.
Defaults to ``/slack/authorized``.
session_class (class, optional): The class to use for creating a
Requests session. Defaults to
:class:`~flask_dance.consumer.requests.OAuth2Session`.
storage: A token storage class, or an instance of a token storage
class, to use for this blueprint. Defaults to
:class:`~flask_dance.consumer.storage.session.SessionStorage`.
:rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
:returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app.
"""
scope = scope or ["identify", "chat:write:bot"]
slack_bp = SlackBlueprint(
"slack",
__name__,
client_id=client_id,
client_secret=client_secret,
scope=scope,
base_url="https://slack.com/api/",
authorization_url="https://slack.com/oauth/authorize",
token_url="https://slack.com/api/oauth.access",
redirect_url=redirect_url,
redirect_to=redirect_to,
login_url=login_url,
authorized_url=authorized_url,
session_class=session_class,
storage=storage,
)
slack_bp.from_config["client_id"] = "SLACK_OAUTH_CLIENT_ID"
slack_bp.from_config["client_secret"] = "SLACK_OAUTH_CLIENT_SECRET"
@slack_bp.before_app_request
def set_applocal_session():
ctx = stack.top
ctx.slack_oauth = slack_bp.session
return slack_bp | Make a blueprint for authenticating with Slack using OAuth 2. This requires
a client ID and client secret from Slack. You should either pass them to
this constructor, or make sure that your Flask application config defines
them, using the variables :envvar:`SLACK_OAUTH_CLIENT_ID` and
:envvar:`SLACK_OAUTH_CLIENT_SECRET`.
Args:
client_id (str): The client ID for your application on Slack.
client_secret (str): The client secret for your application on Slack
scope (str, optional): comma-separated list of scopes for the OAuth token
redirect_url (str): the URL to redirect to after the authentication
dance is complete
redirect_to (str): if ``redirect_url`` is not defined, the name of the
view to redirect to after the authentication dance is complete.
The actual URL will be determined by :func:`flask.url_for`
login_url (str, optional): the URL path for the ``login`` view.
Defaults to ``/slack``
authorized_url (str, optional): the URL path for the ``authorized`` view.
Defaults to ``/slack/authorized``.
session_class (class, optional): The class to use for creating a
Requests session. Defaults to
:class:`~flask_dance.consumer.requests.OAuth2Session`.
storage: A token storage class, or an instance of a token storage
class, to use for this blueprint. Defaults to
:class:`~flask_dance.consumer.storage.session.SessionStorage`.
:rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
:returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app. | Below is the the instruction that describes the task:
### Input:
Make a blueprint for authenticating with Slack using OAuth 2. This requires
a client ID and client secret from Slack. You should either pass them to
this constructor, or make sure that your Flask application config defines
them, using the variables :envvar:`SLACK_OAUTH_CLIENT_ID` and
:envvar:`SLACK_OAUTH_CLIENT_SECRET`.
Args:
client_id (str): The client ID for your application on Slack.
client_secret (str): The client secret for your application on Slack
scope (str, optional): comma-separated list of scopes for the OAuth token
redirect_url (str): the URL to redirect to after the authentication
dance is complete
redirect_to (str): if ``redirect_url`` is not defined, the name of the
view to redirect to after the authentication dance is complete.
The actual URL will be determined by :func:`flask.url_for`
login_url (str, optional): the URL path for the ``login`` view.
Defaults to ``/slack``
authorized_url (str, optional): the URL path for the ``authorized`` view.
Defaults to ``/slack/authorized``.
session_class (class, optional): The class to use for creating a
Requests session. Defaults to
:class:`~flask_dance.consumer.requests.OAuth2Session`.
storage: A token storage class, or an instance of a token storage
class, to use for this blueprint. Defaults to
:class:`~flask_dance.consumer.storage.session.SessionStorage`.
:rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
:returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app.
### Response:
def make_slack_blueprint(
client_id=None,
client_secret=None,
scope=None,
redirect_url=None,
redirect_to=None,
login_url=None,
authorized_url=None,
session_class=None,
storage=None,
):
"""
Make a blueprint for authenticating with Slack using OAuth 2. This requires
a client ID and client secret from Slack. You should either pass them to
this constructor, or make sure that your Flask application config defines
them, using the variables :envvar:`SLACK_OAUTH_CLIENT_ID` and
:envvar:`SLACK_OAUTH_CLIENT_SECRET`.
Args:
client_id (str): The client ID for your application on Slack.
client_secret (str): The client secret for your application on Slack
scope (str, optional): comma-separated list of scopes for the OAuth token
redirect_url (str): the URL to redirect to after the authentication
dance is complete
redirect_to (str): if ``redirect_url`` is not defined, the name of the
view to redirect to after the authentication dance is complete.
The actual URL will be determined by :func:`flask.url_for`
login_url (str, optional): the URL path for the ``login`` view.
Defaults to ``/slack``
authorized_url (str, optional): the URL path for the ``authorized`` view.
Defaults to ``/slack/authorized``.
session_class (class, optional): The class to use for creating a
Requests session. Defaults to
:class:`~flask_dance.consumer.requests.OAuth2Session`.
storage: A token storage class, or an instance of a token storage
class, to use for this blueprint. Defaults to
:class:`~flask_dance.consumer.storage.session.SessionStorage`.
:rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
:returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app.
"""
scope = scope or ["identify", "chat:write:bot"]
slack_bp = SlackBlueprint(
"slack",
__name__,
client_id=client_id,
client_secret=client_secret,
scope=scope,
base_url="https://slack.com/api/",
authorization_url="https://slack.com/oauth/authorize",
token_url="https://slack.com/api/oauth.access",
redirect_url=redirect_url,
redirect_to=redirect_to,
login_url=login_url,
authorized_url=authorized_url,
session_class=session_class,
storage=storage,
)
slack_bp.from_config["client_id"] = "SLACK_OAUTH_CLIENT_ID"
slack_bp.from_config["client_secret"] = "SLACK_OAUTH_CLIENT_SECRET"
@slack_bp.before_app_request
def set_applocal_session():
ctx = stack.top
ctx.slack_oauth = slack_bp.session
return slack_bp |
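A minimal Flask wiring sketch for the factory above, following the usual flask-dance pattern; the credential values and URL prefix are placeholders:
from flask import Flask

app = Flask(__name__)
# Placeholder credentials; the config keys come from from_config in the record.
app.config["SLACK_OAUTH_CLIENT_ID"] = "my-client-id"
app.config["SLACK_OAUTH_CLIENT_SECRET"] = "my-client-secret"

blueprint = make_slack_blueprint(scope=["identify", "chat:write:bot"])
app.register_blueprint(blueprint, url_prefix="/login")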
def value_type(type_):
"""returns reference to `boost::shared_ptr` \
or `std::shared_ptr` value type"""
if not smart_pointer_traits.is_smart_pointer(type_):
raise TypeError(
'Type "%s" is not an instantiation of \
boost::shared_ptr or std::shared_ptr' %
type_.decl_string)
try:
return internal_type_traits.get_by_name(type_, "element_type")
except runtime_errors.declaration_not_found_t:
return _search_in_bases(type_) | returns reference to `boost::shared_ptr` \
or `std::shared_ptr` value type | Below is the the instruction that describes the task:
### Input:
returns reference to `boost::shared_ptr` \
or `std::shared_ptr` value type
### Response:
def value_type(type_):
"""returns reference to `boost::shared_ptr` \
or `std::shared_ptr` value type"""
if not smart_pointer_traits.is_smart_pointer(type_):
raise TypeError(
'Type "%s" is not an instantiation of \
boost::shared_ptr or std::shared_ptr' %
type_.decl_string)
try:
return internal_type_traits.get_by_name(type_, "element_type")
except runtime_errors.declaration_not_found_t:
return _search_in_bases(type_) |
def refresh(self, nice_repr=True, **kwargs):
"""
:param nice_repr: Append the repr of a list containing the items that
have been fetched to this point by the fetcher.
:type nice_repr: bool
:param kwargs: kwargs that should be passed to the fetcher when its
fetch method is called. These are merged with the values
provided to the constructor, with the ones provided here
taking precedence if there is a conflict.
"""
for key, value in self._kwargs.items():
kwargs.setdefault(key, value)
# No real good reason to hold on to this. DONT TOUCH.
self._original_iterable = self._fetcher.fetch(**kwargs)
self.exhausted = False
if nice_repr:
self._accumulated = []
self._original_iterable = self._make_nice_repr_iterator(
self._original_iterable, self._accumulated
)
else:
self._accumulated = None
self._clonable, = itertools.tee(self._original_iterable, 1)
return self | :param nice_repr: Append the repr of a list containing the items that
have been fetched to this point by the fetcher.
:type nice_repr: bool
:param kwargs: kwargs that should be passed to the fetcher when its
fetch method is called. These are merged with the values
provided to the constructor, with the ones provided here
taking precedence if there is a conflict. | Below is the the instruction that describes the task:
### Input:
:param nice_repr: Append the repr of a list containing the items that
have been fetched to this point by the fetcher.
:type nice_repr: bool
:param kwargs: kwargs that should be passed to the fetcher when its
fetch method is called. These are merged with the values
provided to the constructor, with the ones provided here
taking precedence if there is a conflict.
### Response:
def refresh(self, nice_repr=True, **kwargs):
"""
:param nice_repr: Append the repr of a list containing the items that
have been fetched to this point by the fetcher.
:type nice_repr: bool
:param kwargs: kwargs that should be passed to the fetcher when its
fetch method is called. These are merged with the values
provided to the constructor, with the ones provided here
taking precedence if there is a conflict.
"""
for key, value in self._kwargs.items():
kwargs.setdefault(key, value)
# No real good reason to hold on to this. DONT TOUCH.
self._original_iterable = self._fetcher.fetch(**kwargs)
self.exhausted = False
if nice_repr:
self._accumulated = []
self._original_iterable = self._make_nice_repr_iterator(
self._original_iterable, self._accumulated
)
else:
self._accumulated = None
self._clonable, = itertools.tee(self._original_iterable, 1)
return self |
def transform_audio(self, y):
'''Compute the CQT with unwrapped phase
Parameters
----------
y : np.ndarray
The audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, n_bins)
CQT magnitude
data['dphase'] : np.ndarray, shape=(n_frames, n_bins)
Unwrapped phase differential
'''
data = super(CQTPhaseDiff, self).transform_audio(y)
data['dphase'] = self.phase_diff(data.pop('phase'))
return data | Compute the CQT with unwrapped phase
Parameters
----------
y : np.ndarray
The audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, n_bins)
CQT magnitude
data['dphase'] : np.ndarray, shape=(n_frames, n_bins)
Unwrapped phase differential | Below is the the instruction that describes the task:
### Input:
Compute the CQT with unwrapped phase
Parameters
----------
y : np.ndarray
The audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, n_bins)
CQT magnitude
data['dphase'] : np.ndarray, shape=(n_frames, n_bins)
Unwrapped phase differential
### Response:
def transform_audio(self, y):
'''Compute the CQT with unwrapped phase
Parameters
----------
y : np.ndarray
The audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, n_bins)
CQT magnitude
data['dphase'] : np.ndarray, shape=(n_frames, n_bins)
Unwrapped phase differential
'''
data = super(CQTPhaseDiff, self).transform_audio(y)
data['dphase'] = self.phase_diff(data.pop('phase'))
return data |
def col_counts(col, weights=None, gap_chars='-.'):
"""Absolute counts of each residue type in a single column."""
    cnt = defaultdict(float)
    if weights is None:
        # Fall back to uniform weights; zip() below would otherwise fail on None.
        weights = [1.0] * len(col)
    for aa, wt in zip(col, weights):
if aa not in gap_chars:
cnt[aa] += wt
return cnt | Absolute counts of each residue type in a single column. | Below is the the instruction that describes the task:
### Input:
Absolute counts of each residue type in a single column.
### Response:
def col_counts(col, weights=None, gap_chars='-.'):
"""Absolute counts of each residue type in a single column."""
    cnt = defaultdict(float)
    if weights is None:
        # Fall back to uniform weights; zip() below would otherwise fail on None.
        weights = [1.0] * len(col)
    for aa, wt in zip(col, weights):
if aa not in gap_chars:
cnt[aa] += wt
return cnt |
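A tiny worked example for col_counts above; the alignment column and weights are illustrative:
# One alignment column containing a gap character, with uniform weights.
counts = col_counts("AAG-A", weights=[1.0, 1.0, 1.0, 1.0, 1.0])
print(dict(counts))  # {'A': 3.0, 'G': 1.0}, the '-' position is skipped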
def audio_open(path, backends=None):
"""Open an audio file using a library that is available on this
system.
The optional `backends` parameter can be a list of audio file
classes to try opening the file with. If it is not provided,
`audio_open` tries all available backends. If you call this function
many times, you can avoid the cost of checking for available
backends every time by calling `available_backends` once and passing
the result to each `audio_open` call.
If all backends fail to read the file, a NoBackendError exception is
raised.
"""
if backends is None:
backends = available_backends()
for BackendClass in backends:
try:
return BackendClass(path)
except DecodeError:
pass
# All backends failed!
raise NoBackendError() | Open an audio file using a library that is available on this
system.
The optional `backends` parameter can be a list of audio file
classes to try opening the file with. If it is not provided,
`audio_open` tries all available backends. If you call this function
many times, you can avoid the cost of checking for available
backends every time by calling `available_backends` once and passing
the result to each `audio_open` call.
If all backends fail to read the file, a NoBackendError exception is
raised. | Below is the the instruction that describes the task:
### Input:
Open an audio file using a library that is available on this
system.
The optional `backends` parameter can be a list of audio file
classes to try opening the file with. If it is not provided,
`audio_open` tries all available backends. If you call this function
many times, you can avoid the cost of checking for available
backends every time by calling `available_backends` once and passing
the result to each `audio_open` call.
If all backends fail to read the file, a NoBackendError exception is
raised.
### Response:
def audio_open(path, backends=None):
"""Open an audio file using a library that is available on this
system.
The optional `backends` parameter can be a list of audio file
classes to try opening the file with. If it is not provided,
`audio_open` tries all available backends. If you call this function
many times, you can avoid the cost of checking for available
backends every time by calling `available_backends` once and passing
the result to each `audio_open` call.
If all backends fail to read the file, a NoBackendError exception is
raised.
"""
if backends is None:
backends = available_backends()
for BackendClass in backends:
try:
return BackendClass(path)
except DecodeError:
pass
# All backends failed!
raise NoBackendError() |
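A usage sketch for audio_open above, in the style of the audioread documentation; the file name and the consumer function are placeholders:
# 'song.mp3' is a placeholder path; process() is a hypothetical consumer of raw PCM blocks.
with audio_open("song.mp3") as f:
    print(f.channels, f.samplerate, f.duration)
    for buf in f:
        process(buf)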
def where_terms_factorization_check(self, term_list):
"""
check for where terms if they are applicable
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
valid = True
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
if not os.path.exists(col_values_rootdir):
# no factorization available
break
col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
col_values = set(col_carray)
if filter_operator in ['in', 'not in', 'nin']:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
filter_value = filter_value[0]
if filter_operator == 'in':
filter_operator = '=='
else:
filter_operator = '!='
else:
filter_value = set(filter_value)
if filter_operator in ['==', 'eq']:
valid = filter_value in col_values
elif filter_operator in ['!=', 'neq']:
valid = any(val for val in col_values if val != filter_value)
elif filter_operator in ['in']:
valid = any(val for val in filter_value if val in col_values)
elif filter_operator in ['nin', 'not in']:
valid = any(val for val in col_values if val not in filter_value)
elif filter_operator in ['>']:
valid = any(val for val in col_values if val > filter_value)
elif filter_operator in ['>=']:
valid = any(val for val in col_values if val >= filter_value)
elif filter_operator in ['<']:
valid = any(val for val in col_values if val < filter_value)
elif filter_operator in ['<=']:
                valid = any(val for val in col_values if val <= filter_value)
else:
raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
# if one of the filters is blocking, we can stop
if not valid:
break
return valid | check for where terms if they are applicable
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError: | Below is the the instruction that describes the task:
### Input:
check for where terms if they are applicable
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
### Response:
def where_terms_factorization_check(self, term_list):
"""
check for where terms if they are applicable
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
valid = True
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
if not os.path.exists(col_values_rootdir):
# no factorization available
break
col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
col_values = set(col_carray)
if filter_operator in ['in', 'not in', 'nin']:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
filter_value = filter_value[0]
if filter_operator == 'in':
filter_operator = '=='
else:
filter_operator = '!='
else:
filter_value = set(filter_value)
if filter_operator in ['==', 'eq']:
valid = filter_value in col_values
elif filter_operator in ['!=', 'neq']:
valid = any(val for val in col_values if val != filter_value)
elif filter_operator in ['in']:
valid = any(val for val in filter_value if val in col_values)
elif filter_operator in ['nin', 'not in']:
valid = any(val for val in col_values if val not in filter_value)
elif filter_operator in ['>']:
valid = any(val for val in col_values if val > filter_value)
elif filter_operator in ['>=']:
valid = any(val for val in col_values if val >= filter_value)
elif filter_operator in ['<']:
valid = any(val for val in col_values if val < filter_value)
elif filter_operator in ['<=']:
                valid = any(val for val in col_values if val <= filter_value)
else:
raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
# if one of the filters is blocking, we can stop
if not valid:
break
return valid |
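A hedged usage sketch for the check above, assuming a bcolz-backed table object that exposes this method; the column names, values, and the companion filter method are assumptions:
# Illustrative term list in the [(col, operator, value), ...] form described above.
terms = [("state", "in", ["IL", "AR"]), ("sales", ">", 2)]
if table.where_terms_factorization_check(terms):  # 'table' is an assumed ctable wrapper
    result = table.where_terms(terms)              # assumed companion filtering method
else:
    result = []                                     # a blocking filter: nothing can match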
def update_position(self):
'''update position text'''
state = self.state
pos = self.mouse_pos
newtext = ''
alt = 0
if pos is not None:
(lat,lon) = self.coordinates(pos.x, pos.y)
newtext += 'Cursor: %f %f (%s)' % (lat, lon, mp_util.latlon_to_grid((lat, lon)))
if state.elevation:
alt = self.ElevationMap.GetElevation(lat, lon)
if alt is not None:
newtext += ' %.1fm' % alt
state.mt.set_download(state.download)
pending = 0
if state.download:
pending = state.mt.tiles_pending()
if pending:
newtext += ' Map Downloading %u ' % pending
if alt == -1:
newtext += ' SRTM Downloading '
newtext += '\n'
if self.click_pos is not None:
newtext += 'Click: %f %f (%s %s) (%s)' % (self.click_pos[0], self.click_pos[1],
mp_util.degrees_to_dms(self.click_pos[0]),
mp_util.degrees_to_dms(self.click_pos[1]),
mp_util.latlon_to_grid(self.click_pos))
if self.last_click_pos is not None:
distance = mp_util.gps_distance(self.last_click_pos[0], self.last_click_pos[1],
self.click_pos[0], self.click_pos[1])
bearing = mp_util.gps_bearing(self.last_click_pos[0], self.last_click_pos[1],
self.click_pos[0], self.click_pos[1])
newtext += ' Distance: %.1fm Bearing %.1f' % (distance, bearing)
if newtext != state.oldtext:
self.position.Clear()
self.position.WriteText(newtext)
state.oldtext = newtext | update position text | Below is the the instruction that describes the task:
### Input:
update position text
### Response:
def update_position(self):
'''update position text'''
state = self.state
pos = self.mouse_pos
newtext = ''
alt = 0
if pos is not None:
(lat,lon) = self.coordinates(pos.x, pos.y)
newtext += 'Cursor: %f %f (%s)' % (lat, lon, mp_util.latlon_to_grid((lat, lon)))
if state.elevation:
alt = self.ElevationMap.GetElevation(lat, lon)
if alt is not None:
newtext += ' %.1fm' % alt
state.mt.set_download(state.download)
pending = 0
if state.download:
pending = state.mt.tiles_pending()
if pending:
newtext += ' Map Downloading %u ' % pending
if alt == -1:
newtext += ' SRTM Downloading '
newtext += '\n'
if self.click_pos is not None:
newtext += 'Click: %f %f (%s %s) (%s)' % (self.click_pos[0], self.click_pos[1],
mp_util.degrees_to_dms(self.click_pos[0]),
mp_util.degrees_to_dms(self.click_pos[1]),
mp_util.latlon_to_grid(self.click_pos))
if self.last_click_pos is not None:
distance = mp_util.gps_distance(self.last_click_pos[0], self.last_click_pos[1],
self.click_pos[0], self.click_pos[1])
bearing = mp_util.gps_bearing(self.last_click_pos[0], self.last_click_pos[1],
self.click_pos[0], self.click_pos[1])
newtext += ' Distance: %.1fm Bearing %.1f' % (distance, bearing)
if newtext != state.oldtext:
self.position.Clear()
self.position.WriteText(newtext)
state.oldtext = newtext |
def _sliced_shape(shape, keys):
"""
Returns the shape that results from slicing an array of the given
shape by the given keys.
>>> _sliced_shape(shape=(52350, 70, 90, 180),
... keys=(np.newaxis, slice(None, 10), 3,
... slice(None), slice(2, 3)))
(1, 10, 90, 1)
"""
keys = _full_keys(keys, len(shape))
sliced_shape = []
shape_dim = -1
for key in keys:
shape_dim += 1
if _is_scalar(key):
continue
elif isinstance(key, slice):
size = len(range(*key.indices(shape[shape_dim])))
sliced_shape.append(size)
elif isinstance(key, np.ndarray) and key.dtype == np.dtype('bool'):
# Numpy boolean indexing.
sliced_shape.append(builtins.sum(key))
elif isinstance(key, (tuple, np.ndarray)):
sliced_shape.append(len(key))
elif key is np.newaxis:
shape_dim -= 1
sliced_shape.append(1)
else:
raise ValueError('Invalid indexing object "{}"'.format(key))
sliced_shape = tuple(sliced_shape)
return sliced_shape | Returns the shape that results from slicing an array of the given
shape by the given keys.
>>> _sliced_shape(shape=(52350, 70, 90, 180),
... keys=(np.newaxis, slice(None, 10), 3,
... slice(None), slice(2, 3)))
(1, 10, 90, 1) | Below is the the instruction that describes the task:
### Input:
Returns the shape that results from slicing an array of the given
shape by the given keys.
>>> _sliced_shape(shape=(52350, 70, 90, 180),
... keys=(np.newaxis, slice(None, 10), 3,
... slice(None), slice(2, 3)))
(1, 10, 90, 1)
### Response:
def _sliced_shape(shape, keys):
"""
Returns the shape that results from slicing an array of the given
shape by the given keys.
>>> _sliced_shape(shape=(52350, 70, 90, 180),
... keys=(np.newaxis, slice(None, 10), 3,
... slice(None), slice(2, 3)))
(1, 10, 90, 1)
"""
keys = _full_keys(keys, len(shape))
sliced_shape = []
shape_dim = -1
for key in keys:
shape_dim += 1
if _is_scalar(key):
continue
elif isinstance(key, slice):
size = len(range(*key.indices(shape[shape_dim])))
sliced_shape.append(size)
elif isinstance(key, np.ndarray) and key.dtype == np.dtype('bool'):
# Numpy boolean indexing.
sliced_shape.append(builtins.sum(key))
elif isinstance(key, (tuple, np.ndarray)):
sliced_shape.append(len(key))
elif key is np.newaxis:
shape_dim -= 1
sliced_shape.append(1)
else:
raise ValueError('Invalid indexing object "{}"'.format(key))
sliced_shape = tuple(sliced_shape)
return sliced_shape |
def _tab_pressed(self):
""" Called when the tab key is pressed. Returns whether to continue
processing the event.
"""
# Perform tab completion if:
# 1) The cursor is in the input buffer.
# 2) There is a non-whitespace character before the cursor.
text = self._get_input_buffer_cursor_line()
if text is None:
return False
complete = bool(text[:self._get_input_buffer_cursor_column()].strip())
if complete:
self._complete()
return not complete | Called when the tab key is pressed. Returns whether to continue
processing the event. | Below is the the instruction that describes the task:
### Input:
Called when the tab key is pressed. Returns whether to continue
processing the event.
### Response:
def _tab_pressed(self):
""" Called when the tab key is pressed. Returns whether to continue
processing the event.
"""
# Perform tab completion if:
# 1) The cursor is in the input buffer.
# 2) There is a non-whitespace character before the cursor.
text = self._get_input_buffer_cursor_line()
if text is None:
return False
complete = bool(text[:self._get_input_buffer_cursor_column()].strip())
if complete:
self._complete()
return not complete |
def coordInImage(x_coord, y_coord, numPix, deltapix):
"""
checks whether image positions are within the pixel image in units of arcsec
if not: remove it
    :param imcoord: image coordinate (in units of angles) [[x,y,delta,magnification][...]]
:type imcoord: (n,4) numpy array
:returns: image positions within the pixel image
"""
idex=[]
min = -deltapix*numPix/2
max = deltapix*numPix/2
for i in range(len(x_coord)): #sum over image positions
if (x_coord[i] < min or x_coord[i] > max or y_coord[i] < min or y_coord[i] > max):
idex.append(i)
x_coord = np.delete(x_coord, idex, axis=0)
y_coord = np.delete(y_coord, idex, axis=0)
return x_coord, y_coord | checks whether image positions are within the pixel image in units of arcsec
if not: remove it
:param imcoord: image coordinate (in units of angles) [[x,y,delta,magnification][...]]
:type imcoord: (n,4) numpy array
:returns: image positions within the pixel image | Below is the the instruction that describes the task:
### Input:
checks whether image positions are within the pixel image in units of arcsec
if not: remove it
:param imcoord: image coordinate (in units of angles) [[x,y,delta,magnification][...]]
:type imcoord: (n,4) numpy array
:returns: image positions within the pixel image
### Response:
def coordInImage(x_coord, y_coord, numPix, deltapix):
"""
checks whether image positions are within the pixel image in units of arcsec
if not: remove it
    :param imcoord: image coordinate (in units of angles) [[x,y,delta,magnification][...]]
:type imcoord: (n,4) numpy array
:returns: image positions within the pixel image
"""
idex=[]
min = -deltapix*numPix/2
max = deltapix*numPix/2
for i in range(len(x_coord)): #sum over image positions
if (x_coord[i] < min or x_coord[i] > max or y_coord[i] < min or y_coord[i] > max):
idex.append(i)
x_coord = np.delete(x_coord, idex, axis=0)
y_coord = np.delete(y_coord, idex, axis=0)
return x_coord, y_coord |
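A small worked example for coordInImage above; with numPix=100 and deltapix=0.05 the kept window is +/-2.5 arcsec, so the out-of-range point is dropped:
import numpy as np

x = np.array([-1.0, 0.2, 3.0])
y = np.array([0.0, 0.1, 0.0])
x_in, y_in = coordInImage(x, y, numPix=100, deltapix=0.05)
print(x_in, y_in)  # roughly [-1.  0.2] [0.  0.1], the point at x=3.0 lies outside +/-2.5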
def _update_dicts(name_scope,
model_layer,
input_to_in_layer,
model_name_to_output,
prev_node_name):
"""Updates input_to_in_layer, model_name_to_output, and prev_node_name
based on the model_layer.
Args:
name_scope: a string representing a scope name, similar to that of tf.name_scope.
model_layer: a dict representing a Keras model configuration.
input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
model_name_to_output: a dict mapping Keras Model name to output layer of the model.
prev_node_name: a string representing a previous, in sequential model layout,
node name.
Returns:
A tuple of (input_to_in_layer, model_name_to_output, prev_node_name).
input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
model_name_to_output: a dict mapping Keras Model name to output layer of the model.
prev_node_name: a string representing a previous, in sequential model layout,
node name.
"""
layer_config = model_layer.get('config')
if not layer_config.get('layers'):
raise ValueError('layer is not a model.')
node_name = _scoped_name(name_scope, layer_config.get('name'))
input_layers = layer_config.get('input_layers')
output_layers = layer_config.get('output_layers')
inbound_nodes = model_layer.get('inbound_nodes')
is_functional_model = bool(input_layers and output_layers)
# In case of [1] and the parent model is functional, current layer
# will have the 'inbound_nodes' property.
is_parent_functional_model = bool(inbound_nodes)
if is_parent_functional_model and is_functional_model:
for (input_layer, inbound_node) in zip(input_layers, inbound_nodes):
input_layer_name = _scoped_name(node_name, input_layer)
inbound_node_name = _scoped_name(name_scope, inbound_node[0])
input_to_in_layer[input_layer_name] = inbound_node_name
elif is_parent_functional_model and not is_functional_model:
# Sequential model can take only one input. Make sure inbound to the
# model is linked to the first layer in the Sequential model.
prev_node_name = _scoped_name(name_scope, inbound_nodes[0][0][0])
elif not is_parent_functional_model and prev_node_name and is_functional_model:
assert len(input_layers) == 1, (
'Cannot have multi-input Functional model when parent model '
        'is not Functional. Number of input layers: %d' % len(input_layers))
input_layer = input_layers[0]
input_layer_name = _scoped_name(node_name, input_layer)
input_to_in_layer[input_layer_name] = prev_node_name
if is_functional_model and output_layers:
layers = _norm_to_list_of_layers(output_layers)
layer_names = [_scoped_name(node_name, layer[0]) for layer in layers]
model_name_to_output[node_name] = layer_names
else:
last_layer = layer_config.get('layers')[-1]
last_layer_name = last_layer.get('config').get('name')
output_node = _scoped_name(node_name, last_layer_name)
model_name_to_output[node_name] = [output_node]
return (input_to_in_layer, model_name_to_output, prev_node_name) | Updates input_to_in_layer, model_name_to_output, and prev_node_name
based on the model_layer.
Args:
name_scope: a string representing a scope name, similar to that of tf.name_scope.
model_layer: a dict representing a Keras model configuration.
input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
model_name_to_output: a dict mapping Keras Model name to output layer of the model.
prev_node_name: a string representing a previous, in sequential model layout,
node name.
Returns:
A tuple of (input_to_in_layer, model_name_to_output, prev_node_name).
input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
model_name_to_output: a dict mapping Keras Model name to output layer of the model.
prev_node_name: a string representing a previous, in sequential model layout,
node name. | Below is the the instruction that describes the task:
### Input:
Updates input_to_in_layer, model_name_to_output, and prev_node_name
based on the model_layer.
Args:
name_scope: a string representing a scope name, similar to that of tf.name_scope.
model_layer: a dict representing a Keras model configuration.
input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
model_name_to_output: a dict mapping Keras Model name to output layer of the model.
prev_node_name: a string representing a previous, in sequential model layout,
node name.
Returns:
A tuple of (input_to_in_layer, model_name_to_output, prev_node_name).
input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
model_name_to_output: a dict mapping Keras Model name to output layer of the model.
prev_node_name: a string representing a previous, in sequential model layout,
node name.
### Response:
def _update_dicts(name_scope,
model_layer,
input_to_in_layer,
model_name_to_output,
prev_node_name):
"""Updates input_to_in_layer, model_name_to_output, and prev_node_name
based on the model_layer.
Args:
name_scope: a string representing a scope name, similar to that of tf.name_scope.
model_layer: a dict representing a Keras model configuration.
input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
model_name_to_output: a dict mapping Keras Model name to output layer of the model.
prev_node_name: a string representing a previous, in sequential model layout,
node name.
Returns:
A tuple of (input_to_in_layer, model_name_to_output, prev_node_name).
input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
model_name_to_output: a dict mapping Keras Model name to output layer of the model.
prev_node_name: a string representing a previous, in sequential model layout,
node name.
"""
layer_config = model_layer.get('config')
if not layer_config.get('layers'):
raise ValueError('layer is not a model.')
node_name = _scoped_name(name_scope, layer_config.get('name'))
input_layers = layer_config.get('input_layers')
output_layers = layer_config.get('output_layers')
inbound_nodes = model_layer.get('inbound_nodes')
is_functional_model = bool(input_layers and output_layers)
# In case of [1] and the parent model is functional, current layer
# will have the 'inbound_nodes' property.
is_parent_functional_model = bool(inbound_nodes)
if is_parent_functional_model and is_functional_model:
for (input_layer, inbound_node) in zip(input_layers, inbound_nodes):
input_layer_name = _scoped_name(node_name, input_layer)
inbound_node_name = _scoped_name(name_scope, inbound_node[0])
input_to_in_layer[input_layer_name] = inbound_node_name
elif is_parent_functional_model and not is_functional_model:
# Sequential model can take only one input. Make sure inbound to the
# model is linked to the first layer in the Sequential model.
prev_node_name = _scoped_name(name_scope, inbound_nodes[0][0][0])
elif not is_parent_functional_model and prev_node_name and is_functional_model:
assert len(input_layers) == 1, (
'Cannot have multi-input Functional model when parent model '
        'is not Functional. Number of input layers: %d' % len(input_layers))
input_layer = input_layers[0]
input_layer_name = _scoped_name(node_name, input_layer)
input_to_in_layer[input_layer_name] = prev_node_name
if is_functional_model and output_layers:
layers = _norm_to_list_of_layers(output_layers)
layer_names = [_scoped_name(node_name, layer[0]) for layer in layers]
model_name_to_output[node_name] = layer_names
else:
last_layer = layer_config.get('layers')[-1]
last_layer_name = last_layer.get('config').get('name')
output_node = _scoped_name(node_name, last_layer_name)
model_name_to_output[node_name] = [output_node]
return (input_to_in_layer, model_name_to_output, prev_node_name) |
def update_resources(self, data, type_, names=None, languages=None):
"""
Update or add resource data.
type_ = resource type to update
names = a list of resource names to update (None = all)
languages = a list of resource languages to update (None = all)
"""
UpdateResources(self.filename, data, type_, names, languages) | Update or add resource data.
type_ = resource type to update
names = a list of resource names to update (None = all)
languages = a list of resource languages to update (None = all) | Below is the the instruction that describes the task:
### Input:
Update or add resource data.
type_ = resource type to update
names = a list of resource names to update (None = all)
languages = a list of resource languages to update (None = all)
### Response:
def update_resources(self, data, type_, names=None, languages=None):
"""
Update or add resource data.
type_ = resource type to update
names = a list of resource names to update (None = all)
languages = a list of resource languages to update (None = all)
"""
UpdateResources(self.filename, data, type_, names, languages) |
def wait_for_model_package(self, model_package_name, poll=5):
"""Wait for an Amazon SageMaker endpoint deployment to complete.
Args:
endpoint (str): Name of the ``Endpoint`` to wait for.
poll (int): Polling interval in seconds (default: 5).
Returns:
dict: Return value from the ``DescribeEndpoint`` API.
"""
desc = _wait_until(lambda: _create_model_package_status(self.sagemaker_client, model_package_name),
poll)
status = desc['ModelPackageStatus']
if status != 'Completed':
reason = desc.get('FailureReason', None)
raise ValueError('Error creating model package {}: {} Reason: {}'.format(
model_package_name, status, reason))
        return desc | Wait for an Amazon SageMaker model package creation to complete.
Args:
model_package_name (str): Name of the model package to wait for.
poll (int): Polling interval in seconds (default: 5).
Returns:
dict: Return value from the ``DescribeModelPackage`` API. | Below is the the instruction that describes the task:
### Input:
Wait for an Amazon SageMaker model package creation to complete.
Args:
model_package_name (str): Name of the model package to wait for.
poll (int): Polling interval in seconds (default: 5).
Returns:
dict: Return value from the ``DescribeModelPackage`` API.
### Response:
def wait_for_model_package(self, model_package_name, poll=5):
"""Wait for an Amazon SageMaker endpoint deployment to complete.
Args:
endpoint (str): Name of the ``Endpoint`` to wait for.
poll (int): Polling interval in seconds (default: 5).
Returns:
dict: Return value from the ``DescribeEndpoint`` API.
"""
desc = _wait_until(lambda: _create_model_package_status(self.sagemaker_client, model_package_name),
poll)
status = desc['ModelPackageStatus']
if status != 'Completed':
reason = desc.get('FailureReason', None)
raise ValueError('Error creating model package {}: {} Reason: {}'.format(
model_package_name, status, reason))
return desc |
def escape_any(value):
"""
Section 4.1.2 defines SPARQL shortened forms
https://www.w3.org/TR/2013/REC-sparql11-query-20130321/#QSynLiterals
Examples of literal syntax in SPARQL include:
"chat"
'chat'@fr with language tag "fr"
"xyz"^^<http://example.org/ns/userDatatype>
"abc"^^appNS:appDataType
'''The librarian said, "Perhaps you would enjoy 'War and Peace'."'''
1, which is the same as "1"^^xsd:integer
1.3, which is the same as "1.3"^^xsd:decimal
1.300, which is the same as "1.300"^^xsd:decimal
1.0e6, which is the same as "1.0e6"^^xsd:double
true, which is the same as "true"^^xsd:boolean
false, which is the same as "false"^^xsd:boolean
"""
if isinstance(value, type):
raise TypeError("object %r is not an instance" % value)
for type_, escape_method in escapers:
if isinstance(value, type_):
return escape_method(value)
return escape_string(str(value)) | Section 4.1.2 defines SPARQL shortened forms
https://www.w3.org/TR/2013/REC-sparql11-query-20130321/#QSynLiterals
Examples of literal syntax in SPARQL include:
"chat"
'chat'@fr with language tag "fr"
"xyz"^^<http://example.org/ns/userDatatype>
"abc"^^appNS:appDataType
'''The librarian said, "Perhaps you would enjoy 'War and Peace'."'''
1, which is the same as "1"^^xsd:integer
1.3, which is the same as "1.3"^^xsd:decimal
1.300, which is the same as "1.300"^^xsd:decimal
1.0e6, which is the same as "1.0e6"^^xsd:double
true, which is the same as "true"^^xsd:boolean
false, which is the same as "false"^^xsd:boolean | Below is the the instruction that describes the task:
### Input:
Section 4.1.2 defines SPARQL shortened forms
https://www.w3.org/TR/2013/REC-sparql11-query-20130321/#QSynLiterals
Examples of literal syntax in SPARQL include:
"chat"
'chat'@fr with language tag "fr"
"xyz"^^<http://example.org/ns/userDatatype>
"abc"^^appNS:appDataType
'''The librarian said, "Perhaps you would enjoy 'War and Peace'."'''
1, which is the same as "1"^^xsd:integer
1.3, which is the same as "1.3"^^xsd:decimal
1.300, which is the same as "1.300"^^xsd:decimal
1.0e6, which is the same as "1.0e6"^^xsd:double
true, which is the same as "true"^^xsd:boolean
false, which is the same as "false"^^xsd:boolean
### Response:
def escape_any(value):
"""
Section 4.1.2 defines SPARQL shortened forms
https://www.w3.org/TR/2013/REC-sparql11-query-20130321/#QSynLiterals
Examples of literal syntax in SPARQL include:
"chat"
'chat'@fr with language tag "fr"
"xyz"^^<http://example.org/ns/userDatatype>
"abc"^^appNS:appDataType
'''The librarian said, "Perhaps you would enjoy 'War and Peace'."'''
1, which is the same as "1"^^xsd:integer
1.3, which is the same as "1.3"^^xsd:decimal
1.300, which is the same as "1.300"^^xsd:decimal
1.0e6, which is the same as "1.0e6"^^xsd:double
true, which is the same as "true"^^xsd:boolean
false, which is the same as "false"^^xsd:boolean
"""
if isinstance(value, type):
raise TypeError("object %r is not an instance" % value)
for type_, escape_method in escapers:
if isinstance(value, type_):
return escape_method(value)
return escape_string(str(value)) |
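Note that ``escapers`` and ``escape_string`` are defined elsewhere in the module. A hypothetical sketch of the shape escape_any() expects — an ordered list of (type, escape function) pairs plus a string fallback — could look like this (the concrete entries are illustrative only, not the module's actual definitions):
escapers = [
    (bool, lambda v: 'true' if v else 'false'),  # bool first: bool is a subclass of int
    (int, str),                                  # 1 -> 1, the "1"^^xsd:integer shorthand
    (float, repr),                               # 1.3 -> 1.3, the xsd:decimal/double shorthand
]

def escape_string(s):
    # Hypothetical fallback: wrap the value as a quoted SPARQL string literal.
    return '"%s"' % s.replace('\\', '\\\\').replace('"', '\\"')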
def _clean_path(path):
"""Create a fully fissile absolute system path with no symbolic links and environment variables"""
path = path.replace('"', '')
path = path.replace("'", '')
# Replace ~ with /home/user
path = os.path.expanduser(path)
# Replace environment variables
path = os.path.expandvars(path)
# If the path is relative, assume it is relative to the config file directory
if not os.path.isabs(path):
path = os.path.join(config.global_config.path, path)
# Clean path, e.g. replace /./ with /
path = os.path.abspath(path)
# Eliminate symbolic links
path = os.path.realpath(path)
return path | Create a fully resolved absolute system path with no symbolic links and environment variables | Below is the instruction that describes the task:
### Input:
Create a fully resolved absolute system path with no symbolic links and environment variables
### Response:
def _clean_path(path):
"""Create a fully fissile absolute system path with no symbolic links and environment variables"""
path = path.replace('"', '')
path = path.replace("'", '')
# Replace ~ with /home/user
path = os.path.expanduser(path)
# Replace environment variables
path = os.path.expandvars(path)
# If the path is relative, assume it is relative to the config file directory
if not os.path.isabs(path):
path = os.path.join(config.global_config.path, path)
# Clean path, e.g. replace /./ with /
path = os.path.abspath(path)
# Eliminate symbolic links
path = os.path.realpath(path)
return path |
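A standalone usage sketch of the normalization steps (the path is hypothetical, and the config-relative fallback is omitted since it needs the surrounding config object):
import os

p = '~/projects/$USER/../notes.txt'  # hypothetical input
p = os.path.expanduser(p)            # ~ -> /home/<user>
p = os.path.expandvars(p)            # $USER -> value from the environment
p = os.path.abspath(p)               # clean . and .. segments
p = os.path.realpath(p)              # resolve symbolic links
print(p)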
def get_simbad_astrometry_info (ident, items=_simbaditems, debug=False):
"""Fetch astrometric information from the Simbad web service.
Given the name of a source as known to the CDS Simbad service, this
function looks up its positional information and returns it in a
dictionary. In most cases you should use an :class:`AstrometryInfo` object
and its :meth:`~AstrometryInfo.fill_from_simbad` method instead of this
function.
Arguments:
ident
The Simbad name of the source to look up.
items
An iterable of data items to look up. The default fetches position,
proper motion, parallax, and radial velocity information. Each item name
resembles the string ``COO(d;A)`` or ``PLX(E)``. The allowed formats are
defined `on this CDS page
<http://simbad.u-strasbg.fr/Pages/guide/sim-fscript.htx>`_.
debug
If true, the response from the webserver will be printed.
The return value is a dictionary with a key corresponding to the textual
result returned for each requested item.
"""
import codecs
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
s = '\\n'.join ('%s %%%s' % (i, i) for i in items)
s = '''output console=off script=off
format object "%s"
query id %s''' % (s, ident)
url = _simbadbase + quote (s)
results = {}
errtext = None
for line in codecs.getreader('utf-8')(urlopen (url)):
line = line.strip ()
if debug:
print_ ('D: SA >>', line)
if errtext is not None:
errtext += line
elif line.startswith ('::error'):
errtext = ''
elif len (line):
k, v = line.split (' ', 1)
results[k] = v
if errtext is not None:
raise Exception ('SIMBAD query error: ' + errtext)
return results | Fetch astrometric information from the Simbad web service.
Given the name of a source as known to the CDS Simbad service, this
function looks up its positional information and returns it in a
dictionary. In most cases you should use an :class:`AstrometryInfo` object
and its :meth:`~AstrometryInfo.fill_from_simbad` method instead of this
function.
Arguments:
ident
The Simbad name of the source to look up.
items
An iterable of data items to look up. The default fetches position,
proper motion, parallax, and radial velocity information. Each item name
resembles the string ``COO(d;A)`` or ``PLX(E)``. The allowed formats are
defined `on this CDS page
<http://simbad.u-strasbg.fr/Pages/guide/sim-fscript.htx>`_.
debug
If true, the response from the webserver will be printed.
The return value is a dictionary with a key corresponding to the textual
result returned for each requested item. | Below is the instruction that describes the task:
### Input:
Fetch astrometric information from the Simbad web service.
Given the name of a source as known to the CDS Simbad service, this
function looks up its positional information and returns it in a
dictionary. In most cases you should use an :class:`AstrometryInfo` object
and its :meth:`~AstrometryInfo.fill_from_simbad` method instead of this
function.
Arguments:
ident
The Simbad name of the source to look up.
items
An iterable of data items to look up. The default fetches position,
proper motion, parallax, and radial velocity information. Each item name
resembles the string ``COO(d;A)`` or ``PLX(E)``. The allowed formats are
defined `on this CDS page
<http://simbad.u-strasbg.fr/Pages/guide/sim-fscript.htx>`_.
debug
If true, the response from the webserver will be printed.
The return value is a dictionary with a key corresponding to the textual
result returned for each requested item.
### Response:
def get_simbad_astrometry_info (ident, items=_simbaditems, debug=False):
"""Fetch astrometric information from the Simbad web service.
Given the name of a source as known to the CDS Simbad service, this
function looks up its positional information and returns it in a
dictionary. In most cases you should use an :class:`AstrometryInfo` object
and its :meth:`~AstrometryInfo.fill_from_simbad` method instead of this
function.
Arguments:
ident
The Simbad name of the source to look up.
items
An iterable of data items to look up. The default fetches position,
proper motion, parallax, and radial velocity information. Each item name
resembles the string ``COO(d;A)`` or ``PLX(E)``. The allowed formats are
defined `on this CDS page
<http://simbad.u-strasbg.fr/Pages/guide/sim-fscript.htx>`_.
debug
If true, the response from the webserver will be printed.
The return value is a dictionary with a key corresponding to the textual
result returned for each requested item.
"""
import codecs
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
s = '\\n'.join ('%s %%%s' % (i, i) for i in items)
s = '''output console=off script=off
format object "%s"
query id %s''' % (s, ident)
url = _simbadbase + quote (s)
results = {}
errtext = None
for line in codecs.getreader('utf-8')(urlopen (url)):
line = line.strip ()
if debug:
print_ ('D: SA >>', line)
if errtext is not None:
errtext += line
elif line.startswith ('::error'):
errtext = ''
elif len (line):
k, v = line.split (' ', 1)
results[k] = v
if errtext is not None:
raise Exception ('SIMBAD query error: ' + errtext)
return results |
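A hedged usage sketch (this performs a live query against the CDS Simbad service, and the identifier is only an example):
info = get_simbad_astrometry_info('HD 189733')
for item, value in info.items():
    print(item, value)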
def consume(self, entrystream):
"""
Load a stream of entries into memory.
Only Feature objects and sequence-region directives are loaded, all
other entries are discarded.
"""
for entry in entrystream:
if isinstance(entry, tag.directive.Directive) and \
entry.type == 'sequence-region':
self.consume_seqreg(entry)
elif isinstance(entry, tag.feature.Feature):
self.consume_feature(entry) | Load a stream of entries into memory.
Only Feature objects and sequence-region directives are loaded, all
other entries are discarded. | Below is the instruction that describes the task:
### Input:
Load a stream of entries into memory.
Only Feature objects and sequence-region directives are loaded, all
other entries are discarded.
### Response:
def consume(self, entrystream):
"""
Load a stream of entries into memory.
Only Feature objects and sequence-region directives are loaded, all
other entries are discarded.
"""
for entry in entrystream:
if isinstance(entry, tag.directive.Directive) and \
entry.type == 'sequence-region':
self.consume_seqreg(entry)
elif isinstance(entry, tag.feature.Feature):
self.consume_feature(entry) |
def _get_value_indices(names1, names2, lookups):
"""
>>> _get_value_indices(['foo', 'bar', 'baz'], ['foo', 'bar', 'baz'],
... ['bar', 'foo'])
[1, 0]
>>> _get_value_indices(['foo', 'bar', 'baz'], ['FOO', 'bar', 'baz'],
... ['bar', 'FOO'])
[1, 0]
>>> _get_value_indices(['foo', 'bar', 'BAZ'], ['foo', 'BAZ', 'baz'],
... ['BAZ', 'foo'])
[2, 0]
>>> _get_value_indices(['foo', 'bar', 'baz'], ['foo', 'bar', 'baz'],
... ['spam'])
Traceback (most recent call last):
...
KeyError: 'spam'
"""
positions = {name: idx for idx, name in enumerate(names2)}
positions.update({name: idx for idx, name in enumerate(names1)})
return [positions[name] for name in lookups] | >>> _get_value_indices(['foo', 'bar', 'baz'], ['foo', 'bar', 'baz'],
... ['bar', 'foo'])
[1, 0]
>>> _get_value_indices(['foo', 'bar', 'baz'], ['FOO', 'bar', 'baz'],
... ['bar', 'FOO'])
[1, 0]
>>> _get_value_indices(['foo', 'bar', 'BAZ'], ['foo', 'BAZ', 'baz'],
... ['BAZ', 'foo'])
[2, 0]
>>> _get_value_indices(['foo', 'bar', 'baz'], ['foo', 'bar', 'baz'],
... ['spam'])
Traceback (most recent call last):
...
KeyError: 'spam' | Below is the instruction that describes the task:
### Input:
>>> _get_value_indices(['foo', 'bar', 'baz'], ['foo', 'bar', 'baz'],
... ['bar', 'foo'])
[1, 0]
>>> _get_value_indices(['foo', 'bar', 'baz'], ['FOO', 'bar', 'baz'],
... ['bar', 'FOO'])
[1, 0]
>>> _get_value_indices(['foo', 'bar', 'BAZ'], ['foo', 'BAZ', 'baz'],
... ['BAZ', 'foo'])
[2, 0]
>>> _get_value_indices(['foo', 'bar', 'baz'], ['foo', 'bar', 'baz'],
... ['spam'])
Traceback (most recent call last):
...
KeyError: 'spam'
### Response:
def _get_value_indices(names1, names2, lookups):
"""
>>> _get_value_indices(['foo', 'bar', 'baz'], ['foo', 'bar', 'baz'],
... ['bar', 'foo'])
[1, 0]
>>> _get_value_indices(['foo', 'bar', 'baz'], ['FOO', 'bar', 'baz'],
... ['bar', 'FOO'])
[1, 0]
>>> _get_value_indices(['foo', 'bar', 'BAZ'], ['foo', 'BAZ', 'baz'],
... ['BAZ', 'foo'])
[2, 0]
>>> _get_value_indices(['foo', 'bar', 'baz'], ['foo', 'bar', 'baz'],
... ['spam'])
Traceback (most recent call last):
...
KeyError: 'spam'
"""
positions = {name: idx for idx, name in enumerate(names2)}
positions.update({name: idx for idx, name in enumerate(names1)})
return [positions[name] for name in lookups] |
def get_queued():
"""
Returns a list of emails that should be sent:
- Status is queued
- Has scheduled_time lower than the current time or None
"""
return Email.objects.filter(status=STATUS.queued) \
.select_related('template') \
.filter(Q(scheduled_time__lte=now()) | Q(scheduled_time=None)) \
.order_by(*get_sending_order()).prefetch_related('attachments')[:get_batch_size()] | Returns a list of emails that should be sent:
- Status is queued
- Has scheduled_time lower than the current time or None | Below is the instruction that describes the task:
### Input:
Returns a list of emails that should be sent:
- Status is queued
- Has scheduled_time lower than the current time or None
### Response:
def get_queued():
"""
Returns a list of emails that should be sent:
- Status is queued
- Has scheduled_time lower than the current time or None
"""
return Email.objects.filter(status=STATUS.queued) \
.select_related('template') \
.filter(Q(scheduled_time__lte=now()) | Q(scheduled_time=None)) \
.order_by(*get_sending_order()).prefetch_related('attachments')[:get_batch_size()] |
def find_vasp_calculations():
"""
Returns a list of all subdirectories that contain either a vasprun.xml file
or a compressed vasprun.xml.gz file.
Args:
None
Returns:
(List): list of all VASP calculation subdirectories.
"""
dir_list = [ './' + re.sub( r'vasprun\.xml', '', path ) for path in glob.iglob( '**/vasprun.xml', recursive=True ) ]
gz_dir_list = [ './' + re.sub( r'vasprun\.xml\.gz', '', path ) for path in glob.iglob( '**/vasprun.xml.gz', recursive=True ) ]
return dir_list + gz_dir_list | Returns a list of all subdirectories that contain either a vasprun.xml file
or a compressed vasprun.xml.gz file.
Args:
None
Returns:
(List): list of all VASP calculation subdirectories. | Below is the instruction that describes the task:
### Input:
Returns a list of all subdirectories that contain either a vasprun.xml file
or a compressed vasprun.xml.gz file.
Args:
None
Returns:
(List): list of all VASP calculation subdirectories.
### Response:
def find_vasp_calculations():
"""
Returns a list of all subdirectories that contain either a vasprun.xml file
or a compressed vasprun.xml.gz file.
Args:
None
Returns:
(List): list of all VASP calculation subdirectories.
"""
dir_list = [ './' + re.sub( r'vasprun\.xml', '', path ) for path in glob.iglob( '**/vasprun.xml', recursive=True ) ]
gz_dir_list = [ './' + re.sub( r'vasprun\.xml\.gz', '', path ) for path in glob.iglob( '**/vasprun.xml.gz', recursive=True ) ]
return dir_list + gz_dir_list |
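The directory-trimming step can be illustrated in isolation with hypothetical paths of the kind glob.iglob would yield:
import re

paths = ['bulk/vasprun.xml', 'defects/vac_O/vasprun.xml']  # as from glob('**/vasprun.xml')
gz_paths = ['surfaces/slab_111/vasprun.xml.gz']            # as from glob('**/vasprun.xml.gz')
dir_list = ['./' + re.sub(r'vasprun\.xml', '', p) for p in paths]
gz_dir_list = ['./' + re.sub(r'vasprun\.xml\.gz', '', p) for p in gz_paths]
print(dir_list + gz_dir_list)
# ['./bulk/', './defects/vac_O/', './surfaces/slab_111/']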
def parse_record(raw_record, is_training, dtype):
"""Parses a record containing a training example of an image.
The input record is parsed into a label and image, and the image is passed
through preprocessing steps (cropping, flipping, and so on).
Args:
raw_record: scalar Tensor tf.string containing a serialized
Example protocol buffer.
is_training: A boolean denoting whether the input is for training.
dtype: data type to use for images/features.
Returns:
Tuple with processed image tensor and one-hot-encoded label tensor.
"""
image_buffer, label = _parse_example_proto(raw_record)
image = imagenet_preprocessing.preprocess_image(
image_buffer=image_buffer,
output_height=_DEFAULT_IMAGE_SIZE,
output_width=_DEFAULT_IMAGE_SIZE,
num_channels=_NUM_CHANNELS,
is_training=is_training)
image = tf.cast(image, dtype)
return image, label | Parses a record containing a training example of an image.
The input record is parsed into a label and image, and the image is passed
through preprocessing steps (cropping, flipping, and so on).
Args:
raw_record: scalar Tensor tf.string containing a serialized
Example protocol buffer.
is_training: A boolean denoting whether the input is for training.
dtype: data type to use for images/features.
Returns:
Tuple with processed image tensor and one-hot-encoded label tensor. | Below is the instruction that describes the task:
### Input:
Parses a record containing a training example of an image.
The input record is parsed into a label and image, and the image is passed
through preprocessing steps (cropping, flipping, and so on).
Args:
raw_record: scalar Tensor tf.string containing a serialized
Example protocol buffer.
is_training: A boolean denoting whether the input is for training.
dtype: data type to use for images/features.
Returns:
Tuple with processed image tensor and one-hot-encoded label tensor.
### Response:
def parse_record(raw_record, is_training, dtype):
"""Parses a record containing a training example of an image.
The input record is parsed into a label and image, and the image is passed
through preprocessing steps (cropping, flipping, and so on).
Args:
raw_record: scalar Tensor tf.string containing a serialized
Example protocol buffer.
is_training: A boolean denoting whether the input is for training.
dtype: data type to use for images/features.
Returns:
Tuple with processed image tensor and one-hot-encoded label tensor.
"""
image_buffer, label = _parse_example_proto(raw_record)
image = imagenet_preprocessing.preprocess_image(
image_buffer=image_buffer,
output_height=_DEFAULT_IMAGE_SIZE,
output_width=_DEFAULT_IMAGE_SIZE,
num_channels=_NUM_CHANNELS,
is_training=is_training)
image = tf.cast(image, dtype)
return image, label |
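A hedged sketch of how such a parser is typically wired into a tf.data input pipeline; the shard filename and flag values are assumptions, and the module-level helpers (_parse_example_proto and the size constants) are assumed from the surrounding code:
import tensorflow as tf

filenames = ['train-00000-of-01024']  # hypothetical TFRecord shard
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(
    lambda raw: parse_record(raw, is_training=True, dtype=tf.float32))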
def crosstab(index, columns, values=None, rownames=None, colnames=None,
aggfunc=None, margins=False, margins_name='All', dropna=True,
normalize=False):
"""
Compute a simple cross tabulation of two (or more) factors. By default
computes a frequency table of the factors unless an array of values and an
aggregation function are passed.
Parameters
----------
index : array-like, Series, or list of arrays/Series
Values to group by in the rows.
columns : array-like, Series, or list of arrays/Series
Values to group by in the columns.
values : array-like, optional
Array of values to aggregate according to the factors.
Requires `aggfunc` be specified.
rownames : sequence, default None
If passed, must match number of row arrays passed.
colnames : sequence, default None
If passed, must match number of column arrays passed.
aggfunc : function, optional
If specified, requires `values` be specified as well.
margins : bool, default False
Add row/column margins (subtotals).
margins_name : str, default 'All'
Name of the row/column that will contain the totals
when margins is True.
.. versionadded:: 0.21.0
dropna : bool, default True
Do not include columns whose entries are all NaN.
normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
Normalize by dividing all values by the sum of values.
- If passed 'all' or `True`, will normalize over all values.
- If passed 'index' will normalize over each row.
- If passed 'columns' will normalize over each column.
- If margins is `True`, will also normalize margin values.
.. versionadded:: 0.18.1
Returns
-------
DataFrame
Cross tabulation of the data.
See Also
--------
DataFrame.pivot : Reshape data based on column values.
pivot_table : Create a pivot table as a DataFrame.
Notes
-----
Any Series passed will have their name attributes used unless row or column
names for the cross-tabulation are specified.
Any input passed containing Categorical data will have **all** of its
categories included in the cross-tabulation, even if the actual data does
not contain any instances of a particular category.
In the event that there aren't overlapping indexes an empty DataFrame will
be returned.
Examples
--------
>>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
... "bar", "bar", "foo", "foo", "foo"], dtype=object)
>>> b = np.array(["one", "one", "one", "two", "one", "one",
... "one", "two", "two", "two", "one"], dtype=object)
>>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
... "shiny", "dull", "shiny", "shiny", "shiny"],
... dtype=object)
>>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
b one two
c dull shiny dull shiny
a
bar 1 2 1 0
foo 2 2 1 2
Here 'c' and 'f' are not represented in the data and will not be
shown in the output because dropna is True by default. Set
dropna=False to preserve categories with no data.
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
>>> pd.crosstab(foo, bar)
col_0 d e
row_0
a 1 0
b 0 1
>>> pd.crosstab(foo, bar, dropna=False)
col_0 d e f
row_0
a 1 0 0
b 0 1 0
c 0 0 0
"""
index = com.maybe_make_list(index)
columns = com.maybe_make_list(columns)
rownames = _get_names(index, rownames, prefix='row')
colnames = _get_names(columns, colnames, prefix='col')
common_idx = _get_objs_combined_axis(index + columns, intersect=True,
sort=False)
data = {}
data.update(zip(rownames, index))
data.update(zip(colnames, columns))
if values is None and aggfunc is not None:
raise ValueError("aggfunc cannot be used without values.")
if values is not None and aggfunc is None:
raise ValueError("values cannot be used without an aggfunc.")
from pandas import DataFrame
df = DataFrame(data, index=common_idx)
if values is None:
df['__dummy__'] = 0
kwargs = {'aggfunc': len, 'fill_value': 0}
else:
df['__dummy__'] = values
kwargs = {'aggfunc': aggfunc}
table = df.pivot_table('__dummy__', index=rownames, columns=colnames,
margins=margins, margins_name=margins_name,
dropna=dropna, **kwargs)
# Post-process
if normalize is not False:
table = _normalize(table, normalize=normalize, margins=margins,
margins_name=margins_name)
return table | Compute a simple cross tabulation of two (or more) factors. By default
computes a frequency table of the factors unless an array of values and an
aggregation function are passed.
Parameters
----------
index : array-like, Series, or list of arrays/Series
Values to group by in the rows.
columns : array-like, Series, or list of arrays/Series
Values to group by in the columns.
values : array-like, optional
Array of values to aggregate according to the factors.
Requires `aggfunc` be specified.
rownames : sequence, default None
If passed, must match number of row arrays passed.
colnames : sequence, default None
If passed, must match number of column arrays passed.
aggfunc : function, optional
If specified, requires `values` be specified as well.
margins : bool, default False
Add row/column margins (subtotals).
margins_name : str, default 'All'
Name of the row/column that will contain the totals
when margins is True.
.. versionadded:: 0.21.0
dropna : bool, default True
Do not include columns whose entries are all NaN.
normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
Normalize by dividing all values by the sum of values.
- If passed 'all' or `True`, will normalize over all values.
- If passed 'index' will normalize over each row.
- If passed 'columns' will normalize over each column.
- If margins is `True`, will also normalize margin values.
.. versionadded:: 0.18.1
Returns
-------
DataFrame
Cross tabulation of the data.
See Also
--------
DataFrame.pivot : Reshape data based on column values.
pivot_table : Create a pivot table as a DataFrame.
Notes
-----
Any Series passed will have their name attributes used unless row or column
names for the cross-tabulation are specified.
Any input passed containing Categorical data will have **all** of its
categories included in the cross-tabulation, even if the actual data does
not contain any instances of a particular category.
In the event that there aren't overlapping indexes an empty DataFrame will
be returned.
Examples
--------
>>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
... "bar", "bar", "foo", "foo", "foo"], dtype=object)
>>> b = np.array(["one", "one", "one", "two", "one", "one",
... "one", "two", "two", "two", "one"], dtype=object)
>>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
... "shiny", "dull", "shiny", "shiny", "shiny"],
... dtype=object)
>>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
b one two
c dull shiny dull shiny
a
bar 1 2 1 0
foo 2 2 1 2
Here 'c' and 'f' are not represented in the data and will not be
shown in the output because dropna is True by default. Set
dropna=False to preserve categories with no data.
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
>>> pd.crosstab(foo, bar)
col_0 d e
row_0
a 1 0
b 0 1
>>> pd.crosstab(foo, bar, dropna=False)
col_0 d e f
row_0
a 1 0 0
b 0 1 0
c 0 0 0 | Below is the instruction that describes the task:
### Input:
Compute a simple cross tabulation of two (or more) factors. By default
computes a frequency table of the factors unless an array of values and an
aggregation function are passed.
Parameters
----------
index : array-like, Series, or list of arrays/Series
Values to group by in the rows.
columns : array-like, Series, or list of arrays/Series
Values to group by in the columns.
values : array-like, optional
Array of values to aggregate according to the factors.
Requires `aggfunc` be specified.
rownames : sequence, default None
If passed, must match number of row arrays passed.
colnames : sequence, default None
If passed, must match number of column arrays passed.
aggfunc : function, optional
If specified, requires `values` be specified as well.
margins : bool, default False
Add row/column margins (subtotals).
margins_name : str, default 'All'
Name of the row/column that will contain the totals
when margins is True.
.. versionadded:: 0.21.0
dropna : bool, default True
Do not include columns whose entries are all NaN.
normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
Normalize by dividing all values by the sum of values.
- If passed 'all' or `True`, will normalize over all values.
- If passed 'index' will normalize over each row.
- If passed 'columns' will normalize over each column.
- If margins is `True`, will also normalize margin values.
.. versionadded:: 0.18.1
Returns
-------
DataFrame
Cross tabulation of the data.
See Also
--------
DataFrame.pivot : Reshape data based on column values.
pivot_table : Create a pivot table as a DataFrame.
Notes
-----
Any Series passed will have their name attributes used unless row or column
names for the cross-tabulation are specified.
Any input passed containing Categorical data will have **all** of its
categories included in the cross-tabulation, even if the actual data does
not contain any instances of a particular category.
In the event that there aren't overlapping indexes an empty DataFrame will
be returned.
Examples
--------
>>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
... "bar", "bar", "foo", "foo", "foo"], dtype=object)
>>> b = np.array(["one", "one", "one", "two", "one", "one",
... "one", "two", "two", "two", "one"], dtype=object)
>>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
... "shiny", "dull", "shiny", "shiny", "shiny"],
... dtype=object)
>>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
b one two
c dull shiny dull shiny
a
bar 1 2 1 0
foo 2 2 1 2
Here 'c' and 'f' are not represented in the data and will not be
shown in the output because dropna is True by default. Set
dropna=False to preserve categories with no data.
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
>>> pd.crosstab(foo, bar)
col_0 d e
row_0
a 1 0
b 0 1
>>> pd.crosstab(foo, bar, dropna=False)
col_0 d e f
row_0
a 1 0 0
b 0 1 0
c 0 0 0
### Response:
def crosstab(index, columns, values=None, rownames=None, colnames=None,
aggfunc=None, margins=False, margins_name='All', dropna=True,
normalize=False):
"""
Compute a simple cross tabulation of two (or more) factors. By default
computes a frequency table of the factors unless an array of values and an
aggregation function are passed.
Parameters
----------
index : array-like, Series, or list of arrays/Series
Values to group by in the rows.
columns : array-like, Series, or list of arrays/Series
Values to group by in the columns.
values : array-like, optional
Array of values to aggregate according to the factors.
Requires `aggfunc` be specified.
rownames : sequence, default None
If passed, must match number of row arrays passed.
colnames : sequence, default None
If passed, must match number of column arrays passed.
aggfunc : function, optional
If specified, requires `values` be specified as well.
margins : bool, default False
Add row/column margins (subtotals).
margins_name : str, default 'All'
Name of the row/column that will contain the totals
when margins is True.
.. versionadded:: 0.21.0
dropna : bool, default True
Do not include columns whose entries are all NaN.
normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
Normalize by dividing all values by the sum of values.
- If passed 'all' or `True`, will normalize over all values.
- If passed 'index' will normalize over each row.
- If passed 'columns' will normalize over each column.
- If margins is `True`, will also normalize margin values.
.. versionadded:: 0.18.1
Returns
-------
DataFrame
Cross tabulation of the data.
See Also
--------
DataFrame.pivot : Reshape data based on column values.
pivot_table : Create a pivot table as a DataFrame.
Notes
-----
Any Series passed will have their name attributes used unless row or column
names for the cross-tabulation are specified.
Any input passed containing Categorical data will have **all** of its
categories included in the cross-tabulation, even if the actual data does
not contain any instances of a particular category.
In the event that there aren't overlapping indexes an empty DataFrame will
be returned.
Examples
--------
>>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
... "bar", "bar", "foo", "foo", "foo"], dtype=object)
>>> b = np.array(["one", "one", "one", "two", "one", "one",
... "one", "two", "two", "two", "one"], dtype=object)
>>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
... "shiny", "dull", "shiny", "shiny", "shiny"],
... dtype=object)
>>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
b one two
c dull shiny dull shiny
a
bar 1 2 1 0
foo 2 2 1 2
Here 'c' and 'f' are not represented in the data and will not be
shown in the output because dropna is True by default. Set
dropna=False to preserve categories with no data.
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
>>> pd.crosstab(foo, bar)
col_0 d e
row_0
a 1 0
b 0 1
>>> pd.crosstab(foo, bar, dropna=False)
col_0 d e f
row_0
a 1 0 0
b 0 1 0
c 0 0 0
"""
index = com.maybe_make_list(index)
columns = com.maybe_make_list(columns)
rownames = _get_names(index, rownames, prefix='row')
colnames = _get_names(columns, colnames, prefix='col')
common_idx = _get_objs_combined_axis(index + columns, intersect=True,
sort=False)
data = {}
data.update(zip(rownames, index))
data.update(zip(colnames, columns))
if values is None and aggfunc is not None:
raise ValueError("aggfunc cannot be used without values.")
if values is not None and aggfunc is None:
raise ValueError("values cannot be used without an aggfunc.")
from pandas import DataFrame
df = DataFrame(data, index=common_idx)
if values is None:
df['__dummy__'] = 0
kwargs = {'aggfunc': len, 'fill_value': 0}
else:
df['__dummy__'] = values
kwargs = {'aggfunc': aggfunc}
table = df.pivot_table('__dummy__', index=rownames, columns=colnames,
margins=margins, margins_name=margins_name,
dropna=dropna, **kwargs)
# Post-process
if normalize is not False:
table = _normalize(table, normalize=normalize, margins=margins,
margins_name=margins_name)
return table |
def alias_grade_entry(self, grade_entry_id, alias_id):
"""Adds an ``Id`` to a ``GradeEntry`` for the purpose of creating compatibility.
The primary ``Id`` of the ``GradeEntry`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another grade entry, it is
reassigned to the given grade entry ``Id``.
arg: grade_entry_id (osid.id.Id): the ``Id`` of a
``GradeEntry``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``grade_entry_id`` not found
raise: NullArgument - ``grade_entry_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.alias_resources_template
self._alias_id(primary_id=grade_entry_id, equivalent_id=alias_id) | Adds an ``Id`` to a ``GradeEntry`` for the purpose of creating compatibility.
The primary ``Id`` of the ``GradeEntry`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another grade entry, it is
reassigned to the given grade entry ``Id``.
arg: grade_entry_id (osid.id.Id): the ``Id`` of a
``GradeEntry``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``grade_entry_id`` not found
raise: NullArgument - ``grade_entry_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Adds an ``Id`` to a ``GradeEntry`` for the purpose of creating compatibility.
The primary ``Id`` of the ``GradeEntry`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another grade entry, it is
reassigned to the given grade entry ``Id``.
arg: grade_entry_id (osid.id.Id): the ``Id`` of a
``GradeEntry``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``grade_entry_id`` not found
raise: NullArgument - ``grade_entry_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def alias_grade_entry(self, grade_entry_id, alias_id):
"""Adds an ``Id`` to a ``GradeEntry`` for the purpose of creating compatibility.
The primary ``Id`` of the ``GradeEntry`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another grade entry, it is
reassigned to the given grade entry ``Id``.
arg: grade_entry_id (osid.id.Id): the ``Id`` of a
``GradeEntry``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``grade_entry_id`` not found
raise: NullArgument - ``grade_entry_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.alias_resources_template
self._alias_id(primary_id=grade_entry_id, equivalent_id=alias_id) |
def parse_samblaster(self, f):
""" Go through log file looking for samblaster output.
If the input file is stdin,
grab the name from the RG tag of the preceding bwa command """
dups_regex = "samblaster: (Removed|Marked) (\d+) of (\d+) \((\d+.\d+)%\) read ids as duplicates"
input_file_regex = "samblaster: Opening (\S+) for read."
rgtag_name_regex = "\\\\tID:(\S*?)\\\\t"
data = {}
s_name = None
fh = f['f']
for l in fh:
# try to find name from RG-tag. If bwa mem is used upstream samblaster with pipes, then the bwa mem command
# including the read group will be written in the log
match = re.search(rgtag_name_regex, l)
if match:
s_name = self.clean_s_name( match.group(1), f['root'])
# try to find name from the input file name, if used
match = re.search(input_file_regex, l)
if match:
basefn = os.path.basename(match.group(1))
fname, ext = os.path.splitext(basefn)
# if it's stdin, then try bwa RG-tag instead
if fname != 'stdin':
s_name = self.clean_s_name( fname, f['root'])
match = re.search(dups_regex, l)
if match:
data['n_dups'] = int(match.group(2))
data['n_tot'] = int(match.group(3))
data['n_nondups'] = data['n_tot'] - data['n_dups']
data['pct_dups'] = float(match.group(4))
if s_name is None:
s_name = f['s_name']
if len(data) > 0:
if s_name in self.samblaster_data:
log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
self.add_data_source(f, s_name)
self.samblaster_data[s_name] = data | Go through log file looking for samblaster output.
If the input file is stdin,
grab the name from the RG tag of the preceding bwa command | Below is the instruction that describes the task:
### Input:
Go through log file looking for samblaster output.
If the input file is stdin,
grab the name from the RG tag of the preceding bwa command
### Response:
def parse_samblaster(self, f):
""" Go through log file looking for samblaster output.
If the input file is stdin,
grab the name from the RG tag of the preceding bwa command """
dups_regex = "samblaster: (Removed|Marked) (\d+) of (\d+) \((\d+.\d+)%\) read ids as duplicates"
input_file_regex = "samblaster: Opening (\S+) for read."
rgtag_name_regex = "\\\\tID:(\S*?)\\\\t"
data = {}
s_name = None
fh = f['f']
for l in fh:
# try to find name from RG-tag. If bwa mem is used upstream samblaster with pipes, then the bwa mem command
# including the read group will be written in the log
match = re.search(rgtag_name_regex, l)
if match:
s_name = self.clean_s_name( match.group(1), f['root'])
# try to find name from the input file name, if used
match = re.search(input_file_regex, l)
if match:
basefn = os.path.basename(match.group(1))
fname, ext = os.path.splitext(basefn)
# if it's stdin, then try bwa RG-tag instead
if fname != 'stdin':
s_name = self.clean_s_name( fname, f['root'])
match = re.search(dups_regex, l)
if match:
data['n_dups'] = int(match.group(2))
data['n_tot'] = int(match.group(3))
data['n_nondups'] = data['n_tot'] - data['n_dups']
data['pct_dups'] = float(match.group(4))
if s_name is None:
s_name = f['s_name']
if len(data) > 0:
if s_name in self.samblaster_data:
log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
self.add_data_source(f, s_name)
self.samblaster_data[s_name] = data |
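The duplicate-statistics regex can be checked in isolation against a sample log line (the line below is illustrative, not taken from a real run):
import re

dups_regex = r"samblaster: (Removed|Marked) (\d+) of (\d+) \((\d+.\d+)%\) read ids as duplicates"
line = "samblaster: Marked 1523 of 100000 (1.52%) read ids as duplicates"
m = re.search(dups_regex, line)
print(m.group(2), m.group(3), m.group(4))  # 1523 100000 1.52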
def _prepare_resources(self, variables, overrides=None):
"""Create and optionally open all shared resources."""
if overrides is None:
overrides = {}
res_map = {}
own_map = {}
for decl in self.resources.values():
resource = overrides.get(decl.name)
if resource is None:
args = _complete_parameters(decl.args, variables)
resource = decl.type(args)
own_map[decl.name] = resource
if decl.autocreate:
resource.open()
res_map[decl.name] = resource
return res_map, own_map | Create and optionally open all shared resources. | Below is the instruction that describes the task:
### Input:
Create and optionally open all shared resources.
### Response:
def _prepare_resources(self, variables, overrides=None):
"""Create and optionally open all shared resources."""
if overrides is None:
overrides = {}
res_map = {}
own_map = {}
for decl in self.resources.values():
resource = overrides.get(decl.name)
if resource is None:
args = _complete_parameters(decl.args, variables)
resource = decl.type(args)
own_map[decl.name] = resource
if decl.autocreate:
resource.open()
res_map[decl.name] = resource
return res_map, own_map |
def render_child(self, child, view_name=None, context=None):
"""A shortcut to render a child block.
Use this method to render your children from your own view function.
If `view_name` is not provided, it will default to the view name you're
being rendered with.
Returns the same value as :func:`render`.
"""
return child.render(view_name or self._view_name, context) | A shortcut to render a child block.
Use this method to render your children from your own view function.
If `view_name` is not provided, it will default to the view name you're
being rendered with.
Returns the same value as :func:`render`. | Below is the instruction that describes the task:
### Input:
A shortcut to render a child block.
Use this method to render your children from your own view function.
If `view_name` is not provided, it will default to the view name you're
being rendered with.
Returns the same value as :func:`render`.
### Response:
def render_child(self, child, view_name=None, context=None):
"""A shortcut to render a child block.
Use this method to render your children from your own view function.
If `view_name` is not provided, it will default to the view name you're
being rendered with.
Returns the same value as :func:`render`.
"""
return child.render(view_name or self._view_name, context) |
def add_public_note(self, public_note, source=None):
"""Add public note.
:param public_note: public note for the current article.
:type public_note: string
:param source: source for the given notes.
:type source: string
"""
self._append_to('public_notes', self._sourced_dict(
source,
value=public_note,
)) | Add public note.
:param public_note: public note for the current article.
:type public_note: string
:param source: source for the given notes.
:type source: string | Below is the instruction that describes the task:
### Input:
Add public note.
:param public_note: public note for the current article.
:type public_note: string
:param source: source for the given notes.
:type source: string
### Response:
def add_public_note(self, public_note, source=None):
"""Add public note.
:param public_note: public note for the current article.
:type public_note: string
:param source: source for the given notes.
:type source: string
"""
self._append_to('public_notes', self._sourced_dict(
source,
value=public_note,
)) |
def isdisjoint(self, other):
"""
Return ``True`` if the set has no elements in common with *other*.
Sets are disjoint if and only if their intersection is the empty set.
:param other: Any kind of iterable.
:rtype: boolean
"""
def isdisjoint_trans_pure(pipe):
return not pipe.sinter(self.key, other.key)
def isdisjoint_trans_mixed(pipe):
self_values = set(self.__iter__(pipe))
if use_redis:
other_values = set(other.__iter__(pipe))
else:
other_values = set(other)
return self_values.isdisjoint(other_values)
if self._same_redis(other):
return self._transaction(isdisjoint_trans_pure, other.key)
if self._same_redis(other, RedisCollection):
use_redis = True
return self._transaction(isdisjoint_trans_mixed, other.key)
use_redis = False
return self._transaction(isdisjoint_trans_mixed) | Return ``True`` if the set has no elements in common with *other*.
Sets are disjoint if and only if their intersection is the empty set.
:param other: Any kind of iterable.
:rtype: boolean | Below is the instruction that describes the task:
### Input:
Return ``True`` if the set has no elements in common with *other*.
Sets are disjoint if and only if their intersection is the empty set.
:param other: Any kind of iterable.
:rtype: boolean
### Response:
def isdisjoint(self, other):
"""
Return ``True`` if the set has no elements in common with *other*.
Sets are disjoint if and only if their intersection is the empty set.
:param other: Any kind of iterable.
:rtype: boolean
"""
def isdisjoint_trans_pure(pipe):
return not pipe.sinter(self.key, other.key)
def isdisjoint_trans_mixed(pipe):
self_values = set(self.__iter__(pipe))
if use_redis:
other_values = set(other.__iter__(pipe))
else:
other_values = set(other)
return self_values.isdisjoint(other_values)
if self._same_redis(other):
return self._transaction(isdisjoint_trans_pure, other.key)
if self._same_redis(other, RedisCollection):
use_redis = True
return self._transaction(isdisjoint_trans_mixed, other.key)
use_redis = False
return self._transaction(isdisjoint_trans_mixed) |
def bellman_ford(graph, weight, source=0):
""" Single source shortest paths by Bellman-Ford
:param graph: directed graph in listlist or listdict format
:param weight: can be negative.
in matrix format or same listdict graph
:returns: distance table, precedence table, bool
:explanation: bool is True if a negative circuit is
reachable from the source, circuits
can have length 2.
:complexity: `O(|V|*|E|)`
"""
n = len(graph)
dist = [float('inf')] * n
prec = [None] * n
dist[source] = 0
for nb_iterations in range(n):
changed = False
for node in range(n):
for neighbor in graph[node]:
alt = dist[node] + weight[node][neighbor]
if alt < dist[neighbor]:
dist[neighbor] = alt
prec[neighbor] = node
changed = True
if not changed: # fixed point
return dist, prec, False
return dist, prec, True | Single source shortest paths by Bellman-Ford
:param graph: directed graph in listlist or listdict format
:param weight: can be negative.
in matrix format or same listdict graph
:returns: distance table, precedence table, bool
:explanation: bool is True if a negative circuit is
reachable from the source, circuits
can have length 2.
:complexity: `O(|V|*|E|)` | Below is the instruction that describes the task:
### Input:
Single source shortest paths by Bellman-Ford
:param graph: directed graph in listlist or listdict format
:param weight: can be negative.
in matrix format or same listdict graph
:returns: distance table, precedence table, bool
:explanation: bool is True if a negative circuit is
reachable from the source, circuits
can have length 2.
:complexity: `O(|V|*|E|)`
### Response:
def bellman_ford(graph, weight, source=0):
""" Single source shortest paths by Bellman-Ford
:param graph: directed graph in listlist or listdict format
:param weight: can be negative.
in matrix format or same listdict graph
:returns: distance table, precedence table, bool
:explanation: bool is True if a negative circuit is
reachable from the source, circuits
can have length 2.
:complexity: `O(|V|*|E|)`
"""
n = len(graph)
dist = [float('inf')] * n
prec = [None] * n
dist[source] = 0
for nb_iterations in range(n):
changed = False
for node in range(n):
for neighbor in graph[node]:
alt = dist[node] + weight[node][neighbor]
if alt < dist[neighbor]:
dist[neighbor] = alt
prec[neighbor] = node
changed = True
if not changed: # fixed point
return dist, prec, False
return dist, prec, True |
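A minimal usage sketch on a four-node graph in the listlist format described above (the dense weight matrix is an assumption for illustration; only entries for existing arcs matter):
graph = [[1, 2], [2], [3], []]
weight = [[0, 4, 2, 0],
          [0, 0, -1, 0],
          [0, 0, 0, 3],
          [0, 0, 0, 0]]
dist, prec, has_negative_cycle = bellman_ford(graph, weight, source=0)
print(dist)                # [0, 4, 2, 5]
print(prec)                # [None, 0, 0, 2]
print(has_negative_cycle)  # False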
def _construct_url(self, url, base, quote):
"""
Adds the orderbook to the url if base and quote are specified.
"""
if not base and not quote:
return url
else:
url = url + base.lower() + quote.lower() + "/"
return url | Adds the orderbook to the url if base and quote are specified. | Below is the instruction that describes the task:
### Input:
Adds the orderbook to the url if base and quote are specified.
### Response:
def _construct_url(self, url, base, quote):
"""
Adds the orderbook to the url if base and quote are specified.
"""
if not base and not quote:
return url
else:
url = url + base.lower() + quote.lower() + "/"
return url |
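A standalone rendering of the same logic for illustration, with a hypothetical base URL; note the fall-through branch assumes base and quote are provided together:
def construct_url(url, base, quote):
    # Mirrors the method above, without the class context.
    if not base and not quote:
        return url
    return url + base.lower() + quote.lower() + "/"

print(construct_url("https://api.example.com/order_book/", "BTC", "USD"))
# https://api.example.com/order_book/btcusd/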
async def container(self, container=None, container_type=None, params=None):
"""
Loads/dumps container
:return:
"""
# Container versioning is a bit tricky, primitive type containers are not versioned.
elem_type = x.container_elem_type(container_type, params)
raw_container = container_is_raw(container_type, params)
elem_elementary = TypeWrapper.is_elementary_type(elem_type)
is_versioned = not elem_elementary and not raw_container
version = None
if is_versioned:
version = await self.version(container_type, params, elem=container)
if self.is_tracked():
return self.get_tracked()
if hasattr(container_type, 'boost_serialize'):
container = container_type() if container is None else container
self.pop_track(is_versioned)
return await container.boost_serialize(self, elem=container, elem_type=container_type, params=params, version=version)
# Container entry version + container
if self.writing:
self.pop_track(is_versioned)
return await self.container_dump(container, container_type, params)
else:
obj = await self.container_load(container_type, params=params, container=container)
return self.track_obj(obj, is_versioned) | Loads/dumps container
:return: | Below is the instruction that describes the task:
### Input:
Loads/dumps container
:return:
### Response:
async def container(self, container=None, container_type=None, params=None):
"""
Loads/dumps container
:return:
"""
# Container versioning is a bit tricky, primitive type containers are not versioned.
elem_type = x.container_elem_type(container_type, params)
raw_container = container_is_raw(container_type, params)
elem_elementary = TypeWrapper.is_elementary_type(elem_type)
is_versioned = not elem_elementary and not raw_container
version = None
if is_versioned:
version = await self.version(container_type, params, elem=container)
if self.is_tracked():
return self.get_tracked()
if hasattr(container_type, 'boost_serialize'):
container = container_type() if container is None else container
self.pop_track(is_versioned)
return await container.boost_serialize(self, elem=container, elem_type=container_type, params=params, version=version)
# Container entry version + container
if self.writing:
self.pop_track(is_versioned)
return await self.container_dump(container, container_type, params)
else:
obj = await self.container_load(container_type, params=params, container=container)
return self.track_obj(obj, is_versioned) |
def Load(cls, file_input, client=None):
"""Loads an IncrementalUploadHelper from the given file-like object.
Args:
file_input: a file-like object containing a serialized
IncrementalUploadHelper.
client: an AdWordsClient instance. If not specified, an AdWordsClient will
be instantiated using the default configuration file.
Returns:
An IncrementalUploadHelper instance initialized using the contents of the
serialized input file.
Raises:
GoogleAdsError: If there is an error reading the input file containing the
serialized IncrementalUploadHelper.
GoogleAdsValueError: If the contents of the input file can't be parsed to
produce an IncrementalUploadHelper.
"""
if client is None:
client = AdWordsClient.LoadFromStorage()
try:
data = yaml.safe_load(file_input)
except yaml.YAMLError as e:
raise googleads.errors.GoogleAdsError(
'Error loading IncrementalUploadHelper from file: %s' % str(e))
try:
request_builder = BatchJobHelper.GetRequestBuilder(
client, version=data['version'], server=data['server']
)
return cls(request_builder, data['upload_url'],
current_content_length=data['current_content_length'],
is_last=data['is_last'])
except KeyError as e:
raise googleads.errors.GoogleAdsValueError(
'Can\'t parse IncrementalUploadHelper from file. Required field '
'"%s" is missing.' % e.message) | Loads an IncrementalUploadHelper from the given file-like object.
Args:
file_input: a file-like object containing a serialized
IncrementalUploadHelper.
client: an AdWordsClient instance. If not specified, an AdWordsClient will
be instantiated using the default configuration file.
Returns:
An IncrementalUploadHelper instance initialized using the contents of the
serialized input file.
Raises:
GoogleAdsError: If there is an error reading the input file containing the
serialized IncrementalUploadHelper.
GoogleAdsValueError: If the contents of the input file can't be parsed to
produce an IncrementalUploadHelper. | Below is the instruction that describes the task:
### Input:
Loads an IncrementalUploadHelper from the given file-like object.
Args:
file_input: a file-like object containing a serialized
IncrementalUploadHelper.
client: an AdWordsClient instance. If not specified, an AdWordsClient will
be instantiated using the default configuration file.
Returns:
An IncrementalUploadHelper instance initialized using the contents of the
serialized input file.
Raises:
GoogleAdsError: If there is an error reading the input file containing the
serialized IncrementalUploadHelper.
GoogleAdsValueError: If the contents of the input file can't be parsed to
produce an IncrementalUploadHelper.
### Response:
def Load(cls, file_input, client=None):
"""Loads an IncrementalUploadHelper from the given file-like object.
Args:
file_input: a file-like object containing a serialized
IncrementalUploadHelper.
client: an AdWordsClient instance. If not specified, an AdWordsClient will
be instantiated using the default configuration file.
Returns:
An IncrementalUploadHelper instance initialized using the contents of the
serialized input file.
Raises:
GoogleAdsError: If there is an error reading the input file containing the
serialized IncrementalUploadHelper.
GoogleAdsValueError: If the contents of the input file can't be parsed to
produce an IncrementalUploadHelper.
"""
if client is None:
client = AdWordsClient.LoadFromStorage()
try:
data = yaml.safe_load(file_input)
except yaml.YAMLError as e:
raise googleads.errors.GoogleAdsError(
'Error loading IncrementalUploadHelper from file: %s' % str(e))
try:
request_builder = BatchJobHelper.GetRequestBuilder(
client, version=data['version'], server=data['server']
)
return cls(request_builder, data['upload_url'],
current_content_length=data['current_content_length'],
is_last=data['is_last'])
except KeyError as e:
raise googleads.errors.GoogleAdsValueError(
'Can\'t parse IncrementalUploadHelper from file. Required field '
'"%s" is missing.' % e.message) |
def uninstall(self):
"""
Uninstalls the bundle
"""
with self._lock:
if self._state == Bundle.ACTIVE:
self.stop()
# Change the bundle state
self._state = Bundle.UNINSTALLED
# Call the framework
self.__framework.uninstall_bundle(self) | Uninstalls the bundle | Below is the instruction that describes the task:
### Input:
Uninstalls the bundle
### Response:
def uninstall(self):
"""
Uninstalls the bundle
"""
with self._lock:
if self._state == Bundle.ACTIVE:
self.stop()
# Change the bundle state
self._state = Bundle.UNINSTALLED
# Call the framework
self.__framework.uninstall_bundle(self) |
def _DrawTrips(self,triplist,colpar=""):
"""Generates svg polylines for each transit trip.
Args:
# Class Trip is defined in transitfeed.py
[Trip, Trip, ...]
Returns:
# A string containing a polyline tag for each trip
' <polyline class="T" stroke="#336633" points="433,0 ...'
"""
stations = []
if not self._stations and triplist:
self._stations = self._CalculateYLines(self._TravelTimes(triplist))
if not self._stations:
self._AddWarning("Failed to use traveltimes for graph")
self._stations = self._CalculateYLines(self._Uniform(triplist))
if not self._stations:
self._AddWarning("Failed to calculate station distances")
return
stations = self._stations
tmpstrs = []
servlist = []
for t in triplist:
if not colpar:
if t.service_id not in servlist:
servlist.append(t.service_id)
shade = int(servlist.index(t.service_id) * (200/len(servlist))+55)
color = "#00%s00" % hex(shade)[2:4]
else:
color=colpar
start_offsets = [0]
first_stop = t.GetTimeStops()[0]
for j,freq_offset in enumerate(start_offsets):
if j>0 and not colpar:
color="purple"
scriptcall = 'onmouseover="LineClick(\'%s\',\'Trip %s starting %s\')"' % (t.trip_id,
t.trip_id, transitfeed.FormatSecondsSinceMidnight(t.GetStartTime()))
tmpstrhead = '<polyline class="T" id="%s" stroke="%s" %s points="' % \
(str(t.trip_id),color, scriptcall)
tmpstrs.append(tmpstrhead)
for i, s in enumerate(t.GetTimeStops()):
arr_t = s[0]
dep_t = s[1]
if arr_t is None or dep_t is None:
continue
arr_x = int(arr_t/3600.0 * self._hour_grid) - self._hour_grid * self._offset
dep_x = int(dep_t/3600.0 * self._hour_grid) - self._hour_grid * self._offset
tmpstrs.append("%s,%s " % (int(arr_x+20), int(stations[i]+20)))
tmpstrs.append("%s,%s " % (int(dep_x+20), int(stations[i]+20)))
tmpstrs.append('" />')
return "".join(tmpstrs) | Generates svg polylines for each transit trip.
Args:
# Class Trip is defined in transitfeed.py
[Trip, Trip, ...]
Returns:
# A string containing a polyline tag for each trip
' <polyline class="T" stroke="#336633" points="433,0 ...' | Below is the instruction that describes the task:
### Input:
Generates svg polylines for each transit trip.
Args:
# Class Trip is defined in transitfeed.py
[Trip, Trip, ...]
Returns:
# A string containing a polyline tag for each trip
' <polyline class="T" stroke="#336633" points="433,0 ...'
### Response:
def _DrawTrips(self,triplist,colpar=""):
"""Generates svg polylines for each transit trip.
Args:
# Class Trip is defined in transitfeed.py
[Trip, Trip, ...]
Returns:
# A string containing a polyline tag for each trip
' <polyline class="T" stroke="#336633" points="433,0 ...'
"""
stations = []
if not self._stations and triplist:
self._stations = self._CalculateYLines(self._TravelTimes(triplist))
if not self._stations:
self._AddWarning("Failed to use traveltimes for graph")
self._stations = self._CalculateYLines(self._Uniform(triplist))
if not self._stations:
self._AddWarning("Failed to calculate station distances")
return
stations = self._stations
tmpstrs = []
servlist = []
for t in triplist:
if not colpar:
if t.service_id not in servlist:
servlist.append(t.service_id)
shade = int(servlist.index(t.service_id) * (200/len(servlist))+55)
color = "#00%s00" % hex(shade)[2:4]
else:
color=colpar
start_offsets = [0]
first_stop = t.GetTimeStops()[0]
for j,freq_offset in enumerate(start_offsets):
if j>0 and not colpar:
color="purple"
scriptcall = 'onmouseover="LineClick(\'%s\',\'Trip %s starting %s\')"' % (t.trip_id,
t.trip_id, transitfeed.FormatSecondsSinceMidnight(t.GetStartTime()))
tmpstrhead = '<polyline class="T" id="%s" stroke="%s" %s points="' % \
(str(t.trip_id),color, scriptcall)
tmpstrs.append(tmpstrhead)
for i, s in enumerate(t.GetTimeStops()):
arr_t = s[0]
dep_t = s[1]
if arr_t is None or dep_t is None:
continue
arr_x = int(arr_t/3600.0 * self._hour_grid) - self._hour_grid * self._offset
dep_x = int(dep_t/3600.0 * self._hour_grid) - self._hour_grid * self._offset
tmpstrs.append("%s,%s " % (int(arr_x+20), int(stations[i]+20)))
tmpstrs.append("%s,%s " % (int(dep_x+20), int(stations[i]+20)))
tmpstrs.append('" />')
return "".join(tmpstrs) |
def append(self, word, lemma=None, type=None, chunk=None, role=None, relation=None, pnp=None, anchor=None, iob=None, custom={}):
""" Appends the next word to the sentence / chunk / preposition.
For example: Sentence.append("clawed", "claw", "VB", "VP", role=None, relation=1)
- word : the current word,
- lemma : the canonical form of the word,
- type : part-of-speech tag for the word (NN, JJ, ...),
- chunk : part-of-speech tag for the chunk this word is part of (NP, VP, ...),
- role : the chunk's grammatical role (SBJ, OBJ, ...),
- relation : an id shared by other related chunks (e.g., SBJ-1 <=> VP-1),
- pnp : PNP if this word is in a prepositional noun phrase (B- prefix optional),
- iob : BEGIN if the word marks the start of a new chunk,
INSIDE (optional) if the word is part of the previous chunk,
- custom : a dictionary of (tag, value)-items for user-defined word tags.
"""
self._do_word(word, lemma, type) # Append Word object.
self._do_chunk(chunk, role, relation, iob) # Append Chunk, or add last word to last chunk.
self._do_conjunction()
self._do_relation()
self._do_pnp(pnp, anchor)
self._do_anchor(anchor)
self._do_custom(custom) | Appends the next word to the sentence / chunk / preposition.
For example: Sentence.append("clawed", "claw", "VB", "VP", role=None, relation=1)
- word : the current word,
- lemma : the canonical form of the word,
- type : part-of-speech tag for the word (NN, JJ, ...),
- chunk : part-of-speech tag for the chunk this word is part of (NP, VP, ...),
- role : the chunk's grammatical role (SBJ, OBJ, ...),
- relation : an id shared by other related chunks (e.g., SBJ-1 <=> VP-1),
- pnp : PNP if this word is in a prepositional noun phrase (B- prefix optional),
- iob : BEGIN if the word marks the start of a new chunk,
INSIDE (optional) if the word is part of the previous chunk,
- custom : a dictionary of (tag, value)-items for user-defined word tags. | Below is the instruction that describes the task:
### Input:
Appends the next word to the sentence / chunk / preposition.
For example: Sentence.append("clawed", "claw", "VB", "VP", role=None, relation=1)
- word : the current word,
- lemma : the canonical form of the word,
- type : part-of-speech tag for the word (NN, JJ, ...),
- chunk : part-of-speech tag for the chunk this word is part of (NP, VP, ...),
- role : the chunk's grammatical role (SBJ, OBJ, ...),
- relation : an id shared by other related chunks (e.g., SBJ-1 <=> VP-1),
- pnp : PNP if this word is in a prepositional noun phrase (B- prefix optional),
- iob : BEGIN if the word marks the start of a new chunk,
INSIDE (optional) if the word is part of the previous chunk,
- custom : a dictionary of (tag, value)-items for user-defined word tags.
### Response:
def append(self, word, lemma=None, type=None, chunk=None, role=None, relation=None, pnp=None, anchor=None, iob=None, custom={}):
""" Appends the next word to the sentence / chunk / preposition.
For example: Sentence.append("clawed", "claw", "VB", "VP", role=None, relation=1)
- word : the current word,
- lemma : the canonical form of the word,
- type : part-of-speech tag for the word (NN, JJ, ...),
- chunk : part-of-speech tag for the chunk this word is part of (NP, VP, ...),
- role : the chunk's grammatical role (SBJ, OBJ, ...),
- relation : an id shared by other related chunks (e.g., SBJ-1 <=> VP-1),
- pnp : PNP if this word is in a prepositional noun phrase (B- prefix optional),
- iob : BEGIN if the word marks the start of a new chunk,
INSIDE (optional) if the word is part of the previous chunk,
- custom : a dictionary of (tag, value)-items for user-defined word tags.
"""
self._do_word(word, lemma, type) # Append Word object.
self._do_chunk(chunk, role, relation, iob) # Append Chunk, or add last word to last chunk.
self._do_conjunction()
self._do_relation()
self._do_pnp(pnp, anchor)
self._do_anchor(anchor)
self._do_custom(custom) |
async def generate_wallet_key(config: Optional[str]) -> str:
"""
Generate wallet master key.
Returned key is compatible with "RAW" key derivation method.
    It allows avoiding expensive key derivation for use cases when wallet keys can be stored in a secure enclave.
:param config: (optional) key configuration json.
{
"seed": string, (optional) Seed that allows deterministic key creation (if not set random one will be created).
Can be UTF-8, base64 or hex string.
}
    :return: Generated wallet master key.
"""
logger = logging.getLogger(__name__)
logger.debug("generate_wallet_key: >>> config: %r",
config)
if not hasattr(generate_wallet_key, "cb"):
logger.debug("generate_wallet_key: Creating callback")
generate_wallet_key.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_config = c_char_p(config.encode('utf-8')) if config is not None else None
key = await do_call('indy_generate_wallet_key',
c_config,
generate_wallet_key.cb)
res = key.decode()
logger.debug("generate_wallet_key: <<< res: %r", res)
return res | Generate wallet master key.
Returned key is compatible with "RAW" key derivation method.
It allows avoiding expensive key derivation for use cases when wallet keys can be stored in a secure enclave.
:param config: (optional) key configuration json.
{
"seed": string, (optional) Seed that allows deterministic key creation (if not set random one will be created).
Can be UTF-8, base64 or hex string.
}
:return: Generated wallet master key. | Below is the instruction that describes the task:
### Input:
Generate wallet master key.
Returned key is compatible with "RAW" key derivation method.
It allows avoiding expensive key derivation for use cases when wallet keys can be stored in a secure enclave.
:param config: (optional) key configuration json.
{
"seed": string, (optional) Seed that allows deterministic key creation (if not set random one will be created).
Can be UTF-8, base64 or hex string.
}
:return: Generated wallet master key.
### Response:
async def generate_wallet_key(config: Optional[str]) -> str:
"""
Generate wallet master key.
Returned key is compatible with "RAW" key derivation method.
    It allows avoiding expensive key derivation for use cases when wallet keys can be stored in a secure enclave.
:param config: (optional) key configuration json.
{
"seed": string, (optional) Seed that allows deterministic key creation (if not set random one will be created).
Can be UTF-8, base64 or hex string.
}
    :return: Generated wallet master key.
"""
logger = logging.getLogger(__name__)
logger.debug("generate_wallet_key: >>> config: %r",
config)
if not hasattr(generate_wallet_key, "cb"):
logger.debug("generate_wallet_key: Creating callback")
generate_wallet_key.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_config = c_char_p(config.encode('utf-8')) if config is not None else None
key = await do_call('indy_generate_wallet_key',
c_config,
generate_wallet_key.cb)
res = key.decode()
logger.debug("generate_wallet_key: <<< res: %r", res)
return res |
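A hedged usage sketch: it assumes the python3-indy wrapper is installed and exposes this coroutine as indy.wallet.generate_wallet_key; the 32-character seed is a made-up example value.

import asyncio
import json
from indy import wallet  # assumption: python3-indy package is installed

async def main():
    # Deterministic key derived from a seed (example value only); omit "seed" for a random key.
    config = json.dumps({"seed": "000000000000000000000000MySeed01"})
    key = await wallet.generate_wallet_key(config)
    print(key)  # key string usable with the "RAW" key derivation method

asyncio.run(main())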
def url(self):
"""The URL as a string of the resource."""
if not self._url[2].endswith('/'):
self._url[2] += '/'
        return RestURL.url.__get__(self) | The URL as a string of the resource. | Below is the instruction that describes the task:
### Input:
The URL as a string of the resource.
### Response:
def url(self):
"""The URL as a string of the resource."""
if not self._url[2].endswith('/'):
self._url[2] += '/'
return RestURL.url.__get__(self) |
def maybe_cythonize_extensions(top_path, config):
"""Tweaks for building extensions between release and development mode."""
is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO'))
if is_release:
build_from_c_and_cpp_files(config.ext_modules)
else:
message = ('Please install cython with a version >= {0} in order '
'to build a scikit-survival development version.').format(
CYTHON_MIN_VERSION)
try:
import Cython
if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION:
message += ' Your version of Cython was {0}.'.format(
Cython.__version__)
raise ValueError(message)
from Cython.Build import cythonize
except ImportError as exc:
exc.args += (message,)
raise
# http://docs.cython.org/en/latest/src/userguide/source_files_and_compilation.html#cythonize-arguments
directives = {'language_level': '3'}
cy_cov = os.environ.get('CYTHON_COVERAGE', False)
if cy_cov:
directives['linetrace'] = True
macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')]
else:
macros = []
config.ext_modules = cythonize(
config.ext_modules,
compiler_directives=directives)
for e in config.ext_modules:
        e.define_macros.extend(macros) | Tweaks for building extensions between release and development mode. | Below is the instruction that describes the task:
### Input:
Tweaks for building extensions between release and development mode.
### Response:
def maybe_cythonize_extensions(top_path, config):
"""Tweaks for building extensions between release and development mode."""
is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO'))
if is_release:
build_from_c_and_cpp_files(config.ext_modules)
else:
message = ('Please install cython with a version >= {0} in order '
'to build a scikit-survival development version.').format(
CYTHON_MIN_VERSION)
try:
import Cython
if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION:
message += ' Your version of Cython was {0}.'.format(
Cython.__version__)
raise ValueError(message)
from Cython.Build import cythonize
except ImportError as exc:
exc.args += (message,)
raise
# http://docs.cython.org/en/latest/src/userguide/source_files_and_compilation.html#cythonize-arguments
directives = {'language_level': '3'}
cy_cov = os.environ.get('CYTHON_COVERAGE', False)
if cy_cov:
directives['linetrace'] = True
macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')]
else:
macros = []
config.ext_modules = cythonize(
config.ext_modules,
compiler_directives=directives)
for e in config.ext_modules:
e.define_macros.extend(macros) |
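A hedged sketch of how a helper like this is typically driven from a numpy.distutils-based setup script; the package and extension names are placeholders, and numpy.distutils is assumed to be available (it is deprecated in recent NumPy releases).

# Hypothetical setup.py fragment; "mypkg" and "_fast_routines" are placeholder names.
from numpy.distutils.misc_util import Configuration

config = Configuration("mypkg", "", ".")
config.add_extension("_fast_routines", sources=["mypkg/_fast_routines.pyx"])

# In a release tarball (PKG-INFO present) the .pyx sources are swapped for the
# pre-generated C/C++ files; otherwise Cython >= CYTHON_MIN_VERSION is required.
maybe_cythonize_extensions(".", config)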
def mknts(self, add_dct):
"""Add information from add_dct to a new copy of namedtuples stored in nts."""
nts = []
assert len(add_dct) == len(self.nts)
flds = list(next(iter(self.nts))._fields) + list(next(iter(add_dct)).keys())
ntobj = cx.namedtuple("ntgoea", " ".join(flds))
for dct_new, ntgoea in zip(add_dct, self.nts):
dct_curr = ntgoea._asdict()
for key, val in dct_new.items():
dct_curr[key] = val
nts.append(ntobj(**dct_curr))
        return nts | Add information from add_dct to a new copy of namedtuples stored in nts. | Below is the instruction that describes the task:
### Input:
Add information from add_dct to a new copy of namedtuples stored in nts.
### Response:
def mknts(self, add_dct):
"""Add information from add_dct to a new copy of namedtuples stored in nts."""
nts = []
assert len(add_dct) == len(self.nts)
flds = list(next(iter(self.nts))._fields) + list(next(iter(add_dct)).keys())
ntobj = cx.namedtuple("ntgoea", " ".join(flds))
for dct_new, ntgoea in zip(add_dct, self.nts):
dct_curr = ntgoea._asdict()
for key, val in dct_new.items():
dct_curr[key] = val
nts.append(ntobj(**dct_curr))
return nts |
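A self-contained sketch of the same merge idea: extend each namedtuple with extra per-record fields taken from a parallel list of dicts. The field names here are illustrative, not the actual GOEA record types used above.

import collections as cx

Base = cx.namedtuple("Base", "GO name")
base_nts = [Base("GO:0008150", "biological_process"),
            Base("GO:0003674", "molecular_function")]
add_dct = [{"p_fdr": 0.01}, {"p_fdr": 0.20}]

# Build a wider namedtuple whose fields are the old fields plus the new keys.
flds = list(base_nts[0]._fields) + list(add_dct[0].keys())
Merged = cx.namedtuple("Merged", " ".join(flds))
merged = [Merged(**{**nt._asdict(), **extra}) for nt, extra in zip(base_nts, add_dct)]
print(merged[0])  # Merged(GO='GO:0008150', name='biological_process', p_fdr=0.01)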
def add(self, requirements, required=None):
"""
Add requirements to be managed
:param list/Requirement requirements: List of :class:`BumpRequirement` or :class:`pkg_resources.Requirement`
:param bool required: Set required flag for each requirement if provided.
"""
if isinstance(requirements, RequirementsManager):
requirements = list(requirements)
elif not isinstance(requirements, list):
requirements = [requirements]
for req in requirements:
name = req.project_name
if not isinstance(req, BumpRequirement):
req = BumpRequirement(req, required=required)
elif required is not None:
req.required = required
add = True
if name in self.requirements:
for existing_req in self.requirements[name]:
if req == existing_req:
add = False
break
# Need to replace existing as the new req will be used to bump next, and req.required could be
# updated.
replace = False
# Two pins: Use highest pinned version
if (req.specs and req.specs[0][0] == '==' and existing_req.specs and
existing_req.specs[0][0] == '=='):
if pkg_resources.parse_version(req.specs[0][1]) < pkg_resources.parse_version(
existing_req.specs[0][1]):
req.requirement = existing_req.requirement
replace = True
# Replace Any
if not (req.specs and existing_req.specs):
if existing_req.specs:
req.requirement = existing_req.requirement
replace = True
if replace:
req.required |= existing_req.required
if existing_req.required_by and not req.required_by:
req.required_by = existing_req.required_by
self.requirements[name].remove(existing_req)
break
if add:
self.requirements[name].append(req) | Add requirements to be managed
:param list/Requirement requirements: List of :class:`BumpRequirement` or :class:`pkg_resources.Requirement`
:param bool required: Set required flag for each requirement if provided. | Below is the instruction that describes the task:
### Input:
Add requirements to be managed
:param list/Requirement requirements: List of :class:`BumpRequirement` or :class:`pkg_resources.Requirement`
:param bool required: Set required flag for each requirement if provided.
### Response:
def add(self, requirements, required=None):
"""
Add requirements to be managed
:param list/Requirement requirements: List of :class:`BumpRequirement` or :class:`pkg_resources.Requirement`
:param bool required: Set required flag for each requirement if provided.
"""
if isinstance(requirements, RequirementsManager):
requirements = list(requirements)
elif not isinstance(requirements, list):
requirements = [requirements]
for req in requirements:
name = req.project_name
if not isinstance(req, BumpRequirement):
req = BumpRequirement(req, required=required)
elif required is not None:
req.required = required
add = True
if name in self.requirements:
for existing_req in self.requirements[name]:
if req == existing_req:
add = False
break
# Need to replace existing as the new req will be used to bump next, and req.required could be
# updated.
replace = False
# Two pins: Use highest pinned version
if (req.specs and req.specs[0][0] == '==' and existing_req.specs and
existing_req.specs[0][0] == '=='):
if pkg_resources.parse_version(req.specs[0][1]) < pkg_resources.parse_version(
existing_req.specs[0][1]):
req.requirement = existing_req.requirement
replace = True
# Replace Any
if not (req.specs and existing_req.specs):
if existing_req.specs:
req.requirement = existing_req.requirement
replace = True
if replace:
req.required |= existing_req.required
if existing_req.required_by and not req.required_by:
req.required_by = existing_req.required_by
self.requirements[name].remove(existing_req)
break
if add:
self.requirements[name].append(req) |
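A standalone sketch of the "two pins: use highest" rule above, using only pkg_resources; the project name is made up.

import pkg_resources

existing = pkg_resources.Requirement.parse("acme-lib==1.2.0")
incoming = pkg_resources.Requirement.parse("acme-lib==1.10.0")

# specs is a list of (operator, version) tuples, e.g. [('==', '1.2.0')].
# parse_version compares releases numerically, so 1.10.0 > 1.2.0 even though
# a plain string comparison would say otherwise.
if pkg_resources.parse_version(incoming.specs[0][1]) < pkg_resources.parse_version(existing.specs[0][1]):
    incoming = existing  # keep the higher pin, mirroring req.requirement = existing_req.requirement

print(incoming)  # acme-lib==1.10.0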
def add_config_path(path):
"""Select config parser by file extension and add path into parser.
"""
if not os.path.isfile(path):
warnings.warn("Config file does not exist: {path}".format(path=path))
return False
# select parser by file extension
_base, ext = os.path.splitext(path)
if ext and ext[1:] in PARSERS:
parser = ext[1:]
else:
parser = PARSER
parser_class = PARSERS[parser]
_check_parser(parser_class, parser)
if parser != PARSER:
msg = (
"Config for {added} parser added, but used {used} parser. "
"Set up right parser via env var: "
"export LUIGI_CONFIG_PARSER={added}"
)
warnings.warn(msg.format(added=parser, used=PARSER))
# add config path to parser
parser_class.add_config_path(path)
    return True | Select config parser by file extension and add path into parser. | Below is the instruction that describes the task:
### Input:
Select config parser by file extension and add path into parser.
### Response:
def add_config_path(path):
"""Select config parser by file extension and add path into parser.
"""
if not os.path.isfile(path):
warnings.warn("Config file does not exist: {path}".format(path=path))
return False
# select parser by file extension
_base, ext = os.path.splitext(path)
if ext and ext[1:] in PARSERS:
parser = ext[1:]
else:
parser = PARSER
parser_class = PARSERS[parser]
_check_parser(parser_class, parser)
if parser != PARSER:
msg = (
"Config for {added} parser added, but used {used} parser. "
"Set up right parser via env var: "
"export LUIGI_CONFIG_PARSER={added}"
)
warnings.warn(msg.format(added=parser, used=PARSER))
# add config path to parser
parser_class.add_config_path(path)
return True |
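The extension-to-parser selection can be shown in isolation; the PARSER default and PARSERS mapping below are hypothetical stand-ins for the module-level registry referenced above.

import os

PARSER = "cfg"                                                      # hypothetical default
PARSERS = {"cfg": "LuigiConfigParser", "toml": "LuigiTomlParser"}   # hypothetical registry

def pick_parser(path):
    _base, ext = os.path.splitext(path)
    return ext[1:] if ext and ext[1:] in PARSERS else PARSER

print(pick_parser("/etc/luigi/luigi.toml"))  # toml
print(pick_parser("/etc/luigi/client.cfg"))  # cfg
print(pick_parser("/etc/luigi/noext"))       # cfg (falls back to the default)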
def get_certificate(self, **kwargs):
"""Get the attributes of the current array certificate.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert**
:type \*\*kwargs: optional
:returns: A dictionary describing the configured array certificate.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.3 or later.
"""
if self._rest_version >= LooseVersion("1.12"):
return self._request("GET",
"cert/{0}".format(kwargs.pop('name', 'management')), kwargs)
else:
return self._request("GET", "cert", kwargs) | Get the attributes of the current array certificate.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert**
:type \*\*kwargs: optional
:returns: A dictionary describing the configured array certificate.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.3 or later. | Below is the instruction that describes the task:
### Input:
Get the attributes of the current array certificate.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert**
:type \*\*kwargs: optional
:returns: A dictionary describing the configured array certificate.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.3 or later.
### Response:
def get_certificate(self, **kwargs):
"""Get the attributes of the current array certificate.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert**
:type \*\*kwargs: optional
:returns: A dictionary describing the configured array certificate.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.3 or later.
"""
if self._rest_version >= LooseVersion("1.12"):
return self._request("GET",
"cert/{0}".format(kwargs.pop('name', 'management')), kwargs)
else:
return self._request("GET", "cert", kwargs) |
def _extract_log_probs(num_states, dist):
"""Tabulate log probabilities from a batch of distributions."""
states = tf.reshape(tf.range(num_states),
tf.concat([[num_states],
tf.ones_like(dist.batch_shape_tensor())],
axis=0))
  return distribution_util.move_dimension(dist.log_prob(states), 0, -1) | Tabulate log probabilities from a batch of distributions. | Below is the instruction that describes the task:
### Input:
Tabulate log probabilities from a batch of distributions.
### Response:
def _extract_log_probs(num_states, dist):
"""Tabulate log probabilities from a batch of distributions."""
states = tf.reshape(tf.range(num_states),
tf.concat([[num_states],
tf.ones_like(dist.batch_shape_tensor())],
axis=0))
return distribution_util.move_dimension(dist.log_prob(states), 0, -1) |
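A small shape-check sketch of the same tabulation, assuming TensorFlow 2 and TensorFlow Probability are installed; for a rank-1 batch the dimension move reduces to a transpose.

import tensorflow as tf
import tensorflow_probability as tfp

num_states = 3
# A batch of 4 observation distributions over 3 hidden states.
dist = tfp.distributions.Categorical(logits=tf.zeros([4, num_states]))

states = tf.reshape(tf.range(num_states),
                    tf.concat([[num_states], tf.ones_like(dist.batch_shape_tensor())], axis=0))
log_probs = dist.log_prob(states)    # broadcasts to shape [num_states, 4]
tabulated = tf.transpose(log_probs)  # states moved to the last axis -> [4, num_states]
print(tabulated.shape)               # (4, 3)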
def renderModelHasComponent(self, pchRenderModelName, pchComponentName):
"""Returns true if the render model has a component with the specified name"""
fn = self.function_table.renderModelHasComponent
result = fn(pchRenderModelName, pchComponentName)
        return result | Returns true if the render model has a component with the specified name | Below is the instruction that describes the task:
### Input:
Returns true if the render model has a component with the specified name
### Response:
def renderModelHasComponent(self, pchRenderModelName, pchComponentName):
"""Returns true if the render model has a component with the specified name"""
fn = self.function_table.renderModelHasComponent
result = fn(pchRenderModelName, pchComponentName)
return result |
def lock(self):
"""Returns a JSON representation of the Pipfile."""
data = self.data
data['_meta']['hash'] = {"sha256": self.hash}
data['_meta']['pipfile-spec'] = 6
        return json.dumps(data, indent=4, separators=(',', ': ')) | Returns a JSON representation of the Pipfile. | Below is the instruction that describes the task:
### Input:
Returns a JSON representation of the Pipfile.
### Response:
def lock(self):
"""Returns a JSON representation of the Pipfile."""
data = self.data
data['_meta']['hash'] = {"sha256": self.hash}
data['_meta']['pipfile-spec'] = 6
return json.dumps(data, indent=4, separators=(',', ': ')) |