repo_name | path | copies | size | content | license
---|---|---|---|---|---|
aksaxena80/test | tensorflow/python/ops/array_ops.py | 5 | 39065 | """## Casting
TensorFlow provides several operations that you can use to cast tensor data
types in your graph.
@@string_to_number
@@to_double
@@to_float
@@to_bfloat16
@@to_int32
@@to_int64
@@cast
## Shapes and Shaping
TensorFlow provides several operations that you can use to determine the shape
of a tensor and change the shape of a tensor.
@@shape
@@size
@@rank
@@reshape
@@squeeze
@@expand_dims
## Slicing and Joining
TensorFlow provides several operations to slice or extract parts of a tensor,
or join multiple tensors together.
@@slice
@@split
@@tile
@@pad
@@concat
@@pack
@@unpack
@@reverse_sequence
@@reverse
@@transpose
@@gather
@@dynamic_partition
@@dynamic_stitch
"""
import sys
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import types
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# pylint: disable=wildcard-import
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.ops.constant_op import constant
from tensorflow.python.ops.gen_array_ops import *
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_baseslice = slice
# Aliases for some automatically-generated names.
listdiff = gen_array_ops.list_diff
# pylint: disable=undefined-variable,protected-access
def _SliceHelper(tensor, slice_spec):
"""Overload for Tensor.__getitem__.
Currently the size of the slice must be statically known in each dimension,
i.e. the "stop" of the slice must not be omitted.
TODO(mrry): Support slices where the sizes are not specified.
TODO(mrry): Support negative indices in slices with numpy/Python semantics.
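For example (illustrative), `tensor[1, 2:4]` is translated into
`slice(tensor, [1, 2], [1, 2])` followed by a `squeeze` on dimension 0.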
Args:
tensor: An ops.Tensor object.
slice_spec: The arguments to Tensor.__getitem__.
Returns:
The appropriate slice of "tensor", based on "slice_spec".
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, or Ellipsis.
"""
if not isinstance(slice_spec, (list, tuple)):
slice_spec = [slice_spec]
indices = []
sizes = []
squeeze_dims = []
for dim, s in enumerate(slice_spec):
if isinstance(s, int):
if s < 0:
raise NotImplementedError("Negative indices are currently unsupported")
indices.append(s)
sizes.append(1)
squeeze_dims.append(dim)
elif isinstance(s, _baseslice):
if s.step not in (None, 1):
raise NotImplementedError(
"Steps other than 1 are not currently supported")
start = s.start if s.start is not None else 0
if start < 0:
raise NotImplementedError(
"Negative start indices are not currently supported")
indices.append(start)
if s.stop is not None and s.stop < 0:
raise NotImplementedError(
"Negative stop indices are not currently supported")
# NOTE(mrry): If the stop is not specified, Python substitutes
# sys.maxsize, which is typically (2 ** 63) - 1. Since Slice currently
# supports signed DT_INT32 arguments, we use -1 to specify that all
# elements should be captured.
if s.stop is None or s.stop == sys.maxsize:
sizes.append(-1)
else:
if start > s.stop:
raise ValueError("Stop must be at least start")
sizes.append(s.stop - start)
elif s is Ellipsis:
raise NotImplementedError("Ellipsis is not currently supported")
else:
raise TypeError("Bad slice index %s of type %s" % (s, type(s)))
sliced = slice(tensor, indices, sizes)
if squeeze_dims:
return squeeze(sliced, squeeze_dims=squeeze_dims)
else:
return sliced
def slice(input_, begin, size, name=None):
"""Extracts a slice from a tensor.
This operation extracts a slice of size `size` from a tensor `input` starting
at the location specified by `begin`. The slice `size` is represented as a
tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
of `input` that you want to slice. The starting location (`begin`) for the
slice is represented as an offset in each dimension of `input`. In other
words, `begin[i]` is the offset into the 'i'th dimension of `input` that you
want to slice from.
`begin` is zero-based; `size` is one-based. If `size[i]` is -1,
all remaining elements in dimension i are included in the
slice. In other words, this is equivalent to setting:
`size[i] = input.dim_size(i) - begin[i]`
This operation requires that:
`0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`
For example:
```
# 'input' is [[[1, 1, 1], [2, 2, 2]],
# [[3, 3, 3], [4, 4, 4]],
# [[5, 5, 5], [6, 6, 6]]]
tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]]
tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3],
[4, 4, 4]]]
tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],
[[5, 5, 5]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
ops.Tensor._override_operator("__getitem__", _SliceHelper)
def pack(values, name="pack"):
"""Packs a list of rank-`R` tensors into one rank-`(R+1)` tensor.
Packs tensors in `values` into a tensor with rank one higher than each tensor
in `values` and shape `[len(values)] + values[0].shape`. The output satisfies
`output[i, ...] = values[i][...]`.
This is the opposite of unpack. The numpy equivalent is
tf.pack([x, y, z]) = np.asarray([x, y, z])
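For example (an illustrative sketch, assuming `x`, `y` and `z` are
length-2 vectors):
```python
# 'x' is [1, 4], 'y' is [2, 5], 'z' is [3, 6]
tf.pack([x, y, z]) ==> [[1, 4], [2, 5], [3, 6]]
```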
Args:
values: A list of `Tensor` objects with the same shape and type.
name: A name for this operation (optional).
Returns:
output: A packed `Tensor` with the same type as `values`.
"""
return gen_array_ops._pack(values, name=name)
def unpack(value, num=None, name="unpack"):
"""Unpacks the outer dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` along the first dimension.
If `num` is not specified (the default), it is inferred from `value`'s shape.
If `value.shape[0]` is not known, `ValueError` is raised.
The ith tensor in `output` is the slice `value[i, ...]`. Each tensor in
`output` has shape `value.shape[1:]`.
This is the opposite of pack. The numpy equivalent is
tf.unpack(x, n) = list(x)
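For example (an illustrative sketch, assuming `x` has shape `[3, 2]`):
```python
# 'x' is [[1, 4], [2, 5], [3, 6]]
tf.unpack(x) ==> [[1, 4], [2, 5], [3, 6]]  # a list of three tensors of shape [2]
```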
Args:
value: A rank `R > 0` `Tensor` to be unpacked.
num: An `int`. The first dimension of value. Automatically inferred if
`None` (the default).
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unpacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
"""
if num is None:
value = ops.convert_to_tensor(value)
shape = value.get_shape()
num = shape[0].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % shape)
return gen_array_ops._unpack(value, num=num, name=name)
def concat(concat_dim, values, name="concat"):
"""Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `concat_dim`. If
`values[i].shape = [D0, D1, ... Dconcat_dim(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Rconcat_dim, ...Dn]
where
Rconcat_dim = sum(Dconcat_dim(i))
That is, the data from the input tensors is joined along the `concat_dim`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `concat_dim` must be equal.
For example:
```python
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat(0, [t1, t2]) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat(1, [t1, t2]) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat(0, [t3, t4])) ==> [4, 3]
tf.shape(tf.concat(1, [t3, t4])) ==> [2, 6]
```
Args:
concat_dim: 0-D `int32` `Tensor`. Dimension along which to concatenate.
values: A list of `Tensor` objects or a single `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
return identity(values[0], name=name)
return gen_array_ops._concat(concat_dim=concat_dim,
values=values,
name=name)
@ops.RegisterShape("Pack")
def _PackShape(op):
input_shape = op.inputs[0].get_shape()
for inp in op.inputs[1:]:
input_shape = input_shape.merge_with(inp.get_shape())
return [tensor_shape.TensorShape([len(op.inputs)]).concatenate(input_shape)]
@ops.RegisterShape("Unpack")
def _UnpackShape(op):
input_shape = op.inputs[0].get_shape()
return [input_shape[1:]] * op.get_attr("num")
@ops.RegisterShape("Concat")
def _ConcatShape(op):
concat_dim = tensor_util.ConstantValue(op.inputs[0])
if concat_dim is None:
# Return an unknown shape with the same rank as the inputs, or an
# unknown rank if no input's rank is known.
rank = None
for value in op.inputs[1:]:
if rank is not None:
value.get_shape().assert_has_rank(rank)
else:
rank = value.get_shape().ndims
return [tensor_shape.unknown_shape(ndims=max(rank, 1))]
else:
# Merge all the non-concat dims, and sum the concat dim to make an
# output shape.
concat_dim = int(concat_dim)
output_shape = op.inputs[1].get_shape()
# TODO(irving): Remove once !kAllowLegacyScalars.
if output_shape.ndims == 0:
output_shape = tensor_shape.TensorShape([1])
for value in op.inputs[2:]:
value_shape = value.get_shape()
if value_shape.ndims is not None and concat_dim >= value_shape.ndims:
if value_shape.ndims == 0 and concat_dim == 0:
# Let concat handle scalars
# TODO(irving): Remove once !kAllowLegacyScalars.
value_shape = tensor_shape.TensorShape([1])
else:
raise ValueError("concat_dim is out of range (values rank = %d)" %
value_shape.ndims)
before = output_shape[:concat_dim].merge_with(value_shape[:concat_dim])
at = output_shape[concat_dim] + value_shape[concat_dim]
after = output_shape[
concat_dim + 1:].merge_with(value_shape[concat_dim + 1:])
output_shape = before.concatenate(at).concatenate(after)
return [output_shape]
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices specified
in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices => [12, 26, 37, 45]
tf.shape(a.values) => [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask off its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse_mask(a, [12, 45])
b.indices => [26, 37]
tf.shape(b.values) => [2, 10]
```
Args:
* `a`: An `IndexedSlices` instance.
* `mask_indices`: Indices of elements to mask.
* `name`: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.op_scope([a, mask_indices], name, "sparse_mask") as name:
indices = a.indices
out_indices, to_gather = listdiff(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
def split(split_dim, num_split, value, name="split"):
"""Splits a tensor into `num_split` tensors along one dimension.
Splits `value` along dimension `split_dim` into `num_split` smaller tensors.
Requires that `num_split` evenly divide `value.shape[split_dim]`.
For example:
```python
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(1, 3, value)
tf.shape(split0) ==> [5, 10]
```
Args:
split_dim: A 0-D `int32` `Tensor`. The dimension along which to split.
Must be in the range `[0, rank(value))`.
num_split: A 0-D `int32` `Tensor`. The number of ways to split.
value: The `Tensor` to split.
name: A name for the operation (optional).
Returns:
`num_split` `Tensor` objects resulting from splitting `value`.
"""
return gen_array_ops._split(split_dim=split_dim,
num_split=num_split,
value=value,
name=name)
@ops.RegisterShape("Reverse")
def _ReverseShape(op):
return [op.inputs[0].get_shape().with_rank_at_most(8)]
def transpose(a, perm=None, name="transpose"):
"""Transposes `a`. Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors.
For example:
```python
# 'x' is [[1 2 3]
# [4 5 6]]
tf.transpose(x) ==> [[1 4]
[2 5]
[3 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) ==> [[1 4]
[2 5]
[3 6]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
# 'x' is [[[1 2 3]
# [4 5 6]]
# [[7 8 9]
# [10 11 12]]]
# Take the transpose of the matrices in dimension-0
tf.transpose(x, perm=[0, 2, 1]) ==> [[[1 4]
[2 5]
[3 6]]
[[7 10]
[8 11]
[9 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
Returns:
A transposed `Tensor`.
"""
with ops.op_scope([a], name, "transpose") as name:
if perm is None:
dims = gen_math_ops._range(0, gen_array_ops.rank(a), 1)
perm = gen_array_ops.reverse(dims, [True])
ret = gen_array_ops.transpose(a, perm, name=name)
# NOTE(mrry): Setting the shape explicitly because
# reverse is not handled by the shape function.
input_shape = ret.op.inputs[0].get_shape().dims
if input_shape is not None:
ret.set_shape(input_shape[::-1])
else:
ret = gen_array_ops.transpose(a, perm, name=name)
return ret
def zeros(shape, dtype=types.float32, name=None):
"""Creates a tensor with all elements set to zero.
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to zero.
For example:
```python
tf.zeros([3, 4], int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
```
Args:
shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
with ops.op_scope([shape], name, "zeros") as name:
if isinstance(shape, list):
output = constant(0, shape=shape, dtype=dtype, name=name)
else:
shape = ops.convert_to_tensor(shape, name="shape")
output = fill(shape, constant(0, dtype=dtype), name=name)
assert output.dtype.base_dtype == types.as_dtype(dtype).base_dtype
return output
def zeros_like(tensor, dtype=None, name=None):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
# 'tensor' is [[1, 2, 3], [4, 5, 6]]
tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, or `complex64`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
with ops.op_scope([tensor], name, "zeros_like") as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
zeros_shape = shape(tensor)
if dtype is None:
dtype = tensor.dtype
return zeros(zeros_shape, dtype=dtype, name=name)
def ones_like(tensor, dtype=None, name=None):
"""Creates a tensor with all elements set to 1.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to 1. Optionally, you can
specify a new type (`dtype`) for the returned tensor.
For example:
```python
# 'tensor' is [[1, 2, 3], [4, 5, 6]]
tf.ones_like(tensor) ==> [[1, 1, 1], [1, 1, 1]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, or `complex64`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
with ops.op_scope([tensor], name, "ones_like") as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
ones_shape = shape(tensor)
if dtype is None:
dtype = tensor.dtype
return ones(ones_shape, dtype=dtype, name=name)
def zeros_initializer(shape, dtype=types.float32):
"""An adaptor for zeros() to match the Initializer spec."""
return zeros(shape, dtype)
def ones(shape, dtype=types.float32, name=None):
"""Creates a tensor with all elements set to 1.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to 1.
For example:
```python
tf.ones([2, 3], int32) ==> [[1, 1, 1], [1, 1, 1]]
```
Args:
shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
with ops.op_scope([shape], name, "ones") as name:
if isinstance(shape, list):
output = constant(1, shape=shape, dtype=dtype, name=name)
else:
shape = ops.convert_to_tensor(shape, name="shape")
output = fill(shape, constant(1, dtype=dtype), name=name)
assert output.dtype.base_dtype == types.as_dtype(dtype).base_dtype
return output
def placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.placeholder(float, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.Session() as sess:
print sess.run(y) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print sess.run(y, feed_dict={x: rand_array}) # Will succeed.
```
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
shape = tensor_shape.as_shape(shape)
if shape.is_fully_defined():
dim_list = shape.as_list()
else:
dim_list = []
ret = gen_array_ops._placeholder(
dtype=dtype,
shape=dim_list,
name=name)
ret.set_shape(shape)
return ret
@ops.RegisterShape("Placeholder")
def _PlaceholderShape(op):
given_shape = tensor_util.TensorShapeProtoToList(op.get_attr("shape"))
if given_shape:
return [tensor_shape.TensorShape(given_shape)]
else:
return [tensor_shape.unknown_shape()]
@ops.RegisterShape("CheckNumerics")
@ops.RegisterShape("Identity")
@ops.RegisterShape("RefIdentity")
@ops.RegisterShape("StopGradient")
def _UnchangedShape(op):
return [op.inputs[0].get_shape()]
@ops.RegisterShape("Rank")
@ops.RegisterShape("Size")
def _ScalarShape(unused_op):
return [tensor_shape.scalar()]
@ops.RegisterShape("Slice")
def _SliceShape(op):
"""Shape function for array_ops.slice."""
input_shape = op.inputs[0].get_shape()
begin_shape = op.inputs[1].get_shape().with_rank_at_most(1)
sizes_shape = op.inputs[2].get_shape().with_rank_at_most(1)
rank_vector_shape = begin_shape.merge_with(sizes_shape)
ndims = rank_vector_shape.num_elements()
if ndims is not None:
input_shape.assert_has_rank(ndims)
begin_value = tensor_util.ConstantValue(op.inputs[1])
sizes_value = tensor_util.ConstantValue(op.inputs[2])
if sizes_value is not None:
returned_dims = []
for i, slice_size in enumerate(sizes_value.ravel()):
if slice_size != -1:
returned_dims.append(slice_size)
elif begin_value is not None:
returned_dims.append(input_shape[i] - begin_value[i])
else:
returned_dims.append(None)
return [tensor_shape.TensorShape(returned_dims)]
else:
if input_shape.ndims is not None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
elif ndims is not None:
return [tensor_shape.unknown_shape(ndims=ndims)]
else:
return [tensor_shape.unknown_shape()]
@ops.RegisterShape("Gather")
def _GatherShape(op):
"""Shape function for array_ops.gather."""
params_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape()
return [indices_shape.concatenate(params_shape[1:])]
@ops.RegisterShape("Unique")
def _UniqueShape(op):
"""Shape function for array_ops.Unique."""
# The output is a vector with data-dependent length.
input_shape = op.inputs[0].get_shape()
input_shape.assert_has_rank(1)
return [tensor_shape.vector(None), input_shape]
@ops.RegisterShape("Diag")
def _DiagShape(op):
"""Shape function for array_ops.diag.
This op has one input (of rank k <= 3), and one output (of rank 2k),
where the shape of the output is the concatenation of the input
shape with itself.
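For example, an input of shape `[2, 3]` produces an output of shape
`[2, 3, 2, 3]`.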
Args:
op: A Diag Operation.
Returns:
A single-element list containing the shape of the output.
"""
input_shape = op.inputs[0].get_shape().with_rank_at_most(3)
return [input_shape.concatenate(input_shape)]
@ops.RegisterShape("ExpandDims")
def _ExpandDimsShape(op):
"""Determine shape for expand op's output tensor.
Args:
op: Operation for which to determine shape.
op.inputs[0] is the input tensor.
op.inputs[1] is the dimension in which to expand.
Returns:
Shape of op's output tensor.
Raises:
ValueError: If dim is outside of [-rank - 1, rank], where rank is the number
of dimensions in the input tensor.
"""
input_shape = op.inputs[0].get_shape()
if input_shape.dims is None:
return [tensor_shape.unknown_shape()]
dim = tensor_util.ConstantValue(op.inputs[1])
input_ndims = input_shape.ndims
if dim < -input_ndims - 1 or dim > input_ndims:
raise ValueError(
"dim %d not in [%d, %d]." % (dim, -input_ndims, input_ndims))
if dim < 0:
dim += (input_ndims + 1)
result_shape = list(input_shape.dims)
result_shape.insert(dim, 1)
return [tensor_shape.TensorShape(result_shape)]
@ops.RegisterShape("Squeeze")
def _SqueezeShape(op):
"""Determine shape for squeeze op's output tensor.
Args:
op: Operation for which to determine shape.
Returns:
Shape of op's output tensor.
Raises:
ValueError: if squeeze_dims includes a dimension outside of [-rank, rank),
where rank is the number of dimensions in the input tensor. Or, if
squeeze_dims includes a dimension for which input shape has a value
not equal to 1.
"""
input_shape = op.inputs[0].get_shape()
if input_shape.dims is None:
return [tensor_shape.unknown_shape()]
squeeze_dims = op.get_attr("squeeze_dims") or []
wrapped_squeeze_dims = []
input_ndims = input_shape.ndims
for i, squeeze_dim in enumerate(squeeze_dims):
if squeeze_dim < -input_ndims or squeeze_dim >= input_ndims:
raise ValueError(
"squeeze_dims[%d]=%d not in [%d, %d)." % (
i, squeeze_dim, -input_ndims, input_ndims))
if squeeze_dim < 0:
squeeze_dim += input_ndims
wrapped_squeeze_dims.append(squeeze_dim)
result_shape = []
for i, dim in enumerate([d.value for d in input_shape.dims]):
is_explicit_match = i in wrapped_squeeze_dims
if is_explicit_match or not wrapped_squeeze_dims:
if dim is None:
return [tensor_shape.unknown_shape()]
if dim != 1:
if is_explicit_match:
raise ValueError(
"Can not squeeze dim[%d], expected a dimension of 1, got %d." % (
i, dim))
result_shape.append(dim)
else:
result_shape.append(dim)
return [tensor_shape.TensorShape(result_shape)]
@ops.RegisterShape("Reshape")
def _ReshapeShape(op):
"""Shape function for Reshape op."""
input_shape = op.inputs[0].get_shape()
new_shape_shape = op.inputs[1].get_shape().with_rank_at_most(1)
new_shape = tensor_util.ConstantValue(op.inputs[1])
if new_shape is None:
# Attempt to infer the rank of the output from the length of
# new_shape.
return [tensor_shape.unknown_shape(ndims=new_shape_shape.num_elements())]
new_shape = np.reshape(new_shape, -1).tolist()
if -1 not in new_shape:
# The new shape is fully defined.
return [tensor_shape.TensorShape(new_shape)]
elif input_shape.is_fully_defined():
# We know the input shape, so we can calculate the missing
# dimension in the new_shape.
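# For example (illustrative): an input of shape [3, 4] (12 elements)
# reshaped with new_shape [-1, 6] gives an inferred output shape of [2, 6].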
num_elements = 1
for dim in input_shape.dims:
num_elements *= dim.value
known_elements = 1
unknown_index = None
for i, dim in enumerate(new_shape):
if dim == -1:
unknown_index = i
else:
known_elements *= dim
if known_elements == 0:
raise ValueError("cannot infer the missing input size for "
"an empty tensor unless all specified "
"input sizes are non-zero")
if num_elements % known_elements != 0:
raise ValueError("input has %s elements, which isn't divisible by %d" %
(num_elements, known_elements))
new_shape[unknown_index] = num_elements / known_elements
return [tensor_shape.TensorShape(new_shape)]
else:
# We don't know the input shape, but we know n-1 of the dimensions
# in the new shape.
new_shape[new_shape.index(-1)] = None
return [tensor_shape.TensorShape(new_shape)]
@ops.RegisterShape("BroadcastGradientArgs")
def _BroadcastGradientArgsShape(op):
"""Shape function for the BroadcastGradientArgs op."""
# TODO(mrry): Implement ConstantValue for BroadcastGradientArgs?
op.inputs[0].get_shape().assert_has_rank(1)
op.inputs[1].get_shape().assert_has_rank(1)
return [tensor_shape.vector(None), tensor_shape.vector(None)]
@ops.RegisterShape("Fill")
def _FillShape(op):
"""Shape function for the Fill op.
This op takes a vector of dimensions and a scalar, and produces a
tensor with the given dimensions.
Args:
op: A Fill Operation.
Returns:
A single-element list containing the shape of the output.
"""
dimensions_shape = op.inputs[0].get_shape().with_rank_at_most(1)
op.inputs[1].get_shape().assert_is_compatible_with(tensor_shape.scalar())
fill_dims = tensor_util.ConstantValue(op.inputs[0])
if fill_dims is None:
# Attempt to infer the rank of the output from the length of
# dimensions.
return [tensor_shape.unknown_shape(ndims=dimensions_shape.num_elements())]
else:
return [tensor_shape.TensorShape(fill_dims.tolist())]
@ops.RegisterShape("InvertPermutation")
def _InvertPermutationShape(op):
"""Shape function for the InvertPermutation op."""
return [op.inputs[0].get_shape().with_rank(1)]
@ops.RegisterShape("ListDiff")
def _ListDiffShape(op):
"""Shape function for the ListDiff op."""
op.inputs[0].get_shape().assert_has_rank(1)
op.inputs[1].get_shape().assert_has_rank(1)
# TODO(mrry): Indicate that the length falls within an interval?
return [tensor_shape.vector(None)] * 2
@ops.RegisterShape("Pad")
def _PadShape(op):
"""Shape function for the Pad op.
This op has two inputs:
* input: A rank-N tensor.
* paddings: An N-by-2 matrix, in which the i^th row contains the
number of padding elements to add before and after `input` in the
i^th dimension.
It has one output, which has the same rank as input, and additional
elements according to the values in paddings.
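For example, an input of shape `[2, 3]` with paddings `[[1, 1], [2, 2]]`
yields an output of shape `[4, 7]`.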
Args:
op: A Pad Operation.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the input shapes are incompatible.
"""
paddings_shape = op.inputs[1].get_shape().with_rank(2)
input_shape = op.inputs[0].get_shape()
if input_shape.ndims == 0 and paddings_shape[0].value == 1:
# TODO(irving): Remove once !kAllowLegacyScalars.
input_shape = tensor_shape.TensorShape([1])
else:
input_shape = input_shape.with_rank(paddings_shape[0].value)
paddings_shape = paddings_shape.merge_with(
tensor_shape.matrix(input_shape.ndims, 2))
paddings = tensor_util.ConstantValue(op.inputs[1])
if paddings is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
else:
output_dims = []
for i, dim in enumerate(input_shape.dims):
if paddings[i, 0] < 0 or paddings[i, 1] < 0:
raise ValueError("paddings must be non-negative")
output_dims.append(dim + paddings[i, 0] + paddings[i, 1])
return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("ReverseSequence")
def _ReverseSequenceShape(op):
"""Shape function for the ReverseSequence op.
This op has two inputs:
* input: A rank-N tensor with size B in the 0th dimension.
* seq_lens: A vector of length B.
It has one output, with the same size as input.
Args:
op: A ReverseSequence Operation.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the input shapes are incompatible.
"""
input_shape = op.inputs[0].get_shape()
seq_lens_shape = op.inputs[1].get_shape().with_rank(1)
batch_size = input_shape[0].merge_with(seq_lens_shape[0])
input_shape = tensor_shape.TensorShape([batch_size]).concatenate(
input_shape[1:])
seq_dim = op.get_attr("seq_dim")
if seq_dim >= input_shape.ndims:
raise ValueError("seq_dim must be < input.dims() (%d vs %d)" %
(seq_dim, input_shape.ndims))
return [input_shape]
@ops.RegisterShape("Shape")
def _ShapeShape(op):
"""Shape function for the Shape op."""
input_shape = op.inputs[0].get_shape()
return [tensor_shape.vector(input_shape.ndims)]
@ops.RegisterShape("Transpose")
def _TransposeShape(op):
"""Shape function for the Transpose op.
This op takes two inputs:
* input: a rank-N tensor of arbitrary shape.
* shuffle: a length-N vector.
Its output is the rank-N tensor computed by permuting the dimensions
of input according to shuffle.
Args:
op: A Transpose op.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the shapes of input and shuffle are incompatible.
IndexError: If shuffle contains an index that is >= the rank of input.
"""
input_shape = op.inputs[0].get_shape()
transpose_shape = op.inputs[1].get_shape().merge_with(tensor_shape.vector(
input_shape.ndims))
transpose_vec = tensor_util.ConstantValue(op.inputs[1])
if transpose_vec is None:
return [tensor_shape.unknown_shape(ndims=transpose_shape[0].value)]
else:
return [tensor_shape.TensorShape([input_shape[i]
for i in transpose_vec.tolist()])]
@ops.RegisterShape("Split")
def _SplitShape(op):
"""Shape function for the Split op."""
split_dim = tensor_util.ConstantValue(op.inputs[0])
num_split = len(op.outputs)
input_shape = op.inputs[1].get_shape()
if split_dim is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)] * num_split
else:
split_dim = int(split_dim)
input_shape = input_shape.with_rank_at_least(split_dim + 1)
if not (input_shape[split_dim] % num_split).is_compatible_with(0):
raise ValueError(
"Number of ways to split should evenly divide the split "
"dimension but got split_dim %d (size = %d) and num_split %d" %
(split_dim, input_shape[split_dim].value, num_split))
prefix = input_shape[:split_dim]
size_in_split_dim = input_shape[split_dim] / num_split
suffix = input_shape[split_dim + 1:]
output_shape = prefix.concatenate(size_in_split_dim).concatenate(suffix)
return [output_shape] * num_split
@ops.RegisterShape("Tile")
def _TileShape(op):
"""Shape function for the Tile op.
This op has two inputs:
* input: A rank-N tensor.
* multiples: A length-N vector, in which the i^th element contains
the factor by which `input` will be tiled in the i^th dimension.
It has one output, which has the same rank as input, and additional
elements according to the values in multiples.
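For example, an input of shape `[2, 3]` with multiples `[3, 2]` yields an
output of shape `[6, 6]`.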
Args:
op: A Tile Operation.
Returns:
A single-element list containing the shape of the output.
"""
multiples_shape = op.inputs[1].get_shape().with_rank_at_most(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape.num_elements())
multiples = tensor_util.ConstantValue(op.inputs[1])
if multiples is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
else:
output_dims = []
multiples = multiples.ravel()
for i, dim in enumerate(input_shape.dims):
output_dims.append(dim * multiples[i])
return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("TileGrad")
def _TileGradShape(op):
"""Shape function for the TileGrad op."""
multiples_shape = op.inputs[1].get_shape().with_rank_at_most(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape.num_elements())
multiples = tensor_util.ConstantValue(op.inputs[1])
if multiples is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
else:
output_dims = []
for i, dim in enumerate(input_shape.dims):
output_dims.append(dim / multiples[i])
return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("Where")
def _WhereShape(op):
"""Shape function for the Where op."""
input_shape = op.inputs[0].get_shape()
return [tensor_shape.matrix(None, input_shape.ndims)]
@ops.RegisterShape("ZerosLike")
def _ZerosLikeShape(op):
"""Shape function for the ZerosLike op."""
return [op.inputs[0].get_shape()]
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"]
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
length of `truth.`
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(hypothesis, ops.SparseTensor):
raise TypeError("Hypothesis must be a SparseTensor")
if not isinstance(truth, ops.SparseTensor):
raise TypeError("Truth must be a SparseTensor")
return gen_array_ops._edit_distance(hypothesis.indices,
hypothesis.values,
hypothesis.shape,
truth.indices,
truth.values,
truth.shape,
normalize=normalize,
name=name)
@ops.RegisterShape("EditDistance")
def _EditDistanceShape(op):
"""Shape function for the EditDistance op."""
hypothesis_shape = tensor_util.ConstantValue(op.inputs[2])
truth_shape = tensor_util.ConstantValue(op.inputs[5])
if hypothesis_shape is not None and truth_shape is not None:
if len(hypothesis_shape) != len(truth_shape):
raise ValueError(
"Inconsistent ranks in hypothesis and truth. Saw shapes: %s and %s" %
(str(hypothesis_shape), str(truth_shape)))
return [tensor_shape.TensorShape(
[max(h, t) for h, t in zip(hypothesis_shape[:-1], truth_shape[:-1])])]
return [tensor_shape.unknown_shape()]
# The remaining ops do not change the shape of their inputs.
@ops.RegisterShape("Quantize")
@ops.RegisterShape("Dequantize")
def _QuantizeDequantizeShape(op):
unused_min_range = op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
unused_max_range = op.inputs[2].get_shape().merge_with(tensor_shape.scalar())
return common_shapes.unchanged_shape(op)
| apache-2.0 |
myjang0507/updatesource | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by comm/pid and syscall.
# If a [comm] or [pid] arg is specified, only syscalls from that task are displayed.
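# Example invocations (illustrative):
#   perf script -s syscall-counts-by-pid.py          # all tasks
#   perf script -s syscall-counts-by-pid.py 1234     # only events for pid 1234
#   perf script -s syscall-counts-by-pid.py sshd     # only events for comm 'sshd'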
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
tempbottle/kosmosfs-1 | scripts.solaris/kfsshell.py | 18 | 2037 | #!/usr/bin/python
#
# $Id: kfsshell.py 24 2007-09-27 07:17:06Z sriramsrao $
#
# Copyright 2007 Kosmix Corp.
#
# This file is part of Kosmos File System (KFS).
#
# Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Script that launches KfsShell: get the location of the metaserver
# from the machines.cfg file and launch KfsShell
#
# Look for <bin-dir>/tools/KfsShell
#
# Use machines.cfg
#
import os,os.path,sys,getopt
from ConfigParser import ConfigParser
def usage():
print "%s [-f, --file <machines.cfg>] [ -b, --bin ]\n" % sys.argv[0]
if __name__ == '__main__':
(opts, args) = getopt.getopt(sys.argv[1:], "b:f:h",
["bin=", "file=", "help"])
op = ""
filename = ""
bindir = ""
for (o, a) in opts:
if o in ("-h", "--help"):
usage()
sys.exit(2)
if o in ("-f", "--file"):
filename = a
elif o in ("-b", "--bin"):
bindir = a
if not os.path.exists(filename):
print "%s : config file doesn't exist\n" % filename
sys.exit(-1)
if not os.path.exists(bindir):
print "%s : bindir doesn't exist\n" % bindir
sys.exit(-1)
config = ConfigParser()
config.readfp(open(filename, 'r'))
if not config.has_section('metaserver'):
raise config.NoSectionError, "No metaserver section"
node = config.get('metaserver', 'node')
port = config.getint('metaserver', 'baseport')
cmd = "%s/tools/KfsShell -s %s -p %d" % (bindir, node, port)
os.system(cmd)
| apache-2.0 |
marinho/geraldo | geraldo/generators/base.py | 4 | 38749 | import random, shelve, os
from decimal import Decimal
from geraldo.utils import get_attr_value, calculate_size, memoize
from geraldo.widgets import Widget, Label, SystemField
from geraldo.graphics import Graphic, RoundRect, Rect, Line, Circle, Arc,\
Ellipse, Image
from geraldo.barcodes import BarCode
from geraldo.base import GeraldoObject, ManyElements
from geraldo.cache import CACHE_BY_QUERYSET, CACHE_BY_RENDER, CACHE_DISABLED,\
make_hash_key, get_cache_backend
from geraldo.charts import BaseChart
from geraldo.exceptions import AbortEvent
class ReportPage(GeraldoObject):
rect = None
_elements = None
width = None
randomic_number = None
def __init__(self):
self._elements = []
self.randomic_number = str(random.randint(1, 999999)).zfill(6)
def get_children(self):
return self._elements
def add_element(self, el):
"""In future this method can be used to store pages on disk and reduce
memory consuming"""
self._elements.append(el)
@property
def elements(self):
"""In future this method can be used to restore pages from disk and reduce
memory consuming"""
for el in self._elements:
yield el
@memoize
def repr_for_cache_hash_key(self):
return '/'.join([el.repr_for_cache_hash_key() for el in self.elements
if hasattr(el, 'repr_for_cache_hash_key')])
class ReportGenerator(GeraldoObject):
"""A report generator is used to generate a report to a specific format."""
cache_enabled = None
first_page_number = 1
variables = None
return_pages = False
_is_first_page = True
_is_latest_page = True
_current_top_position = 0
_current_left_position = 0
_current_page_number = 1 # This variable is only used while generating, so keep in mind it
# doesn't hold the current page number while rendering
_current_object = None
_current_queryset = None
_generation_datetime = None
_highest_height = 0
# Groupping
_groups_values = None
_groups_working_values = None
_groups_changed = None
_groups_stack = None
# The rendered report has pages, each page is a ReportPage instance
_rendered_pages = None
_page_rect = None
def __init__(self, report, first_page_number=1, variables=None, return_pages=False,
pages=None, **kwargs):
"""This method should be overrided to receive others arguments"""
self.report = report
# Initializes some attributes
self._rendered_pages = pages or []
self._current_page_number = len(self._rendered_pages)
self._groups_values = {}
self._groups_working_values = {}
self._groups_changed = {}
self._groups_stack = []
self.first_page_number = first_page_number
self.variables = variables or self.variables or {}
self.return_pages = return_pages
def get_children(self):
return self._rendered_pages
def execute(self):
"""This method must be overrided to execute the report generation."""
# Initializes pages
self._is_first_page = True
def render_border(self, borders_dict, rect_dict):
"""Renders a border in the coordinates setted in the rect."""
b_all = borders_dict.get('all', None)
if b_all:
graphic = isinstance(b_all, Graphic) and b_all or Rect()
graphic.set_rect(
left=rect_dict['left'],
top=rect_dict['top'] - rect_dict['height'],
width=rect_dict['right'] - rect_dict['left'],
height=rect_dict['height'],
)
# If border is a number, it is recognized as the stroke width
if isinstance(b_all, (int, float)):
graphic.stroke_width = b_all
self._rendered_pages[-1].add_element(graphic)
b_left = borders_dict.get('left', None)
if b_left:
graphic = isinstance(b_left, Graphic) and b_left or Line()
graphic.set_rect(
left=rect_dict['left'], top=rect_dict['top'],
right=rect_dict['left'], bottom=rect_dict['bottom']
)
# If border is a number, it is recognized as the stroke width
if isinstance(b_left, (int, float)):
graphic.stroke_width = b_left
self._rendered_pages[-1].add_element(graphic)
b_top = borders_dict.get('top', None)
if b_top:
graphic = isinstance(b_top, Graphic) and b_top or Line()
graphic.set_rect(
left=rect_dict['left'], top=rect_dict['top'],
right=rect_dict['right'], bottom=rect_dict['top']
)
# If border is a number, it is recognized as the stroke width
if isinstance(b_top, (int, float)):
graphic.stroke_width = b_top
self._rendered_pages[-1].add_element(graphic)
b_right = borders_dict.get('right', None)
if b_right:
graphic = isinstance(b_right, Graphic) and b_right or Line()
graphic.set_rect(
left=rect_dict['right'], top=rect_dict['top'],
right=rect_dict['right'], bottom=rect_dict['bottom']
)
# If border is a number, it is recognized as the stroke width
if isinstance(b_right, (int, float)):
graphic.stroke_width = b_right
self._rendered_pages[-1].add_element(graphic)
b_bottom = borders_dict.get('bottom', None)
if b_bottom:
graphic = isinstance(b_right, Graphic) and b_right or Line()
graphic.set_rect(
left=rect_dict['left'], top=rect_dict['bottom'],
right=rect_dict['right'], bottom=rect_dict['bottom']
)
# If border is a number, it is recognized as the stroke width
if isinstance(b_bottom, (int, float)):
graphic.stroke_width = b_bottom
self._rendered_pages[-1].add_element(graphic)
def make_band_rect(self, band, top_position, left_position):
"""Returns the right band rect on the PDF canvas"""
band_rect = {
'left': left_position, #self.report.margin_left,
'top': top_position,
'right': left_position + self.calculate_size(band.width), #self.report.page_size[0] - self.report.margin_right,
'bottom': top_position - self.calculate_size(band.height),
'height': self.calculate_size(band.height),
}
return band_rect
def make_widget_rect(self, widget, band_rect):
"""Returns the right widget rect on the PDF canvas"""
widget_rect = {
'left': band_rect['left'] + calculate_size(widget.left),
'top': band_rect['top'] - calculate_size(widget.top),
'right': band_rect['left'] + calculate_size(widget.left) + calculate_size(widget.width),
'bottom': band_rect['top'] - calculate_size(widget.top) + calculate_size(widget.height),
'height': calculate_size(widget.height),
'width': calculate_size(widget.width),
}
return widget_rect
def render_element(self, element, current_object, band, band_rect, temp_top,
top_position):
# Doesn't render not visible element
if not element.visible:
return
# Widget element
if isinstance(element, Widget):
widget = element.clone()
# Set widget colors
widget.font_color = self.report.default_font_color
# Set widget basic attributes
widget.instance = current_object
widget.generator = self
widget.report = self.report # This should be done by a metaclass in Band domain TODO
widget.band = band # This should be done by a metaclass in Band domain TODO
widget.page = self._rendered_pages[-1]
# Border rect
widget_rect = self.make_widget_rect(widget, band_rect)
if isinstance(widget, SystemField):
widget.left = band_rect['left'] + self.calculate_size(widget.left)
widget.top = self.calculate_top(temp_top, self.calculate_size(widget.top))
temp_height = self.calculate_size(element.top) + self.calculate_size(widget.height)
elif isinstance(widget, Label):
para = self.make_paragraph(widget.text, self.make_paragraph_style(band, widget.style))
if widget.truncate_overflow:
self.keep_in_frame(
widget,
self.calculate_size(widget.width),
self.calculate_size(widget.height),
[para],
mode='truncate',
)
widget.left = band_rect['left'] + self.calculate_size(widget.left)
widget.top = self.calculate_top(temp_top, self.calculate_size(widget.top), self.calculate_size(widget.height))
else:
self.wrap_paragraph_on(para, self.calculate_size(widget.width), self.calculate_size(widget.height))
widget.left = band_rect['left'] + self.calculate_size(widget.left)
widget.top = self.calculate_top(temp_top, self.calculate_size(widget.top), self.calculate_size(para.height))
temp_height = self.calculate_size(element.top) + self.calculate_size(para.height)
else:
temp_height = self.calculate_size(element.top) + self.calculate_size(widget.height)
# Sets element height as the highest
if temp_height > self._highest_height:
self._highest_height = temp_height
self._rendered_pages[-1].add_element(widget)
# Borders
self.render_border(widget.borders or {}, widget_rect)
# Graphic element
elif isinstance(element, Graphic):
graphic = element.clone()
# Set widget basic attributes
graphic.instance = current_object
graphic.generator = self
graphic.report = self.report # This should be done by a metaclass in Band domain TODO
graphic.band = band # This should be done by a metaclass in Band domain TODO
graphic.page = self._rendered_pages[-1]
# Set graphic colors
graphic.fill_color = graphic.fill_color or self.report.default_fill_color
graphic.stroke_color = graphic.stroke_color or self.report.default_stroke_color
if isinstance(graphic, RoundRect):
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
elif isinstance(graphic, Rect):
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
elif isinstance(graphic, Line):
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top)
graphic.right = band_rect['left'] + self.calculate_size(graphic.right)
graphic.bottom = top_position - self.calculate_size(graphic.bottom)
elif isinstance(graphic, Circle):
graphic.left_center = band_rect['left'] + self.calculate_size(graphic.left_center)
graphic.top_center = top_position - self.calculate_size(graphic.top_center)
elif isinstance(graphic, Arc):
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top)
graphic.right = band_rect['left'] + self.calculate_size(graphic.right)
graphic.bottom = top_position - self.calculate_size(graphic.bottom)
elif isinstance(graphic, Ellipse):
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top)
graphic.right = band_rect['left'] + self.calculate_size(graphic.right)
graphic.bottom = top_position - self.calculate_size(graphic.bottom)
elif isinstance(graphic, Image):
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
elif isinstance(graphic, BarCode):
barcode = graphic.render()
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
self.wrap_barcode_on(barcode, graphic.width, graphic.height)
elif isinstance(element, BaseChart):
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
# Sets element height as the highest
temp_height = self.calculate_size(element.top) + self.calculate_size(graphic.height)
if temp_height > self._highest_height:
self._highest_height = temp_height
self._rendered_pages[-1].add_element(graphic)
# Many elements
elif isinstance(element, ManyElements):
# Set widget basic attributes
element.instance = current_object
element.generator = self
element.report = self.report # This should be done by a metaclass in Band domain TODO
element.band = band # This should be done by a metaclass in Band domain TODO
element.page = self._rendered_pages[-1]
# Get the elements and render them
for el in element.get_elements():
self.render_element(el, current_object, band, band_rect, temp_top, top_position)
def render_band(self, band, top_position=None, left_position=None,
update_top=True, current_object=None):
"""Generate a band having the current top position or informed as its
top coordinate"""
# Calls the before_print event
try:
band.do_before_print(generator=self)
except AbortEvent:
return False
# Sets the current object
current_object = current_object or self._current_object
# Page width. This should be done in a metaclass in Report domain TODO
self._rendered_pages[-1].width = self.calculate_size(self.report.page_size[0]) -\
self.calculate_size(self.report.margin_left) - self.calculate_size(self.report.margin_right)
# Default value for band width
band.width = self.calculate_size(band.width) or self._rendered_pages[-1].width
# Coordinates
left_position = left_position or self.get_left_pos()
# Increases the top position when being an inline displayed detail band
if left_position > self.calculate_size(self.report.margin_left) and\
getattr(band, 'display_inline', False) and\
band.width < self.get_available_width():
temp_height = band.height + getattr(band, 'margin_top', 0) + getattr(band, 'margin_bottom', 0)
self.update_top_pos(decrease=self.calculate_size(temp_height))
else:
self.update_left_pos(set_position=0)
left_position = self.get_left_pos()
temp_top = top_position = top_position or self.get_top_pos()
# Calculates the band dimensions on the canvas
band_rect = self.make_band_rect(band, top_position, left_position)
# Band borders
self.render_border(band.borders, band_rect)
# Variable that stores the highest height at all elements
self._highest_height = 0
# Loop at band widgets
for element in band.elements:
self.render_element(element, current_object, band, band_rect, temp_top,
top_position)
# Updates top position
if update_top:
if band.auto_expand_height:
band_height = self._highest_height
else:
band_height = self.calculate_size(band.height)
band_height += self.calculate_size(getattr(band, 'margin_top', 0))
band_height += self.calculate_size(getattr(band, 'margin_bottom', 0))
self.update_top_pos(band_height)
# Updates left position
if getattr(band, 'display_inline', False):
self.update_left_pos(band.width + self.calculate_size(getattr(band, 'margin_right', 0)))
else:
self.update_left_pos(set_position=0)
# Child bands
for child_band in band.child_bands or []: # TODO This "or []" here is a quickfix
# Doesn't generate if it is not visible
if not child_band.visible:
continue
self.in_tests = True # XXX
self.force_blank_page_by_height(self.calculate_size(child_band.height))
self.render_band(child_band)
# Calls the after_print event
band.do_after_print(generator=self)
return True
def force_blank_page_by_height(self, height):
"""Check if the height is in client available report height and
makes a new page if necessary"""
if Decimal(str(self.get_available_height())) < Decimal(str(height)):
self.start_new_page()
return True
return False
def append_new_page(self):
self._rendered_pages.append(ReportPage())
def start_new_page(self, with_header=True):
"""Starts a new blank page"""
# Ends the current page
self._current_top_position = 0
# Starts a new one
self.append_new_page()
self.report.do_on_new_page(
page=self._rendered_pages[-1],
page_number=len(self._rendered_pages) + self.first_page_number - 1,
generator=self,
)
if with_header:
self.render_page_header()
# Page borders
if self.report.borders:
if not self._page_rect:
self._page_rect = self.report.get_page_rect()
self._page_rect['top'] = self.calculate_size(self.report.page_size[1]) - self._page_rect['top']
self._page_rect['bottom'] = self.calculate_size(self.report.page_size[1]) - self._page_rect['bottom']
self.render_border(self.report.borders, self._page_rect)
# Page footer
self.render_page_footer()
def render_begin(self):
"""Renders the report begin band if it exists"""
if not self.report.band_begin:
return
# Doesn't generate this band if it is not visible
if not self.report.band_begin.visible:
return
# Call method that print the band area and its widgets
self.render_band(self.report.band_begin)
def render_summary(self):
"""Generate the report summary band if it exists"""
if not self.report.band_summary:
return
# Doesn't generate this band if it is not visible
if not self.report.band_summary.visible:
return
# Clears groups stack
self._groups_stack = []
# Check to force new page if there is no available space
self.force_blank_page_by_height(self.calculate_size(self.report.band_summary.height))
# Call method that print the band area and its widgets
self.render_band(self.report.band_summary)
def render_page_header(self):
"""Generate the report page header band if it exists"""
if not self.report.band_page_header:
return
# Doesn't generate this band if it is not visible
if not self.report.band_page_header.visible:
return
# Call method that print the band area and its widgets
self.render_band(
self.report.band_page_header,
top_position=self.calculate_size(self.report.margin_top),
update_top=False,
)
def render_page_footer(self):
"""Generate the report page footer band if it exists"""
if not self.report.band_page_footer:
return
# Doesn't generate this band if it is not visible
if not self.report.band_page_footer.visible:
return
# Call method that print the band area and its widgets
self.render_band(
self.report.band_page_footer,
top_position=self.calculate_size(self.report.page_size[1]) -\
self.calculate_size(self.report.margin_bottom) -\
self.calculate_size(self.report.band_page_footer.height),
update_top=False,
)
def render_end_current_page(self):
"""Closes the current page, using page breaker constant. Everything done after
this will draw into a new page. Before this, using the generate_page_footer
method to draw the footer"""
self.render_page_footer()
if self._is_latest_page:
self.render_summary()
self._current_page_number += 1
self._is_first_page = False
self.update_top_pos(set_position=0) # <---- update top position
def render_bands(self):
"""Loops into the objects list to create the report pages until the end"""
# Preparing local auxiliar variables
self._current_page_number = self.report.first_page_number
self._current_object_index = 0
objects = self.report.get_objects_list()
# just an alias to make it shorter
d_band = self.report.band_detail
# Empty report
if self.report.print_if_empty and not objects:
self.start_new_page()
self.render_begin()
self.render_end_current_page()
# Loop for pages
while self._current_object_index < len(objects):
# Starts a new page and generates the page header band
self.start_new_page()
first_object_on_page = True
# Generate the report begin band
if self._is_first_page:
self.render_begin()
            # Don't iterate over the objects if there is no detail band: setting the
            # index past the end skips the inner loop entirely
if not d_band:
self._current_object_index = len(objects)
# Loop for objects to go into grid on current page
while self._current_object_index < len(objects):
# Get current object from list
self._current_object = objects[self._current_object_index]
# Renders group bands for changed values
self.calc_changed_groups(first_object_on_page)
if not first_object_on_page:
# The current_object of the groups' footers is the previous
# object, so we have access, in groups' footers, to the last
# object before the group breaking
self._current_object = objects[self._current_object_index-1]
self.render_groups_footers()
self._current_object = objects[self._current_object_index]
self.render_groups_headers(first_object_on_page)
# Generate this band only if it is visible
# - "done True" means band was rendered ok
# - "done False" means band rendering was aborted
# - "done None" means band didn't render, but wasn't aborted
if d_band.visible:
done = self.render_band(d_band)
else:
done = None
# Renders subreports
if done != False:
self.render_subreports()
# Next object
self._current_object_index += 1
first_object_on_page = False
                # Break if this page doesn't support anything more...
# ... if there is no more available height
if done != False:
if self.get_available_height() < self.calculate_size(d_band.height):
# right margin is not considered to calculate the necessary space
d_width = self.calculate_size(d_band.width) + self.calculate_size(getattr(d_band, 'margin_left', 0))
# ... and this is not an inline displayed detail band or there is no width available
if not getattr(d_band, 'display_inline', False) or self.get_available_width() < d_width:
break
# ... or this band forces a new page and this is not the last object in objects list
elif d_band.force_new_page and self._current_object_index < len(objects):
break
            # Sets whether this is the latest page or not
self._is_latest_page = self._current_object_index >= len(objects)
            # Renders the final group footer bands
if self._is_latest_page:
self.calc_changed_groups(False)
self.render_groups_footers(force=True)
            # Ends the current page, printing the footer and summary if necessary
self.render_end_current_page()
# Breaks if this is the latest item
if self._is_latest_page:
break
# Increment page number
self._current_page_number += 1
def calculate_size(self, size):
"""Uses the function 'calculate_size' to calculate a size"""
return calculate_size(size)
def get_left_pos(self):
"""Returns the left position of the drawer. Is useful on inline displayed detail bands"""
return self.calculate_size(self.report.margin_left) + self._current_left_position
def get_available_width(self):
return self.calculate_size(self.report.page_size[0]) - self.calculate_size(self.report.margin_left) -\
self.calculate_size(self.report.margin_right) - self._current_left_position
def calculate_top(self, *args):
return sum(args)
def get_top_pos(self):
"""We use this to use this to get the current top position,
considering also the top margin."""
ret = self.calculate_size(self.report.margin_top) + self._current_top_position
if self.report.band_page_header:
ret += self.calculate_size(self.report.band_page_header.height)
return ret
def get_available_height(self):
"""Returns the available client height area from the current top position
until the end of page, considering the bottom margin."""
ret = self.calculate_size(self.report.page_size[1]) - self.calculate_size(self.report.margin_bottom) -\
self.calculate_size(self.report.margin_top) - self._current_top_position
if self.report.band_page_header:
ret -= self.calculate_size(self.report.band_page_header.height)
if self.report.band_page_footer:
ret -= self.calculate_size(self.report.band_page_footer.height)
return ret
def update_top_pos(self, increase=0, decrease=0, set_position=None):
"""Updates the current top position controller, increasing (by default),
decreasing or setting it with a new value."""
if set_position is not None:
self._current_top_position = set_position
else:
self._current_top_position += increase
self._current_top_position -= decrease
return self._current_top_position
def update_left_pos(self, increase=0, decrease=0, set_position=None):
"""Updates the current left position controller, increasing (by default),
decreasing or setting it with a new value."""
if set_position is not None:
self._current_left_position = set_position
else:
self._current_left_position += increase
self._current_left_position -= decrease
return self._current_left_position
def get_page_count(self):
"""Calculate and returns the page count for this report. The challenge
here is do this calculate before to generate the pages."""
return len(self._rendered_pages)
def make_paragraph(self, text, style=None):
"""Uses the Paragraph class to return a new paragraph object"""
raise Exception('Not implemented')
def wrap_paragraph_on(self, paragraph, width, height):
"""Wraps the paragraph on the height/width informed"""
raise Exception('Not implemented')
def wrap_barcode_on(self, barcode, width, height):
"""Wraps the barcode on the height/width informed"""
raise Exception('Not implemented')
# Stylizing
def set_fill_color(self, color):
"""Sets the current fill on canvas. Used for fonts and shape fills"""
pass
def set_stroke_color(self, color):
"""Sets the current stroke on canvas"""
pass
def set_stroke_width(self, width):
"""Sets the stroke/line width for shapes"""
pass
# Groups topic
def calc_changed_groups(self, force_no_changed=False):
"""Defines which groups has been changed their driver values to be
used to render group bands"""
changed = force_no_changed
# Stores the previous group values
self._groups_working_values = self._groups_values.copy()
        # Loops over the groups until the first changed one is found; all groups
        # below it are then considered changed as well
for group in self.report.groups:
# Gets the current value to compare with the old one
current_value = get_attr_value(self._current_object, group.attribute_name)
            # Set changed to True if it wasn't already and the value has changed
changed = changed or current_value != self._groups_values.get(group, None)
# Stores new values
self._groups_changed[group] = changed
self._groups_values[group] = current_value
# Appends to the stack
if changed:
self._groups_stack.append(group)
def render_groups_headers(self, first_object_on_page=False):
"""Renders the report headers using 'changed' definition calculated by
'calc_changed_groups'"""
# Update working values for groups
self._groups_working_values = self._groups_values
# Loops on groups to render changed ones
new_page = False
for group in self.report.groups:
if self._groups_changed.get(group, None):
# If there is no space for group header band, forces a new page
if group.band_header and group.band_header.visible:
new_page = self.force_blank_page_by_height(self.calculate_size(group.band_header.height))
# Forces a new page if this group is defined to do it
if not new_page and group.force_new_page and self._current_object_index > 0 and not first_object_on_page:
self.render_page_footer()
self.start_new_page()
# Renders the group header band
if group.band_header and group.band_header.visible:
self.render_band(group.band_header)
def render_groups_footers(self, force=False):
"""Renders the report footers using previous 'changed' definition calculated by
'calc_changed_groups'"""
# Loops on groups to render changed ones
for group in reversed(self.report.groups):
if force or ( self._groups_changed.get(group, None) and\
self._groups_stack and\
self._groups_stack[-1] == group ):
if group.band_footer and group.band_footer.visible:
self.force_blank_page_by_height(self.calculate_size(group.band_footer.height))
self.render_band(group.band_footer)
if self._groups_stack:
self._groups_working_values.pop(self._groups_stack[-1])
self._groups_stack.pop()
def get_current_queryset(self):
"""Returns the current queryset. This solves a problem with subreports
footers and headers, and solves also flexibility and customization issues."""
# Customized and SubReports
if self._current_queryset is not None:
return self._current_queryset
# Groups
elif self._groups_stack:
return self.get_objects_in_group()
        # Default detail driver queryset
return self.report.queryset
def get_objects_in_group(self):
"""Returns objects filtered in the current group or all if there is no
group"""
filter_dict = dict([(group.attribute_name, value) for group, value in self._groups_working_values.items()])
def filter_object(obj):
for k,v in filter_dict.items():
if get_attr_value(obj, k) != v:
return False
            return True  # all group-attribute filters matched
return filter(filter_object, self.report.queryset)
# SubReports
def render_subreports(self):
"""Renders subreports bands for the current object in, usings its
own queryset.
For a while just the detail band is rendered. Maybe in future we
change this to accept header and footer."""
def force_new_page(height):
# Forces new page if there is no available space
if self.get_available_height() < self.calculate_size(height):
self.render_page_footer()
self.start_new_page()
for subreport in self.report.subreports:
# Subreports must have detail band
if not subreport.band_detail or not subreport.visible:
continue
            # Sets the parent object, which automatically clears the queryset
            # in memory
subreport.parent_object = self._current_object
            # Sets the temporary current queryset
self._current_queryset = subreport.get_objects_list()
            # Loop over the objects
for num, obj in enumerate(self._current_queryset):
# Renders the header band
if num == 0 and subreport.band_header:
# Forces new page if there is no available space
force_new_page(subreport.band_header.height)
# Renders the header band
if subreport.band_header.visible:
self.render_band(subreport.band_header)
# Forces new page if there is no available space
force_new_page(subreport.band_detail.height)
# Renders the detail band
if subreport.band_detail.visible:
self.render_band(subreport.band_detail, current_object=obj)
# Renders the footer band
if subreport.band_footer:
# Forces new page if there is no available space
force_new_page(subreport.band_footer.height)
                    # Renders the footer band
if subreport.band_footer.visible:
self.render_band(subreport.band_footer)
            # Sets back the default current queryset
self._current_queryset = None
def make_paragraph_style(self, band, style=None):
"""Merge report default_style + band default_style + widget style"""
raise Exception('Not implemented')
def keep_in_frame(self, widget, width, height, paragraphs, mode):
raise Exception('Not implemented')
def fetch_from_cache(self):
if self.report.cache_status == CACHE_BY_QUERYSET:
hash_key = self.get_hash_key(self.report.queryset)
elif self.report.cache_status == CACHE_BY_RENDER:
hash_key = self.get_hash_key(self._rendered_pages)
cache = self.get_cache_backend()
buffer = cache.get(hash_key)
if buffer:
# Write to file stream
if hasattr(self.filename, 'write') and callable(self.filename.write):
self.filename.write(buffer)
return True
# Write to file path
elif isinstance(self.filename, basestring):
fp = file(self.filename, 'w')
fp.write(buffer)
fp.close()
return True
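        # Nothing cached (or unsupported filename type): fall through and return None,
        # which callers treat as a cache miss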
def cached_before_render(self):
"""Check and loads the generated report from caching system before call method
'render_bands'"""
if not self.cache_enabled or self.report.cache_status != CACHE_BY_QUERYSET:
return False
return self.fetch_from_cache()
def cached_before_generate(self):
"""Check and loads the generated report from caching system before call method
'generate_pages'"""
if not self.cache_enabled or self.report.cache_status != CACHE_BY_RENDER:
return False
return self.fetch_from_cache()
def store_in_cache(self, content):
"""Sends the canvas content to write in the cache backend"""
if not self.cache_enabled or self.report.cache_status == CACHE_DISABLED:
return
if self.report.cache_status == CACHE_BY_QUERYSET:
hash_key = self.get_hash_key(self.report.queryset)
elif self.report.cache_status == CACHE_BY_RENDER:
hash_key = self.get_hash_key(self._rendered_pages)
cache = self.get_cache_backend()
return cache.set(hash_key, content)
def get_hash_key(self, objects):
"""Calculates the hash_key, appending/prepending something if necessary"""
return make_hash_key(self.report, objects)
def get_cache_backend(self):
return get_cache_backend(
self.report.cache_backend,
cache_file_root=self.report.cache_file_root,
)
| lgpl-3.0 |
hainm/open-forcefield-group | forcebalance/XMLConvert/sidechain/99sc-cnv.py | 2 | 22220 | #!/usr/bin/env python
# The purpose of this script is to port LP's optimized intramolecular parameters for AMBER99SB
# into the OpenMM XML format.
# The strategy is to:
# 1) Read the OpenMM XML file
# 2) Read the GROMACS .itp file to figure out the interactions defined using atom classes
# 3) Write the new OpenMM XML file
from simtk.openmm.app import *
import itertools
from copy import deepcopy
import lxml.etree as ET
import numpy as np
import os, sys, re
import networkx as nx
from collections import defaultdict, OrderedDict
# This version incorporates residue-specific side chain torsions.
# What a painful script to write!!
# Parse the original AMBER99SB XML file.
A99SB = ET.parse('/home/leeping/src/OpenMM/wrappers/python/simtk/openmm/app/data/amber99sb.xml')
# Use GromacsTopFile to read the optimized .itp file.
ITP = GromacsTopFile('/home/leeping/projects/VSP27-Protein/Dihedrals/Optimize/AMBER99SC/optimize.r1.mmc3/a99sc-v2.itp')
# Read the residue definitions with specific dihedral interactions.
RTP = '/home/leeping/opt/gromacs/share/gromacs/top/a99sc.ff/aminoacids.rtp'
# Parsed [ bondtypes ], [ angletypes ] and [ dihedraltypes ] sections
BT = ITP._bondTypes
AT = ITP._angleTypes
DT = ITP._dihedralTypes
root = A99SB.getroot()
# Amino Acid Dihedral Quartets (by New Atom Classes) to Dihedral Parameters
AA_DSC = OrderedDict()
# Amino Acid Atom Names to OpenMM Atom Types
AA_OAt = OrderedDict()
# Gromacs Atom Names to Atom Class
GAnAt = {}
# OpenMM Atom Types to Atom Class
OAtAc = {}
# Manually constructed atom name replacements.
Atom_Rename = {'ILE':{'CD':'CD1'}}
for i,j in Atom_Rename.items():
Atom_Rename['C'+i] = j
Atom_Rename['N'+i] = j
# Mapping of amino acid atom names to new atom classes. Mostly new
# atom classes for beta carbons but a few new gamma carbons are
# defined. They are named using the number "6" (for carbon) and the
# one-letter amino acid code. A few exceptions in the case of alternate
# protonation states.
NewAC = {"SER":{"CB":"6S"},
"THR":{"CB":"6T", "CG2":"6t"},
"LEU":{"CB":"6L"},
"VAL":{"CB":"6V"},
"ILE":{"CB":"6I", "CG2":"6i"},
"ASN":{"CB":"6N"},
"GLN":{"CB":"6Q", "CG":"6q"},
"ARG":{"CB":"6R"},
"HID":{"CB":"6H"},
"HIE":{"CB":"6h"},
"HIP":{"CB":"6+"},
"TRP":{"CB":"6W"},
"TYR":{"CB":"6Y"},
"PHE":{"CB":"6F"},
"GLU":{"CB":"6E", "CG":"6e"},
"ASP":{"CB":"6D"},
"LYS":{"CB":"6K"},
"LYN":{"CB":"6k"},
"PRO":{"CB":"6P"},
"CYS":{"CB":"6C"},
"CYM":{"CB":"6c"},
"MET":{"CB":"6M"},
"ASH":{"CB":"6d"},
"GLH":{"CB":"6J", "CG":"6j"}}
for i in NewAC.keys():
NewAC['C'+i] = NewAC[i]
NewAC['N'+i] = NewAC[i]
# Obtain a canonicalized ordering of dihedral atom classes.
def get_ijkl(aci, acj, ack, acl):
if ack > acj:
acijkl = tuple([aci, acj, ack, acl])
elif ack < acj:
acijkl = tuple([acl, ack, acj, aci])
else:
if acl >= aci:
acijkl = tuple([aci, acj, ack, acl])
else:
acijkl = tuple([acl, ack, acj, aci])
return acijkl
# Read amino acid definitions;
# This is so we can figure out which atom class quartets receive
# the sidechain-specific dihedral parameters.
def ParseRTP(rtp):
for line in open(rtp).readlines():
line = line.split(';')[0].strip()
s = line.split()
if len(s) == 0: continue
if re.match('\[ .* \]$', line):
section = s[1]
if section not in ['bondedtypes', 'atoms', 'bonds', 'dihedrals', 'impropers']:
AA = section
GAnAt[AA] = dict()
elif section == 'atoms':
# The GROMACS atom types are the same as OpenMM atom classes
# and they should serve as the default atom class when we
# haven't defined a new one
GAnAt[AA][s[0]] = s[1]
elif section == 'dihedrals':
dihan = tuple(s[:4])
# Obtain the quartet of new atom classes corresponding to this particular dihedral interaction
aci, acj, ack, acl = [NewAC.get(AA, {}).get(an,GAnAt[AA][an]) for an in dihan]
acijkl = get_ijkl(aci, acj, ack, acl)
# Insert the dihedral parameters into AA_DSC (keyed by the quartet of new atom classes)
if acijkl not in AA_DSC:
AA_DSC[acijkl] = []
if ITP._defines[s[4]].split() not in AA_DSC[acijkl]:
AA_DSC[acijkl].append(ITP._defines[s[4]].split())
ParseRTP(RTP)
def almostequal(i, j, tol):
return np.abs(i-j) < tol
def get_periodn(elem, period):
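    # Return the numeric suffix of an existing 'periodicityN' attribute whose value
    # equals 'period'; if none matches, return the next unused suffix.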
nprd = 0
for i, j in elem.items():
if 'periodicity' in i:
nprd += 1
if j == period:
return i[-1]
return "%i" % (nprd + 1)
NewAtAc = deepcopy(OAtAc)
# Amino Acid Graphs
AA_Gphs = OrderedDict()
# A dictionary of atom classes to copy FROM -> [TO]
FrcCopy = defaultdict(list)
# A list of bond types
NeedBT = set()
NeedAT = set()
NeedDT = set()
NeedIT = set()
HaveBT = set()
HaveAT = set()
HaveDT = set()
HaveIT = set()
for force in root:
# Check for atom classes that are missing from the ITP file
if force.tag == 'AtomTypes':
for elem in force:
OAtAc[elem.attrib['name']] = elem.attrib['class']
if elem.attrib['class'] not in ITP._atomTypes:
print "Atom Type", elem.attrib['name'], "Class", elem.attrib['class'], "not present in ITP file"
# Residue processing
if force.tag == 'Residues':
for elem in force:
res = elem.attrib['name']
# Initialize NetworkX graph
AA_Gphs[res] = nx.Graph()
G = AA_Gphs[res]
# List of (new) atom classes for each atom in the residue
resAc = []
# Number of nodes in the graph
nn = 0
for subelem in elem:
# Atom tag: Create a NetworkX node
if subelem.tag == 'Atom':
G.add_node(nn)
G.node[nn]['name'] = subelem.attrib['name']
G.node[nn]['type'] = subelem.attrib['type']
G.node[nn]['class'] = OAtAc[subelem.attrib['type']]
nn += 1
# FrcCopy is a dictionary denoting which interactions will get copied
# due to atom class duplication (e.g. (CT, CT) -> (CT, 6S))
if res in NewAC and subelem.attrib['name'] in NewAC[res]:
FrcCopy[OAtAc[subelem.attrib['type']]].append(NewAC[res][subelem.attrib['name']])
# Create OpenMM atom type dictionary.
if res not in AA_OAt:
AA_OAt[res] = OrderedDict()
AA_OAt[res][subelem.attrib['name']] = subelem.attrib['type']
# resAc will be used to create pairs, triplets and quartets of atom types in this residue
# for constructing the NeedBT, NeedAT, NeedDT and NeedIT dictionaries
resAc.append(NewAC.get(res, {}).get(subelem.attrib['name'], OAtAc[subelem.attrib['type']]))
# Record of new atom type -> atom class mapping; used to rewrite AtomTypes section.
NewAtAc[subelem.attrib['type']] = NewAC.get(res, {}).get(subelem.attrib['name'], OAtAc[subelem.attrib['type']])
if subelem.tag == 'Bond':
# Add edges in the graph.
G.add_edge(int(subelem.attrib['from']), int(subelem.attrib['to']))
if subelem.tag == 'ExternalBond':
# Add edges in the graph for bonds to the next residue.
ifrom = int(subelem.attrib['from'])
if G.node[ifrom]['class'] == 'C':
G.add_node(nn)
G.node[nn]['name'] = 'N'
G.node[nn]['type'] = '706'
G.node[nn]['class'] = 'N'
G.add_edge(ifrom, nn)
resAc.append('N')
nn += 1
elif G.node[ifrom]['class'] == 'N':
G.add_node(nn)
G.node[nn]['name'] = 'C'
G.node[nn]['type'] = '711'
G.node[nn]['class'] = 'C'
G.add_edge(ifrom, nn)
resAc.append('C')
nn += 1
# For now, don't treat nucleic acids and disulfide bonds.
elif G.node[ifrom]['name'] in ['SG', 'P', "O3'"]:
pass
else:
print G.node[ifrom]
raise RuntimeError('Spoo!')
# Build the NeedBT, NeedAT, NeedDT and NeedIT sets.
# These are the atom class combinations for the bond / angle / dihedral
# interactions that actually occur in the residues.
# If we didn't have these as a filter, then one atom class that gets expanded to 30 (i.e. CT -> 6T, 6L, 6V, etc...)
# would suddenly blow up to a humongous number of dihedral types.
for edge in G.edges():
# Build a list of bond types in this residue.
NeedBT.add(tuple(sorted([resAc[edge[0]], resAc[edge[1]]])))
for a2 in list(G.nodes()):
# List of angle types in this residue.
# Find all bonded neighbors to this atom
friends = sorted(list(nx.all_neighbors(G, a2)))
if len(friends) < 2: continue
# Double loop over bonded neighbors
for i, a1 in enumerate(friends):
for a3 in friends[i+1:]:
c1, c2, c3 = (resAc[k] for k in [a1, a2, a3])
if c3 > c1:
NeedAT.add(tuple((c1, c2, c3)))
else:
NeedAT.add(tuple((c3, c2, c1)))
for edge in G.edges():
# List of proper dihedral types.
a2 = edge[0]
a3 = edge[1]
for a1 in sorted(list(nx.all_neighbors(G, a2))):
if a1 != a3:
for a4 in sorted(list(nx.all_neighbors(G, a3))):
if a4 != a2:
c1, c2, c3, c4 = (resAc[i] for i in [a1, a2, a3, a4])
ijkl = get_ijkl(c1, c2, c3, c4)
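                                # Record both atom-class orderings plus wildcard variants (blank outer
                                # classes) so later lookups match either direction or wildcard entries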
NeedDT.add((ijkl[0], ijkl[1], ijkl[2], ijkl[3]))
NeedDT.add((ijkl[3], ijkl[2], ijkl[1], ijkl[0]))
NeedDT.add(("", ijkl[1], ijkl[2], ""))
NeedDT.add(("", ijkl[2], ijkl[1], ""))
for a1 in list(G.nodes()):
# List of improper dihedral types.
# Find all bonded neighbors to this atom
friends = sorted(list(nx.all_neighbors(G, a1)))
if len(friends) < 3: continue
for a2, a3, a4 in list(itertools.permutations(friends, 3)):
c1, c2, c3, c4 = (resAc[i] for i in [a1, a2, a3, a4])
# Include wildcards.
NeedIT.add((c1, c2, c3, c4))
NeedIT.add((c1, "", c3, c4))
NeedIT.add((c1, "", "", c4))
# FrcCopy is responsible for taking existing atom classes and
# propagating the interaction types out to the copied atom
# classes.
for i in FrcCopy.keys():
FrcCopy[i].append(i)
# Harmonic bond parameters
if force.tag == 'HarmonicBondForce':
# List of new interaction types if needed
newfrc = []
for elem in force:
att = elem.attrib
BC = (att['class1'], att['class2'])
BCr = (att['class2'], att['class1'])
# Look up parameters from the parsed GROMACS ITP file
if BC in BT.keys():
prm = BT[BC]
elif BCr in BT.keys():
prm = BT[BCr]
else:
print BC, "has no parameters from the ITP file"
prm = None
# Set parameters if they differ from the OpenMM values
if prm != None:
if not almostequal(float(elem.attrib['length']), float(prm[3]), 1e-8):
elem.attrib['length'] = '%.8f' % float(prm[3])
if not almostequal(float(elem.attrib['k']), float(prm[4]), 1e-8):
elem.attrib['k'] = '%.8f' % float(prm[4])
acij = tuple(sorted(BC))
# Add interaction type to HaveBT to avoid double counting
HaveBT.add(acij)
# Copy harmonic bond parameters to "copied" interaction types.
for aci, acj in itertools.product(*[FrcCopy.get(BC[0], [BC[0]]), FrcCopy.get(BC[1], [BC[1]])]):
acij = tuple(sorted([aci, acj]))
if acij in NeedBT and acij not in HaveBT:
elem1 = deepcopy(elem)
elem1.attrib['class1'] = acij[0]
elem1.attrib['class2'] = acij[1]
newfrc.append(elem1)
HaveBT.add(acij)
for elem in newfrc:
force.append(elem)
# Harmonic angle parameters. Same as for harmonic bonds.
if force.tag == 'HarmonicAngleForce':
newfrc = []
for elem in force:
att = elem.attrib
AC = (att['class1'], att['class2'], att['class3'])
ACr = (att['class3'], att['class2'], att['class1'])
if AC in AT.keys():
prm = AT[AC]
elif ACr in AT.keys():
prm = AT[ACr]
else:
print AC, "has no parameters from the ITP file"
prm = None
if prm != None:
gang = float(prm[4]) * np.pi / 180
oang = float(elem.attrib['angle'])
if not almostequal(gang, oang, 1e-8):
elem.attrib['angle'] = '%.8f' % gang
if not almostequal(float(prm[5]), float(elem.attrib['k']), 1e-8):
elem.attrib['k'] = '%.8f' % float(prm[5])
if AC[2] >= AC[0]:
acijk = tuple(AC)
else:
acijk = tuple(ACr)
HaveAT.add(acijk)
# Duplicate harmonic angle parameters for new atom classes.
for aci, acj, ack in itertools.product(*[FrcCopy.get(AC[0], [AC[0]]), FrcCopy.get(AC[1], [AC[1]]), FrcCopy.get(AC[2], [AC[2]])]):
if ack >= aci:
acijk = tuple([aci, acj, ack])
else:
acijk = tuple([ack, acj, aci])
if acijk in NeedAT and acijk not in HaveAT:
elem1 = deepcopy(elem)
elem1.attrib['class1'] = acijk[0]
elem1.attrib['class2'] = acijk[1]
elem1.attrib['class3'] = acijk[2]
newfrc.append(elem1)
HaveAT.add(acijk)
for elem in newfrc:
force.append(elem)
# Periodic torsion parameters. These are a bit of a pain.
if force.tag == 'PeriodicTorsionForce':
newfrc = []
for elem in force:
att = elem.attrib
# Since I got confused about the orderings, I add both orderings to HaveDT
if elem.tag == 'Proper':
DC = (att['class1'], att['class2'], att['class3'], att['class4'], '9')
DCr = (att['class4'], att['class3'], att['class2'], att['class1'], '9')
HaveDT.add((att['class1'], att['class2'], att['class3'], att['class4']))
HaveDT.add((att['class4'], att['class3'], att['class2'], att['class1']))
elif elem.tag == 'Improper':
DC = (att['class2'], att['class3'], att['class1'], att['class4'], '4')
DCr = (att['class2'], att['class3'], att['class4'], att['class1'], '4')
HaveIT.add((att['class1'], att['class2'], att['class3'], att['class4']))
# Look up parameters from the Gromacs ITP file
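            # The ITP file uses 'X' for wildcard atom classes, while the OpenMM XML uses empty strings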
DC = tuple('X' if i == '' else i for i in DC)
DCr = tuple('X' if i == '' else i for i in DCr)
if DC in DT.keys():
prms = DT[DC]
elif DCr in DT.keys():
prms = DT[DCr]
else:
print DC, "has no parameters from the ITP file"
prms = None
# Edit parameters in-place for the existing interaction type
if prms != None:
for prm in prms:
prd = prm[7]
prdn = get_periodn(elem, prd)
if ('periodicity' + prdn) not in elem.attrib:
elem.attrib['periodicity' + prdn] = prd
elem.attrib['phase' + prdn] = '%.8f' % (float(prm[5]) * np.pi / 180)
elem.attrib['k' + prdn] = '%.8f' % (float(prm[6]))
else:
if not almostequal(float(prm[5]) * np.pi / 180, float(elem.attrib['phase' + prdn]), 1e-8):
elem.attrib['phase' + prdn] = '%.8f' % (float(prm[5]) * np.pi / 180)
if not almostequal(float(prm[6]), float(elem.attrib['k' + prdn]), 1e-8):
elem.attrib['k' + prdn] = '%.8f' % (float(prm[6]))
# Propagate interaction type to "copied" types
if elem.tag == 'Improper':
for dci, dcj, dck, dcl in itertools.product(*[FrcCopy.get(att['class1'], [att['class1']]),
FrcCopy.get(att['class2'], [att['class2']]),
FrcCopy.get(att['class3'], [att['class3']]),
FrcCopy.get(att['class4'], [att['class4']])]):
ijkl = (dci, dcj, dck, dcl)
if ijkl in NeedIT and ijkl not in HaveIT:
elem1 = deepcopy(elem)
elem1.attrib['class1'] = dci
elem1.attrib['class2'] = dcj
elem1.attrib['class3'] = dck
elem1.attrib['class4'] = dcl
newfrc.append(elem1)
HaveIT.add(ijkl)
elif elem.tag == 'Proper':
for dci, dcj, dck, dcl in itertools.product(*[FrcCopy.get(att['class1'], [att['class1']]),
FrcCopy.get(att['class2'], [att['class2']]),
FrcCopy.get(att['class3'], [att['class3']]),
FrcCopy.get(att['class4'], [att['class4']])]):
ijkl = (dci, dcj, dck, dcl)
lkji = ijkl[::-1]
if (ijkl in NeedDT or lkji in NeedDT) and ijkl not in HaveDT:
elem1 = deepcopy(elem)
elem1.attrib['class1'] = dci
elem1.attrib['class2'] = dcj
elem1.attrib['class3'] = dck
elem1.attrib['class4'] = dcl
newfrc.append(elem1)
HaveDT.add(ijkl)
HaveDT.add(lkji)
for elem in newfrc:
force.append(elem)
# Finally get side chain parameters from the ITP file.
newfrc = []
# Needed interaction types are pulled from the atom class quartets constructed in ParseRTP
for ijkl in NeedDT:
# Look up the interaction type in the ones from the ITP file
if ijkl in AA_DSC:
print ijkl, "sidechain parameters:",
prm = AA_DSC[ijkl]
replace = False
            # Loop through the existing interaction types in the XML file. If one already
            # exists, replace its parameters.
for elem in force:
if (ijkl == (elem.attrib['class1'], elem.attrib['class2'], elem.attrib['class3'], elem.attrib['class4']) or
ijkl == (elem.attrib['class4'], elem.attrib['class3'], elem.attrib['class2'], elem.attrib['class1'])):
print "replacing existing"
replace = True
for iixn, ixn in enumerate(prm):
elem.attrib["periodicity%i" % (iixn + 1)] = ixn[2]
elem.attrib["phase%i" % (iixn + 1)] = '%.10f' % (float(ixn[0]) * np.pi / 180)
elem.attrib["k%i" % (iixn + 1)] = ixn[1]
break
# If the interaction type doesn't exist in the XML file, create it anew.
if not replace:
print "creating new"
elem1 = ET.Element("Proper")
elem1.attrib['class1'] = ijkl[0]
elem1.attrib['class2'] = ijkl[1]
elem1.attrib['class3'] = ijkl[2]
elem1.attrib['class4'] = ijkl[3]
for iixn, ixn in enumerate(prm):
elem1.attrib["periodicity%i" % (iixn + 1)] = ixn[2]
elem1.attrib["phase%i" % (iixn + 1)] = '%.10f' % (float(ixn[0]) * np.pi / 180)
elem1.attrib["k%i" % (iixn + 1)] = ixn[1]
elem1.tail = '\n '
newfrc.append(elem1)
for elem in newfrc:
force.append(elem)
# Finally replace the atom classes in the AtomTypes section.
for force in root:
if force.tag == 'AtomTypes':
for elem in force:
if OAtAc[elem.attrib['name']] != NewAtAc[elem.attrib['name']]:
elem.attrib['class'] = NewAtAc[elem.attrib['name']]
# Write the output. (Whew!)
with open('new.xml', 'wb') as f:
A99SB.write(f)
| gpl-2.0 |
waseem18/oh-mainline | vendor/packages/Pygments/pygments/lexers/parsers.py | 363 | 25835 | # -*- coding: utf-8 -*-
"""
pygments.lexers.parsers
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for parser generators.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, \
include, bygroups, using
from pygments.token import Punctuation, Other, Text, Comment, Operator, \
Keyword, Name, String, Number, Whitespace
from pygments.lexers.compiled import JavaLexer, CLexer, CppLexer, \
ObjectiveCLexer, DLexer
from pygments.lexers.dotnet import CSharpLexer
from pygments.lexers.agile import RubyLexer, PythonLexer, PerlLexer
from pygments.lexers.web import ActionScriptLexer
__all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer',
'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer',
'RagelJavaLexer', 'AntlrLexer', 'AntlrPythonLexer',
'AntlrPerlLexer', 'AntlrRubyLexer', 'AntlrCppLexer',
#'AntlrCLexer',
'AntlrCSharpLexer', 'AntlrObjectiveCLexer',
'AntlrJavaLexer', "AntlrActionScriptLexer",
'TreetopLexer']
class RagelLexer(RegexLexer):
"""
A pure `Ragel <http://www.complang.org/ragel/>`_ lexer. Use this for
fragments of Ragel. For ``.rl`` files, use RagelEmbeddedLexer instead
(or one of the language-specific subclasses).
*New in Pygments 1.1.*
"""
name = 'Ragel'
aliases = ['ragel']
filenames = []
tokens = {
'whitespace': [
(r'\s+', Whitespace)
],
'comments': [
(r'\#.*$', Comment),
],
'keywords': [
(r'(access|action|alphtype)\b', Keyword),
(r'(getkey|write|machine|include)\b', Keyword),
(r'(any|ascii|extend|alpha|digit|alnum|lower|upper)\b', Keyword),
(r'(xdigit|cntrl|graph|print|punct|space|zlen|empty)\b', Keyword)
],
'numbers': [
(r'0x[0-9A-Fa-f]+', Number.Hex),
(r'[+-]?[0-9]+', Number.Integer),
],
'literals': [
(r'"(\\\\|\\"|[^"])*"', String), # double quote string
(r"'(\\\\|\\'|[^'])*'", String), # single quote string
(r'\[(\\\\|\\\]|[^\]])*\]', String), # square bracket literals
(r'/(?!\*)(\\\\|\\/|[^/])*/', String.Regex), # regular expressions
],
'identifiers': [
(r'[a-zA-Z_][a-zA-Z_0-9]*', Name.Variable),
],
'operators': [
(r',', Operator), # Join
(r'\||&|--?', Operator), # Union, Intersection and Subtraction
            (r'\.|<:|:>>?', Operator), # Concatenation
(r':', Operator), # Label
(r'->', Operator), # Epsilon Transition
(r'(>|\$|%|<|@|<>)(/|eof\b)', Operator), # EOF Actions
(r'(>|\$|%|<|@|<>)(!|err\b)', Operator), # Global Error Actions
(r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator), # Local Error Actions
(r'(>|\$|%|<|@|<>)(~|to\b)', Operator), # To-State Actions
(r'(>|\$|%|<|@|<>)(\*|from\b)', Operator), # From-State Actions
(r'>|@|\$|%', Operator), # Transition Actions and Priorities
(r'\*|\?|\+|{[0-9]*,[0-9]*}', Operator), # Repetition
(r'!|\^', Operator), # Negation
(r'\(|\)', Operator), # Grouping
],
'root': [
include('literals'),
include('whitespace'),
include('comments'),
include('keywords'),
include('numbers'),
include('identifiers'),
include('operators'),
(r'{', Punctuation, 'host'),
(r'=', Operator),
(r';', Punctuation),
],
'host': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^{}\'"/#]+', # exclude unsafe characters
r'[^\\][\\][{}]', # allow escaped { or }
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
r'\#.*$\n?', # ruby comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\/|[^/])*/',
# / is safe now that we've handled regex and javadoc comments
r'/',
)) + r')+', Other),
(r'{', Punctuation, '#push'),
(r'}', Punctuation, '#pop'),
],
}
class RagelEmbeddedLexer(RegexLexer):
"""
A lexer for `Ragel`_ embedded in a host language file.
This will only highlight Ragel statements. If you want host language
highlighting then call the language-specific Ragel lexer.
*New in Pygments 1.1.*
"""
name = 'Embedded Ragel'
aliases = ['ragel-em']
filenames = ['*.rl']
tokens = {
'root': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^%\'"/#]+', # exclude unsafe characters
r'%(?=[^%]|$)', # a single % sign is okay, just not 2 of them
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
r'//.*$\n?', # single line comment
r'\#.*$\n?', # ruby/ragel comment
r'/(?!\*)(\\\\|\\/|[^/])*/', # regular expression
# / is safe now that we've handled regex and javadoc comments
r'/',
)) + r')+', Other),
# Single Line FSM.
# Please don't put a quoted newline in a single line FSM.
# That's just mean. It will break this.
(r'(%%)(?![{%])(.*)($|;)(\n?)', bygroups(Punctuation,
using(RagelLexer),
Punctuation, Text)),
# Multi Line FSM.
(r'(%%%%|%%){', Punctuation, 'multi-line-fsm'),
],
'multi-line-fsm': [
(r'(' + r'|'.join(( # keep ragel code in largest possible chunks.
r'(' + r'|'.join((
r'[^}\'"\[/#]', # exclude unsafe characters
r'}(?=[^%]|$)', # } is okay as long as it's not followed by %
r'}%(?=[^%]|$)', # ...well, one %'s okay, just not two...
r'[^\\][\\][{}]', # ...and } is okay if it's escaped
# allow / if it's preceded with one of these symbols
# (ragel EOF actions)
r'(>|\$|%|<|@|<>)/',
# specifically allow regex followed immediately by *
# so it doesn't get mistaken for a comment
r'/(?!\*)(\\\\|\\/|[^/])*/\*',
# allow / as long as it's not followed by another / or by a *
r'/(?=[^/\*]|$)',
# We want to match as many of these as we can in one block.
# Not sure if we need the + sign here,
# does it help performance?
)) + r')+',
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r"\[(\\\\|\\\]|[^\]])*\]", # square bracket literal
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
r'//.*$\n?', # single line comment
r'\#.*$\n?', # ruby/ragel comment
)) + r')+', using(RagelLexer)),
(r'}%%', Punctuation, '#pop'),
]
}
def analyse_text(text):
return '@LANG: indep' in text or 0.1
class RagelRubyLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Ruby host file.
*New in Pygments 1.1.*
"""
name = 'Ragel in Ruby Host'
aliases = ['ragel-ruby', 'ragel-rb']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelRubyLexer, self).__init__(RubyLexer, RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: ruby' in text
class RagelCLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a C host file.
*New in Pygments 1.1.*
"""
name = 'Ragel in C Host'
aliases = ['ragel-c']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelCLexer, self).__init__(CLexer, RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: c' in text
class RagelDLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a D host file.
*New in Pygments 1.1.*
"""
name = 'Ragel in D Host'
aliases = ['ragel-d']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelDLexer, self).__init__(DLexer, RagelEmbeddedLexer, **options)
def analyse_text(text):
return '@LANG: d' in text
class RagelCppLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a CPP host file.
*New in Pygments 1.1.*
"""
name = 'Ragel in CPP Host'
aliases = ['ragel-cpp']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelCppLexer, self).__init__(CppLexer, RagelEmbeddedLexer, **options)
def analyse_text(text):
return '@LANG: c++' in text
class RagelObjectiveCLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in an Objective C host file.
*New in Pygments 1.1.*
"""
name = 'Ragel in Objective C Host'
aliases = ['ragel-objc']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelObjectiveCLexer, self).__init__(ObjectiveCLexer,
RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: objc' in text
class RagelJavaLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Java host file.
*New in Pygments 1.1.*
"""
name = 'Ragel in Java Host'
aliases = ['ragel-java']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelJavaLexer, self).__init__(JavaLexer, RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: java' in text
class AntlrLexer(RegexLexer):
"""
Generic `ANTLR`_ Lexer.
Should not be called directly, instead
use DelegatingLexer for your target language.
*New in Pygments 1.1.*
.. _ANTLR: http://www.antlr.org/
"""
name = 'ANTLR'
aliases = ['antlr']
filenames = []
_id = r'[A-Za-z][A-Za-z_0-9]*'
_TOKEN_REF = r'[A-Z][A-Za-z_0-9]*'
_RULE_REF = r'[a-z][A-Za-z_0-9]*'
_STRING_LITERAL = r'\'(?:\\\\|\\\'|[^\']*)\''
_INT = r'[0-9]+'
tokens = {
'whitespace': [
(r'\s+', Whitespace),
],
'comments': [
(r'//.*$', Comment),
(r'/\*(.|\n)*?\*/', Comment),
],
'root': [
include('whitespace'),
include('comments'),
(r'(lexer|parser|tree)?(\s*)(grammar\b)(\s*)(' + _id + ')(;)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Class,
Punctuation)),
# optionsSpec
(r'options\b', Keyword, 'options'),
# tokensSpec
(r'tokens\b', Keyword, 'tokens'),
# attrScope
(r'(scope)(\s*)(' + _id + ')(\s*)({)',
bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
Punctuation), 'action'),
# exception
(r'(catch|finally)\b', Keyword, 'exception'),
# action
(r'(@' + _id + ')(\s*)(::)?(\s*)(' + _id + ')(\s*)({)',
bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
Name.Label, Whitespace, Punctuation), 'action'),
# rule
(r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?', \
bygroups(Keyword, Whitespace, Name.Label, Punctuation),
('rule-alts', 'rule-prelims')),
],
'exception': [
(r'\n', Whitespace, '#pop'),
(r'\s', Whitespace),
include('comments'),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
],
'rule-prelims': [
include('whitespace'),
include('comments'),
(r'returns\b', Keyword),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
# throwsSpec
(r'(throws)(\s+)(' + _id + ')',
bygroups(Keyword, Whitespace, Name.Label)),
(r'(,)(\s*)(' + _id + ')',
bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws
# optionsSpec
(r'options\b', Keyword, 'options'),
# ruleScopeSpec - scope followed by target language code or name of action
# TODO finish implementing other possibilities for scope
# L173 ANTLRv3.g from ANTLR book
(r'(scope)(\s+)({)', bygroups(Keyword, Whitespace, Punctuation),
'action'),
(r'(scope)(\s+)(' + _id + ')(\s*)(;)',
bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)),
# ruleAction
(r'(@' + _id + ')(\s*)({)',
bygroups(Name.Label, Whitespace, Punctuation), 'action'),
# finished prelims, go to rule alts!
(r':', Punctuation, '#pop')
],
'rule-alts': [
include('whitespace'),
include('comments'),
# These might need to go in a separate 'block' state triggered by (
(r'options\b', Keyword, 'options'),
(r':', Punctuation),
# literals
(r"'(\\\\|\\'|[^'])*'", String),
(r'"(\\\\|\\"|[^"])*"', String),
(r'<<([^>]|>[^>])>>', String),
# identifiers
# Tokens start with capital letter.
(r'\$?[A-Z_][A-Za-z_0-9]*', Name.Constant),
# Rules start with small letter.
(r'\$?[a-z_][A-Za-z_0-9]*', Name.Variable),
# operators
(r'(\+|\||->|=>|=|\(|\)|\.\.|\.|\?|\*|\^|!|\#|~)', Operator),
(r',', Punctuation),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
(r';', Punctuation, '#pop')
],
'tokens': [
include('whitespace'),
include('comments'),
(r'{', Punctuation),
(r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL
+ ')?(\s*)(;)',
bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
String, Whitespace, Punctuation)),
(r'}', Punctuation, '#pop'),
],
'options': [
include('whitespace'),
include('comments'),
(r'{', Punctuation),
(r'(' + _id + r')(\s*)(=)(\s*)(' +
'|'.join((_id, _STRING_LITERAL, _INT, '\*'))+ ')(\s*)(;)',
bygroups(Name.Variable, Whitespace, Punctuation, Whitespace,
Text, Whitespace, Punctuation)),
(r'}', Punctuation, '#pop'),
],
'action': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^\${}\'"/\\]+', # exclude unsafe characters
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\/|[^/])*/',
# backslashes are okay, as long as we are not backslashing a %
r'\\(?!%)',
# Now that we've handled regex and javadoc comments
# it's safe to let / through.
r'/',
)) + r')+', Other),
(r'(\\)(%)', bygroups(Punctuation, Other)),
(r'(\$[a-zA-Z]+)(\.?)(text|value)?',
bygroups(Name.Variable, Punctuation, Name.Property)),
(r'{', Punctuation, '#push'),
(r'}', Punctuation, '#pop'),
],
'nested-arg-action': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks.
r'[^\$\[\]\'"/]+', # exclude unsafe characters
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\/|[^/])*/',
# Now that we've handled regex and javadoc comments
# it's safe to let / through.
r'/',
)) + r')+', Other),
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
(r'(\$[a-zA-Z]+)(\.?)(text|value)?',
bygroups(Name.Variable, Punctuation, Name.Property)),
(r'(\\\\|\\\]|\\\[|[^\[\]])+', Other),
]
}
def analyse_text(text):
return re.search(r'^\s*grammar\s+[a-zA-Z0-9]+\s*;', text, re.M)
# http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets
# TH: I'm not aware of any language features of C++ that will cause
# incorrect lexing of C files. Antlr doesn't appear to make a distinction,
# so just assume they're C++. No idea how to make Objective C work in the
# future.
#class AntlrCLexer(DelegatingLexer):
# """
# ANTLR with C Target
#
# *New in Pygments 1.1*
# """
#
# name = 'ANTLR With C Target'
# aliases = ['antlr-c']
# filenames = ['*.G', '*.g']
#
# def __init__(self, **options):
# super(AntlrCLexer, self).__init__(CLexer, AntlrLexer, **options)
#
# def analyse_text(text):
# return re.match(r'^\s*language\s*=\s*C\s*;', text)
class AntlrCppLexer(DelegatingLexer):
"""
`ANTLR`_ with CPP Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With CPP Target'
aliases = ['antlr-cpp']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrCppLexer, self).__init__(CppLexer, AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*C\s*;', text, re.M)
class AntlrObjectiveCLexer(DelegatingLexer):
"""
`ANTLR`_ with Objective-C Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With ObjectiveC Target'
aliases = ['antlr-objc']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrObjectiveCLexer, self).__init__(ObjectiveCLexer,
AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*ObjC\s*;', text)
class AntlrCSharpLexer(DelegatingLexer):
"""
`ANTLR`_ with C# Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With C# Target'
aliases = ['antlr-csharp', 'antlr-c#']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrCSharpLexer, self).__init__(CSharpLexer, AntlrLexer,
**options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M)
class AntlrPythonLexer(DelegatingLexer):
"""
`ANTLR`_ with Python Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With Python Target'
aliases = ['antlr-python']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrPythonLexer, self).__init__(PythonLexer, AntlrLexer,
**options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M)
class AntlrJavaLexer(DelegatingLexer):
"""
`ANTLR`_ with Java Target
*New in Pygments 1.1*
"""
name = 'ANTLR With Java Target'
aliases = ['antlr-java']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrJavaLexer, self).__init__(JavaLexer, AntlrLexer,
**options)
def analyse_text(text):
# Antlr language is Java by default
return AntlrLexer.analyse_text(text) and 0.9
class AntlrRubyLexer(DelegatingLexer):
"""
`ANTLR`_ with Ruby Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With Ruby Target'
aliases = ['antlr-ruby', 'antlr-rb']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrRubyLexer, self).__init__(RubyLexer, AntlrLexer,
**options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M)
class AntlrPerlLexer(DelegatingLexer):
"""
`ANTLR`_ with Perl Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With Perl Target'
aliases = ['antlr-perl']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrPerlLexer, self).__init__(PerlLexer, AntlrLexer,
**options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M)
class AntlrActionScriptLexer(DelegatingLexer):
"""
`ANTLR`_ with ActionScript Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With ActionScript Target'
aliases = ['antlr-as', 'antlr-actionscript']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrActionScriptLexer, self).__init__(ActionScriptLexer,
AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M)
class TreetopBaseLexer(RegexLexer):
"""
A base lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
Not for direct use; use TreetopLexer instead.
*New in Pygments 1.6.*
"""
tokens = {
'root': [
include('space'),
(r'require[ \t]+[^\n\r]+[\n\r]', Other),
(r'module\b', Keyword.Namespace, 'module'),
(r'grammar\b', Keyword, 'grammar'),
],
'module': [
include('space'),
include('end'),
(r'module\b', Keyword, '#push'),
(r'grammar\b', Keyword, 'grammar'),
(r'[A-Z][A-Za-z_0-9]*(?:::[A-Z][A-Za-z_0-9]*)*', Name.Namespace),
],
'grammar': [
include('space'),
include('end'),
(r'rule\b', Keyword, 'rule'),
(r'include\b', Keyword, 'include'),
(r'[A-Z][A-Za-z_0-9]*', Name),
],
'include': [
include('space'),
(r'[A-Z][A-Za-z_0-9]*(?:::[A-Z][A-Za-z_0-9]*)*', Name.Class, '#pop'),
],
'rule': [
include('space'),
include('end'),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'([A-Za-z_][A-Za-z_0-9]*)(:)', bygroups(Name.Label, Punctuation)),
(r'[A-Za-z_][A-Za-z_0-9]*', Name),
(r'[()]', Punctuation),
(r'[?+*/&!~]', Operator),
(r'\[(?:\\.|\[:\^?[a-z]+:\]|[^\\\]])+\]', String.Regex),
(r'([0-9]*)(\.\.)([0-9]*)',
bygroups(Number.Integer, Operator, Number.Integer)),
(r'(<)([^>]+)(>)', bygroups(Punctuation, Name.Class, Punctuation)),
(r'{', Punctuation, 'inline_module'),
(r'\.', String.Regex),
],
'inline_module': [
(r'{', Other, 'ruby'),
(r'}', Punctuation, '#pop'),
(r'[^{}]+', Other),
],
'ruby': [
(r'{', Other, '#push'),
(r'}', Other, '#pop'),
(r'[^{}]+', Other),
],
'space': [
(r'[ \t\n\r]+', Whitespace),
(r'#[^\n]*', Comment.Single),
],
'end': [
(r'end\b', Keyword, '#pop'),
],
}
class TreetopLexer(DelegatingLexer):
"""
A lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
*New in Pygments 1.6.*
"""
name = 'Treetop'
aliases = ['treetop']
filenames = ['*.treetop', '*.tt']
def __init__(self, **options):
super(TreetopLexer, self).__init__(RubyLexer, TreetopBaseLexer, **options)
| agpl-3.0 |
AleksanderGondek/GUT_Manycore_Architectures_MCTS | serial-implementation/source/gutserialmcts/mcts_test.py | 1 | 2271 | from gutserialmcts.games.nim import NimState
from gutserialmcts.algorithm.utc import UTCSearch
class MonteCarloGameHandler(object):
"""
This class is responsible for playing out a simple game between two players.
Both of them are using UTC to determine their best moves, however they have different number of iterations.
Player with higher number of iterations should win more often.
Class should be provided with starting game state object, a class inheriting GameState properties -
this class can (in theory) describe any 2-player complete information deterministic zero-sum game.
!IMPORTANT!: 1 UTC iterations == 1 simulation == 1 MCTS node
"""
def __init__(self, starting_state, player_one_max_iterations=1000, player_two_max_iterations=100):
self.starting_state = starting_state
self.player_one_max_iterations = player_one_max_iterations
self.player_two_max_iterations = player_two_max_iterations
def play(self):
"""
        This method plays out the game according to the provided starting state and iteration counts
"""
while self.starting_state.get_available_actions():
print("Starting game state: {0}".format(repr(self.starting_state)))
if self.starting_state.last_active_player == 1:
action = UTCSearch.search(root_state=self.starting_state, max_iterations=self.player_one_max_iterations)
else:
action = UTCSearch.search(root_state=self.starting_state, max_iterations=self.player_two_max_iterations)
print("Best move determined: {0}\n".format(str(action)))
self.starting_state.perform_action(action)
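        # The game is over; get_value is presumably evaluated from the perspective of the player
        # who made the last move: 1.0 means that player won, 0.0 means the opponent won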
if self.starting_state.get_value(self.starting_state.last_active_player) == 1.0:
print("Player #{0} wins!".format(self.starting_state.last_active_player))
elif self.starting_state.get_value(self.starting_state.last_active_player) == 0.0:
print("Player #{0} wins!".format(str(3 - self.starting_state.last_active_player)))
else:
print("It is a draw!")
if __name__ == "__main__":
gameHandler = MonteCarloGameHandler(NimState(last_active_player=2, _chips=15), 1000, 100)
gameHandler.play()
| apache-2.0 |
tobinjt/Flexget | flexget/tests/test_movieparser.py | 4 | 3580 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
from flexget.components.parsing.parsers.parser_guessit import ParserGuessit
from flexget.components.parsing.parsers.parser_internal import ParserInternal
class TestParser(object):
@pytest.fixture(
scope='class', params=(ParserInternal, ParserGuessit), ids=['internal', 'guessit']
)
def parse(self, request):
p = request.param()
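        # Parametrized fixture: every test in this class runs once with the internal
        # parser and once with the guessit parser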
def parse(data, name=None, **kwargs):
return p.parse_movie(data, name=name, **kwargs)
return parse
def test_parsing(self, parse):
movie = parse('The.Matrix.1999.1080p.HDDVD.x264-FlexGet')
assert movie.name == 'The Matrix', 'failed to parse %s (got %s)' % (movie.data, movie.name)
assert movie.year == 1999, 'failed to parse year from %s' % movie.data
movie = parse('WALL-E 720p BluRay x264-FlexGet')
assert movie.name == 'WALL-E', 'failed to parse %s' % movie.data
assert movie.quality.name == '720p bluray h264', (
'failed to parse quality from %s' % movie.data
)
movie = parse('The.Pianist.2002.HDDVD.1080p.DTS.x264-FlexGet')
assert movie.name == 'The Pianist', 'failed to parse %s' % movie.data
assert movie.year == 2002, 'failed to parse year from %s' % movie.data
assert movie.quality.name == '1080p h264 dts', (
'failed to parse quality from %s' % movie.data
)
movie = parse("Howl's_Moving_Castle_(2004)_[720p,HDTV,x264,DTS]-FlexGet")
assert movie.name == "Howl's Moving Castle", 'failed to parse %s' % movie.data
assert movie.year == 2004, 'failed to parse year from %s' % movie.data
assert movie.quality.name == '720p hdtv h264 dts', (
'failed to parse quality from %s' % movie.data
)
movie = parse('Coraline.3D.1080p.BluRay.x264-FlexGet')
assert movie.name == 'Coraline', 'failed to parse %s' % movie.data
assert movie.quality.name == '1080p bluray h264', (
'failed to parse quality from %s' % movie.data
)
movie = parse('Slumdog.Millionaire.DVDRip.XviD-FlexGet')
assert movie.name == 'Slumdog Millionaire', 'failed to parse %s' % movie.data
assert movie.quality.name == 'dvdrip xvid', 'failed to parse quality from %s' % movie.data
movie = parse('TRON.Legacy.3D.2010.1080p.BluRay.Half.Over-Under.DTS.x264-FlexGet')
assert movie.name == 'TRON Legacy', 'failed to parse %s' % movie.data
movie = parse('[SomeThing]Up.2009.720p.x264-FlexGet')
assert movie.name == 'Up', 'failed to parse %s (got %s)' % (movie.data, movie.name)
assert movie.year == 2009, 'failed to parse year from %s' % movie.data
movie = parse('[720p] A.Movie.Title.2013.otherstuff.x264')
assert movie.name == 'A Movie Title', 'failed to parse %s (got %s)' % (
movie.data,
movie.name,
)
assert movie.year == 2013, 'failed to parse year from %s' % movie.data
assert movie.quality.name == '720p h264'
def test_multiple_property_values(self, parse):
""" Test correct parsing for title's with multiple propertie definitions """
movie = parse(
name='FlexGet',
data='FlexGet (premiere 2018)(2016/MHD/1080P/AC3 5.1/DUAL/SUB/bluray/Webrip)',
)
assert movie.valid
assert movie.year == 2018
assert movie.quality.source == 'bluray'
| mit |
alejandrorosas/ardupilot | Tools/autotest/param_metadata/param_parse.py | 43 | 5377 | #!/usr/bin/env python
import os, glob, re, sys
from param import *
from wikiemit import WikiEmit
from xmlemit import XmlEmit
from htmlemit import HtmlEmit
from optparse import OptionParser
parser = OptionParser("param_parse.py [options]")
parser.add_option("-v", "--verbose", dest='verbose', action='store_true', default=False, help="show debugging output")
parser.add_option("--vehicle", default='*', help="Vehicle type to generate for")
(opts, args) = parser.parse_args()
# Regular expressions for parsing the parameter metadata
prog_param = re.compile(r"@Param: *(\w+).*((?:\n[ \t]*// @(\w+): (.*))+)(?:\n\n|\n[ \t]+[A-Z])", re.MULTILINE)
prog_param_fields = re.compile(r"[ \t]*// @(\w+): (.*)")
prog_groups = re.compile(r"@Group: *(\w+).*((?:\n[ \t]*// @(Path): (\S+))+)", re.MULTILINE)
prog_group_param = re.compile(r"@Param: (\w+).*((?:\n[ \t]*// @(\w+): (.*))+)(?:\n\n|\n[ \t]+[A-Z])", re.MULTILINE)
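# prog_param/prog_param_fields match "@Param" blocks and their "// @Field: value" lines in vehicle
# code; prog_groups matches "@Group"/"@Path" blocks, and prog_group_param matches "@Param" blocks
# inside library source files.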
apm_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../')
vehicle_paths = glob.glob(apm_path + "%s/Parameters.pde" % opts.vehicle)
vehicle_paths.sort(reverse=True)
vehicles = []
libraries = []
error_count = 0
def debug(str):
'''debug output if verbose is set'''
if opts.verbose:
print(str)
def error(str):
'''show errors'''
global error_count
error_count += 1
print(str)
for vehicle_path in vehicle_paths:
name = os.path.basename(os.path.dirname(vehicle_path))
path = os.path.normpath(os.path.dirname(vehicle_path))
vehicles.append(Vehicle(name, path))
debug('Found vehicle type %s' % name)
for vehicle in vehicles:
debug("===\n\n\nProcessing %s" % vehicle.name)
f = open(vehicle.path+'/Parameters.pde')
p_text = f.read()
f.close()
param_matches = prog_param.findall(p_text)
group_matches = prog_groups.findall(p_text)
debug(group_matches)
for group_match in group_matches:
l = Library(group_match[0])
fields = prog_param_fields.findall(group_match[1])
for field in fields:
if field[0] in known_group_fields:
setattr(l, field[0], field[1])
else:
error("unknown parameter metadata field '%s'" % field[0])
if not any(l.name == parsed_l.name for parsed_l in libraries):
libraries.append(l)
for param_match in param_matches:
p = Parameter(vehicle.name+":"+param_match[0])
debug(p.name + ' ')
field_text = param_match[1]
fields = prog_param_fields.findall(field_text)
field_list = []
for field in fields:
field_list.append(field[0])
if field[0] in known_param_fields:
setattr(p, field[0], field[1])
else:
error("unknown parameter metadata field '%s'" % field[0])
for req_field in required_param_fields:
if not req_field in field_list:
error("missing parameter metadata field '%s' in %s" % (req_field, field_text))
vehicle.params.append(p)
debug("Processed %u params" % len(vehicle.params))
debug("Found %u documented libraries" % len(libraries))
for library in libraries:
debug("===\n\n\nProcessing library %s" % library.name)
if hasattr(library, 'Path'):
paths = library.Path.split(',')
for path in paths:
path = path.strip()
debug("\n Processing file '%s'" % path)
if path.endswith('.pde'):
if len(vehicles) != 1:
print("Unable to handle multiple vehicles with .pde library")
continue
libraryfname = os.path.join(vehicles[0].path, path)
else:
libraryfname = os.path.normpath(os.path.join(apm_path + '/libraries/' + path))
if path and os.path.exists(libraryfname):
f = open(libraryfname)
p_text = f.read()
f.close()
else:
error("Path %s not found for library %s" % (path, library.name))
continue
param_matches = prog_group_param.findall(p_text)
debug("Found %u documented parameters" % len(param_matches))
for param_match in param_matches:
p = Parameter(library.name+param_match[0])
debug(p.name + ' ')
field_text = param_match[1]
fields = prog_param_fields.findall(field_text)
for field in fields:
if field[0] in known_param_fields:
setattr(p, field[0], field[1])
else:
error("unknown parameter metadata field %s" % field[0])
library.params.append(p)
else:
error("Skipped: no Path found")
debug("Processed %u documented parameters" % len(library.params))
def do_emit(emit):
for vehicle in vehicles:
emit.emit(vehicle, f)
emit.start_libraries()
for library in libraries:
if library.params:
emit.emit(library, f)
emit.close()
do_emit(XmlEmit())
do_emit(WikiEmit())
do_emit(HtmlEmit())
sys.exit(error_count)
| gpl-3.0 |
nicolargo/intellij-community | python/lib/Lib/site-packages/django/views/generic/edit.py | 73 | 7509 | from django.forms import models as model_forms
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.views.generic.base import TemplateResponseMixin, View
from django.views.generic.detail import (SingleObjectMixin,
SingleObjectTemplateResponseMixin, BaseDetailView)
class FormMixin(object):
"""
A mixin that provides a way to show and handle a form in a request.
"""
initial = {}
form_class = None
success_url = None
def get_initial(self):
"""
Returns the initial data to use for forms on this view.
"""
return self.initial
def get_form_class(self):
"""
Returns the form class to use in this view
"""
return self.form_class
def get_form(self, form_class):
"""
Returns an instance of the form to be used in this view.
"""
return form_class(**self.get_form_kwargs())
def get_form_kwargs(self):
"""
        Returns the keyword arguments for instantiating the form.
"""
kwargs = {'initial': self.get_initial()}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_context_data(self, **kwargs):
return kwargs
def get_success_url(self):
if self.success_url:
url = self.success_url
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
def form_valid(self, form):
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
class ModelFormMixin(FormMixin, SingleObjectMixin):
"""
A mixin that provides a way to show and handle a modelform in a request.
"""
def get_form_class(self):
"""
Returns the form class to use in this view
"""
if self.form_class:
return self.form_class
else:
model = self.get_queryset().model
return model_forms.modelform_factory(model)
def get_form_kwargs(self):
"""
        Returns the keyword arguments for instantiating the form.
"""
kwargs = super(ModelFormMixin, self).get_form_kwargs()
kwargs.update({'instance': self.object})
return kwargs
def get_success_url(self):
if self.success_url:
url = self.success_url % self.object.__dict__
else:
try:
url = self.object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
"No URL to redirect to. Either provide a url or define"
" a get_absolute_url method on the Model.")
return url
def form_valid(self, form):
self.object = form.save()
return super(ModelFormMixin, self).form_valid(form)
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
def get_context_data(self, **kwargs):
context = kwargs
if self.object:
context['object'] = self.object
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
return context
class ProcessFormView(View):
"""
A mixin that processes a form on POST.
"""
def get(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
return self.render_to_response(self.get_context_data(form=form))
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class BaseFormView(FormMixin, ProcessFormView):
"""
A base view for displaying a form
"""
class FormView(TemplateResponseMixin, BaseFormView):
"""
A view for displaying a form, and rendering a template response.
"""
class BaseCreateView(ModelFormMixin, ProcessFormView):
"""
    Base view for creating a new object instance.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).post(request, *args, **kwargs)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
"""
    View for creating a new object instance,
with a response rendered by template.
"""
template_name_suffix = '_form'
class BaseUpdateView(ModelFormMixin, ProcessFormView):
"""
Base view for updating an existing object.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).post(request, *args, **kwargs)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
"""
View for updating an object,
    with a response rendered by template.
"""
template_name_suffix = '_form'
class DeletionMixin(object):
"""
A mixin providing the ability to delete objects
"""
success_url = None
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
return HttpResponseRedirect(self.get_success_url())
# Add support for browsers which only accept GET and POST for now.
def post(self, *args, **kwargs):
return self.delete(*args, **kwargs)
def get_success_url(self):
if self.success_url:
return self.success_url
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
class BaseDeleteView(DeletionMixin, BaseDetailView):
"""
Base view for deleting an object.
Using this base class requires subclassing to provide a response mixin.
"""
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
"""
View for deleting an object retrieved with `self.get_object()`,
with a response rendered by template.
"""
template_name_suffix = '_confirm_delete'
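    # Illustrative sketch (not part of Django): a URLconf could wire this view
    # up directly, assuming a hypothetical Article model:
    #
    #   DeleteView.as_view(model=Article, success_url='/articles/')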
| apache-2.0 |
kangkot/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32/Demos/win32clipboardDemo.py | 17 | 4452 | # win32clipboardDemo.py
#
# Demo/test of the win32clipboard module.
from win32clipboard import *
import win32con
import types
if not __debug__:
print "WARNING: The test code in this module uses assert"
print "This instance of Python has asserts disabled, so many tests will be skipped"
cf_names = {}
# Build map of CF_* constants to names.
for name, val in win32con.__dict__.items():
if name[:3]=="CF_" and name != "CF_SCREENFONTS": # CF_SCREEN_FONTS==CF_TEXT!?!?
cf_names[val] = name
def TestEmptyClipboard():
OpenClipboard()
try:
EmptyClipboard()
assert EnumClipboardFormats(0)==0, "Clipboard formats were available after emptying it!"
finally:
CloseClipboard()
def TestText():
OpenClipboard()
try:
text = "Hello from Python"
SetClipboardText(text)
got = GetClipboardData(win32con.CF_TEXT)
assert got == text, "Didnt get the correct result back - '%r'." % (got,)
# Win32 documentation says I can get the result back as CF_UNICODE or CF_OEMTEXT.
# But it appears I need to close the clipboard for this to kick-in.
# but if I attempt to, it fails!
finally:
CloseClipboard()
OpenClipboard()
try:
got = GetClipboardData(win32con.CF_UNICODETEXT)
assert got == text, "Didnt get the correct result back - '%r'." % (got,)
assert type(got)==types.UnicodeType, "Didnt get the correct result back - '%r'." % (got,)
got = GetClipboardData(win32con.CF_OEMTEXT)
assert got == text, "Didnt get the correct result back - '%r'." % (got,)
# Unicode tests
EmptyClipboard()
text = u"Hello from Python unicode"
# Now set the Unicode value
SetClipboardData(win32con.CF_UNICODETEXT, text)
# Get it in Unicode.
got = GetClipboardData(win32con.CF_UNICODETEXT)
assert got == text, "Didnt get the correct result back - '%r'." % (got,)
assert type(got)==types.UnicodeType, "Didnt get the correct result back - '%r'." % (got,)
# Close and open the clipboard to ensure auto-conversions take place.
finally:
CloseClipboard()
OpenClipboard()
try:
# Make sure I can still get the text.
got = GetClipboardData(win32con.CF_TEXT)
assert got == text, "Didnt get the correct result back - '%r'." % (got,)
# Make sure we get back the correct types.
got = GetClipboardData(win32con.CF_UNICODETEXT)
assert type(got)==types.UnicodeType, "Didnt get the correct result back - '%r'." % (got,)
got = GetClipboardData(win32con.CF_OEMTEXT)
assert got == text, "Didnt get the correct result back - '%r'." % (got,)
print "Clipboard text tests worked correctly"
finally:
CloseClipboard()
def TestClipboardEnum():
OpenClipboard()
try:
# Enumerate over the clipboard types
enum = 0
while 1:
enum = EnumClipboardFormats(enum)
if enum==0:
break
assert IsClipboardFormatAvailable(enum), "Have format, but clipboard says it is not available!"
n = cf_names.get(enum,"")
if not n:
try:
n = GetClipboardFormatName(enum)
except error:
n = "unknown (%s)" % (enum,)
print "Have format", n
print "Clipboard enumerator tests worked correctly"
finally:
CloseClipboard()
class Foo:
def __init__(self, **kw):
self.__dict__.update(kw)
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
def TestCustomFormat():
OpenClipboard()
try:
# Just for the fun of it pickle Python objects through the clipboard
fmt = RegisterClipboardFormat("Python Pickle Format")
import cPickle
pickled_object = Foo(a=1, b=2, Hi=3)
SetClipboardData(fmt, cPickle.dumps( pickled_object ) )
# Now read it back.
data = GetClipboardData(fmt)
loaded_object = cPickle.loads(data)
assert cPickle.loads(data) == pickled_object, "Didnt get the correct data!"
print "Clipboard custom format tests worked correctly"
finally:
CloseClipboard()
if __name__=='__main__':
TestEmptyClipboard()
TestText()
TestCustomFormat()
TestClipboardEnum()
# And leave it empty at the end!
TestEmptyClipboard()
| apache-2.0 |
lukeiwanski/tensorflow-opencl | tensorflow/contrib/grid_rnn/python/kernel_tests/grid_rnn_test.py | 18 | 22279 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GridRNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.grid_rnn.python.ops import grid_rnn_cell
from tensorflow.contrib.rnn.python.ops import core_rnn
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class GridRNNCellTest(test.TestCase):
def testGrid2BasicLSTMCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.2)) as root_scope:
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 8])
cell = grid_rnn_cell.Grid2BasicLSTMCell(2)
self.assertEqual(cell.state_size, 8)
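        # The expected state_size of 8 is consistent with each of the 2 grid
        # dimensions carrying an LSTM (c, h) pair of size num_units=2:
        # 2 dims * 2 tensors * 2 units = 8 (likewise 4 for Grid1, 12 for Grid3).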
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 8))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, s], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])
})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 8))
self.assertAllClose(res[0], [[0.36617181, 0.36617181]])
self.assertAllClose(res[1], [[0.71053141, 0.71053141, 0.36617181,
0.36617181, 0.72320831, 0.80555487,
0.39102408, 0.42150158]])
# emulate a loop through the input sequence,
# where we call cell() multiple times
root_scope.reuse_variables()
g2, s2 = cell(x, m)
self.assertEqual(g2.get_shape(), (1, 2))
self.assertEqual(s2.get_shape(), (1, 8))
res = sess.run([g2, s2], {x: np.array([[2., 2., 2.]]), m: res[1]})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 8))
self.assertAllClose(res[0], [[0.58847463, 0.58847463]])
self.assertAllClose(res[1], [[1.40469193, 1.40469193, 0.58847463,
0.58847463, 0.97726452, 1.04626071,
0.4927212, 0.51137757]])
def testGrid2BasicLSTMCellTied(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.2)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 8])
cell = grid_rnn_cell.Grid2BasicLSTMCell(2, tied=True)
self.assertEqual(cell.state_size, 8)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 8))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, s], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])
})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 8))
self.assertAllClose(res[0], [[0.36617181, 0.36617181]])
self.assertAllClose(res[1], [[0.71053141, 0.71053141, 0.36617181,
0.36617181, 0.72320831, 0.80555487,
0.39102408, 0.42150158]])
res = sess.run([g, s], {x: np.array([[1., 1., 1.]]), m: res[1]})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 8))
self.assertAllClose(res[0], [[0.36703536, 0.36703536]])
self.assertAllClose(res[1], [[0.71200621, 0.71200621, 0.36703536,
0.36703536, 0.80941606, 0.87550586,
0.40108523, 0.42199609]])
def testGrid2BasicLSTMCellWithRelu(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.2)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 4])
cell = grid_rnn_cell.Grid2BasicLSTMCell(
2, tied=False, non_recurrent_fn=nn_ops.relu)
self.assertEqual(cell.state_size, 4)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 4))
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, s],
{x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4]])})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 4))
self.assertAllClose(res[0], [[0.31667367, 0.31667367]])
self.assertAllClose(res[1], [[0.29530135, 0.37520045, 0.17044567,
0.21292259]])
"""LSTMCell
"""
def testGrid2LSTMCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 8])
cell = grid_rnn_cell.Grid2LSTMCell(2, use_peepholes=True)
self.assertEqual(cell.state_size, 8)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 8))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, s], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])
})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 8))
self.assertAllClose(res[0], [[0.95686918, 0.95686918]])
self.assertAllClose(res[1], [[2.41515064, 2.41515064, 0.95686918,
0.95686918, 1.38917875, 1.49043763,
0.83884692, 0.86036491]])
def testGrid2LSTMCellTied(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 8])
cell = grid_rnn_cell.Grid2LSTMCell(2, tied=True, use_peepholes=True)
self.assertEqual(cell.state_size, 8)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 8))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, s], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])
})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 8))
self.assertAllClose(res[0], [[0.95686918, 0.95686918]])
self.assertAllClose(res[1], [[2.41515064, 2.41515064, 0.95686918,
0.95686918, 1.38917875, 1.49043763,
0.83884692, 0.86036491]])
def testGrid2LSTMCellWithRelu(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 4])
cell = grid_rnn_cell.Grid2LSTMCell(
2, use_peepholes=True, non_recurrent_fn=nn_ops.relu)
self.assertEqual(cell.state_size, 4)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 4))
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, s],
{x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4]])})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 4))
self.assertAllClose(res[0], [[2.1831727, 2.1831727]])
self.assertAllClose(res[1], [[0.92270052, 1.02325559, 0.66159075,
0.70475441]])
"""RNNCell
"""
def testGrid2BasicRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([2, 2])
m = array_ops.zeros([2, 4])
cell = grid_rnn_cell.Grid2BasicRNNCell(2)
self.assertEqual(cell.state_size, 4)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (2, 2))
self.assertEqual(s.get_shape(), (2, 4))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, s], {
x: np.array([[1., 1.], [2., 2.]]),
m: np.array([[0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2]])
})
self.assertEqual(res[0].shape, (2, 2))
self.assertEqual(res[1].shape, (2, 4))
self.assertAllClose(res[0], [[0.94685763, 0.94685763],
[0.99480951, 0.99480951]])
self.assertAllClose(res[1],
[[0.94685763, 0.94685763, 0.80049908, 0.80049908],
[0.99480951, 0.99480951, 0.97574311, 0.97574311]])
def testGrid2BasicRNNCellTied(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([2, 2])
m = array_ops.zeros([2, 4])
cell = grid_rnn_cell.Grid2BasicRNNCell(2, tied=True)
self.assertEqual(cell.state_size, 4)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (2, 2))
self.assertEqual(s.get_shape(), (2, 4))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, s], {
x: np.array([[1., 1.], [2., 2.]]),
m: np.array([[0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2]])
})
self.assertEqual(res[0].shape, (2, 2))
self.assertEqual(res[1].shape, (2, 4))
self.assertAllClose(res[0], [[0.94685763, 0.94685763],
[0.99480951, 0.99480951]])
self.assertAllClose(res[1],
[[0.94685763, 0.94685763, 0.80049908, 0.80049908],
[0.99480951, 0.99480951, 0.97574311, 0.97574311]])
def testGrid2BasicRNNCellWithRelu(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = grid_rnn_cell.Grid2BasicRNNCell(2, non_recurrent_fn=nn_ops.relu)
self.assertEqual(cell.state_size, 2)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 2))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, s],
{x: np.array([[1., 1.]]),
m: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 2))
self.assertAllClose(res[0], [[1.80049896, 1.80049896]])
self.assertAllClose(res[1], [[0.80049896, 0.80049896]])
"""1-LSTM
"""
def testGrid1LSTMCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)) as root_scope:
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 4])
cell = grid_rnn_cell.Grid1LSTMCell(2, use_peepholes=True)
self.assertEqual(cell.state_size, 4)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 4))
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, s],
{x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4]])})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 4))
self.assertAllClose(res[0], [[0.91287315, 0.91287315]])
self.assertAllClose(res[1],
[[2.26285243, 2.26285243, 0.91287315, 0.91287315]])
root_scope.reuse_variables()
x2 = array_ops.zeros([0, 0])
g2, s2 = cell(x2, m)
self.assertEqual(g2.get_shape(), (1, 2))
self.assertEqual(s2.get_shape(), (1, 4))
sess.run([variables.global_variables_initializer()])
res = sess.run([g2, s2], {m: res[1]})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 4))
self.assertAllClose(res[0], [[0.9032144, 0.9032144]])
self.assertAllClose(res[1],
[[2.79966092, 2.79966092, 0.9032144, 0.9032144]])
g3, s3 = cell(x2, m)
self.assertEqual(g3.get_shape(), (1, 2))
self.assertEqual(s3.get_shape(), (1, 4))
sess.run([variables.global_variables_initializer()])
res = sess.run([g3, s3], {m: res[1]})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 4))
self.assertAllClose(res[0], [[0.92727238, 0.92727238]])
self.assertAllClose(res[1],
[[3.3529923, 3.3529923, 0.92727238, 0.92727238]])
"""3-LSTM
"""
def testGrid3LSTMCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 12])
cell = grid_rnn_cell.Grid3LSTMCell(2, use_peepholes=True)
self.assertEqual(cell.state_size, 12)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 12))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, s], {
x:
np.array([[1., 1., 1.]]),
m:
np.array([[
0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, -0.1, -0.2, -0.3,
-0.4
]])
})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 12))
self.assertAllClose(res[0], [[0.96892911, 0.96892911]])
self.assertAllClose(res[1], [[2.45227885, 2.45227885, 0.96892911,
0.96892911, 1.33592629, 1.4373529,
0.80867189, 0.83247656, 0.7317788,
0.63205892, 0.56548983, 0.50446129]])
"""Edge cases
"""
def testGridRNNEdgeCasesLikeRelu(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([3, 2])
m = array_ops.zeros([0, 0])
# this is equivalent to relu
cell = grid_rnn_cell.GridRNNCell(
num_units=2,
num_dims=1,
input_dims=0,
output_dims=0,
non_recurrent_dims=0,
non_recurrent_fn=nn_ops.relu)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (3, 2))
self.assertEqual(s.get_shape(), (0, 0))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, s], {x: np.array([[1., -1.], [-2, 1], [2, -1]])})
self.assertEqual(res[0].shape, (3, 2))
self.assertEqual(res[1].shape, (0, 0))
self.assertAllClose(res[0], [[0, 0], [0, 0], [0.5, 0.5]])
def testGridRNNEdgeCasesNoOutput(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 4])
# This cell produces no output
cell = grid_rnn_cell.GridRNNCell(
num_units=2,
num_dims=2,
input_dims=0,
output_dims=None,
non_recurrent_dims=0,
non_recurrent_fn=nn_ops.relu)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (0, 0))
self.assertEqual(s.get_shape(), (1, 4))
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, s],
{x: np.array([[1., 1.]]),
m: np.array([[0.1, 0.1, 0.1, 0.1]])})
self.assertEqual(res[0].shape, (0, 0))
self.assertEqual(res[1].shape, (1, 4))
"""Test with tf.nn.rnn
"""
def testGrid2LSTMCellWithRNN(self):
batch_size = 3
input_size = 5
max_length = 6 # unrolled up to this length
num_units = 2
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
cell = grid_rnn_cell.Grid2LSTMCell(num_units=num_units)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, input_size))
]
outputs, state = core_rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
self.assertEqual(state.get_shape(), (batch_size, 8))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape()[0], inp.get_shape()[0])
self.assertEqual(out.get_shape()[1], num_units)
self.assertEqual(out.dtype, inp.dtype)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
input_value = np.ones((batch_size, input_size))
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
for v in values:
self.assertTrue(np.all(np.isfinite(v)))
def testGrid2LSTMCellReLUWithRNN(self):
batch_size = 3
input_size = 5
max_length = 6 # unrolled up to this length
num_units = 2
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
cell = grid_rnn_cell.Grid2LSTMCell(
num_units=num_units, non_recurrent_fn=nn_ops.relu)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, input_size))
]
outputs, state = core_rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
self.assertEqual(state.get_shape(), (batch_size, 4))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape()[0], inp.get_shape()[0])
self.assertEqual(out.get_shape()[1], num_units)
self.assertEqual(out.dtype, inp.dtype)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
input_value = np.ones((batch_size, input_size))
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
for v in values:
self.assertTrue(np.all(np.isfinite(v)))
def testGrid3LSTMCellReLUWithRNN(self):
batch_size = 3
input_size = 5
max_length = 6 # unrolled up to this length
num_units = 2
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
cell = grid_rnn_cell.Grid3LSTMCell(
num_units=num_units, non_recurrent_fn=nn_ops.relu)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, input_size))
]
outputs, state = core_rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
self.assertEqual(state.get_shape(), (batch_size, 8))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape()[0], inp.get_shape()[0])
self.assertEqual(out.get_shape()[1], num_units)
self.assertEqual(out.dtype, inp.dtype)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
input_value = np.ones((batch_size, input_size))
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
for v in values:
self.assertTrue(np.all(np.isfinite(v)))
def testGrid1LSTMCellWithRNN(self):
batch_size = 3
input_size = 5
max_length = 6 # unrolled up to this length
num_units = 2
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
cell = grid_rnn_cell.Grid1LSTMCell(num_units=num_units)
# for 1-LSTM, we only feed the first step
inputs = ([
array_ops.placeholder(
dtypes.float32, shape=(batch_size, input_size))
] + (max_length - 1) * [array_ops.zeros([batch_size, input_size])])
outputs, state = core_rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
self.assertEqual(state.get_shape(), (batch_size, 4))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape(), (3, num_units))
self.assertEqual(out.dtype, inp.dtype)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
input_value = np.ones((batch_size, input_size))
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
for v in values:
self.assertTrue(np.all(np.isfinite(v)))
if __name__ == '__main__':
test.main()
| apache-2.0 |
FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/test/test_sys_setprofile.py | 3 | 11695 | import gc
import pprint
import sys
import unittest
class TestGetProfile(unittest.TestCase):
def setUp(self):
sys.setprofile(None)
def tearDown(self):
sys.setprofile(None)
def test_empty(self):
self.assertIsNone(sys.getprofile())
def test_setget(self):
def fn(*args):
pass
sys.setprofile(fn)
self.assertIs(sys.getprofile(), fn)
class HookWatcher:
def __init__(self):
self.frames = []
self.events = []
def callback(self, frame, event, arg):
if (event == "call"
or event == "return"
or event == "exception"):
self.add_event(event, frame)
def add_event(self, event, frame=None):
"""Add an event to the log."""
if frame is None:
frame = sys._getframe(1)
try:
frameno = self.frames.index(frame)
except ValueError:
frameno = len(self.frames)
self.frames.append(frame)
self.events.append((frameno, event, ident(frame)))
def get_events(self):
"""Remove calls to add_event()."""
disallowed = [ident(self.add_event.__func__), ident(ident)]
self.frames = None
return [item for item in self.events if item[2] not in disallowed]
class ProfileSimulator(HookWatcher):
def __init__(self, testcase):
self.testcase = testcase
self.stack = []
HookWatcher.__init__(self)
def callback(self, frame, event, arg):
# Callback registered with sys.setprofile()/sys.settrace()
self.dispatch[event](self, frame)
def trace_call(self, frame):
self.add_event('call', frame)
self.stack.append(frame)
def trace_return(self, frame):
self.add_event('return', frame)
self.stack.pop()
def trace_exception(self, frame):
self.testcase.fail(
"the profiler should never receive exception events")
def trace_pass(self, frame):
pass
dispatch = {
'call': trace_call,
'exception': trace_exception,
'return': trace_return,
'c_call': trace_pass,
'c_return': trace_pass,
'c_exception': trace_pass,
}
class TestCaseBase(unittest.TestCase):
def check_events(self, callable, expected):
events = capture_events(callable, self.new_watcher())
if events != expected:
self.fail("Expected events:\n%s\nReceived events:\n%s"
% (pprint.pformat(expected), pprint.pformat(events)))
class ProfileHookTestCase(TestCaseBase):
def new_watcher(self):
return HookWatcher()
def test_simple(self):
def f(p):
pass
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_exception(self):
def f(p):
1/0
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_caught_exception(self):
def f(p):
try: 1/0
except: pass
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_caught_nested_exception(self):
def f(p):
try: 1/0
except: pass
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_nested_exception(self):
def f(p):
1/0
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
# This isn't what I expected:
# (0, 'exception', protect_ident),
# I expected this again:
(1, 'return', f_ident),
])
def test_exception_in_except_clause(self):
def f(p):
1/0
def g(p):
try:
f(p)
except:
try: f(p)
except: pass
f_ident = ident(f)
g_ident = ident(g)
self.check_events(g, [(1, 'call', g_ident),
(2, 'call', f_ident),
(2, 'return', f_ident),
(3, 'call', f_ident),
(3, 'return', f_ident),
(1, 'return', g_ident),
])
def test_exception_propagation(self):
def f(p):
1/0
def g(p):
try: f(p)
finally: p.add_event("falling through")
f_ident = ident(f)
g_ident = ident(g)
self.check_events(g, [(1, 'call', g_ident),
(2, 'call', f_ident),
(2, 'return', f_ident),
(1, 'falling through', g_ident),
(1, 'return', g_ident),
])
def test_raise_twice(self):
def f(p):
try: 1/0
except: 1/0
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_raise_reraise(self):
def f(p):
try: 1/0
except: raise
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_raise(self):
def f(p):
raise Exception()
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_distant_exception(self):
def f():
1/0
def g():
f()
def h():
g()
def i():
h()
def j(p):
i()
f_ident = ident(f)
g_ident = ident(g)
h_ident = ident(h)
i_ident = ident(i)
j_ident = ident(j)
self.check_events(j, [(1, 'call', j_ident),
(2, 'call', i_ident),
(3, 'call', h_ident),
(4, 'call', g_ident),
(5, 'call', f_ident),
(5, 'return', f_ident),
(4, 'return', g_ident),
(3, 'return', h_ident),
(2, 'return', i_ident),
(1, 'return', j_ident),
])
def test_generator(self):
def f():
for i in range(2):
yield i
def g(p):
for i in f():
pass
f_ident = ident(f)
g_ident = ident(g)
self.check_events(g, [(1, 'call', g_ident),
# call the iterator twice to generate values
(2, 'call', f_ident),
(2, 'return', f_ident),
(2, 'call', f_ident),
(2, 'return', f_ident),
                              # once more; returns end-of-iteration without
                              # actually raising an exception
(2, 'call', f_ident),
(2, 'return', f_ident),
(1, 'return', g_ident),
])
def test_stop_iteration(self):
def f():
for i in range(2):
yield i
def g(p):
for i in f():
pass
f_ident = ident(f)
g_ident = ident(g)
self.check_events(g, [(1, 'call', g_ident),
# call the iterator twice to generate values
(2, 'call', f_ident),
(2, 'return', f_ident),
(2, 'call', f_ident),
(2, 'return', f_ident),
# once more to hit the raise:
(2, 'call', f_ident),
(2, 'return', f_ident),
(1, 'return', g_ident),
])
class ProfileSimulatorTestCase(TestCaseBase):
def new_watcher(self):
return ProfileSimulator(self)
def test_simple(self):
def f(p):
pass
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_basic_exception(self):
def f(p):
1/0
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_caught_exception(self):
def f(p):
try: 1/0
except: pass
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_distant_exception(self):
def f():
1/0
def g():
f()
def h():
g()
def i():
h()
def j(p):
i()
f_ident = ident(f)
g_ident = ident(g)
h_ident = ident(h)
i_ident = ident(i)
j_ident = ident(j)
self.check_events(j, [(1, 'call', j_ident),
(2, 'call', i_ident),
(3, 'call', h_ident),
(4, 'call', g_ident),
(5, 'call', f_ident),
(5, 'return', f_ident),
(4, 'return', g_ident),
(3, 'return', h_ident),
(2, 'return', i_ident),
(1, 'return', j_ident),
])
# Test an invalid call (bpo-34126)
def test_unbound_method_no_args(self):
def f(p):
dict.get()
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident)])
# Test an invalid call (bpo-34126)
def test_unbound_method_invalid_args(self):
def f(p):
dict.get(print, 42)
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident)])
def ident(function):
if hasattr(function, "f_code"):
code = function.f_code
else:
code = function.__code__
return code.co_firstlineno, code.co_name
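# For example, for a function named "f" defined at line 10, ident() yields
# (10, 'f'); the tests compare these tuples rather than function objects.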
def protect(f, p):
try: f(p)
except: pass
protect_ident = ident(protect)
def capture_events(callable, p=None):
if p is None:
p = HookWatcher()
# Disable the garbage collector. This prevents __del__s from showing up in
# traces.
old_gc = gc.isenabled()
gc.disable()
try:
sys.setprofile(p.callback)
protect(callable, p)
sys.setprofile(None)
finally:
if old_gc:
gc.enable()
return p.get_events()[1:-1]
def show_events(callable):
import pprint
pprint.pprint(capture_events(callable))
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
iCHAIT/Django-Delhi_Metro | mymtero/metro/forms.py | 1 | 1612 | from django import forms
from django.db import connection
from .models import dir
from .models import info
from .models import near1
from .models import near2
from .models import rev1
from .models import rev2
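# Each form below fills its ChoiceField options with a raw SQL query run at
# class-definition time, so the available choices reflect the database state
# at the moment this module is first imported.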
class dirForm(forms.Form):
cursor = connection.cursor()
cursor.execute("select sname,sname from metro_stationinfo")
CHOICES = cursor.fetchall()
source = forms.ChoiceField(choices=CHOICES)
dest = forms.ChoiceField(choices=CHOICES)
class infoForm(forms.Form):
cursor = connection.cursor()
cursor.execute("select sname,sname from metro_stationinfo")
CHOICES = cursor.fetchall()
sname = forms.ChoiceField(choices=CHOICES)
class near1Form(forms.Form):
cursor = connection.cursor()
cursor.execute("select place,place from metro_places")
CHOICES = cursor.fetchall()
place = forms.ChoiceField(choices=CHOICES)
class near2Form(forms.Form):
cursor = connection.cursor()
cursor.execute("select pincode,pincode from metro_stationinfo")
CHOICES = cursor.fetchall()
pin = forms.ChoiceField(choices=CHOICES)
class rev1Form(forms.Form):
cursor = connection.cursor()
cursor.execute("select sname,sname from metro_stationinfo")
CHOICES = cursor.fetchall()
sname = forms.ChoiceField(choices=CHOICES)
class rev2Form(forms.Form):
cursor = connection.cursor()
cursor.execute("select sname,sname from metro_stationinfo")
CHOICES = cursor.fetchall()
sname = forms.ChoiceField(choices=CHOICES)
title = forms.CharField()
bodytext = forms.CharField(widget=forms.Textarea)
author = forms.CharField()
| gpl-3.0 |
nphilipp/rolekit | config/roles/memcache/role.py | 2 | 12347 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Red Hat, Inc.
#
# Authors:
# Stephen Gallagher <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This role provides a memory object caching service
# It is deployed inside of a Docker container
import os
import dbus.service
from rolekit.server.rolebase import RoleBase
from rolekit.server.rolebase import RoleDeploymentValues
from rolekit import async
from rolekit.dbus_utils import SystemdJobHandler
from rolekit.config import SYSTEMD_UNITS
from rolekit.errors import COMMAND_FAILED, INVALID_VALUE, INVALID_PROPERTY
from rolekit.errors import RolekitError
from rolekit.logger import log
from rolekit.server.io.systemd import enable_units
from rolekit.server.io.systemd import SystemdContainerServiceUnit
MEMCACHED_DOCKER_IMAGE = "fedora/memcached"
MEMCACHED_ENVIRONMENT_FILE = "/etc/sysconfig/memcached"
MEMCACHED_DEFAULT_PORT = 11211
MiB_SIZE = 1024 * 1024
GiB_SIZE = MiB_SIZE * 1024
class Role(RoleBase):
# Use _DEFAULTS from RoleBase and overwrite settings or add new if needed.
# Without overwrites or new settings, this can be omitted.
_DEFAULTS = dict(RoleBase._DEFAULTS, **{
# All roles must provide the following four options:
# version, services, packages and firewall
# Version of the *role* (not the services it provides)
"version": 1,
# A list of systemd services that must be started with
# this role.
"services": [ ],
# A list of packages that must be installed by the
# package manager to be able to deploy and run this
# role. These will be installed before the deploy()
# routine is invoked, so it can contain packages
# needed for deployment as well as runtime.
"packages": [ "memcached",
"docker",
"python3-docker-py",
"python3-psutil" ],
# The ports or "services" that need to be available
# in the firewall.
# These will be opened automatically as part of
# deployment and associated with the default
# firewall zone of the system.
"firewall": { "ports": [ '%s/tcp' % MEMCACHED_DEFAULT_PORT,
'%s/udp' % MEMCACHED_DEFAULT_PORT],
"services": [ ] },
# Role-specific settings belong here, with their defaults
# Roles that have no default should be specified here, with
# 'None' as their default
# How many megabytes to allocate for the cache
# If this is unspecified, the default will be 1 GB or
# 25% of the total RAM on the system, whichever is smaller
"cache_size": GiB_SIZE / MiB_SIZE,
# How many concurrent connections are allowed?
# Default: 1024 (from upstream recommendations)
"connections": 1024,
# How many threads should memcache run?
# Upstream does not recommend changing this value from the
# default.
"threads": 4,
})
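    # Illustrative example (not part of the role definition): a deployment
    # request could override these defaults with a settings dictionary such as
    #   { "cache_size": 512, "connections": 2048, "threads": 4 }
    # and any omitted key keeps the default declared above.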
# Maximum number of instances of this role that can be instantiated
# on a single host.
# Until we work out how to set multiple firewall ports, this will
# provide a single instance.
_MAX_INSTANCES = 1
# Initialize role
def __init__(self, name, directory, *args, **kwargs):
# Get the default initialization from the RoleBase class
# Always use this.
super(Role, self).__init__(name, directory, *args, **kwargs)
# Role-specific initialization goes here, if any
# Deploy code
def do_deploy_async(self, values, sender=None):
log.debug9("TRACE: do_deploy_async")
# Run whatever series of actions are needed to deploy
# this role in a meaningful way.
#
import docker
# Get the default cache size
# Find out how much RAM is available on the system
if 'cache_size' not in values:
# Do a late import of psutil. This will only get
# used during a deployment, so we don't need to
# have it as a dependency for rolekit itself
import psutil
# Get the total number of bytes in local system memory
total_ram = psutil.virtual_memory().total
# If 25% of the available memory is less than 1GB, use
# that for the cache.
if total_ram / 4 < GiB_SIZE:
# Set cache_size in MiB
values['cache_size'] = int(total_ram / 4 / MiB_SIZE)
else:
# Cap the default size at 1 GB in MiB
values['cache_size'] = int(GiB_SIZE / MiB_SIZE)
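            # Worked example on a hypothetical host: with 2 GiB of RAM,
            # total_ram / 4 is 512 MiB (below 1 GiB), so cache_size becomes
            # 512; with 16 GiB of RAM it is capped at 1024 (1 GiB in MiB).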
# Set defaults
if "connections" not in values:
values["connections"] = self._DEFAULTS["connections"]
if "threads" not in values:
values["threads"] = self._DEFAULTS["threads"]
# Create a container for memcached and launch that
log.debug2("Enabling the Docker container manager")
# Enable and start the docker service
enable_units(['docker.service'])
log.debug2("Starting the Docker container manager")
with SystemdJobHandler() as job_handler:
job_path = job_handler.manager.StartUnit("docker.service", "replace")
job_handler.register_job(job_path)
job_results = yield job_handler.all_jobs_done_future()
if any([x for x in job_results.values() if x not in ("skipped", "done")]):
details = ", ".join(["%s: %s" % item for item in job_results.items()])
raise RolekitError(COMMAND_FAILED, "Starting docker.service failed: %s" % details)
log.debug2("Pulling %s image from Docker Hub" % MEMCACHED_DOCKER_IMAGE)
dockerclient = docker.Client(base_url=docker.utils.utils.DEFAULT_UNIX_SOCKET,
version='auto')
# First, pull down the latest version of the memcached container
dockerclient.pull(MEMCACHED_DOCKER_IMAGE, tag="latest")
log.debug2("Creating systemd service unit")
# Generate a systemd service unit for this container
container_unit = SystemdContainerServiceUnit(
image_name = MEMCACHED_DOCKER_IMAGE,
container_name = "memcached_%s" % self.get_name(),
desc="memcached docker container - %s" % self.get_name(),
env = {
"MEMCACHED_CACHE_SIZE": str(values['cache_size']),
"MEMCACHED_CONNECTIONS": str(values['connections']),
"MEMCACHED_THREADS": str(values['threads'])
},
ports = ("{0}:{0}/tcp".format(MEMCACHED_DEFAULT_PORT),
"{0}:{0}/udp".format(MEMCACHED_DEFAULT_PORT))
)
container_unit.write()
# Make systemd load this new unit file
log.debug2("Running systemd daemon-reload")
with SystemdJobHandler() as job_handler:
job_handler.manager.Reload()
# Return the target information
target = RoleDeploymentValues(self.get_type(), self.get_name(),
"Memory Cache")
target.add_required_units(['memcached_%s.service' % self.get_name()])
log.debug9("TRACE: exiting do_deploy_async")
yield target
# Redeploy code
def do_redeploy_async(self, values, sender=None):
# Run whatever series of actions are needed to update the
# role with a new high-level configuration.
# Note: This should be configuration of the role itself,
# not configuration of data held by the role. That should
# be managed by the standard tools for interacting with
# the role.
#
# For this role, we can just run the decommission routine
# and then the deploy routine again.
yield async.call_future(self.do_decommission_async(values, sender))
# Invoke the deploy routine again
# Discard the target return; we don't need it
yield async.call_future(self.do_deploy_async(values, sender))
# Success
yield None
# Decommission code
def do_decommission_async(self, force=False, sender=None):
# Remove the container unit
# Nothing else needs to happen here; the image is
# removed as part of the role stop() operation
path = "%s/memcached_%s.service" % (SYSTEMD_UNITS, self.get_name())
try:
os.unlink(path)
except FileNotFoundError:
# If the file wasn't there, this is probably part of a
# redeploy fixing a failed initial deployment.
pass
yield None
# Update code
def do_update_async(self, sender=None):
# If this role requires any special processing during an
# update (other than simply updating the packages),
# run them here.
#
# Always yield None at the end or return a RolekitError exception
# yield None
# Remove this line for real roles
raise NotImplementedError()
# Check own properties
def do_check_property(self, prop, value):
# All options passed to the role must be validated
# At minimum, this routine should call one of the
# following routines for all known settings:
# * self.check_type_bool(value)
# * self.check_type_dict(value)
# * self.check_type_int(value)
# * self.check_type_list(value)
# * self.check_type_string(value)
# * self.check_type_string_list(value)
# Each of these routines will return True if
# the value is appropriate or raise a
# RolekitError if it is not.
# If you wish to add your own checks, this
# function must return as follows:
# * True: The value passes all validation
# * False: The setting was unknown to this role
# * RolekitError: The value failed to pass validation
# In the case of RolekitError, it is recommended to
# provide an explanation of the failure as the msg
# field of the exception.
# Example:
# raise RolekitError(INVALID_VALUE,
# "{0} must be at least eight characters"
# .format(prop))
if prop in [ "cache_size" ]:
import psutil
self.check_type_int(value)
if value > psutil.virtual_memory().total / MiB_SIZE:
raise RolekitError(INVALID_VALUE,
"Cache size exceeds physical memory")
return True
elif prop in [ "connections" ]:
return self.check_type_int(value)
elif prop in [ "threads" ]:
self.check_type_int(value)
# Up to four threads should be safe on any platform
# More than that should be limited by the available CPUs
if value <= 4:
return True
elif value > os.cpu_count():
raise RolekitError(INVALID_VALUE,
"Number of threads exceeds available CPUs")
return True
# We didn't recognize this argument
return False
@staticmethod
def do_get_dbus_property(x, prop):
# This method tells rolekit what D-BUS type to use for each
# of this role's custom settings.
if prop in [ "connections",
"threads" ]:
return dbus.Int32(x.get_property(x, prop))
elif prop in [ "cache_size" ]:
return dbus.Int64(x.get_property(x, prop))
# Lastly, always fall through to INVALID_PROPERTY if
# the setting is unknown.
raise RolekitError(INVALID_PROPERTY, prop)
| gpl-2.0 |
slavonic/cu-tex | hyphenation/specs.py | 1 | 6324 | '''
Missed (weighted): 1463 (11.452%)
False (weighted): 349 (2.732%)
Missed (weighted): 1698 (12.634%)
False (weighted): 722 (5.372%)
Missed (weighted): 1869 (13.171%)
False (weighted): 495 (3.488%)
SPECS = [
{ 'range': '1-2', 'selector': '1:5:30' },
{ 'range': '1-2', 'selector': '1:2:5' },
{ 'range': '1-3', 'selector': '1:3:10' },
{ 'range': '1-3', 'selector': '1:1:2' },
]
'''
'''
Missed (weighted): 1692 (13.245%)
False (weighted): 364 (2.849%)
Missed (weighted): 1910 (14.211%)
False (weighted): 784 (5.833%)
Missed (weighted): 2268 (15.983%)
False (weighted): 493 (3.474%)
SPECS = [
{ 'range': '1-3', 'selector': '1:6:20' },
{ 'range': '1-3', 'selector': '1:2:5' },
{ 'range': '3-4', 'selector': '1:3:10' },
{ 'range': '3-4', 'selector': '1:1:2' },
]
'''
'''
Missed (weighted): 1571 (12.297%)
False (weighted): 429 (3.358%)
Missed (weighted): 1704 (12.679%)
False (weighted): 722 (5.372%)
Missed (weighted): 1958 (13.798%)
False (weighted): 510 (3.594%)
SPECS = [
{ 'range': '1-3', 'selector': '1:5:50' },
{ 'range': '1-3', 'selector': '1:2:5' },
{ 'range': '2-4', 'selector': '1:3:10' },
{ 'range': '2-4', 'selector': '1:1:2' },
]
'''
'''
Missed (weighted): 1424 (11.147%)
False (weighted): 407 (3.186%)
Missed (weighted): 1835 (13.653%)
False (weighted): 731 (5.439%)
Missed (weighted): 1967 (13.862%)
False (weighted): 470 (3.312%)
SPECS = [
{ 'range': '1-2', 'selector': '1:5:30' },
{ 'range': '1-2', 'selector': '1:2:5' },
{ 'range': '2-4', 'selector': '1:3:10' },
{ 'range': '2-4', 'selector': '1:1:2' },
]
'''
'''
Missed (weighted): 1478 (11.569%)
False (weighted): 354 (2.771%)
Missed (weighted): 1673 (12.448%)
False (weighted): 701 (5.216%)
Missed (weighted): 1870 (13.178%)
False (weighted): 508 (3.580%)
SPECS = [
{ 'range': '1-2', 'selector': '1:5:40' },
{ 'range': '1-2', 'selector': '1:2:5' },
{ 'range': '1-3', 'selector': '1:3:10' },
{ 'range': '1-3', 'selector': '1:1:2' },
]
'''
'''
Missed (weighted): 1226 (9.597%)
False (weighted): 189 (1.479%)
Missed (weighted): 1285 (9.561%)
False (weighted): 370 (2.753%)
Missed (weighted): 1594 (11.233%)
False (weighted): 420 (2.960%)
SPECS = [
{ 'range': '1-3', 'selector': '1:20:1' },
{ 'range': '1-3', 'selector': '1:20:1' },
]
SPECS_X = [
{ 'range': '1-3', 'selector': '1:1:2' },
{ 'range': '1-3', 'selector': '1:10:1' },
]
'''
'''
Missed (weighted): 1204 (9.425%)
False (weighted): 256 (2.004%)
Missed (weighted): 1153 (8.579%)
False (weighted): 429 (3.192%)
Missed (weighted): 1532 (10.796%)
False (weighted): 518 (3.650%)
SPECS = [
{ 'range': '1-3', 'selector': '1:20:1' },
{ 'range': '1-3', 'selector': '1:20:1' },
{ 'range': '1-5', 'selector': '1:20:1' },
{ 'range': '1-5', 'selector': '1:20:1' },
]
'''
'''
Missed (weighted): 1105 (8.650%)
False (weighted): 234 (1.832%)
Missed (weighted): 1020 (7.589%)
False (weighted): 497 (3.698%)
Missed (weighted): 1438 (10.134%)
False (weighted): 511 (3.601%)
SPECS = [
{ 'range': '1-5', 'selector': '1:20:1' },
{ 'range': '1-4', 'selector': '1:20:1' },
]
SPECS_X = [
{ 'range': '1-4', 'selector': '1:1:2' },
{ 'range': '1-3', 'selector': '1:10:1' },
]
'''
'''
Performance of ch-ab on w3ac
Missed (weighted): 958 (7.499%)
False (weighted): 304 (2.380%)
Performance of ch-bc on w3aa
Missed (weighted): 922 (6.860%)
False (weighted): 562 (4.182%)
Performance of ch-ac on w3ab
Missed (weighted): 1319 (9.295%)
False (weighted): 554 (3.904%)
SPECS = [
{ 'range': '2-5', 'selector': '1:20:1' },
{ 'range': '2-4', 'selector': '1:20:1' },
]
SPECS_X = [
{ 'range': '2-4', 'selector': '1:1:2' },
{ 'range': '2-3', 'selector': '1:10:1' },
]
'''
'''
Performance of ch-ab on w3ac
Missed (weighted): 984 (7.703%)
False (weighted): 283 (2.215%)
Performance of ch-bc on w3aa
Missed (weighted): 944 (7.024%)
False (weighted): 512 (3.810%)
Performance of ch-ac on w3ab
Missed (weighted): 1373 (9.676%)
False (weighted): 525 (3.700%)
SPECS = [
{ 'range': '2-4', 'selector': '1:20:1' },
{ 'range': '2-4', 'selector': '1:20:1' },
]
SPECS_X = [
{ 'range': '2-4', 'selector': '1:1:2' },
{ 'range': '2-3', 'selector': '1:10:1' },
]
'''
'''
Performance of ch-ab on w3ac
Missed (weighted): 990 (7.750%)
False (weighted): 269 (2.106%)
Performance of ch-bc on w3aa
Missed (weighted): 949 (7.061%)
False (weighted): 473 (3.519%)
Performance of ch-ac on w3ab
Missed (weighted): 1373 (9.676%)
False (weighted): 506 (3.566%)
SPECS = [
{ 'range': '2-4', 'selector': '1:20:1' },
{ 'range': '2-3', 'selector': '1:10:1' },
]
SPECS_X = [
{ 'range': '2-4', 'selector': '1:1:2' },
{ 'range': '1-3', 'selector': '1:10:1' },
]
'''
'''Performance of ch-ab on w3ac
Missed (weighted): 1178 (9.221%)
False (weighted): 224 (1.753%)
Performance of ch-bc on w3aa
Missed (weighted): 1152 (8.571%)
False (weighted): 429 (3.192%)
Performance of ch-ac on w3ab
Missed (weighted): 1576 (11.106%)
False (weighted): 468 (3.298%)
SPECS = [
{ 'range': '2-4', 'selector': '1:20:1' },
{ 'range': '2-3', 'selector': '1:10:1' },
]
SPECS_X = [
{ 'range': '2-4', 'selector': '1:1:2' },
{ 'range': '1-3', 'selector': '1:5:1' },
]
'''
'''
Performance of ch-ab on w3ac
Missed (weighted): 997 (7.804%)
False (weighted): 269 (2.106%)
Performance of ch-bc on w3aa
Missed (weighted): 952 (7.083%)
False (weighted): 471 (3.504%)
Performance of ch-ac on w3ab
Missed (weighted): 1422 (10.021%)
False (weighted): 519 (3.658%)
SPECS = [
{ 'range': '2-4', 'selector': '1:20:1' },
{ 'range': '2-3', 'selector': '1:10:1' },
]
SPECS_X = [
{ 'range': '2-4', 'selector': '1:1:1' },
{ 'range': '1-3', 'selector': '1:10:1' },
]
'''
'''
Performance of ch-ab on w3ac
Missed (weighted): 919 (7.194%)
False (weighted): 324 (2.536%)
Performance of ch-bc on w3aa
Missed (weighted): 911 (6.778%)
False (weighted): 480 (3.571%)
Performance of ch-ac on w3ab
Missed (weighted): 1384 (9.753%)
False (weighted): 538 (3.791%)
SPECS = [
{ 'range': '2-4', 'selector': '1:20:1' },
{ 'range': '2-3', 'selector': '1:10:1' },
]
SPECS = [
{ 'range': '2-4', 'selector': '1:1:1' },
{ 'range': '1-3', 'selector': '1:20:1' },
]
'''
SPECS = [
{ 'range': '2-4', 'selector': '1:20:1' },
{ 'range': '2-3', 'selector': '1:10:1' },
]
| mit |
DYWCn/mxonline | MXOnline/extra_apps/xadmin/views/form.py | 6 | 5271 | import copy
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.db import models, transaction
from django.forms.models import modelform_factory
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.template import loader
from django.utils.translation import ugettext as _
from xadmin import widgets
from xadmin.layout import FormHelper, Layout, Fieldset, TabHolder, Container, Column, Col, Field
from xadmin.util import unquote
from xadmin.views.detail import DetailAdminUtil
from base import CommAdminView, filter_hook, csrf_protect_m
class FormAdminView(CommAdminView):
form = forms.ModelForm
title = None
readonly_fields = ()
form_template = 'xadmin/views/form.html'
form_layout = None
def init_request(self, *args, **kwargs):
# comm method for both get and post
self.prepare_form()
@filter_hook
def prepare_form(self):
self.view_form = self.form
@filter_hook
def instance_forms(self):
self.form_obj = self.view_form(**self.get_form_datas())
def setup_forms(self):
helper = self.get_form_helper()
if helper:
self.form_obj.helper = helper
@filter_hook
def valid_forms(self):
return self.form_obj.is_valid()
@filter_hook
def get_form_layout(self):
layout = copy.deepcopy(self.form_layout)
fields = self.form_obj.fields.keys()
if layout is None:
layout = Layout(Container(Col('full',
Fieldset("", *fields, css_class="unsort no_title"), horizontal=True, span=12)
))
elif type(layout) in (list, tuple) and len(layout) > 0:
if isinstance(layout[0], Column):
fs = layout
elif isinstance(layout[0], (Fieldset, TabHolder)):
fs = (Col('full', *layout, horizontal=True, span=12),)
else:
fs = (Col('full', Fieldset("", *layout, css_class="unsort no_title"), horizontal=True, span=12),)
layout = Layout(Container(*fs))
rendered_fields = [i[1] for i in layout.get_field_names()]
container = layout[0].fields
other_fieldset = Fieldset(_(u'Other Fields'), *[f for f in fields if f not in rendered_fields])
if len(other_fieldset.fields):
if len(container) and isinstance(container[0], Column):
container[0].fields.append(other_fieldset)
else:
container.append(other_fieldset)
return layout
@filter_hook
def get_form_helper(self):
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
helper.add_layout(self.get_form_layout())
return helper
@filter_hook
def save_forms(self):
pass
@csrf_protect_m
@filter_hook
def get(self, request, *args, **kwargs):
self.instance_forms()
self.setup_forms()
return self.get_response()
@csrf_protect_m
@transaction.atomic
@filter_hook
def post(self, request, *args, **kwargs):
self.instance_forms()
self.setup_forms()
if self.valid_forms():
self.save_forms()
response = self.post_response()
if isinstance(response, basestring):
return HttpResponseRedirect(response)
else:
return response
return self.get_response()
@filter_hook
def get_context(self):
context = super(FormAdminView, self).get_context()
context.update({
'form': self.form_obj,
'title': self.title,
})
return context
@filter_hook
def get_media(self):
return super(FormAdminView, self).get_media() + self.form_obj.media + \
self.vendor('xadmin.page.form.js', 'xadmin.form.css')
def get_initial_data(self):
return {}
@filter_hook
def get_form_datas(self):
data = {'initial': self.get_initial_data()}
if self.request_method == 'get':
data['initial'].update(self.request.GET)
else:
data.update({'data': self.request.POST, 'files': self.request.FILES})
return data
@filter_hook
def get_breadcrumb(self):
bcs = super(FormAdminView, self).get_breadcrumb()
bcs.append({'title': self.title})
return bcs
@filter_hook
def get_response(self):
context = self.get_context()
context.update(self.kwargs or {})
return TemplateResponse(
self.request, self.form_template,
context)
@filter_hook
def post_response(self):
request = self.request
msg = _('The %s was changed successfully.') % self.title
self.message_user(msg, 'success')
if "_redirect" in request.GET:
return request.GET["_redirect"]
else:
return self.get_redirect_url()
@filter_hook
def get_redirect_url(self):
return self.get_admin_url('index')
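# Illustrative sketch only, not shipped with xadmin: a concrete page is
# normally built by subclassing FormAdminView, pointing `form` at a real
# Form class and overriding save_forms(). The form, field and view names
# below are hypothetical.
class ExampleSettingsForm(forms.Form):
    site_name = forms.CharField(max_length=100)
class ExampleSettingsView(FormAdminView):
    form = ExampleSettingsForm
    title = 'Example Settings'
    @filter_hook
    def save_forms(self):
        # post() validates self.form_obj before this hook runs, so
        # cleaned_data is available here.
        site_name = self.form_obj.cleaned_data['site_name']
        # Persist `site_name` wherever the project stores it (hypothetical).
        return site_name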
| mit |
ChameleonOS/android_kernel_amazon_bowser-common | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
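# Illustrative sketch, not part of the original script: report scripts
# typically strip the PRIVATE/CLOCK_REALTIME flag bits with FUTEX_CMD_MASK
# before comparing against the base commands. The helper below is
# hypothetical.
def is_futex_wait(op):
    return (op & FUTEX_CMD_MASK) == FUTEX_WAIT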
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
avanov/django | tests/lookup/tests.py | 153 | 37208 | from __future__ import unicode_literals
from datetime import datetime
from operator import attrgetter
from unittest import skipUnless
from django.core.exceptions import FieldError
from django.db import connection
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import Article, Author, Game, MyISAMArticle, Player, Season, Tag
class LookupTests(TestCase):
def setUp(self):
# Create a few Authors.
self.au1 = Author(name='Author 1')
self.au1.save()
self.au2 = Author(name='Author 2')
self.au2.save()
# Create a couple of Articles.
self.a1 = Article(headline='Article 1', pub_date=datetime(2005, 7, 26), author=self.au1)
self.a1.save()
self.a2 = Article(headline='Article 2', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a2.save()
self.a3 = Article(headline='Article 3', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a3.save()
self.a4 = Article(headline='Article 4', pub_date=datetime(2005, 7, 28), author=self.au1)
self.a4.save()
self.a5 = Article(headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=self.au2)
self.a5.save()
self.a6 = Article(headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=self.au2)
self.a6.save()
self.a7 = Article(headline='Article 7', pub_date=datetime(2005, 7, 27), author=self.au2)
self.a7.save()
# Create a few Tags.
self.t1 = Tag(name='Tag 1')
self.t1.save()
self.t1.articles.add(self.a1, self.a2, self.a3)
self.t2 = Tag(name='Tag 2')
self.t2.save()
self.t2.articles.add(self.a3, self.a4, self.a5)
self.t3 = Tag(name='Tag 3')
self.t3.save()
self.t3.articles.add(self.a5, self.a6, self.a7)
def test_exists(self):
        # We can use .exists() to check that there are some articles.
self.assertTrue(Article.objects.exists())
for a in Article.objects.all():
a.delete()
# There should be none now!
self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
# Integer value can be queried using string
self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)),
['<Article: Article 1>'])
@skipUnlessDBFeature('supports_date_lookup_using_string')
def test_lookup_date_as_str(self):
# A date lookup can be performed using a string search
self.assertQuerysetEqual(Article.objects.filter(pub_date__startswith='2005'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
def test_iterator(self):
# Each QuerySet gets iterator(), which is a generator that "lazily"
# returns results using database-level iteration.
self.assertQuerysetEqual(Article.objects.iterator(),
[
'Article 5',
'Article 6',
'Article 4',
'Article 2',
'Article 3',
'Article 7',
'Article 1',
],
transform=attrgetter('headline'))
# iterator() can be used on any QuerySet.
self.assertQuerysetEqual(
Article.objects.filter(headline__endswith='4').iterator(),
['Article 4'],
transform=attrgetter('headline'))
def test_count(self):
# count() returns the number of objects matching search criteria.
self.assertEqual(Article.objects.count(), 7)
self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3)
self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0)
# count() should respect sliced query sets.
articles = Article.objects.all()
self.assertEqual(articles.count(), 7)
self.assertEqual(articles[:4].count(), 4)
self.assertEqual(articles[1:100].count(), 6)
self.assertEqual(articles[10:100].count(), 0)
# Date and date/time lookups can also be done with strings.
self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3)
def test_in_bulk(self):
# in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
self.assertEqual(arts[self.a1.id], self.a1)
self.assertEqual(arts[self.a2.id], self.a2)
self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk([1000]), {})
self.assertEqual(Article.objects.in_bulk([]), {})
self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1})
self.assertEqual(Article.objects.in_bulk(iter([])), {})
self.assertRaises(TypeError, Article.objects.in_bulk)
self.assertRaises(TypeError, Article.objects.in_bulk, headline__startswith='Blah')
def test_values(self):
# values() returns a list of dictionaries instead of object instances --
# and you can specify which fields you want to retrieve.
identity = lambda x: x
self.assertQuerysetEqual(Article.objects.values('headline'),
[
{'headline': 'Article 5'},
{'headline': 'Article 6'},
{'headline': 'Article 4'},
{'headline': 'Article 2'},
{'headline': 'Article 3'},
{'headline': 'Article 7'},
{'headline': 'Article 1'},
],
transform=identity)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'),
[{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}],
transform=identity)
self.assertQuerysetEqual(Article.objects.values('id', 'headline'),
[
{'id': self.a5.id, 'headline': 'Article 5'},
{'id': self.a6.id, 'headline': 'Article 6'},
{'id': self.a4.id, 'headline': 'Article 4'},
{'id': self.a2.id, 'headline': 'Article 2'},
{'id': self.a3.id, 'headline': 'Article 3'},
{'id': self.a7.id, 'headline': 'Article 7'},
{'id': self.a1.id, 'headline': 'Article 1'},
],
transform=identity)
# You can use values() with iterator() for memory savings,
# because iterator() uses database-level iteration.
self.assertQuerysetEqual(Article.objects.values('id', 'headline').iterator(),
[
{'headline': 'Article 5', 'id': self.a5.id},
{'headline': 'Article 6', 'id': self.a6.id},
{'headline': 'Article 4', 'id': self.a4.id},
{'headline': 'Article 2', 'id': self.a2.id},
{'headline': 'Article 3', 'id': self.a3.id},
{'headline': 'Article 7', 'id': self.a7.id},
{'headline': 'Article 1', 'id': self.a1.id},
],
transform=identity)
# The values() method works with "extra" fields specified in extra(select).
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'),
[
{'id': self.a5.id, 'id_plus_one': self.a5.id + 1},
{'id': self.a6.id, 'id_plus_one': self.a6.id + 1},
{'id': self.a4.id, 'id_plus_one': self.a4.id + 1},
{'id': self.a2.id, 'id_plus_one': self.a2.id + 1},
{'id': self.a3.id, 'id_plus_one': self.a3.id + 1},
{'id': self.a7.id, 'id_plus_one': self.a7.id + 1},
{'id': self.a1.id, 'id_plus_one': self.a1.id + 1},
],
transform=identity)
data = {
'id_plus_one': 'id+1',
'id_plus_two': 'id+2',
'id_plus_three': 'id+3',
'id_plus_four': 'id+4',
'id_plus_five': 'id+5',
'id_plus_six': 'id+6',
'id_plus_seven': 'id+7',
'id_plus_eight': 'id+8',
}
self.assertQuerysetEqual(
Article.objects.filter(id=self.a1.id).extra(select=data).values(*data.keys()),
[{
'id_plus_one': self.a1.id + 1,
'id_plus_two': self.a1.id + 2,
'id_plus_three': self.a1.id + 3,
'id_plus_four': self.a1.id + 4,
'id_plus_five': self.a1.id + 5,
'id_plus_six': self.a1.id + 6,
'id_plus_seven': self.a1.id + 7,
'id_plus_eight': self.a1.id + 8,
}], transform=identity)
# You can specify fields from forward and reverse relations, just like filter().
self.assertQuerysetEqual(
Article.objects.values('headline', 'author__name'),
[
{'headline': self.a5.headline, 'author__name': self.au2.name},
{'headline': self.a6.headline, 'author__name': self.au2.name},
{'headline': self.a4.headline, 'author__name': self.au1.name},
{'headline': self.a2.headline, 'author__name': self.au1.name},
{'headline': self.a3.headline, 'author__name': self.au1.name},
{'headline': self.a7.headline, 'author__name': self.au2.name},
{'headline': self.a1.headline, 'author__name': self.au1.name},
], transform=identity)
self.assertQuerysetEqual(
Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'),
[
{'name': self.au1.name, 'article__headline': self.a1.headline},
{'name': self.au1.name, 'article__headline': self.a2.headline},
{'name': self.au1.name, 'article__headline': self.a3.headline},
{'name': self.au1.name, 'article__headline': self.a4.headline},
{'name': self.au2.name, 'article__headline': self.a5.headline},
{'name': self.au2.name, 'article__headline': self.a6.headline},
{'name': self.au2.name, 'article__headline': self.a7.headline},
], transform=identity)
self.assertQuerysetEqual(
Author.objects.values('name', 'article__headline', 'article__tag__name').order_by('name', 'article__headline', 'article__tag__name'),
[
{'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name},
{'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name},
], transform=identity)
        # However, a FieldError will be raised if you specify a non-existent
        # field name in values() (a field that is neither in the model nor in
        # extra(select)).
self.assertRaises(FieldError,
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values,
'id', 'id_plus_two')
# If you don't specify field names to values(), all are returned.
self.assertQuerysetEqual(Article.objects.filter(id=self.a5.id).values(),
[{
'id': self.a5.id,
'author_id': self.au2.id,
'headline': 'Article 5',
'pub_date': datetime(2005, 8, 1, 9, 0)
}], transform=identity)
def test_values_list(self):
# values_list() is similar to values(), except that the results are
# returned as a list of tuples, rather than a list of dictionaries.
# Within each tuple, the order of the elements is the same as the order
# of fields in the values_list() call.
identity = lambda x: x
self.assertQuerysetEqual(Article.objects.values_list('headline'),
[
('Article 5',),
('Article 6',),
('Article 4',),
('Article 2',),
('Article 3',),
('Article 7',),
('Article 1',),
], transform=identity)
self.assertQuerysetEqual(Article.objects.values_list('id').order_by('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
transform=identity)
self.assertQuerysetEqual(
Article.objects.values_list('id', flat=True).order_by('id'),
[self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id],
transform=identity)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'})
.order_by('id').values_list('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
transform=identity)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'})
.order_by('id').values_list('id_plus_one', 'id'),
[
(self.a1.id + 1, self.a1.id),
(self.a2.id + 1, self.a2.id),
(self.a3.id + 1, self.a3.id),
(self.a4.id + 1, self.a4.id),
(self.a5.id + 1, self.a5.id),
(self.a6.id + 1, self.a6.id),
(self.a7.id + 1, self.a7.id)
],
transform=identity)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'})
.order_by('id').values_list('id', 'id_plus_one'),
[
(self.a1.id, self.a1.id + 1),
(self.a2.id, self.a2.id + 1),
(self.a3.id, self.a3.id + 1),
(self.a4.id, self.a4.id + 1),
(self.a5.id, self.a5.id + 1),
(self.a6.id, self.a6.id + 1),
(self.a7.id, self.a7.id + 1)
],
transform=identity)
self.assertQuerysetEqual(
Author.objects.values_list('name', 'article__headline', 'article__tag__name').order_by('name', 'article__headline', 'article__tag__name'),
[
(self.au1.name, self.a1.headline, self.t1.name),
(self.au1.name, self.a2.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t2.name),
(self.au1.name, self.a4.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t3.name),
(self.au2.name, self.a6.headline, self.t3.name),
(self.au2.name, self.a7.headline, self.t3.name),
], transform=identity)
self.assertRaises(TypeError, Article.objects.values_list, 'id', 'headline', flat=True)
def test_get_next_previous_by(self):
# Every DateField and DateTimeField creates get_next_by_FOO() and
# get_previous_by_FOO() methods. In the case of identical date values,
# these methods will use the ID as a fallback check. This guarantees
# that no records are skipped or duplicated.
self.assertEqual(repr(self.a1.get_next_by_pub_date()),
'<Article: Article 2>')
self.assertEqual(repr(self.a2.get_next_by_pub_date()),
'<Article: Article 3>')
self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')),
'<Article: Article 6>')
self.assertEqual(repr(self.a3.get_next_by_pub_date()),
'<Article: Article 7>')
self.assertEqual(repr(self.a4.get_next_by_pub_date()),
'<Article: Article 6>')
self.assertRaises(Article.DoesNotExist, self.a5.get_next_by_pub_date)
self.assertEqual(repr(self.a6.get_next_by_pub_date()),
'<Article: Article 5>')
self.assertEqual(repr(self.a7.get_next_by_pub_date()),
'<Article: Article 4>')
self.assertEqual(repr(self.a7.get_previous_by_pub_date()),
'<Article: Article 3>')
self.assertEqual(repr(self.a6.get_previous_by_pub_date()),
'<Article: Article 4>')
self.assertEqual(repr(self.a5.get_previous_by_pub_date()),
'<Article: Article 6>')
self.assertEqual(repr(self.a4.get_previous_by_pub_date()),
'<Article: Article 7>')
self.assertEqual(repr(self.a3.get_previous_by_pub_date()),
'<Article: Article 2>')
self.assertEqual(repr(self.a2.get_previous_by_pub_date()),
'<Article: Article 1>')
def test_escaping(self):
# Underscores, percent signs and backslashes have special meaning in the
# underlying SQL code, but Django handles the quoting of them automatically.
a8 = Article(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
a8.save()
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article_'),
['<Article: Article_ with underscore>'])
a9 = Article(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
a9.save()
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article%'),
['<Article: Article% with percent sign>'])
a10 = Article(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
a10.save()
self.assertQuerysetEqual(Article.objects.filter(headline__contains='\\'),
['<Article: Article with \ backslash>'])
def test_exclude(self):
Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
# exclude() is the opposite of filter() when doing lookups:
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.exclude(headline__startswith="Article_"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.exclude(headline="Article 7"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 1>',
])
def test_none(self):
# none() returns a QuerySet that behaves like any other QuerySet object
self.assertQuerysetEqual(Article.objects.none(), [])
self.assertQuerysetEqual(
Article.objects.none().filter(headline__startswith='Article'), [])
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article').none(), [])
self.assertEqual(Article.objects.none().count(), 0)
self.assertEqual(
Article.objects.none().update(headline="This should not take effect"), 0)
self.assertQuerysetEqual(
[article for article in Article.objects.none().iterator()],
[])
def test_in(self):
# using __in with an empty list should return an empty query set
self.assertQuerysetEqual(Article.objects.filter(id__in=[]), [])
self.assertQuerysetEqual(Article.objects.exclude(id__in=[]),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
def test_error_messages(self):
# Programming errors are pointed out with nice error messages
try:
Article.objects.filter(pub_date_year='2005').count()
self.fail('FieldError not raised')
except FieldError as ex:
self.assertEqual(str(ex), "Cannot resolve keyword 'pub_date_year' "
"into field. Choices are: author, author_id, headline, "
"id, pub_date, tag")
try:
Article.objects.filter(headline__starts='Article')
self.fail('FieldError not raised')
except FieldError as ex:
self.assertEqual(
str(ex), "Unsupported lookup 'starts' for CharField "
"or join on the field not permitted.")
def test_regex(self):
# Create some articles with a bit more interesting headlines for testing field lookups:
for a in Article.objects.all():
a.delete()
now = datetime.now()
a1 = Article(pub_date=now, headline='f')
a1.save()
a2 = Article(pub_date=now, headline='fo')
a2.save()
a3 = Article(pub_date=now, headline='foo')
a3.save()
a4 = Article(pub_date=now, headline='fooo')
a4.save()
a5 = Article(pub_date=now, headline='hey-Foo')
a5.save()
a6 = Article(pub_date=now, headline='bar')
a6.save()
a7 = Article(pub_date=now, headline='AbBa')
a7.save()
a8 = Article(pub_date=now, headline='baz')
a8.save()
a9 = Article(pub_date=now, headline='baxZ')
a9.save()
# zero-or-more
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo*'),
['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'fo*'),
[
'<Article: f>',
'<Article: fo>',
'<Article: foo>',
'<Article: fooo>',
'<Article: hey-Foo>',
])
# one-or-more
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo+'),
['<Article: fo>', '<Article: foo>', '<Article: fooo>'])
# wildcard
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fooo?'),
['<Article: foo>', '<Article: fooo>'])
# leading anchor
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^b'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'),
['<Article: AbBa>'])
# trailing anchor
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'),
['<Article: baz>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'z$'),
['<Article: baxZ>', '<Article: baz>'])
# character sets
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba[rz]'),
['<Article: bar>', '<Article: baz>'])
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'),
['<Article: baxZ>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'ba[RxZ]'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
# and more articles:
a10 = Article(pub_date=now, headline='foobar')
a10.save()
a11 = Article(pub_date=now, headline='foobaz')
a11.save()
a12 = Article(pub_date=now, headline='ooF')
a12.save()
a13 = Article(pub_date=now, headline='foobarbaz')
a13.save()
a14 = Article(pub_date=now, headline='zoocarfaz')
a14.save()
a15 = Article(pub_date=now, headline='barfoobaz')
a15.save()
a16 = Article(pub_date=now, headline='bazbaRFOO')
a16.save()
# alternation
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
'<Article: ooF>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^foo(f|b)'),
['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>'])
# greedy matching
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b.*az'),
[
'<Article: barfoobaz>',
'<Article: baz>',
'<Article: bazbaRFOO>',
'<Article: foobarbaz>',
'<Article: foobaz>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'b.*ar'),
[
'<Article: bar>',
'<Article: barfoobaz>',
'<Article: bazbaRFOO>',
'<Article: foobar>',
'<Article: foobarbaz>',
])
@skipUnlessDBFeature('supports_regex_backreferencing')
def test_regex_backreferencing(self):
# grouping and backreferences
now = datetime.now()
a10 = Article(pub_date=now, headline='foobar')
a10.save()
a11 = Article(pub_date=now, headline='foobaz')
a11.save()
a12 = Article(pub_date=now, headline='ooF')
a12.save()
a13 = Article(pub_date=now, headline='foobarbaz')
a13.save()
a14 = Article(pub_date=now, headline='zoocarfaz')
a14.save()
a15 = Article(pub_date=now, headline='barfoobaz')
a15.save()
a16 = Article(pub_date=now, headline='bazbaRFOO')
a16.save()
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b(.).*b\1'),
['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>'])
def test_regex_null(self):
"""
Ensure that a regex lookup does not fail on null/None values
"""
Season.objects.create(year=2012, gt=None)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), [])
def test_regex_non_string(self):
"""
Ensure that a regex lookup does not fail on non-string fields
"""
Season.objects.create(year=2013, gt=444)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'),
['<Season: 2013>'])
def test_regex_non_ascii(self):
"""
Ensure that a regex lookup does not trip on non-ASCII characters.
"""
Player.objects.create(name='\u2660')
Player.objects.get(name__regex='\u2660')
def test_nonfield_lookups(self):
"""
Ensure that a lookup query containing non-fields raises the proper
exception.
"""
with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah=99)
with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah__exact=99)
with self.assertRaises(FieldError):
Article.objects.filter(blahblah=99)
def test_lookup_collision(self):
"""
Ensure that genuine field names don't collide with built-in lookup
types ('year', 'gt', 'range', 'in' etc.).
Refs #11670.
"""
# Here we're using 'gt' as a code number for the year, e.g. 111=>2009.
season_2009 = Season.objects.create(year=2009, gt=111)
season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2010 = Season.objects.create(year=2010, gt=222)
season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011 = Season.objects.create(year=2011, gt=333)
season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
hunter_pence = Player.objects.create(name="Hunter Pence")
hunter_pence.games = Game.objects.filter(season__year__in=[2009, 2010])
pudge = Player.objects.create(name="Ivan Rodriquez")
pudge.games = Game.objects.filter(season__year=2009)
pedro_feliz = Player.objects.create(name="Pedro Feliz")
pedro_feliz.games = Game.objects.filter(season__year__in=[2011])
johnson = Player.objects.create(name="Johnson")
johnson.games = Game.objects.filter(season__year__in=[2011])
# Games in 2010
self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
# Games in 2011
self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
# Games played in 2010 and 2011
self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
# Players who played in 2009
self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2)
# Players who played in 2010
self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1)
# Players who played in 2011
self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
def test_chain_date_time_lookups(self):
self.assertQuerysetEqual(
Article.objects.filter(pub_date__month__gt=7),
['<Article: Article 5>', '<Article: Article 6>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__day__gte=27),
['<Article: Article 2>', '<Article: Article 3>',
'<Article: Article 4>', '<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__hour__lt=8),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__minute__lte=0),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 5>', '<Article: Article 6>',
'<Article: Article 7>'],
ordered=False
)
class LookupTransactionTests(TransactionTestCase):
available_apps = ['lookup']
@skipUnless(connection.vendor == 'mysql', 'requires MySQL')
def test_mysql_lookup_search(self):
# To use fulltext indexes on MySQL either version 5.6 is needed, or one must use
# MyISAM tables. Neither of these combinations is currently available on CI, so
        # let's manually create a MyISAM table for the Article model.
with connection.cursor() as cursor:
cursor.execute(
"CREATE TEMPORARY TABLE myisam_article ("
" id INTEGER PRIMARY KEY AUTO_INCREMENT, "
" headline VARCHAR(100) NOT NULL "
") ENGINE MYISAM")
dr = MyISAMArticle.objects.create(headline='Django Reinhardt')
MyISAMArticle.objects.create(headline='Ringo Star')
# NOTE: Needs to be created after the article has been saved.
cursor.execute(
'CREATE FULLTEXT INDEX myisam_article_ft ON myisam_article (headline)')
self.assertQuerysetEqual(
MyISAMArticle.objects.filter(headline__search='Reinhardt'),
[dr], lambda x: x)
| bsd-3-clause |
ntt-sic/keystone | keystone/openstack/common/jsonutils.py | 6 | 6083 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
import types
import xmlrpclib
import netaddr
import six
from keystone.openstack.common import timeutils
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (types.NoneType, int, basestring, bool, float, long)
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in value.iteritems())
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
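# Illustrative usage sketch, not part of the original module: dumps() wires
# to_primitive() in as the json "default" hook, so values the stdlib json
# module cannot serialize on its own (datetimes, iterators/generators, and
# optionally instances) are first reduced to primitives. The payload below
# is hypothetical.
def _to_primitive_example():
    payload = {
        'when': datetime.datetime(2013, 1, 1, 12, 0, 0),
        'ids': iter([1, 2, 3]),
    }
    # The datetime is rendered through timeutils.strtime() and the iterator
    # is expanded into a list before json.dumps() sees them.
    return dumps(payload)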
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
| apache-2.0 |
arantebillywilson/python-snippets | microblog/flask/lib/python3.5/site-packages/coverage/html.py | 9 | 15233 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""HTML reporting for coverage.py."""
import datetime
import json
import os
import shutil
import coverage
from coverage import env
from coverage.backward import iitems
from coverage.files import flat_rootname
from coverage.misc import CoverageException, file_be_gone, Hasher, isolate_module
from coverage.report import Reporter
from coverage.results import Numbers
from coverage.templite import Templite
os = isolate_module(os)
# Static files are looked for in a list of places.
STATIC_PATH = [
# The place Debian puts system Javascript libraries.
"/usr/share/javascript",
# Our htmlfiles directory.
os.path.join(os.path.dirname(__file__), "htmlfiles"),
]
def data_filename(fname, pkgdir=""):
"""Return the path to a data file of ours.
The file is searched for on `STATIC_PATH`, and the first place it's found,
is returned.
Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
is provided, at that sub-directory.
"""
tried = []
for static_dir in STATIC_PATH:
static_filename = os.path.join(static_dir, fname)
if os.path.exists(static_filename):
return static_filename
else:
tried.append(static_filename)
if pkgdir:
static_filename = os.path.join(static_dir, pkgdir, fname)
if os.path.exists(static_filename):
return static_filename
else:
tried.append(static_filename)
raise CoverageException(
"Couldn't find static file %r from %r, tried: %r" % (fname, os.getcwd(), tried)
)
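# Illustrative sketch, not part of coverage.py itself: data_filename() tries
# each STATIC_PATH entry directly and then, when `pkgdir` is given, its
# sub-directory, so a vendored library can come either from the system or
# from our own htmlfiles/ tree. This mirrors how HtmlReporter copies its
# static files.
def _data_filename_example():
    # On Debian this can resolve to /usr/share/javascript/jquery/jquery.min.js;
    # otherwise it falls back to the copy shipped next to this module.
    return data_filename("jquery.min.js", pkgdir="jquery")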
def read_data(fname):
"""Return the contents of a data file of ours."""
with open(data_filename(fname)) as data_file:
return data_file.read()
def write_html(fname, html):
"""Write `html` to `fname`, properly encoded."""
with open(fname, "wb") as fout:
fout.write(html.encode('ascii', 'xmlcharrefreplace'))
class HtmlReporter(Reporter):
"""HTML reporting."""
# These files will be copied from the htmlfiles directory to the output
# directory.
STATIC_FILES = [
("style.css", ""),
("jquery.min.js", "jquery"),
("jquery.ba-throttle-debounce.min.js", "jquery-throttle-debounce"),
("jquery.hotkeys.js", "jquery-hotkeys"),
("jquery.isonscreen.js", "jquery-isonscreen"),
("jquery.tablesorter.min.js", "jquery-tablesorter"),
("coverage_html.js", ""),
("keybd_closed.png", ""),
("keybd_open.png", ""),
]
def __init__(self, cov, config):
super(HtmlReporter, self).__init__(cov, config)
self.directory = None
title = self.config.html_title
if env.PY2:
title = title.decode("utf8")
self.template_globals = {
'escape': escape,
'pair': pair,
'title': title,
'__url__': coverage.__url__,
'__version__': coverage.__version__,
}
self.source_tmpl = Templite(read_data("pyfile.html"), self.template_globals)
self.coverage = cov
self.files = []
self.all_files_nums = []
self.has_arcs = self.coverage.data.has_arcs()
self.status = HtmlStatus()
self.extra_css = None
self.totals = Numbers()
self.time_stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
def report(self, morfs):
"""Generate an HTML report for `morfs`.
`morfs` is a list of modules or file names.
"""
assert self.config.html_dir, "must give a directory for html reporting"
# Read the status data.
self.status.read(self.config.html_dir)
# Check that this run used the same settings as the last run.
m = Hasher()
m.update(self.config)
these_settings = m.hexdigest()
if self.status.settings_hash() != these_settings:
self.status.reset()
self.status.set_settings_hash(these_settings)
# The user may have extra CSS they want copied.
if self.config.extra_css:
self.extra_css = os.path.basename(self.config.extra_css)
# Process all the files.
self.report_files(self.html_file, morfs, self.config.html_dir)
if not self.all_files_nums:
raise CoverageException("No data to report.")
# Write the index file.
self.index_file()
self.make_local_static_report_files()
return self.totals.n_statements and self.totals.pc_covered
def make_local_static_report_files(self):
"""Make local instances of static files for HTML report."""
# The files we provide must always be copied.
for static, pkgdir in self.STATIC_FILES:
shutil.copyfile(
data_filename(static, pkgdir),
os.path.join(self.directory, static)
)
# The user may have extra CSS they want copied.
if self.extra_css:
shutil.copyfile(
self.config.extra_css,
os.path.join(self.directory, self.extra_css)
)
def file_hash(self, source, fr):
"""Compute a hash that changes if the file needs to be re-reported."""
m = Hasher()
m.update(source)
self.coverage.data.add_to_hash(fr.filename, m)
return m.hexdigest()
def html_file(self, fr, analysis):
"""Generate an HTML file for one source file."""
rootname = flat_rootname(fr.relative_filename())
html_filename = rootname + ".html"
html_path = os.path.join(self.directory, html_filename)
# Get the numbers for this file.
nums = analysis.numbers
self.all_files_nums.append(nums)
if self.config.skip_covered:
# Don't report on 100% files.
no_missing_lines = (nums.n_missing == 0)
no_missing_branches = (nums.n_partial_branches == 0)
if no_missing_lines and no_missing_branches:
# If there's an existing file, remove it.
file_be_gone(html_path)
return
source = fr.source()
# Find out if the file on disk is already correct.
this_hash = self.file_hash(source.encode('utf-8'), fr)
that_hash = self.status.file_hash(rootname)
if this_hash == that_hash:
# Nothing has changed to require the file to be reported again.
self.files.append(self.status.index_info(rootname))
return
self.status.set_file_hash(rootname, this_hash)
if self.has_arcs:
missing_branch_arcs = analysis.missing_branch_arcs()
arcs_executed = analysis.arcs_executed()
# These classes determine which lines are highlighted by default.
c_run = "run hide_run"
c_exc = "exc"
c_mis = "mis"
c_par = "par " + c_run
lines = []
for lineno, line in enumerate(fr.source_token_lines(), start=1):
# Figure out how to mark this line.
line_class = []
annotate_html = ""
annotate_long = ""
if lineno in analysis.statements:
line_class.append("stm")
if lineno in analysis.excluded:
line_class.append(c_exc)
elif lineno in analysis.missing:
line_class.append(c_mis)
elif self.has_arcs and lineno in missing_branch_arcs:
line_class.append(c_par)
shorts = []
longs = []
for b in missing_branch_arcs[lineno]:
if b < 0:
shorts.append("exit")
else:
shorts.append(b)
longs.append(fr.missing_arc_description(lineno, b, arcs_executed))
# 202F is NARROW NO-BREAK SPACE.
# 219B is RIGHTWARDS ARROW WITH STROKE.
short_fmt = "%s ↛ %s"
annotate_html = ", ".join(short_fmt % (lineno, d) for d in shorts)
if len(longs) == 1:
annotate_long = longs[0]
else:
annotate_long = "%d missed branches: %s" % (
len(longs),
", ".join("%d) %s" % (num, ann_long)
for num, ann_long in enumerate(longs, start=1)),
)
elif lineno in analysis.statements:
line_class.append(c_run)
# Build the HTML for the line.
html = []
for tok_type, tok_text in line:
if tok_type == "ws":
html.append(escape(tok_text))
else:
                    tok_html = escape(tok_text) or '&nbsp;'
html.append(
'<span class="%s">%s</span>' % (tok_type, tok_html)
)
lines.append({
'html': ''.join(html),
'number': lineno,
'class': ' '.join(line_class) or "pln",
'annotate': annotate_html,
'annotate_long': annotate_long,
})
# Write the HTML page for this file.
html = self.source_tmpl.render({
'c_exc': c_exc,
'c_mis': c_mis,
'c_par': c_par,
'c_run': c_run,
'has_arcs': self.has_arcs,
'extra_css': self.extra_css,
'fr': fr,
'nums': nums,
'lines': lines,
'time_stamp': self.time_stamp,
})
write_html(html_path, html)
# Save this file's information for the index file.
index_info = {
'nums': nums,
'html_filename': html_filename,
'relative_filename': fr.relative_filename(),
}
self.files.append(index_info)
self.status.set_index_info(rootname, index_info)
def index_file(self):
"""Write the index.html file for this report."""
index_tmpl = Templite(read_data("index.html"), self.template_globals)
self.totals = sum(self.all_files_nums)
html = index_tmpl.render({
'has_arcs': self.has_arcs,
'extra_css': self.extra_css,
'files': self.files,
'totals': self.totals,
'time_stamp': self.time_stamp,
})
write_html(os.path.join(self.directory, "index.html"), html)
# Write the latest hashes for next time.
self.status.write(self.directory)
class HtmlStatus(object):
"""The status information we keep to support incremental reporting."""
STATUS_FILE = "status.json"
STATUS_FORMAT = 1
# pylint: disable=wrong-spelling-in-comment,useless-suppression
# The data looks like:
#
# {
# 'format': 1,
# 'settings': '540ee119c15d52a68a53fe6f0897346d',
# 'version': '4.0a1',
# 'files': {
# 'cogapp___init__': {
# 'hash': 'e45581a5b48f879f301c0f30bf77a50c',
# 'index': {
# 'html_filename': 'cogapp___init__.html',
# 'name': 'cogapp/__init__',
# 'nums': <coverage.results.Numbers object at 0x10ab7ed0>,
# }
# },
# ...
# 'cogapp_whiteutils': {
# 'hash': '8504bb427fc488c4176809ded0277d51',
# 'index': {
# 'html_filename': 'cogapp_whiteutils.html',
# 'name': 'cogapp/whiteutils',
# 'nums': <coverage.results.Numbers object at 0x10ab7d90>,
# }
# },
# },
# }
def __init__(self):
self.reset()
def reset(self):
"""Initialize to empty."""
self.settings = ''
self.files = {}
def read(self, directory):
"""Read the last status in `directory`."""
usable = False
try:
status_file = os.path.join(directory, self.STATUS_FILE)
with open(status_file, "r") as fstatus:
status = json.load(fstatus)
except (IOError, ValueError):
usable = False
else:
usable = True
if status['format'] != self.STATUS_FORMAT:
usable = False
elif status['version'] != coverage.__version__:
usable = False
if usable:
self.files = {}
for filename, fileinfo in iitems(status['files']):
fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
self.files[filename] = fileinfo
self.settings = status['settings']
else:
self.reset()
def write(self, directory):
"""Write the current status to `directory`."""
status_file = os.path.join(directory, self.STATUS_FILE)
files = {}
for filename, fileinfo in iitems(self.files):
fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args()
files[filename] = fileinfo
status = {
'format': self.STATUS_FORMAT,
'version': coverage.__version__,
'settings': self.settings,
'files': files,
}
with open(status_file, "w") as fout:
json.dump(status, fout, separators=(',', ':'))
# Older versions of ShiningPanda look for the old name, status.dat.
# Accommodate them if we are running under Jenkins.
# https://issues.jenkins-ci.org/browse/JENKINS-28428
if "JENKINS_URL" in os.environ:
with open(os.path.join(directory, "status.dat"), "w") as dat:
dat.write("https://issues.jenkins-ci.org/browse/JENKINS-28428\n")
def settings_hash(self):
"""Get the hash of the coverage.py settings."""
return self.settings
def set_settings_hash(self, settings):
"""Set the hash of the coverage.py settings."""
self.settings = settings
def file_hash(self, fname):
"""Get the hash of `fname`'s contents."""
return self.files.get(fname, {}).get('hash', '')
def set_file_hash(self, fname, val):
"""Set the hash of `fname`'s contents."""
self.files.setdefault(fname, {})['hash'] = val
def index_info(self, fname):
"""Get the information for index.html for `fname`."""
return self.files.get(fname, {}).get('index', {})
def set_index_info(self, fname, info):
"""Set the information for index.html for `fname`."""
self.files.setdefault(fname, {})['index'] = info
# Helpers for templates and generating HTML
def escape(t):
"""HTML-escape the text in `t`.
This is only suitable for HTML text, not attributes.
"""
# Convert HTML special chars into HTML entities.
return t.replace("&", "&").replace("<", "<")
def pair(ratio):
"""Format a pair of numbers so JavaScript can read them in an attribute."""
return "%s %s" % ratio
| mit |
coreboot/chrome-ec | util/ec3po/__init__.py | 1 | 1163 | # Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The EC console interpreter.
EC-3PO is a console interpreter which migrates the rich debug console from the
EC itself to the host. This allows for a rich debug console without impacting
EC image sizes while also allowing the development of new console features.
The package consists of two modules: console and interpreter. The console
module provides the interactive console interface between the user and the
interpreter. It handles the presentation of the EC console including editing
methods as well as session-persistent command history.
The interpreter module provides the interpretation layer between the EC UART and
the user. The user does not necessarily have to be the interactive console, but
could be something like autotest. The interpreter is also responsible for the
automatic command retrying if the EC drops a character in a command. This is a
stopgap until all commands are communicated via host commands.
"""
import console
import interpreter
import threadproc_shim
| bsd-3-clause |
binghongcha08/pyQMD | QMC/MC_exchange/permute4d/dissipation/1.5/en.py | 15 | 1291 | import numpy as np
import pylab as plt
import matplotlib.pyplot as plt
import matplotlib as mpl
#data = np.genfromtxt(fname='/home/bing/dissipation/energy.dat')
data = np.genfromtxt(fname='energy.dat')
fig, (ax1,ax2) = plt.subplots(ncols=1, nrows=2, sharex=True)
#font = {'family' : 'ubuntu',
# 'weight' : 'normal',
# 'size' : '16'}
#mpl.rc('font', **font) # pass in the font dict as kwargs
mpl.rcParams['font.size'] = 12
#mpl.rcParams['figure.figsize'] = 8,6
#pl.title('two-steps fitting alg')
ax1.set_ylabel('Energy [hartree]')
ax1.plot(data[:,0],data[:,2],'b--',linewidth=2,label='Potential')
#pl.plot(dat[:,0],dat[:,2],'r-',linewidth=2)
ax1.plot(data[:,0],data[:,3],'g-.',linewidth=2,label='Quantum Potential')
ax1.plot(data[:,0],data[:,4],'k-',linewidth=2,label='Energy')
#pl.legend(bbox_to_anchor=(0.5, 0.38, 0.42, .302), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#ax1.set_yticks((0.4,0.6,0.8))
ax1.legend(loc=0)
#ax1.set_ylim(0,5)
ax2.set_xlabel('time [a.u.]')
ax2.set_ylabel('Energy [hartree]')
ax2.plot(data[:,0],data[:,1],'r--',linewidth=2,label='$Kinetic$')
#pl.plot(dat[:,0],dat[:,1],'k-',linewidth=2)
ax2.set_yscale('log')
#ax2.set_xticks((0,4,8))
#ax2.set_yticks((1e-7,1e-5,1e-3))
plt.legend(loc=0)
plt.subplots_adjust(hspace=0.)
plt.show()
| gpl-3.0 |
Mixser/django | tests/model_inheritance_regress/tests.py | 150 | 18806 | """
Regression tests for Model inheritance behavior.
"""
from __future__ import unicode_literals
import datetime
from operator import attrgetter
from unittest import expectedFailure
from django import forms
from django.test import TestCase
from .models import (
ArticleWithAuthor, BachelorParty, BirthdayParty, BusStation, Child,
DerivedM, InternalCertificationAudit, ItalianRestaurant, M2MChild,
MessyBachelorParty, ParkingLot, ParkingLot2, ParkingLot3, ParkingLot4A,
ParkingLot4B, Person, Place, Profile, QualityControl, Restaurant,
SelfRefChild, SelfRefParent, Senator, Supplier, TrainStation, User,
Wholesaler,
)
class ModelInheritanceTest(TestCase):
def test_model_inheritance(self):
# Regression for #7350, #7202
# Check that when you create a Parent object with a specific reference
# to an existent child instance, saving the Parent doesn't duplicate
# the child. This behavior is only activated during a raw save - it
# is mostly relevant to deserialization, but any sort of CORBA style
# 'narrow()' API would require a similar approach.
# Create a child-parent-grandparent chain
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
# Create a child-parent chain with an explicit parent link
place2 = Place(name='Main St', address='111 Main St')
place2.save_base(raw=True)
park = ParkingLot(parent=place2, capacity=100)
park.save_base(raw=True)
# Check that no extra parent objects have been created.
places = list(Place.objects.all())
self.assertEqual(places, [place1, place2])
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_hot_dogs': True
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_gnocchi': True,
'serves_hot_dogs': True,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 100,
'name': 'Main St',
}])
# You can also update objects when using a raw save.
place1.name = "Guido's All New House of Pasta"
place1.save_base(raw=True)
restaurant.serves_hot_dogs = False
restaurant.save_base(raw=True)
italian_restaurant.serves_gnocchi = False
italian_restaurant.save_base(raw=True)
place2.name = 'Derelict lot'
place2.save_base(raw=True)
park.capacity = 50
park.save_base(raw=True)
# No extra parent objects after an update, either.
places = list(Place.objects.all())
self.assertEqual(places, [place2, place1])
self.assertEqual(places[0].name, 'Derelict lot')
self.assertEqual(places[1].name, "Guido's All New House of Pasta")
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_hot_dogs': False,
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 50,
'name': 'Derelict lot',
}])
# If you try to raw_save a parent attribute onto a child object,
# the attribute will be ignored.
italian_restaurant.name = "Lorenzo's Pasta Hut"
italian_restaurant.save_base(raw=True)
# Note that the name has not changed
# - name is an attribute of Place, not ItalianRestaurant
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
def test_issue_7105(self):
        # Regression tests for #7105: dates() queries should be able to use
# fields from the parent model as easily as the child.
Child.objects.create(
name='child',
created=datetime.datetime(2008, 6, 26, 17, 0, 0))
datetimes = list(Child.objects.datetimes('created', 'month'))
self.assertEqual(datetimes, [datetime.datetime(2008, 6, 1, 0, 0)])
def test_issue_7276(self):
# Regression test for #7276: calling delete() on a model with
# multi-table inheritance should delete the associated rows from any
# ancestor tables, as well as any descendant objects.
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
ident = ItalianRestaurant.objects.all()[0].id
self.assertEqual(Place.objects.get(pk=ident), place1)
Restaurant.objects.create(
name='a',
address='xx',
serves_hot_dogs=True,
serves_pizza=False)
# This should delete both Restaurants, plus the related places, plus
# the ItalianRestaurant.
Restaurant.objects.all().delete()
self.assertRaises(
Place.DoesNotExist,
Place.objects.get,
pk=ident)
self.assertRaises(
ItalianRestaurant.DoesNotExist,
ItalianRestaurant.objects.get,
pk=ident)
def test_issue_6755(self):
"""
Regression test for #6755
"""
r = Restaurant(serves_pizza=False, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, r.place_ptr_id)
orig_id = r.id
r = Restaurant(place_ptr_id=orig_id, serves_pizza=True, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, orig_id)
self.assertEqual(r.id, r.place_ptr_id)
def test_issue_7488(self):
# Regression test for #7488. This looks a little crazy, but it's the
# equivalent of what the admin interface has to do for the edit-inline
# case.
suppliers = Supplier.objects.filter(
restaurant=Restaurant(name='xx', address='yy'))
suppliers = list(suppliers)
self.assertEqual(suppliers, [])
def test_issue_11764(self):
"""
Regression test for #11764
"""
wholesalers = list(Wholesaler.objects.all().select_related())
self.assertEqual(wholesalers, [])
def test_issue_7853(self):
"""
Regression test for #7853
If the parent class has a self-referential link, make sure that any
updates to that link via the child update the right table.
"""
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
obj.delete()
def test_get_next_previous_by_date(self):
"""
Regression tests for #8076
get_(next/previous)_by_date should work
"""
c1 = ArticleWithAuthor(
headline='ArticleWithAuthor 1',
author="Person 1",
pub_date=datetime.datetime(2005, 8, 1, 3, 0))
c1.save()
c2 = ArticleWithAuthor(
headline='ArticleWithAuthor 2',
author="Person 2",
pub_date=datetime.datetime(2005, 8, 1, 10, 0))
c2.save()
c3 = ArticleWithAuthor(
headline='ArticleWithAuthor 3',
author="Person 3",
pub_date=datetime.datetime(2005, 8, 2))
c3.save()
self.assertEqual(c1.get_next_by_pub_date(), c2)
self.assertEqual(c2.get_next_by_pub_date(), c3)
self.assertRaises(
ArticleWithAuthor.DoesNotExist,
c3.get_next_by_pub_date)
self.assertEqual(c3.get_previous_by_pub_date(), c2)
self.assertEqual(c2.get_previous_by_pub_date(), c1)
self.assertRaises(
ArticleWithAuthor.DoesNotExist,
c1.get_previous_by_pub_date)
def test_inherited_fields(self):
"""
Regression test for #8825 and #9390
Make sure all inherited fields (esp. m2m fields, in this case) appear
on the child class.
"""
m2mchildren = list(M2MChild.objects.filter(articles__isnull=False))
self.assertEqual(m2mchildren, [])
# Ordering should not include any database column more than once (this
# is most likely to occur naturally with model inheritance, so we
# check it here). Regression test for #9390. This necessarily pokes at
# the SQL string for the query, since the duplicate problems are only
# apparent at that late stage.
qs = ArticleWithAuthor.objects.order_by('pub_date', 'pk')
sql = qs.query.get_compiler(qs.db).as_sql()[0]
fragment = sql[sql.find('ORDER BY'):]
pos = fragment.find('pub_date')
self.assertEqual(fragment.find('pub_date', pos + 1), -1)
def test_queryset_update_on_parent_model(self):
"""
Regression test for #10362
It is possible to call update() and only change a field in
an ancestor model.
"""
article = ArticleWithAuthor.objects.create(
author="fred",
headline="Hey there!",
pub_date=datetime.datetime(2009, 3, 1, 8, 0, 0))
update = ArticleWithAuthor.objects.filter(
author="fred").update(headline="Oh, no!")
self.assertEqual(update, 1)
update = ArticleWithAuthor.objects.filter(
pk=article.pk).update(headline="Oh, no!")
self.assertEqual(update, 1)
derivedm1 = DerivedM.objects.create(
customPK=44,
base_name="b1",
derived_name="d1")
self.assertEqual(derivedm1.customPK, 44)
self.assertEqual(derivedm1.base_name, 'b1')
self.assertEqual(derivedm1.derived_name, 'd1')
derivedms = list(DerivedM.objects.all())
self.assertEqual(derivedms, [derivedm1])
def test_use_explicit_o2o_to_parent_as_pk(self):
"""
Regression tests for #10406
If there's a one-to-one link between a child model and the parent and
no explicit pk declared, we can use the one-to-one link as the pk on
the child.
"""
self.assertEqual(ParkingLot2._meta.pk.name, "parent")
# However, the connector from child to parent need not be the pk on
# the child at all.
self.assertEqual(ParkingLot3._meta.pk.name, "primary_key")
# The child->parent link is still available via get_ancestor_link().
self.assertEqual(
ParkingLot3._meta.get_ancestor_link(Place).name,
"parent")
def test_use_explicit_o2o_to_parent_from_abstract_model(self):
self.assertEqual(ParkingLot4A._meta.pk.name, "parent")
ParkingLot4A.objects.create(
name="Parking4A",
address='21 Jump Street',
)
self.assertEqual(ParkingLot4B._meta.pk.name, "parent")
ParkingLot4A.objects.create(
name="Parking4B",
address='21 Jump Street',
)
def test_all_fields_from_abstract_base_class(self):
"""
Regression tests for #7588
"""
# All fields from an ABC, including those inherited non-abstractly
# should be available on child classes (#7588). Creating this instance
# should work without error.
QualityControl.objects.create(
headline="Problems in Django",
pub_date=datetime.datetime.now(),
quality=10,
assignee="adrian")
def test_abstract_base_class_m2m_relation_inheritance(self):
# Check that many-to-many relations defined on an abstract base class
# are correctly inherited (and created) on the child class.
p1 = Person.objects.create(name='Alice')
p2 = Person.objects.create(name='Bob')
p3 = Person.objects.create(name='Carol')
p4 = Person.objects.create(name='Dave')
birthday = BirthdayParty.objects.create(
name='Birthday party for Alice')
birthday.attendees = [p1, p3]
bachelor = BachelorParty.objects.create(name='Bachelor party for Bob')
bachelor.attendees = [p2, p4]
parties = list(p1.birthdayparty_set.all())
self.assertEqual(parties, [birthday])
parties = list(p1.bachelorparty_set.all())
self.assertEqual(parties, [])
parties = list(p2.bachelorparty_set.all())
self.assertEqual(parties, [bachelor])
# Check that a subclass of a subclass of an abstract model doesn't get
# its own accessor.
self.assertFalse(hasattr(p2, 'messybachelorparty_set'))
# ... but it does inherit the m2m from its parent
messy = MessyBachelorParty.objects.create(
name='Bachelor party for Dave')
messy.attendees = [p4]
messy_parent = messy.bachelorparty_ptr
parties = list(p4.bachelorparty_set.all())
self.assertEqual(parties, [bachelor, messy_parent])
def test_abstract_verbose_name_plural_inheritance(self):
"""
verbose_name_plural correctly inherited from ABC if inheritance chain
includes an abstract model.
"""
# Regression test for #11369: verbose_name_plural should be inherited
# from an ABC even when there are one or more intermediate
# abstract models in the inheritance chain, for consistency with
# verbose_name.
self.assertEqual(
InternalCertificationAudit._meta.verbose_name_plural,
'Audits'
)
def test_inherited_nullable_exclude(self):
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
self.assertQuerysetEqual(
SelfRefParent.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
self.assertQuerysetEqual(
SelfRefChild.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
def test_concrete_abstract_concrete_pk(self):
"""
Primary key set correctly with concrete->abstract->concrete inheritance.
"""
# Regression test for #13987: Primary key is incorrectly determined
# when more than one model has a concrete->abstract->concrete
# inheritance hierarchy.
self.assertEqual(
len([field for field in BusStation._meta.local_fields if field.primary_key]),
1
)
self.assertEqual(
len([field for field in TrainStation._meta.local_fields if field.primary_key]),
1
)
self.assertIs(BusStation._meta.pk.model, BusStation)
self.assertIs(TrainStation._meta.pk.model, TrainStation)
def test_inherited_unique_field_with_form(self):
"""
Test that a model which has different primary key for the parent model
passes unique field checking correctly. Refs #17615.
"""
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = '__all__'
User.objects.create(username="user_only")
p = Profile.objects.create(username="user_with_profile")
form = ProfileForm({'username': "user_with_profile", 'extra': "hello"},
instance=p)
self.assertTrue(form.is_valid())
def test_inheritance_joins(self):
# Test for #17502 - check that filtering through two levels of
# inheritance chain doesn't generate extra joins.
qs = ItalianRestaurant.objects.all()
self.assertEqual(str(qs.query).count('JOIN'), 2)
qs = ItalianRestaurant.objects.filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 2)
@expectedFailure
def test_inheritance_values_joins(self):
# It would be nice (but not too important) to skip the middle join in
# this case. Skipping is possible as nothing from the middle model is
# used in the qs and the top model contains a direct pointer to the bottom model.
qs = ItalianRestaurant.objects.values_list('serves_gnocchi').filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_issue_21554(self):
senator = Senator.objects.create(
name='John Doe', title='X', state='Y'
)
senator = Senator.objects.get(pk=senator.pk)
self.assertEqual(senator.name, 'John Doe')
self.assertEqual(senator.title, 'X')
self.assertEqual(senator.state, 'Y')
def test_inheritance_resolve_columns(self):
Restaurant.objects.create(name='Bobs Cafe', address="Somewhere",
serves_pizza=True, serves_hot_dogs=True)
p = Place.objects.all().select_related('restaurant')[0]
self.assertIsInstance(p.restaurant.serves_pizza, bool)
def test_inheritance_select_related(self):
# Regression test for #7246
r1 = Restaurant.objects.create(
name="Nobu", serves_hot_dogs=True, serves_pizza=False
)
r2 = Restaurant.objects.create(
name="Craft", serves_hot_dogs=False, serves_pizza=True
)
Supplier.objects.create(name="John", restaurant=r1)
Supplier.objects.create(name="Jane", restaurant=r2)
self.assertQuerysetEqual(
Supplier.objects.order_by("name").select_related(), [
"Jane",
"John",
],
attrgetter("name")
)
jane = Supplier.objects.order_by("name").select_related("restaurant")[0]
self.assertEqual(jane.restaurant.name, "Craft")
| bsd-3-clause |
tschaume/pymatgen | pymatgen/util/serialization.py | 4 | 4016 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Most features of this module have been moved to monty. Please refer to
monty.json and monty.serialization documentation.
"""
import json
import functools
import pickle
from pymatgen.core.periodic_table import Element
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Apr 30, 2012"
def pmg_serialize(method):
"""
Decorator that adds the MSON serialization keys ("@module" and "@class")
to the dictionary returned by the decorated method. See the MSON
documentation for more details.
"""
@functools.wraps(method)
def wrapper(*args, **kwargs):
self = args[0]
d = method(*args, **kwargs)
# Add @module and @class
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
return wrapper
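def _pmg_serialize_example():
    """Illustrative sketch only (hypothetical class, not part of the public API):
    the decorated as_dict() returns just its own payload and the wrapper above
    appends the "@module" and "@class" keys."""
    class Thing:
        def __init__(self, value):
            self.value = value

        @pmg_serialize
        def as_dict(self):
            return {"value": self.value}

    # Returns {'value': 1, '@module': 'pymatgen.util.serialization', '@class': 'Thing'}
    return Thing(1).as_dict()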
def json_pretty_dump(obj, filename):
"""
Serialize obj as a JSON formatted stream to the given filename (
pretty printing version)
"""
with open(filename, "wt") as fh:
json.dump(obj, fh, indent=4, sort_keys=True)
class PmgPickler(pickle.Pickler):
"""
Persistence of External Objects as described in section 12.1.5.1 of
https://docs.python.org/3/library/pickle.html
"""
def persistent_id(self, obj):
"""Instead of pickling as a regular class instance, we emit a
persistent ID."""
if isinstance(obj, Element):
# Here, our persistent ID is simply a tuple, containing a tag and
# a key
return obj.__class__.__name__, obj.symbol
else:
# If obj does not have a persistent ID, return None. This means obj
# needs to be pickled as usual.
return None
class PmgUnpickler(pickle.Unpickler):
"""
Persistence of External Objects as described in section 12.1.5.1 of
https://docs.python.org/3/library/pickle.html
"""
def persistent_load(self, pid):
"""
This method is invoked whenever a persistent ID is encountered.
Here, pid is the tuple returned by PmgPickler.
"""
try:
type_tag, key_id = pid
except Exception:
# Sometimes we get a string such as ('Element', u'C') instead
# of a real tuple. Use ast to evaluate the expression (much safer
# than eval).
import ast
type_tag, key_id = ast.literal_eval(pid)
if type_tag == "Element":
return Element(key_id)
else:
# Always raises an error if you cannot return the correct object.
# Otherwise, the unpickler will think None is the object referenced
# by the persistent ID.
raise pickle.UnpicklingError(
"unsupported persistent object with pid %s" % pid)
def pmg_pickle_load(filobj, **kwargs):
r"""
Loads a pickle file and deserializes it with PmgUnpickler.
Args:
filobj: File-like object
**kwargs: Any of the keyword arguments supported by PmgUnpickler
Returns:
Deserialized object.
"""
return PmgUnpickler(filobj, **kwargs).load()
def pmg_pickle_dump(obj, filobj, **kwargs):
r"""
Dump an object to a pickle file using PmgPickler.
Args:
obj : Object to dump.
filobj: File-like object
**kwargs: Any of the keyword arguments supported by PmgPickler
"""
return PmgPickler(filobj, **kwargs).dump(obj)
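def _pmg_pickle_example(path="/tmp/element.pkl"):
    """Illustrative sketch only (hypothetical file path): round-trips an Element
    through PmgPickler/PmgUnpickler so that it is stored as an ("Element", symbol)
    persistent ID rather than as a fully pickled instance."""
    with open(path, "wb") as fh:
        pmg_pickle_dump(Element("Fe"), fh)
    with open(path, "rb") as fh:
        # persistent_load() rebuilds the Element from its symbol.
        return pmg_pickle_load(fh)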
class SlotPickleMixin:
"""
This mixin makes it possible to pickle/unpickle objects with __slots__
defined.
"""
def __getstate__(self):
return dict(
(slot, getattr(self, slot))
for slot in self.__slots__ if hasattr(self, slot)
)
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
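# Illustrative sketch only (hypothetical class): a __slots__ class defined at
# module level can mix this in so that pickling goes through the explicit
# __getstate__/__setstate__ pair above.
#
# class Point(SlotPickleMixin):
#     __slots__ = ("x", "y")
#
#     def __init__(self, x, y):
#         self.x = x
#         self.y = y
#
# restored = pickle.loads(pickle.dumps(Point(1, 2)))  # restored.x == 1, restored.y == 2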
| mit |
antonyc/django-rest-framework | tests/test_validators.py | 65 | 12626 | import datetime
from django.db import models
from django.test import TestCase
from rest_framework import serializers
def dedent(blocktext):
return '\n'.join([line[12:] for line in blocktext.splitlines()[1:-1]])
# Tests for `UniqueValidator`
# ---------------------------
class UniquenessModel(models.Model):
username = models.CharField(unique=True, max_length=100)
class UniquenessSerializer(serializers.ModelSerializer):
class Meta:
model = UniquenessModel
class AnotherUniquenessModel(models.Model):
code = models.IntegerField(unique=True)
class AnotherUniquenessSerializer(serializers.ModelSerializer):
class Meta:
model = AnotherUniquenessModel
class TestUniquenessValidation(TestCase):
def setUp(self):
self.instance = UniquenessModel.objects.create(username='existing')
def test_repr(self):
serializer = UniquenessSerializer()
expected = dedent("""
UniquenessSerializer():
id = IntegerField(label='ID', read_only=True)
username = CharField(max_length=100, validators=[<UniqueValidator(queryset=UniquenessModel.objects.all())>])
""")
assert repr(serializer) == expected
def test_is_not_unique(self):
data = {'username': 'existing'}
serializer = UniquenessSerializer(data=data)
assert not serializer.is_valid()
assert serializer.errors == {'username': ['This field must be unique.']}
def test_is_unique(self):
data = {'username': 'other'}
serializer = UniquenessSerializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'username': 'other'}
def test_updated_instance_excluded(self):
data = {'username': 'existing'}
serializer = UniquenessSerializer(self.instance, data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'username': 'existing'}
def test_doesnt_pollute_model(self):
instance = AnotherUniquenessModel.objects.create(code='100')
serializer = AnotherUniquenessSerializer(instance)
self.assertEqual(
AnotherUniquenessModel._meta.get_field('code').validators, [])
# Accessing data shouldn't affect validators on the model
serializer.data
self.assertEqual(
AnotherUniquenessModel._meta.get_field('code').validators, [])
# Tests for `UniqueTogetherValidator`
# -----------------------------------
class UniquenessTogetherModel(models.Model):
race_name = models.CharField(max_length=100)
position = models.IntegerField()
class Meta:
unique_together = ('race_name', 'position')
class NullUniquenessTogetherModel(models.Model):
"""
Used to ensure that null values are not included when checking
unique_together constraints.
Ignoring items which have a null in any of the validated fields is the same
behavior that database backends will use when they have the
unique_together constraint added.
Example case: a null position could indicate a non-finisher in the race,
there could be many non-finishers in a race, but all non-NULL
values *should* be unique against the given `race_name`.
"""
date_of_birth = models.DateField(null=True) # Not part of the uniqueness constraint
race_name = models.CharField(max_length=100)
position = models.IntegerField(null=True)
class Meta:
unique_together = ('race_name', 'position')
class UniquenessTogetherSerializer(serializers.ModelSerializer):
class Meta:
model = UniquenessTogetherModel
class NullUniquenessTogetherSerializer(serializers.ModelSerializer):
class Meta:
model = NullUniquenessTogetherModel
class TestUniquenessTogetherValidation(TestCase):
def setUp(self):
self.instance = UniquenessTogetherModel.objects.create(
race_name='example',
position=1
)
UniquenessTogetherModel.objects.create(
race_name='example',
position=2
)
UniquenessTogetherModel.objects.create(
race_name='other',
position=1
)
def test_repr(self):
serializer = UniquenessTogetherSerializer()
expected = dedent("""
UniquenessTogetherSerializer():
id = IntegerField(label='ID', read_only=True)
race_name = CharField(max_length=100, required=True)
position = IntegerField(required=True)
class Meta:
validators = [<UniqueTogetherValidator(queryset=UniquenessTogetherModel.objects.all(), fields=('race_name', 'position'))>]
""")
assert repr(serializer) == expected
def test_is_not_unique_together(self):
"""
Failing unique together validation should result in non field errors.
"""
data = {'race_name': 'example', 'position': 2}
serializer = UniquenessTogetherSerializer(data=data)
assert not serializer.is_valid()
assert serializer.errors == {
'non_field_errors': [
'The fields race_name, position must make a unique set.'
]
}
def test_is_unique_together(self):
"""
In a unique together validation, one field may be non-unique
so long as the set as a whole is unique.
"""
data = {'race_name': 'other', 'position': 2}
serializer = UniquenessTogetherSerializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {
'race_name': 'other',
'position': 2
}
def test_updated_instance_excluded_from_unique_together(self):
"""
When performing an update, the existing instance does not count
as a match against uniqueness.
"""
data = {'race_name': 'example', 'position': 1}
serializer = UniquenessTogetherSerializer(self.instance, data=data)
assert serializer.is_valid()
assert serializer.validated_data == {
'race_name': 'example',
'position': 1
}
def test_unique_together_is_required(self):
"""
In a unique together validation, all fields are required.
"""
data = {'position': 2}
serializer = UniquenessTogetherSerializer(data=data, partial=True)
assert not serializer.is_valid()
assert serializer.errors == {
'race_name': ['This field is required.']
}
def test_ignore_excluded_fields(self):
"""
When model fields are not included in a serializer, then uniqueness
validators should not be added for that field.
"""
class ExcludedFieldSerializer(serializers.ModelSerializer):
class Meta:
model = UniquenessTogetherModel
fields = ('id', 'race_name',)
serializer = ExcludedFieldSerializer()
expected = dedent("""
ExcludedFieldSerializer():
id = IntegerField(label='ID', read_only=True)
race_name = CharField(max_length=100)
""")
assert repr(serializer) == expected
def test_ignore_validation_for_null_fields(self):
# None values that are on fields which are part of the uniqueness
# constraint cause the instance to ignore uniqueness validation.
NullUniquenessTogetherModel.objects.create(
date_of_birth=datetime.date(2000, 1, 1),
race_name='Paris Marathon',
position=None
)
data = {
'date': datetime.date(2000, 1, 1),
'race_name': 'Paris Marathon',
'position': None
}
serializer = NullUniquenessTogetherSerializer(data=data)
assert serializer.is_valid()
def test_do_not_ignore_validation_for_null_fields(self):
# None values that are not on fields part of the uniqueness constraint
# do not cause the instance to skip validation.
NullUniquenessTogetherModel.objects.create(
date_of_birth=datetime.date(2000, 1, 1),
race_name='Paris Marathon',
position=1
)
data = {'date': None, 'race_name': 'Paris Marathon', 'position': 1}
serializer = NullUniquenessTogetherSerializer(data=data)
assert not serializer.is_valid()
# Tests for `UniqueForDateValidator`
# ----------------------------------
class UniqueForDateModel(models.Model):
slug = models.CharField(max_length=100, unique_for_date='published')
published = models.DateField()
class UniqueForDateSerializer(serializers.ModelSerializer):
class Meta:
model = UniqueForDateModel
class TestUniquenessForDateValidation(TestCase):
def setUp(self):
self.instance = UniqueForDateModel.objects.create(
slug='existing',
published='2000-01-01'
)
def test_repr(self):
serializer = UniqueForDateSerializer()
expected = dedent("""
UniqueForDateSerializer():
id = IntegerField(label='ID', read_only=True)
slug = CharField(max_length=100)
published = DateField(required=True)
class Meta:
validators = [<UniqueForDateValidator(queryset=UniqueForDateModel.objects.all(), field='slug', date_field='published')>]
""")
assert repr(serializer) == expected
def test_is_not_unique_for_date(self):
"""
Failing unique for date validation should result in field error.
"""
data = {'slug': 'existing', 'published': '2000-01-01'}
serializer = UniqueForDateSerializer(data=data)
assert not serializer.is_valid()
assert serializer.errors == {
'slug': ['This field must be unique for the "published" date.']
}
def test_is_unique_for_date(self):
"""
Passing unique for date validation.
"""
data = {'slug': 'existing', 'published': '2000-01-02'}
serializer = UniqueForDateSerializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {
'slug': 'existing',
'published': datetime.date(2000, 1, 2)
}
def test_updated_instance_excluded_from_unique_for_date(self):
"""
When performing an update, the existing instance does not count
as a match against unique_for_date.
"""
data = {'slug': 'existing', 'published': '2000-01-01'}
serializer = UniqueForDateSerializer(instance=self.instance, data=data)
assert serializer.is_valid()
assert serializer.validated_data == {
'slug': 'existing',
'published': datetime.date(2000, 1, 1)
}
class HiddenFieldUniqueForDateModel(models.Model):
slug = models.CharField(max_length=100, unique_for_date='published')
published = models.DateTimeField(auto_now_add=True)
class TestHiddenFieldUniquenessForDateValidation(TestCase):
def test_repr_date_field_not_included(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = HiddenFieldUniqueForDateModel
fields = ('id', 'slug')
serializer = TestSerializer()
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
slug = CharField(max_length=100)
published = HiddenField(default=CreateOnlyDefault(<function now>))
class Meta:
validators = [<UniqueForDateValidator(queryset=HiddenFieldUniqueForDateModel.objects.all(), field='slug', date_field='published')>]
""")
assert repr(serializer) == expected
def test_repr_date_field_included(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = HiddenFieldUniqueForDateModel
fields = ('id', 'slug', 'published')
serializer = TestSerializer()
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
slug = CharField(max_length=100)
published = DateTimeField(default=CreateOnlyDefault(<function now>), read_only=True)
class Meta:
validators = [<UniqueForDateValidator(queryset=HiddenFieldUniqueForDateModel.objects.all(), field='slug', date_field='published')>]
""")
assert repr(serializer) == expected
| bsd-2-clause |
cuboxi/android_external_chromium_org | ppapi/native_client/src/tools/srpcgen.py | 79 | 15629 | #!/usr/bin/env python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Build "SRPC" interfaces from specifications.
SRPC interfaces consist of one or more interface classes, typically defined
in a set of .srpc files. The specifications are Python dictionaries, with a
top level 'name' element and an 'rpcs' element. The rpcs element is a list
containing a number of rpc methods, each of which has a 'name', an 'inputs',
and an 'outputs' element. These elements are lists of input or output
parameters, which are lists pairs containing a name and type. The set of
types includes all the SRPC basic types.
These SRPC specifications are used to generate a header file and either a
server or client stub file, as determined by the command line flag -s or -c.
"""
import getopt
import sys
import os
COPYRIGHT_AND_AUTOGEN_COMMENT = """\
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
//
// Automatically generated code. See srpcgen.py
//
// NaCl Simple Remote Procedure Call interface abstractions.
"""
HEADER_INCLUDE_GUARD_START = """\
#ifndef %(include_guard)s
#define %(include_guard)s
"""
HEADER_INCLUDE_GUARD_END = """\
\n\n#endif // %(include_guard)s
"""
HEADER_FILE_INCLUDES = """\
#ifndef __native_client__
#include "native_client/src/include/portability.h"
#endif // __native_client__
%(EXTRA_INCLUDES)s
"""
SOURCE_FILE_INCLUDES = """\
#include "%(srpcgen_h)s"
#ifdef __native_client__
#ifndef UNREFERENCED_PARAMETER
#define UNREFERENCED_PARAMETER(P) do { (void) P; } while (0)
#endif // UNREFERENCED_PARAMETER
#else
#include "native_client/src/include/portability.h"
#endif // __native_client__
%(EXTRA_INCLUDES)s
"""
# For both .cc and .h files.
EXTRA_INCLUDES = [
'#include "native_client/src/shared/srpc/nacl_srpc.h"',
]
types = {'bool': ['b', 'bool', 'u.bval', ''],
'char[]': ['C', 'char*', 'arrays.carr', 'u.count'],
'double': ['d', 'double', 'u.dval', ''],
'double[]': ['D', 'double*', 'arrays.darr', 'u.count'],
'handle': ['h', 'NaClSrpcImcDescType', 'u.hval', ''],
'int32_t': ['i', 'int32_t', 'u.ival', ''],
'int32_t[]': ['I', 'int32_t*', 'arrays.iarr', 'u.count'],
'int64_t': ['l', 'int64_t', 'u.lval', ''],
'int64_t[]': ['L', 'int64_t', 'arrays.larr', 'u.count'],
'PP_Instance': ['i', 'PP_Instance', 'u.ival', ''],
'PP_Module': ['i', 'PP_Module', 'u.ival', ''],
'PP_Resource': ['i', 'PP_Resource', 'u.ival', ''],
'string': ['s', 'const char*', 'arrays.str', ''],
}
def AddInclude(name):
"""Adds an include to the include section of both .cc and .h files."""
EXTRA_INCLUDES.append('#include "%s"' % name)
def HeaderFileIncludes():
"""Includes are sorted alphabetically."""
EXTRA_INCLUDES.sort()
return HEADER_FILE_INCLUDES % {
'EXTRA_INCLUDES': '\n'.join(EXTRA_INCLUDES),
}
def SourceFileIncludes(srpcgen_h_file):
"""Includes are sorted alphabetically."""
EXTRA_INCLUDES.sort()
return SOURCE_FILE_INCLUDES % {
'EXTRA_INCLUDES': '\n'.join(EXTRA_INCLUDES),
'srpcgen_h': srpcgen_h_file
}
def PrintHeaderFileTop(output, include_guard):
"""Prints the header of the .h file including copyright,
header comment, include guard and includes."""
print >>output, COPYRIGHT_AND_AUTOGEN_COMMENT
print >>output, HEADER_INCLUDE_GUARD_START % {'include_guard': include_guard}
print >>output, HeaderFileIncludes()
def PrintHeaderFileBottom(output, include_guard):
"""Prints the footer of the .h file including copyright,
header comment, include guard and includes."""
print >>output, HEADER_INCLUDE_GUARD_END % {'include_guard': include_guard}
def PrintSourceFileTop(output, srpcgen_h_file):
"""Prints the header of the .cc file including copyright,
header comment and includes."""
print >>output, COPYRIGHT_AND_AUTOGEN_COMMENT
print >>output, SourceFileIncludes(srpcgen_h_file)
def CountName(name):
"""Returns the name of the auxiliary count member used for array typed."""
return '%s_bytes' % name
def FormatRpcPrototype(is_server, class_name, indent, rpc):
"""Returns a string for the prototype of an individual RPC."""
def FormatArgs(is_output, args):
"""Returns a string containing the formatted arguments for an RPC."""
def FormatArg(is_output, arg):
"""Returns a string containing a formatted argument to an RPC."""
if is_output:
suffix = '* '
else:
suffix = ' '
s = ''
type_info = types[arg[1]]
if type_info[3]:
s += 'nacl_abi_size_t%s%s, %s %s' % (suffix,
CountName(arg[0]),
type_info[1],
arg[0])
else:
s += '%s%s%s' % (type_info[1], suffix, arg[0])
return s
s = ''
for arg in args:
s += ',\n %s%s' % (indent, FormatArg(is_output, arg))
return s
if is_server:
ret_type = 'void'
else:
ret_type = 'NaClSrpcError'
s = '%s %s%s(\n' % (ret_type, class_name, rpc['name'])
# Until SRPC uses RPC/Closure on the client side, these must be different.
if is_server:
s += ' %sNaClSrpcRpc* rpc,\n' % indent
s += ' %sNaClSrpcClosure* done' % indent
else:
s += ' %sNaClSrpcChannel* channel' % indent
s += '%s' % FormatArgs(False, rpc['inputs'])
s += '%s' % FormatArgs(True, rpc['outputs'])
s += ')'
return s
def PrintHeaderFile(output, is_server, guard_name, interface_name, specs):
"""Prints out the header file containing the prototypes for the RPCs."""
PrintHeaderFileTop(output, guard_name)
s = ''
# iterate over all the specified interfaces
if is_server:
suffix = 'Server'
else:
suffix = 'Client'
for spec in specs:
class_name = spec['name'] + suffix
rpcs = spec['rpcs']
s += 'class %s {\n public:\n' % class_name
for rpc in rpcs:
s += ' static %s;\n' % FormatRpcPrototype(is_server, '', ' ', rpc)
s += '\n private:\n %s();\n' % class_name
s += ' %s(const %s&);\n' % (class_name, class_name)
s += ' void operator=(const %s);\n' % class_name
s += '}; // class %s\n\n' % class_name
if is_server:
s += 'class %s {\n' % interface_name
s += ' public:\n'
s += ' static NaClSrpcHandlerDesc srpc_methods[];\n'
s += '}; // class %s' % interface_name
print >>output, s
PrintHeaderFileBottom(output, guard_name)
def PrintServerFile(output, header_name, interface_name, specs):
"""Print the server (stub) .cc file."""
def FormatDispatchPrototype(indent, rpc):
"""Format the prototype of a dispatcher method."""
s = '%sstatic void %sDispatcher(\n' % (indent, rpc['name'])
s += '%s NaClSrpcRpc* rpc,\n' % indent
s += '%s NaClSrpcArg** inputs,\n' % indent
s += '%s NaClSrpcArg** outputs,\n' % indent
s += '%s NaClSrpcClosure* done\n' % indent
s += '%s)' % indent
return s
def FormatMethodString(rpc):
"""Format the SRPC text string for a single rpc method."""
def FormatTypes(args):
s = ''
for arg in args:
s += types[arg[1]][0]
return s
s = ' { "%s:%s:%s", %sDispatcher },\n' % (rpc['name'],
FormatTypes(rpc['inputs']),
FormatTypes(rpc['outputs']),
rpc['name'])
return s
def FormatCall(class_name, indent, rpc):
"""Format a call from a dispatcher method to its stub."""
def FormatArgs(is_output, args):
"""Format the arguments passed to the stub."""
def FormatArg(is_output, num, arg):
"""Format an argument passed to a stub."""
if is_output:
prefix = 'outputs[' + str(num) + ']->'
addr_prefix = '&('
addr_suffix = ')'
else:
prefix = 'inputs[' + str(num) + ']->'
addr_prefix = ''
addr_suffix = ''
type_info = types[arg[1]]
if type_info[3]:
s = '%s%s%s%s, %s%s' % (addr_prefix,
prefix,
type_info[3],
addr_suffix,
prefix,
type_info[2])
else:
s = '%s%s%s%s' % (addr_prefix, prefix, type_info[2], addr_suffix)
return s
# end FormatArg
s = ''
num = 0
for arg in args:
s += ',\n%s %s' % (indent, FormatArg(is_output, num, arg))
num += 1
return s
# end FormatArgs
s = '%s::%s(\n%s rpc,\n' % (class_name, rpc['name'], indent)
s += '%s done' % indent
s += FormatArgs(False, rpc['inputs'])
s += FormatArgs(True, rpc['outputs'])
s += '\n%s)' % indent
return s
# end FormatCall
PrintSourceFileTop(output, header_name)
s = 'namespace {\n\n'
for spec in specs:
class_name = spec['name'] + 'Server'
rpcs = spec['rpcs']
for rpc in rpcs:
s += '%s {\n' % FormatDispatchPrototype('', rpc)
if rpc['inputs'] == []:
s += ' UNREFERENCED_PARAMETER(inputs);\n'
if rpc['outputs'] == []:
s += ' UNREFERENCED_PARAMETER(outputs);\n'
s += ' %s;\n' % FormatCall(class_name, ' ', rpc)
s += '}\n\n'
s += '} // namespace\n\n'
s += 'NaClSrpcHandlerDesc %s::srpc_methods[] = {\n' % interface_name
for spec in specs:
class_name = spec['name'] + 'Server'
rpcs = spec['rpcs']
for rpc in rpcs:
s += FormatMethodString(rpc)
s += ' { NULL, NULL }\n};\n'
print >>output, s
def PrintClientFile(output, header_name, specs, thread_check):
"""Prints the client (proxy) .cc file."""
def InstanceInputArg(rpc):
"""Returns the name of the PP_Instance arg or None if there is none."""
for arg in rpc['inputs']:
if arg[1] == 'PP_Instance':
return arg[0]
return None
def DeadNexeHandling(rpc, retval):
"""Generates the code necessary to handle death of a nexe during the rpc
call. This is only possible if a PP_Instance arg is present; otherwise no
special handling is generated."""
instance = InstanceInputArg(rpc)
if instance is not None:
check = (' if (%s == NACL_SRPC_RESULT_INTERNAL)\n'
' ppapi_proxy::CleanUpAfterDeadNexe(%s);\n')
return check % (retval, instance)
return '' # No handling
def FormatCall(rpc):
"""Format a call to the generic dispatcher, NaClSrpcInvokeBySignature."""
def FormatTypes(args):
"""Format a the type signature string for either inputs or outputs."""
s = ''
for arg in args:
s += types[arg[1]][0]
return s
def FormatArgs(args):
"""Format the arguments for the call to the generic dispatcher."""
def FormatArg(arg):
"""Format a single argument for the call to the generic dispatcher."""
s = ''
type_info = types[arg[1]]
if type_info[3]:
s += '%s, ' % CountName(arg[0])
s += arg[0]
return s
# end FormatArg
s = ''
for arg in args:
s += ',\n %s' % FormatArg(arg)
return s
#end FormatArgs
s = '(\n channel,\n "%s:%s:%s"' % (rpc['name'],
FormatTypes(rpc['inputs']),
FormatTypes(rpc['outputs']))
s += FormatArgs(rpc['inputs'])
s += FormatArgs(rpc['outputs']) + '\n )'
return s
# end FormatCall
# We need this to handle dead nexes.
if header_name.startswith('trusted'):
AddInclude('native_client/src/shared/ppapi_proxy/browser_globals.h')
if thread_check:
AddInclude('native_client/src/shared/ppapi_proxy/plugin_globals.h')
AddInclude('ppapi/c/ppb_core.h')
AddInclude('native_client/src/shared/platform/nacl_check.h')
PrintSourceFileTop(output, header_name)
s = ''
for spec in specs:
class_name = spec['name'] + 'Client'
rpcs = spec['rpcs']
for rpc in rpcs:
s += '%s {\n' % FormatRpcPrototype('', class_name + '::', '', rpc)
if thread_check and rpc['name'] not in ['PPB_GetInterface',
'PPB_Core_CallOnMainThread']:
error = '"%s: PPAPI calls are not supported off the main thread\\n"'
s += ' VCHECK(ppapi_proxy::PPBCoreInterface()->IsMainThread(),\n'
s += ' (%s,\n' % error
s += ' __FUNCTION__));\n'
s += ' NaClSrpcError retval;\n'
s += ' retval = NaClSrpcInvokeBySignature%s;\n' % FormatCall(rpc)
if header_name.startswith('trusted'):
s += DeadNexeHandling(rpc, 'retval')
s += ' return retval;\n'
s += '}\n\n'
print >>output, s
def MakePath(name):
paths = name.split(os.sep)
path = os.sep.join(paths[:-1])
try:
os.makedirs(path)
except OSError:
return
def main(argv):
usage = 'Usage: srpcgen.py <-c | -s> [--include=<name>] [--ppapi]'
usage = usage + ' <iname> <gname> <.h> <.cc> <specs>'
mode = None
ppapi = False
thread_check = False
try:
long_opts = ['include=', 'ppapi', 'thread-check']
opts, pargs = getopt.getopt(argv[1:], 'cs', long_opts)
except getopt.error, e:
print >>sys.stderr, 'Illegal option:', str(e)
print >>sys.stderr, usage
return 1
# Get the class name for the interface.
interface_name = pargs[0]
# Get the name for the token used as a multiple inclusion guard in the header.
include_guard_name = pargs[1]
# Get the name of the header file to be generated.
h_file_name = pargs[2]
MakePath(h_file_name)
# Note we open output files in binary mode so that on Windows the files
# will always get LF line-endings rather than CRLF.
h_file = open(h_file_name, 'wb')
# Get the name of the source file to be generated. Depending upon whether
# -c or -s is generated, this file contains either client or server methods.
cc_file_name = pargs[3]
MakePath(cc_file_name)
cc_file = open(cc_file_name, 'wb')
# The remaining arguments are the spec files to be compiled.
spec_files = pargs[4:]
for opt, val in opts:
if opt == '-c':
mode = 'client'
elif opt == '-s':
mode = 'server'
elif opt == '--include':
h_file_name = val
elif opt == '--ppapi':
ppapi = True
elif opt == '--thread-check':
thread_check = True
if ppapi:
AddInclude("ppapi/c/pp_instance.h")
AddInclude("ppapi/c/pp_module.h")
AddInclude("ppapi/c/pp_resource.h")
# Convert to forward slash paths if needed
h_file_name = "/".join(h_file_name.split("\\"))
# Verify we picked server or client mode
if not mode:
print >>sys.stderr, 'Neither -c nor -s specified'
print >>sys.stderr, usage
return 1
# Combine the rpc specs from spec_files into rpcs.
specs = []
for spec_file in spec_files:
code_obj = compile(open(spec_file, 'r').read(), 'file', 'eval')
specs.append(eval(code_obj))
# Print out the requested files.
if mode == 'client':
PrintHeaderFile(h_file, False, include_guard_name, interface_name, specs)
PrintClientFile(cc_file, h_file_name, specs, thread_check)
elif mode == 'server':
PrintHeaderFile(h_file, True, include_guard_name, interface_name, specs)
PrintServerFile(cc_file, h_file_name, interface_name, specs)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
mx3L/enigma2 | lib/python/Components/Converter/StringList.py | 132 | 1509 | from Converter import Converter
from enigma import eListboxPythonStringContent
from Components.Element import cached
class StringList(Converter):
"""Turns a simple python list into a list which can be used in a listbox."""
def __init__(self, type):
Converter.__init__(self, type)
self.content = None
def changed(self, what):
if not self.content:
self.content = eListboxPythonStringContent()
if self.source:
self.content.setList(self.source.list)
self.downstream_elements.changed(what)
def selectionChanged(self, index):
self.source.selectionChanged(index)
def setIndex(self, index):
# update all non-master targets
print "changed selection in listbox!"
for x in self.downstream_elements:
print "downstream element", x
if x is not self.master:
print "is not master, so update to index", index
x.index = index
def getIndex(self, index):
return None
index = property(getIndex, setIndex)
@cached
def getCurrent(self):
if self.source is None or self.index is None or self.index >= len(self.source.list):
return None
return self.source.list[self.index]
current = property(getCurrent)
# pass through: getIndex / setIndex to master
@cached
def getIndex(self):
if self.master is None:
return None
return self.master.index
def setIndex(self, index):
if self.master is not None:
self.master.index = index
index = property(getIndex, setIndex)
def entry_changed(self, index):
if self.content:
self.content.invalidateEntry(index)
| gpl-2.0 |
giorgiop/scikit-learn | examples/cluster/plot_face_segmentation.py | 71 | 2839 | """
===================================================
Segmenting the picture of a raccoon face in regions
===================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
# load the raccoon face as a numpy array
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(face)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25
#############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels, random_state=1)
t1 = time.time()
labels = labels.reshape(face.shape)
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l,
            colors=[plt.cm.spectral(l / float(N_REGIONS))])
plt.xticks(())
plt.yticks(())
title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
print(title)
plt.title(title)
plt.show()
| bsd-3-clause |
rafaeltg/pydl | examples/mlp.py | 2 | 3190 | import os
import numpy as np
from pydl.model_selection.scorer import r2_score, rmse
from pydl.models.layers import Dense, Dropout
from pydl.models import MLP, load_model, model_from_json
from dataset import create_multivariate_data, train_test_split
def run_mlp():
"""
MLP example
"""
n_features = 4
data = create_multivariate_data(n_features=n_features)
x, y, x_test, y_test = train_test_split(data[:, :-1], data[:, -1])
print('Creating MLP')
model = MLP(
name='mlp',
layers=[
Dense(units=16, activation='relu'),
Dropout(0.1),
Dense(units=8, activation='relu')
],
epochs=200)
print('Training')
model.fit(x=x, y=y)
print(model.summary())
train_score = model.score(x=x, y=y)
print('Train {} = {}'.format(model.get_loss_func().upper(), train_score))
test_score = model.score(x=x_test, y=y_test)
print('Test {} = {}'.format(model.get_loss_func().upper(), test_score))
print('Predicting test data')
y_test_pred = model.predict(x_test)
y_test_rmse = rmse(y_test, y_test_pred)
print('y_test RMSE = {}'.format(y_test_rmse))
y_test_r2 = r2_score(y_test, y_test_pred)
print('y_test R2 = {}'.format(y_test_r2))
print('Saving model')
model.save('models/mlp.h5')
model.save_json('models/mlp.json')
model.save_weights('models/mlp_weights.h5')
assert os.path.exists('models/mlp.json')
assert os.path.exists('models/mlp.h5')
assert os.path.exists('models/mlp_weights.h5')
del model
print('Loading model from .h5 file')
model = load_model('models/mlp.h5')
assert isinstance(model, MLP)
assert model.name == 'mlp'
print('Calculating train score')
np.testing.assert_equal(train_score, model.score(x=x, y=y))
print('Calculating test score')
np.testing.assert_equal(test_score, model.score(x=x_test, y=y_test))
print('Predicting test data')
y_test_pred_new = model.predict(x_test)
np.testing.assert_allclose(y_test_pred, y_test_pred_new, atol=1e-6)
print('Calculating RMSE for test set')
np.testing.assert_equal(y_test_rmse, rmse(y_test, y_test_pred_new))
print('Calculating R2 for test set')
np.testing.assert_equal(y_test_r2, r2_score(y_test, y_test_pred_new))
del model
print('Loading model from json and weights files')
model = model_from_json('models/mlp.json', weights_filepath='models/mlp_weights.h5', compile=True)
assert isinstance(model, MLP)
assert model.name == 'mlp'
print('Calculating train score')
np.testing.assert_equal(train_score, model.score(x=x, y=y))
print('Calculating test score')
np.testing.assert_equal(test_score, model.score(x=x_test, y=y_test))
print('Predicting test data')
y_test_pred_new = model.predict(x_test)
np.testing.assert_allclose(y_test_pred, y_test_pred_new, atol=1e-6)
print('Calculating RMSE for test set')
np.testing.assert_equal(y_test_rmse, rmse(y_test, y_test_pred_new))
print('Calculating R2 for test set')
np.testing.assert_equal(y_test_r2, r2_score(y_test, y_test_pred_new))
if __name__ == '__main__':
run_mlp()
| mit |
dkodnik/paperbod | bot/bot_token.py | 2 | 3244 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#ver:2014.04.01-1
import diaspy.client as dsc
import MySQLdb as mdb
import hashlib
from datetime import datetime, date, time
import time as tm
import sys
def activBotTokenAPI():
try:
con = mdb.connect('localhost', '**!**', '**!**', '**!**')
except Exception:
print "Error: connect db"
return False
with con:
cur = con.cursor(mdb.cursors.DictCursor)
cur.execute("SELECT * FROM sites WHERE status='1' AND feed_type='token'")
rows = cur.fetchall()
for row in rows:
curFdMsg = con.cursor(mdb.cursors.DictCursor)
curFdMsg.execute("SELECT * FROM tkn_post WHERE idSites='%s' AND status='0'",(row["id"]))
rowsFM = curFdMsg.fetchall()
try:
c = dsc.Client(row["pod_url"], row["usrnm"], row["pswrd"])
except Exception as inst:
print "Error: connect pod ", sys.exc_info()[0]
print inst
continue  # 'continue' starts the next pass of the loop, skipping the rest of the loop body
insertNewFeedAmnt=0
for rowFM in rowsFM:
urlLink = "pd://%s/post/%s" % (row["feed_url"], rowFM["id"])
oneURLcheck = hashlib.sha256(urlLink).hexdigest()
curPost = con.cursor(mdb.cursors.DictCursor)
curPost.execute("SELECT COUNT(*) as amnt FROM feeds WHERE hash=%s",(oneURLcheck))
rowsPost = curPost.fetchall()
for rowPost in rowsPost:
if rowPost["amnt"] == 0:
curPostEx = con.cursor(mdb.cursors.DictCursor)
curPostEx.execute("INSERT INTO feeds (`hash`, `idusr`, `idst`) VALUES (%s, %s, %s)", (oneURLcheck, row["idusr"], row["id"]))
insertNewFeedAmnt+=1
strTxt = rowFM["message"]
strTxt = strTxt.replace('\n', " ")
strTxt = strTxt.replace("\n", " ")
try:
c.post(strTxt);
except Exception:
print "Error: post message"
continue  # 'continue' starts the next pass of the loop, skipping the rest of the loop body
curPostEx.execute("UPDATE feeds SET addfeed = %s WHERE hash = %s AND idusr = %s", ("1", oneURLcheck, row["idusr"]))
curPostEx.execute("UPDATE tkn_post SET status=%s WHERE id = %s", ("1", rowFM["id"]))
if insertNewFeedAmnt!=0:
print "....add post api = %s" % (insertNewFeedAmnt)
return True
timeOut = 600
timeOutBad = 1200
iStep = 1
while True:
if activBotTokenAPI()==True:
print "step №%s time: %s" % (iStep, datetime.now())
iStep += 1
print "...pause 10min..."
tm.sleep(timeOut)
else:
print "step №%s time: %s" % (iStep, datetime.now())
iStep += 1
print "...pause 20min..."
tm.sleep(timeOutBad) | agpl-3.0 |
shuggiefisher/potato | django/contrib/gis/gdal/tests/test_driver.py | 330 | 1207 | import os, os.path, unittest
from django.contrib.gis.gdal import Driver, OGRException
valid_drivers = ('ESRI Shapefile', 'MapInfo File', 'TIGER', 'S57', 'DGN',
'Memory', 'CSV', 'GML', 'KML')
invalid_drivers = ('Foo baz', 'clucka', 'ESRI Shp')
aliases = {'eSrI' : 'ESRI Shapefile',
'TigER/linE' : 'TIGER',
'SHAPE' : 'ESRI Shapefile',
'sHp' : 'ESRI Shapefile',
}
class DriverTest(unittest.TestCase):
def test01_valid_driver(self):
"Testing valid OGR Data Source Drivers."
for d in valid_drivers:
dr = Driver(d)
self.assertEqual(d, str(dr))
def test02_invalid_driver(self):
"Testing invalid OGR Data Source Drivers."
for i in invalid_drivers:
self.assertRaises(OGRException, Driver, i)
def test03_aliases(self):
"Testing driver aliases."
for alias, full_name in aliases.items():
dr = Driver(alias)
self.assertEqual(full_name, str(dr))
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(DriverTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| bsd-3-clause |
Phonemetra/TurboCoin | test/functional/p2p_feefilter.py | 1 | 3528 | #!/usr/bin/env python3
# Copyright (c) 2016-2019 TurboCoin
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from decimal import Decimal
import time
from test_framework.messages import msg_feefilter
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import TurbocoinTestFramework
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(TurbocoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
self.sync_blocks()
self.nodes[0].add_p2p_connection(TestP2PConn())
# Test that invs are received for all txs at feerate of 20 sat/byte
node1.settxfee(Decimal("0.00020000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Set a filter of 15 sat/byte
self.nodes[0].p2p.send_and_ping(msg_feefilter(15000))
# Test that txs are still being received (paying 20 sat/byte)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
node1.settxfee(Decimal("0.00010000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
self.sync_mempools() # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that we
# can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
| mit |
Mhynlo/SickRage | lib/requests/packages/urllib3/fields.py | 200 | 5872 | from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
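def _format_header_param_example():
    """Illustrative sketch only (hypothetical filenames): a plain ASCII value is
    emitted as a quoted string, while a non-ASCII value falls back to the
    RFC 2231 'name*=' encoded form."""
    plain = format_header_param('filename', 'report.txt')
    # Roughly: filename="report.txt"
    encoded = format_header_param('filename', u'r\xe9sum\xe9.txt')
    # Roughly: filename*=utf-8''r%C3%A9sum%C3%A9.txt
    return plain, encoded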
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
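def _request_field_example():
    """Illustrative sketch only (hypothetical field values): builds one part of a
    multipart/form-data body the way an encoder would, then renders its headers."""
    field = RequestField.from_tuples('avatar', ('photo.png', b'<bytes>', 'image/png'))
    # from_tuples() already called make_multipart(), so Content-Disposition and
    # Content-Type are populated; Content-Location stays unset and is skipped.
    return field.render_headers()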
| gpl-3.0 |
drawks/ansible | lib/ansible/modules/network/f5/bigip_gtm_topology_record.py | 25 | 32848 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_topology_record
short_description: Manages GTM Topology Records
description:
- Manages GTM Topology Records. Once created, only topology record C(weight) can be modified.
version_added: 2.8
options:
source:
description:
- Specifies the origination of an incoming DNS request.
suboptions:
negate:
description:
          - When set to C(yes), the system selects this topology record when the request source does not match.
type: bool
default: no
subnet:
description:
- An IP address and network mask in the CIDR format.
type: str
region:
description:
- Specifies the name of region already defined in the configuration.
type: str
continent:
description:
- Specifies one of the seven continents, along with the C(Unknown) setting.
- Specifying C(Unknown) forces the system to use a default resolution
if the system cannot determine the location of the local DNS making the request.
- Full continent names and their abbreviated versions are supported.
type: str
country:
description:
- Specifies a country.
- In addition to the country full names, you may also specify their abbreviated
form, such as C(US) instead of C(United States).
- Valid country codes can be found here https://countrycode.org/.
type: str
state:
description:
- Specifies a state in a given country.
          - This parameter requires the C(country) option to be provided.
type: str
isp:
description:
- Specifies an Internet service provider.
type: str
choices:
- AOL
- BeijingCNC
- CNC
- ChinaEducationNetwork
- ChinaMobilNetwork
- ChinaRailwayTelcom
- ChinaTelecom
- ChinaUnicom
- Comcast
- Earthlink
- ShanghaiCNC
- ShanghaiTelecom
geo_isp:
description:
          - Specifies a geolocation ISP.
type: str
type: dict
required: True
destination:
description:
- Specifies where the system directs the incoming DNS request.
suboptions:
negate:
description:
          - When set to C(yes), the system selects this topology record when the request destination does not match.
type: bool
default: no
subnet:
description:
- An IP address and network mask in the CIDR format.
type: str
region:
description:
- Specifies the name of region already defined in the configuration.
type: str
continent:
description:
- Specifies one of the seven continents, along with the C(Unknown) setting.
- Specifying C(Unknown) forces the system to use a default resolution
if the system cannot determine the location of the local DNS making the request.
- Full continent names and their abbreviated versions are supported.
type: str
country:
description:
- Specifies a country.
          - Full country names and their abbreviated versions are supported.
type: str
state:
description:
- Specifies a state in a given country.
          - This parameter requires the C(country) option to be provided.
type: str
pool:
description:
- Specifies the name of GTM pool already defined in the configuration.
type: str
datacenter:
description:
- Specifies the name of GTM data center already defined in the configuration.
type: str
isp:
description:
- Specifies an Internet service provider.
type: str
choices:
- AOL
- BeijingCNC
- CNC
- ChinaEducationNetwork
- ChinaMobilNetwork
- ChinaRailwayTelcom
- ChinaTelecom
- ChinaUnicom
- Comcast
- Earthlink
- ShanghaiCNC
- ShanghaiTelecom
geo_isp:
description:
          - Specifies a geolocation ISP.
type: str
type: dict
required: True
weight:
description:
- Specifies the weight of the topology record.
- The system finds the weight of the first topology record that matches the server object (pool or pool member)
and the local DNS. The system then assigns that weight as the topology score for that server object.
- The system load balances to the server object with the highest topology score.
- If the system finds no topology record that matches both the server object and the local DNS,
then the system assigns that server object a zero score.
      - If the option is not specified when the record is created, the system will set it to a default value of C(1).
      - Valid range is (0 - 4294967295).
type: int
partition:
description:
- Device partition to manage resources on.
- Partition parameter is taken into account when used in conjunction with C(pool), C(data_center),
        and C(region) parameters; it is ignored otherwise.
type: str
default: Common
state:
description:
- When C(state) is C(present), ensures that the record exists.
- When C(state) is C(absent), ensures that the record is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create an IP Subnet and an ISP based topology record
bigip_gtm_topology_record:
source:
- subnet: 192.168.1.0/24
destination:
- isp: AOL
weight: 10
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create a region and a pool based topology record
bigip_gtm_topology_record:
source:
- region: Foo
destination:
- pool: FooPool
partition: FooBar
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create a negative region and a negative data center based topology record
bigip_gtm_topology_record:
source:
- region: Baz
- negate: yes
destination:
- datacenter: Baz-DC
- negate: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
weight:
description: The weight of the topology record.
returned: changed
type: int
sample: 20
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import iteritems
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.ipaddress import is_valid_ip_network
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.ipaddress import is_valid_ip_network
class Parameters(AnsibleF5Parameters):
api_map = {
'score': 'weight',
}
api_attributes = [
'score',
]
returnables = [
'weight',
'name'
]
updatables = [
'weight',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
countries = {
'Afghanistan': 'AF',
'Aland Islands': 'AX',
'Albania': 'AL',
'Algeria': 'DZ',
'American Samoa': 'AS',
'Andorra': 'AD',
'Angola': 'AO',
'Anguilla': 'AI',
'Antarctica': 'AQ',
'Antigua and Barbuda': 'AG',
'Argentina': 'AR',
'Armenia': 'AM',
'Aruba': 'AW',
'Australia': 'AU',
'Austria': 'AT',
'Azerbaijan': 'AZ',
'Bahamas': 'BS',
'Bahrain': 'BH',
'Bangladesh': 'BD',
'Barbados': 'BB',
'Belarus': 'BY',
'Belgium': 'BE',
'Belize': 'BZ',
'Benin': 'BJ',
'Bermuda': 'BM',
'Bhutan': 'BT',
'Bolivia': 'BO',
'Bonaire, Sint Eustatius and Saba': 'BQ',
'Bosnia and Herzegovina': 'BA',
'Botswana': 'BW',
'Bouvet Island': 'BV',
'Brazil': 'BR',
'British Indian Ocean Territory': 'IO',
'Brunei Darussalam': 'BN',
'Bulgaria': 'BG',
'Burkina Faso': 'BF',
'Burundi': 'BI',
'Cape Verde': 'CV',
'Cambodia': 'KH',
'Cameroon': 'CM',
'Canada': 'CA',
'Cayman Islands': 'KY',
'Central African Republic': 'CF',
'Chad': 'TD',
'Chile': 'CL',
'China': 'CN',
'Christmas Island': 'CX',
'Cocos (Keeling) Islands': 'CC',
'Colombia': 'CO',
'Comoros': 'KM',
'Congo': 'CG',
'Congo, The Democratic Republic of the': 'CD',
'Cook Islands': 'CK',
'Costa Rica': 'CR',
"Cote D'Ivoire": 'CI',
'Croatia': 'HR',
'Cuba': 'CU',
'Curaçao': 'CW',
'Cyprus': 'CY',
'Czech Republic': 'CZ',
'Denmark': 'DK',
'Djibouti': 'DJ',
'Dominica': 'DM',
'Dominican Republic': 'DO',
'Ecuador': 'EC',
'Egypt': 'EG',
'El Salvador': 'SV',
'Equatorial Guinea': 'GQ',
'Eritrea': 'ER',
'Estonia': 'EE',
'Ethiopia': 'ET',
'Falkland Islands (Malvinas)': 'FK',
'Faroe Islands': 'FO',
'Fiji': 'FJ',
'Finland': 'FI',
'France': 'FR',
'French Guiana': 'GF',
'French Polynesia': 'PF',
'French Southern Territories': 'TF',
'Gabon': 'GA',
'Gambia': 'GM',
'Georgia': 'GE',
'Germany': 'DE',
'Ghana': 'GH',
'Gibraltar': 'GI',
'Greece': 'GR',
'Greenland': 'GL',
'Grenada': 'GD',
'Guadeloupe': 'GP',
'Guam': 'GU',
'Guatemala': 'GT',
'Guernsey': 'GG',
'Guinea': 'GN',
'Guinea-Bissau': 'GW',
'Guyana': 'GY',
'Haiti': 'HT',
'Heard Island and McDonald Islands': 'HM',
'Holy See (Vatican City State)': 'VA',
'Honduras': 'HN',
'Hong Kong': 'HK',
'Hungary': 'HU',
'Iceland': 'IS',
'India': 'IN',
'Indonesia': 'ID',
'Iran, Islamic Republic of': 'IR',
'Iraq': 'IQ',
'Ireland': 'IE',
'Isle of Man': 'IM',
'Israel': 'IL',
'Italy': 'IT',
'Jamaica': 'JM',
'Japan': 'JP',
'Jersey': 'JE',
'Jordan': 'JO',
'Kazakhstan': 'KZ',
'Kenya': 'KE',
'Kiribati': 'KI',
"Korea, Democratic People's Republic of": 'KP',
'Korea, Republic of': 'KR',
'Kuwait': 'KW',
'Kyrgyzstan': 'KG',
"Lao People's Democratic Republic": 'LA',
'Latvia': 'LV',
'Lebanon': 'LB',
'Lesotho': 'LS',
'Liberia': 'LR',
'Libyan Arab Jamahiriya': 'LY',
'Liechtenstein': 'LI',
'Lithuania': 'LT',
'Luxembourg': 'LU',
'Macau': 'MO',
'Macedonia': 'MK',
'Madagascar': 'MG',
'Malawi': 'MW',
'Malaysia': 'MY',
'Maldives': 'MV',
'Mali': 'ML',
'Malta': 'MT',
'Marshall Islands': 'MH',
'Martinique': 'MQ',
'Mauritania': 'MR',
'Mauritius': 'MU',
'Mayotte': 'YT',
'Mexico': 'MX',
'Micronesia, Federated States of': 'FM',
'Moldova, Republic of': 'MD',
'Monaco': 'MC',
'Mongolia': 'MN',
'Montenegro': 'ME',
'Montserrat': 'MS',
'Morocco': 'MA',
'Mozambique': 'MZ',
'Myanmar': 'MM',
'Namibia': 'NA',
'Nauru': 'NR',
'Nepal': 'NP',
'Netherlands': 'NL',
'New Caledonia': 'NC',
'New Zealand': 'NZ',
'Nicaragua': 'NI',
'Niger': 'NE',
'Nigeria': 'NG',
'Niue': 'NU',
'Norfolk Island': 'NF',
'Northern Mariana Islands': 'MP',
'Norway': 'NO',
'Oman': 'OM',
'Pakistan': 'PK',
'Palau': 'PW',
'Palestinian Territory': 'PS',
'Panama': 'PA',
'Papua New Guinea': 'PG',
'Paraguay': 'PY',
'Peru': 'PE',
'Philippines': 'PH',
'Pitcairn Islands': 'PN',
'Poland': 'PL',
'Portugal': 'PT',
'Puerto Rico': 'PR',
'Qatar': 'QA',
'Reunion': 'RE',
'Romania': 'RO',
'Russian Federation': 'RU',
'Rwanda': 'RW',
'Saint Barthelemy': 'BL',
'Saint Helena': 'SH',
'Saint Kitts and Nevis': 'KN',
'Saint Lucia': 'LC',
'Saint Martin': 'MF',
'Saint Pierre and Miquelon': 'PM',
'Saint Vincent and the Grenadines': 'VC',
'Samoa': 'WS',
'San Marino': 'SM',
'Sao Tome and Principe': 'ST',
'Saudi Arabia': 'SA',
'Senegal': 'SN',
'Serbia': 'RS',
'Seychelles': 'SC',
'Sierra Leone': 'SL',
'Singapore': 'SG',
'Sint Maarten (Dutch part)': 'SX',
'Slovakia': 'SK',
'Slovenia': 'SI',
'Solomon Islands': 'SB',
'Somalia': 'SO',
'South Africa': 'ZA',
'South Georgia and the South Sandwich Islands': 'GS',
'South Sudan': 'SS',
'Spain': 'ES',
'Sri Lanka': 'LK',
'Sudan': 'SD',
'Suriname': 'SR',
'Svalbard and Jan Mayen': 'SJ',
'Swaziland': 'SZ',
'Sweden': 'SE',
'Switzerland': 'CH',
'Syrian Arab Republic': 'SY',
'Taiwan': 'TW',
'Tajikistan': 'TJ',
'Tanzania, United Republic of': 'TZ',
'Thailand': 'TH',
'Timor-Leste': 'TL',
'Togo': 'TG',
'Tokelau': 'TK',
'Tonga': 'TO',
'Trinidad and Tobago': 'TT',
'Tunisia': 'TN',
'Turkey': 'TR',
'Turkmenistan': 'TM',
'Turks and Caicos Islands': 'TC',
'Tuvalu': 'TV',
'Uganda': 'UG',
'Ukraine': 'UA',
'United Arab Emirates': 'AE',
'United Kingdom': 'GB',
'United States': 'US',
'United States Minor Outlying Islands': 'UM',
'Uruguay': 'UY',
'Uzbekistan': 'UZ',
'Vanuatu': 'VU',
'Venezuela': 'VE',
'Vietnam': 'VN',
'Virgin Islands, British': 'VG',
'Virgin Islands, U.S.': 'VI',
'Wallis and Futuna': 'WF',
'Western Sahara': 'EH',
'Yemen': 'YE',
'Zambia': 'ZM',
'Zimbabwe': 'ZW',
'Unrecognized': 'N/A',
'Asia/Pacific Region': 'AP',
'Europe': 'EU',
'Netherlands Antilles': 'AN',
'France, Metropolitan': 'FX',
'Anonymous Proxy': 'A1',
'Satellite Provider': 'A2',
'Other': 'O1',
}
continents = {
'Antarctica': 'AN',
'Asia': 'AS',
'Africa': 'AF',
'Europe': 'EU',
'North America': 'NA',
'South America': 'SA',
'Oceania': 'OC',
'Unknown': '--',
}
@property
def src_negate(self):
src_negate = self._values['source'].get('negate', None)
result = flatten_boolean(src_negate)
if result == 'yes':
return 'not'
return None
@property
def src_subnet(self):
src_subnet = self._values['source'].get('subnet', None)
if src_subnet is None:
return None
if is_valid_ip_network(src_subnet):
return src_subnet
raise F5ModuleError(
"Specified 'subnet' is not a valid subnet."
)
@property
def src_region(self):
src_region = self._values['source'].get('region', None)
if src_region is None:
return None
return fq_name(self.partition, src_region)
@property
def src_continent(self):
src_continent = self._values['source'].get('continent', None)
if src_continent is None:
return None
result = self.continents.get(src_continent, src_continent)
return result
@property
def src_country(self):
src_country = self._values['source'].get('country', None)
if src_country is None:
return None
result = self.countries.get(src_country, src_country)
return result
@property
def src_state(self):
src_country = self._values['source'].get('country', None)
src_state = self._values['source'].get('state', None)
if src_state is None:
return None
if src_country is None:
raise F5ModuleError(
'Country needs to be provided when specifying state'
)
result = '{0}/{1}'.format(src_country, src_state)
return result
@property
def src_isp(self):
src_isp = self._values['source'].get('isp', None)
if src_isp is None:
return None
return fq_name('Common', src_isp)
@property
def src_geo_isp(self):
src_geo_isp = self._values['source'].get('geo_isp', None)
return src_geo_isp
@property
def dst_negate(self):
dst_negate = self._values['destination'].get('negate', None)
result = flatten_boolean(dst_negate)
if result == 'yes':
return 'not'
return None
@property
def dst_subnet(self):
dst_subnet = self._values['destination'].get('subnet', None)
if dst_subnet is None:
return None
if is_valid_ip_network(dst_subnet):
return dst_subnet
raise F5ModuleError(
"Specified 'subnet' is not a valid subnet."
)
@property
def dst_region(self):
dst_region = self._values['destination'].get('region', None)
if dst_region is None:
return None
return fq_name(self.partition, dst_region)
@property
def dst_continent(self):
dst_continent = self._values['destination'].get('continent', None)
if dst_continent is None:
return None
result = self.continents.get(dst_continent, dst_continent)
return result
@property
def dst_country(self):
dst_country = self._values['destination'].get('country', None)
if dst_country is None:
return None
result = self.countries.get(dst_country, dst_country)
return result
@property
def dst_state(self):
dst_country = self.dst_country
dst_state = self._values['destination'].get('state', None)
if dst_state is None:
return None
if dst_country is None:
raise F5ModuleError(
'Country needs to be provided when specifying state'
)
result = '{0}/{1}'.format(dst_country, dst_state)
return result
@property
def dst_isp(self):
dst_isp = self._values['destination'].get('isp', None)
if dst_isp is None:
return None
return fq_name('Common', dst_isp)
@property
def dst_geo_isp(self):
dst_geo_isp = self._values['destination'].get('geo_isp', None)
return dst_geo_isp
@property
def dst_pool(self):
dst_pool = self._values['destination'].get('pool', None)
if dst_pool is None:
return None
return fq_name(self.partition, dst_pool)
@property
def dst_datacenter(self):
dst_datacenter = self._values['destination'].get('datacenter', None)
if dst_datacenter is None:
return None
return fq_name(self.partition, dst_datacenter)
@property
def source(self):
options = {
'negate': self.src_negate,
'subnet': self.src_subnet,
'region': self.src_region,
'continent': self.src_continent,
'country': self.src_country,
'state': self.src_state,
'isp': self.src_isp,
'geoip-isp': self.src_geo_isp,
}
result = 'ldns: {0}'.format(self._format_options(options))
return result
@property
def destination(self):
options = {
'negate': self.dst_negate,
'subnet': self.dst_subnet,
'region': self.dst_region,
'continent': self.dst_continent,
'country': self.dst_country,
'state': self.dst_state,
'datacenter': self.dst_datacenter,
'pool': self.dst_pool,
'isp': self.dst_isp,
'geoip-isp': self.dst_geo_isp,
}
result = 'server: {0}'.format(self._format_options(options))
return result
@property
def name(self):
result = '{0} {1}'.format(self.source, self.destination)
return result
def _format_options(self, options):
negate = None
cleaned = dict((k, v) for k, v in iteritems(options) if v is not None)
        if 'country' in cleaned and 'state' in cleaned:
del cleaned['country']
if 'negate' in cleaned.keys():
negate = cleaned['negate']
del cleaned['negate']
name, value = cleaned.popitem()
if negate:
result = '{0} {1} {2}'.format(negate, name, value)
return result
result = '{0} {1}'.format(name, value)
return result
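    # For example (illustrative): with a source of {'subnet': '192.168.1.0/24'}
    # and a destination of {'isp': 'AOL'}, the generated record name would be
    # roughly 'ldns: subnet 192.168.1.0/24 server: isp /Common/AOL'.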
@property
def weight(self):
weight = self._values['weight']
if weight is None:
return None
if 0 <= weight <= 4294967295:
return weight
raise F5ModuleError(
"Valid weight must be in range 0 - 4294967295"
)
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
name = self.want.name
uri = "https://{0}:{1}/mgmt/tm/gtm/topology/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
name.replace(' ', '%20').replace('/', '~')
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
uri = "https://{0}:{1}/mgmt/tm/gtm/topology/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def update_on_device(self):
params = self.changes.api_params()
name = self.want.name
uri = "https://{0}:{1}/mgmt/tm/gtm/topology/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
name.replace(' ', '%20').replace('/', '~')
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
name = self.want.name
uri = "https://{0}:{1}/mgmt/tm/gtm/topology/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
name.replace(' ', '%20').replace('/', '~')
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
name = self.want.name
uri = "https://{0}:{1}/mgmt/tm/gtm/topology/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
name.replace(' ', '%20').replace('/', '~')
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.choices = [
'AOL', 'BeijingCNC', 'CNC', 'ChinaEducationNetwork',
'ChinaMobilNetwork', 'ChinaRailwayTelcom', 'ChinaTelecom',
'ChinaUnicom', 'Comcast', 'Earthlink', 'ShanghaiCNC',
'ShanghaiTelecom',
]
argument_spec = dict(
source=dict(
required=True,
type='dict',
options=dict(
subnet=dict(),
region=dict(),
continent=dict(),
country=dict(),
state=dict(),
isp=dict(
choices=self.choices
),
geo_isp=dict(),
negate=dict(
type='bool',
default='no'
),
),
mutually_exclusive=[
['subnet', 'region', 'continent', 'country', 'isp', 'geo_isp']
]
),
destination=dict(
required=True,
type='dict',
options=dict(
subnet=dict(),
region=dict(),
continent=dict(),
country=dict(),
state=dict(),
pool=dict(),
datacenter=dict(),
isp=dict(
choices=self.choices
),
geo_isp=dict(),
negate=dict(
type='bool',
default='no'
),
),
mutually_exclusive=[
['subnet', 'region', 'continent', 'country', 'pool', 'datacenter', 'isp', 'geo_isp']
]
),
weight=dict(type='int'),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
state=dict(
default='present',
choices=['present', 'absent']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
dcoredump/zynthian-recipe | zynthian-stage/bin/pedalpi.py | 1 | 1514 | #!/usr/bin/python3
from pluginsmanager.banks_manager import BanksManager
from pluginsmanager.observer.mod_host.mod_host import ModHost
from pluginsmanager.model.bank import Bank
from pluginsmanager.model.pedalboard import Pedalboard
from pluginsmanager.model.connection import Connection
from pluginsmanager.model.lv2.lv2_effect_builder import Lv2EffectBuilder
from pluginsmanager.model.system.system_effect import SystemEffect
from pluginsmanager.jack.jack_client import JackClient
from pluginsmanager.model.system.system_effect_builder import SystemEffectBuilder
from pluginsmanager.observer.autosaver.autosaver import Autosaver
client = JackClient()
#sys_effect = SystemEffectBuilder(client)
sys_effect = SystemEffect('system', [], ['playback_1', 'playback_2'])
modhost = SystemEffect('mod-host', ['midi_in'], [])
autosaver = Autosaver('/root/pedalpi')
manager=autosaver.load(sys_effect)
#manager = BanksManager()
# Mod-Host
mod_host = ModHost('localhost')
mod_host.connect()
manager.register(mod_host)
pedalboard = Pedalboard('MDA-EP')
# Load the pedalboard into the mod-host connection (the original assigned it to
# the 'mod-host' SystemEffect, which does not manage pedalboards).
mod_host.pedalboard = pedalboard
builder = Lv2EffectBuilder()
ep = builder.build('http://moddevices.com/plugins/mda/EPiano')
# The effect must belong to the pedalboard before its ports can be connected.
pedalboard.append(ep)
# Route the EPiano stereo outputs to the system playback ports; Connection
# expects the source output first, then the destination input.
pedalboard.connections.append(Connection(ep.outputs[0], sys_effect.inputs[0]))
pedalboard.connections.append(Connection(ep.outputs[1], sys_effect.inputs[1]))
# MIDI routing from the 'mod-host' system ports is not wired up here.
# Safe close
from signal import pause
try:
pause()
except KeyboardInterrupt:
mod_host.close()
| gpl-3.0 |
varunarya10/contrail-generateDS | java_api.py | 3 | 14986 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import re
import sys
from ifmap_global import CamelCase
def getLinkInfoType(ident, link_info):
xlink = ident.getLink(link_info)
if xlink.getXsdType():
return xlink.getCType().getName()
return 'ApiPropertyBase'
class JavaApiGenerator(object):
def __init__(self, parser, type_map, identifiers, metadata):
self._parser = parser
self._type_map = type_map
self._top_level_map = {
'SubnetType': self._type_map['SubnetType']
}
self._identifier_map = identifiers
self._metadata_map = metadata
self._type_count = {}
def _FileWrite(self, file, multiline, indent_level):
lines = multiline.split('\n')
for line in lines:
line = ' ' * indent_level + line + '\n'
file.write(line)
#end _FileWrite
def _GenerateTypeClass(self, ctype, filename):
file = self._parser.makeFile(filename)
header = """//
// Automatically generated.
//
package net.juniper.contrail.api.types;
import java.util.List;
import java.util.ArrayList;
import net.juniper.contrail.api.ApiPropertyBase;
"""
file.write(header)
self._GenerateType(ctype, file, 0, {})
def _GenerateType(self, ctype, file, indent_level, inner_map):
if inner_map.get(ctype.getName()):
return
inner_map[ctype.getName()] = ctype
if indent_level and self._top_level_map.get(ctype.getName()):
return
count = self._type_count.get(ctype)
if count:
self._type_count[ctype] = count + 1
else:
self._type_count[ctype] = 1
if indent_level:
file.write(' ' * indent_level)
file.write('public ')
if indent_level:
file.write('static ')
file.write('class %s ' % ctype.getName())
if indent_level == 0:
file.write('extends ApiPropertyBase ')
file.write('{\n')
indent_level += 4
for dep in ctype.getDependentTypes():
self._GenerateType(dep, file, indent_level, inner_map)
for member in ctype.getDataMembers():
file.write(' ' * indent_level)
if (member.jtypename == "java.util.Date"):
file.write('volatile %s %s;\n' % (member.jtypename, member.membername))
else:
file.write('%s %s;\n' % (member.jtypename, member.membername))
# default constructor
file.write(' ' * indent_level)
file.write('public %s() {\n' % ctype.getName())
file.write(' ' * indent_level)
file.write('}\n')
# constructor with all properties
file.write(' ' * indent_level)
file.write('public %s(' % ctype.getName())
index = 0
for member in ctype.getDataMembers():
if index > 0:
file.write(', ')
file.write('%s %s' % (member.jtypename, member.membername))
index += 1
file.write(') {\n')
indent_level += 4
for member in ctype.getDataMembers():
file.write(' ' * indent_level)
file.write('this.%s = %s;\n' %
(member.membername, member.membername))
indent_level -= 4
file.write(' ' * indent_level)
file.write('}\n')
self._GenerateTypePropertyAccessors(file, ctype, indent_level);
self._GenerateTypePropertyConvinience(file, ctype, indent_level)
indent_level -= 4
if indent_level > 0:
file.write(' ' * indent_level)
file.write('}\n')
# _GenerateType
def _InnerPropertyArgument(self, inner, member):
decl = ''
if member.isComplex and not self._top_level_map.get(member.jtypename):
decl = inner.getName() + '.'
decl += member.jtypename
decl += ' ' + member.membername
return decl
def _GenerateTypePropertyAccessors(self, file, ctype, indent_level):
for prop in ctype.getDataMembers():
if prop.isSequence:
continue
decl = """
public %(type)s get%(caml)s() {
return %(field)s;
}
public void set%(caml)s(%(type)s %(field)s) {
this.%(field)s = %(field)s;
}
""" % {'caml': CamelCase(prop.membername), 'type': prop.jtypename,
'field': prop.membername}
self._FileWrite(file, decl, indent_level)
# _GenerateTypePropertyAccessors
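    # For a hypothetical member named 'display_name' with Java type 'String',
    # the emitted accessors would look roughly like:
    #
    #     public String getDisplayName() {
    #         return display_name;
    #     }
    #     public void setDisplayName(String display_name) {
    #         this.display_name = display_name;
    #     }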
def _GenerateTypePropertyConvinience(self, file, ctype, indent_level):
for member in ctype.getDataMembers():
if member.isSequence:
m = re.search(r'\<(.*)\>', member.jtypename)
if m:
innertype = m.group(1)
else:
print 'Unable to determine inner type for Collection: ' + member.jtypename
continue
methodname = CamelCase(member.membername)
decl = """
public List<%(typename)s> get%(caml)s() {
return %(field)s;
}
""" % { 'caml': methodname, 'typename': innertype, 'field': member.membername }
self._FileWrite(file, decl, indent_level)
if methodname.endswith('List'):
methodname = methodname[:-len('List')]
decl = """
public void add%(caml)s(%(typename)s obj) {
if (%(field)s == null) {
%(field)s = new ArrayList<%(typename)s>();
}
%(field)s.add(obj);
}
public void clear%(caml)s() {
%(field)s = null;
}
""" % {'caml': methodname, 'typename': innertype, 'field': member.membername}
self._FileWrite(file, decl, indent_level)
# convinience method that uses the inner type constructor
# arguments
inner = self._type_map.get(innertype)
if not inner or len(inner.getDataMembers()) > 4:
continue
decl = """
public void add%(caml)s(%(argdecl)s) {
if (%(field)s == null) {
%(field)s = new ArrayList<%(typename)s>();
}
%(field)s.add(new %(typename)s(%(arglist)s));
}
""" % {'caml': methodname, 'typename': innertype, 'field': member.membername,
'argdecl': ', '.join(
map(lambda x: self._InnerPropertyArgument(inner, x),
inner.getDataMembers())),
'arglist': ', '.join(
map(lambda x: x.membername, inner.getDataMembers()))
}
self._FileWrite(file, decl, indent_level)
# _GenerateTypePropertyConvinience
def _GenerateClass(self, ident, filename):
file = self._parser.makeFile(filename)
header = """//
// Automatically generated.
//
package net.juniper.contrail.api.types;
import java.util.List;
import java.util.ArrayList;
import com.google.common.collect.ImmutableList;
import net.juniper.contrail.api.ApiObjectBase;
import net.juniper.contrail.api.ApiPropertyBase;
import net.juniper.contrail.api.ObjectReference;
public class %(cls)s extends ApiObjectBase {
""" % {'cls': ident.getCppName() }
file.write(header)
for prop in ident.getProperties():
decl = ' private %s %s;\n' % (prop.getJavaTypename(), prop.getCIdentifierName())
file.write(decl)
ctype = prop.getCType()
if ctype:
ctypename = ctype.getName()
self._top_level_map[ctypename] = self._type_map[ctypename]
for link_info in ident.getLinksInfo():
link_type = getLinkInfoType(ident, link_info)
if ident.isLinkRef(link_info):
link_to = ident.getLinkTo(link_info)
decl = ' private List<ObjectReference<%s>> %s_refs;\n' % (link_type, link_to.getCIdentifierName())
file.write(decl)
elif ident.isLinkHas(link_info):
child = ident.getLinkTo(link_info)
decl = ' private List<ObjectReference<%s>> %ss;\n' % (link_type, child.getCIdentifierName())
file.write(decl)
for back_link in ident.getBackLinksInfo():
link_from = ident.getBackLinkFrom(back_link)
link_type = getLinkInfoType(ident, back_link)
decl = ' private transient List<ObjectReference<%s>> %s_back_refs;\n' % (link_type, link_from.getCIdentifierName())
file.write(decl)
self._GenerateTypename(file, ident)
self._GenerateDefaultParent(file, ident)
self._GenerateDefaultParentType(file, ident)
self._GeneratePropertyAccessors(file, ident, 4)
for link_info in ident.getLinksInfo():
if ident.isLinkRef(link_info):
self._GenerateLinkRefAccessors(file, ident, link_info)
elif ident.isLinkHas(link_info):
self._GenerateLinkHasAccessors(file, ident, link_info)
for back_link in ident.getBackLinksInfo():
self._GenerateBackRefAccessors(file, ident, back_link)
file.write('}')
def _GenerateTypename(self, file, ident):
decl = """
@Override
public String getObjectType() {
return "%s";
}
""" % ident.getName()
file.write(decl)
# _GenerateTypename
def _GenerateDefaultParent(self, file, ident):
fq_name = ''
parents = ident.getParents()
if parents:
(parent, meta) = parents[0]
quoted_list = map(lambda x: '"%s"' % x, parent.getDefaultFQName())
fq_name = ', '.join(quoted_list)
decl = """
@Override
public List<String> getDefaultParent() {
return ImmutableList.of(%s);
}
""" % fq_name
file.write(decl)
# _GenerateDefaultParent
def _GenerateDefaultParentType(self, file, ident):
def quote(s):
return '"' + s + '"'
typename = 'null';
parents = ident.getParents()
if parents:
(parent, meta) = parents[0]
typename = quote(parent.getName())
decl = """
@Override
public String getDefaultParentType() {
return %s;
}
""" % typename
file.write(decl)
# _GenerateDefaultParentType
def _GeneratePropertyAccessors(self, file, ident, indent_level):
for prop in ident.getProperties():
gsname = prop.getCppName()
if gsname.startswith(ident.getCppName()):
gsname = gsname[len(ident.getCppName()):]
decl = """
public %(type)s get%(caml)s() {
return %(field)s;
}
public void set%(caml)s(%(type)s %(field)s) {
this.%(field)s = %(field)s;
}
""" % {'caml': gsname, 'type': prop.getJavaTypename(),
'field': prop.getCIdentifierName()}
self._FileWrite(file, decl, indent_level)
# _GeneratePropertyAccessors
def _GenerateLinkRefAccessors(self, file, ident, link_info):
link_to = ident.getLinkTo(link_info)
getter = """
public List<ObjectReference<%(attrtype)s>> get%(caml)s() {
return %(id)s_refs;
}
""" % {'attrtype': getLinkInfoType(ident, link_info), 'caml': link_to.getCppName(), 'id': link_to.getCIdentifierName() }
file.write(getter)
xlink = ident.getLink(link_info)
if xlink.getXsdType():
attrtype = xlink.getCType().getName()
self._top_level_map[attrtype] = self._type_map[attrtype]
setters = """
public void set%(caml)s(%(linktype)s obj, %(datatype)s data) {
%(field)s_refs = new ArrayList<ObjectReference<%(datatype)s>>();
%(field)s_refs.add(new ObjectReference<%(datatype)s>(obj.getQualifiedName(), data));
}
public void add%(caml)s(%(linktype)s obj, %(datatype)s data) {
if (%(field)s_refs == null) {
%(field)s_refs = new ArrayList<ObjectReference<%(datatype)s>>();
}
%(field)s_refs.add(new ObjectReference<%(datatype)s>(obj.getQualifiedName(), data));
}
public void remove%(caml)s(%(linktype)s obj, %(datatype)s data) {
if (%(field)s_refs != null) {
%(field)s_refs.remove(new ObjectReference<%(datatype)s>(obj.getQualifiedName(), data));
}
}
public void clear%(caml)s() {
if (%(field)s_refs != null) {
%(field)s_refs.clear();
return;
}
%(field)s_refs = null;
}
""" % {'caml': link_to.getCppName(), 'linktype': link_to.getCppName(),
'datatype': attrtype, 'field': link_to.getCIdentifierName()}
file.write(setters)
else:
setters = """
public void set%(caml)s(%(linktype)s obj) {
%(field)s_refs = new ArrayList<ObjectReference<ApiPropertyBase>>();
%(field)s_refs.add(new ObjectReference<ApiPropertyBase>(obj.getQualifiedName(), null));
}
public void add%(caml)s(%(linktype)s obj) {
if (%(field)s_refs == null) {
%(field)s_refs = new ArrayList<ObjectReference<ApiPropertyBase>>();
}
%(field)s_refs.add(new ObjectReference<ApiPropertyBase>(obj.getQualifiedName(), null));
}
public void clear%(caml)s() {
if (%(field)s_refs != null) {
%(field)s_refs.clear();
return;
}
%(field)s_refs = null;
}
""" % {'caml': link_to.getCppName(), 'linktype': link_to.getCppName(),
'field': link_to.getCIdentifierName()}
file.write(setters)
# _GenerateLinkRefAccessors
def _GenerateLinkHasAccessors(self, file, ident, link_info):
child = ident.getLinkTo(link_info)
getter = """
public List<ObjectReference<%(attrtype)s>> get%(caml)ss() {
return %(id)ss;
}
""" % {'attrtype': getLinkInfoType(ident, link_info), 'caml': child.getCppName(), 'id': child.getCIdentifierName() }
file.write(getter)
# _GenerateLinkHasAccessors
def _GenerateBackRefAccessors(self, file, ident, back_link):
link_from = ident.getBackLinkFrom(back_link)
decl = """
public List<ObjectReference<%(attrtype)s>> get%(caml)sBackRefs() {
return %(field)s_back_refs;
}
""" % {'attrtype': getLinkInfoType(ident, back_link), 'caml': link_from.getCppName(), 'field': link_from.getCIdentifierName()}
file.write(decl)
# _GenerateBackRefAccessors
def Generate(self, dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
elif not os.path.isdir(dirname):
print "-o option must specify directory"
sys.exit(1)
for ident in self._identifier_map.values():
filename = os.path.join(dirname, ident.getCppName() + ".java")
self._GenerateClass(ident, filename)
for ctype in self._top_level_map.values():
filename = os.path.join(dirname, ctype.getName() + ".java")
self._GenerateTypeClass(ctype, filename)
for cname, count in self._type_count.items():
if count > 1:
print 'type %s count: %d' % (cname.getName(), count)
| mit |
fighterlyt/bite-project | deps/gdata-python-client/samples/apps/marketplace_sample/gdata/tlslite/utils/hmac.py | 403 | 3286 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
(This file is modified from the standard library version to do faster
copying)
"""
def _strxor(s1, s2):
"""Utility method. XOR the two strings s1 and s2 (must have same length).
"""
return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used.
digest_size = None
class HMAC:
"""RFC2104 HMAC class.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. Defaults to the md5 module.
"""
if digestmod is None:
import md5
digestmod = md5
if key == None: #TREVNEW - for faster copying
return #TREVNEW
self.digestmod = digestmod
self.outer = digestmod.new()
self.inner = digestmod.new()
self.digest_size = digestmod.digest_size
blocksize = 64
ipad = "\x36" * blocksize
opad = "\x5C" * blocksize
if len(key) > blocksize:
key = digestmod.new(key).digest()
key = key + chr(0) * (blocksize - len(key))
self.outer.update(_strxor(key, opad))
self.inner.update(_strxor(key, ipad))
if msg is not None:
self.update(msg)
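        # At this point the inner/outer hashes are primed so that digest()
        # yields H((K xor opad) + H((K xor ipad) + msg)), per RFC 2104.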
## def clear(self):
## raise NotImplementedError, "clear() method not available in HMAC."
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
other = HMAC(None) #TREVNEW - for faster copying
other.digest_size = self.digest_size #TREVNEW
other.digestmod = self.digestmod
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
return "".join([hex(ord(x))[2:].zfill(2)
for x in tuple(self.digest())])
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
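
# Illustrative usage sketch (not part of the original module); the expected
# value is the widely published HMAC-MD5 test vector for this key/message pair.
if __name__ == '__main__':
    h = new("key", "The quick brown fox jumps over the lazy dog")
    print h.hexdigest()  # 80070713463e7749b90c2dc24911e275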
| apache-2.0 |
izelaciman/node_production | node_modules/meanio/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py | 2354 | 10366 | # Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
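
# Illustrative usage sketch (not part of the original backport):
if __name__ == '__main__':
    od = OrderedDict([('a', 1), ('b', 2)])
    od['c'] = 3
    print od.items()    # [('a', 1), ('b', 2), ('c', 3)] -- insertion order preserved
    print od.popitem()  # ('c', 3) -- LIFO by default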
| mit |
ThomasMcVay/MediaApp | MediaAppKnobs/KnobElements/IntWidget.py | 1 | 1799 | #===============================================================================
# @Author: Madison Aster
# @ModuleDescription:
# @License:
# MediaApp Library - Python Package framework for developing robust Media
# Applications with Qt Library
# Copyright (C) 2013 Madison Aster
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License version 2.1 as published by the Free Software Foundation;
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See LICENSE in the root directory of this library for copy of
# GNU Lesser General Public License and other license details.
#===============================================================================
from Qt import QtGui, QtCore, QtWidgets
class IntWidget(QtWidgets.QLineEdit):
def __init__(self):
super(IntWidget, self).__init__()
self.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
self.setAlignment(QtCore.Qt.AlignLeft)
def setValue(self, value):
self.setText(str(value))
self.textChanged.emit
self.update()
def getValue(self):
return int(float(self.text()))
def sizeHint(self):
return QtCore.QSize(150,16) | lgpl-2.1 |
cerrno/neurokernel | neurokernel/LPU/utils/visualizer.py | 1 | 29782 | #!/usr/bin/env python
"""
LPU output visualization.
"""
import collections
from collections import OrderedDict
import itertools
import os
import matplotlib
from matplotlib import cm
from matplotlib.colors import Normalize
import matplotlib.pyplot as plt
from matplotlib.animation import FFMpegFileWriter, AVConvFileWriter
from matplotlib.colors import hsv_to_rgb
import networkx as nx
import numpy as np
from scipy.interpolate import griddata
from shutilwhich import which
import simpleio as sio
class visualizer(object):
"""
Visualize the output produced by LPU models.
Examples
--------
>>> import neurokernel.LPU.utils.visualizer as vis
>>> V = vis.visualizer()
>>> config1 = {}
>>> config1['type'] = 'image'
>>> config1['shape'] = [32,24]
>>> config1['clim'] = [-0.6,0.5]
>>> config2 = config1.copy()
>>> config2['clim'] = [-0.55,-0.45]
>>> V.add_LPU('lamina_output.h5', 'lamina.gexf.gz','lamina')
>>> V.add_plot(config1, 'lamina', 'R1')
>>> V.add_plot(config2, 'lamina', 'L1')
>>> V.update_interval = 50
>>> V.out_filename = 'test.avi'
>>> V.run()
"""
def __init__(self):
self._xlim = [0,1]
self._ylim = [-1,1]
self._imlim = [-1, 1]
self._update_interval = 50
self._out_file = None
self._fps = 5
self._codec = 'libtheora'
self._config = OrderedDict()
self._rows = 0
self._cols = 0
self._figsize = (16,9)
self._fontsize = 18
self._t = 1
self._dt = 1
self._data = {}
self._graph = {}
self._id_to_data_idx = {}
self._maxt = None
self._title = None
self._FFMpeg = None
def add_LPU(self, data_file, gexf_file=None, LPU=None, win=None,
is_input=False):
'''
Add data associated with a specific LPU to a visualization.
To add a plot containing neurons from a particular LPU,
the LPU needs to be added to the visualization using this
function. Note that outputs from multiple neurons can
be visualized using the same visualizer object.
Parameters
----------
data_file: str
Location of the h5 file generated by neurokernel
containing the output of the LPU
gexf_file: str
Location of the gexf file describing the LPU.
If not specified, it will be assumed that the h5 file
contains input.
LPU: str
Name of the LPU. Will be used as identifier to add plots.
For input signals, the name of the LPU will be prepended
with 'input_'. For example::
V.add_LPU('vision_in.h5', LPU='vision')
will create the LPU identifier 'input_vision'.
Therefore, adding a plot depicting this input can be done by::
                V.add_plot({'type': 'image', 'imlim': [-0.5, 0.5]}, LPU='input_vision')
win: slice/list
Can be used to limit the visualization to a specific time window.
'''
if gexf_file and not is_input:
self._graph[LPU] = nx.read_gexf(gexf_file)
# Map neuron ids to index into output data array:
self._id_to_data_idx[LPU] = {m:i for i, m in \
enumerate(sorted([int(n) for n, k in \
self._graph[LPU].nodes_iter(True) if k['spiking']]))}
else:
if LPU:
LPU = 'input_' + str(LPU)
else:
LPU = 'input_' + str(len(self._data))
if gexf_file:
self._graph[LPU] = nx.read_gexf(gexf_file)
if not LPU:
LPU = len(self._data)
self._data[LPU] = np.transpose(sio.read_array(data_file))
if win is not None:
self._data[LPU] = self._data[LPU][:,win]
if self._maxt:
self._maxt = min(self._maxt, self._data[LPU].shape[1])
else:
self._maxt = self._data[LPU].shape[1]
def run(self, final_frame_name=None, dpi=300):
'''
Starts the visualization process. If the property out_filename is set,
the visualization is saved as a video to the disk. If it is not
specified, the animation will be displayed on screen.
        Please refer to the documentation of add_LPU, add_plot and
        the properties of this class for how to configure the visualizer before calling
        this method. An example can be found in the class doc string.
Parameters
----------
final_frame_name: str
Optional. If specified, the final frame of the animation will be saved
to disk.
dpi: int
Default(300). If final_frame_name is specified, this parameter will control
the resolution at which the final frame is saved to disk.
Note:
-----
If update_interval is set to 0 or None, it will be replaced by the
index of the final time step. As a result, the visualizer will only
generate the final frame.
'''
self._initialize()
if not self._update_interval:
self._update_interval = self._maxt - 1
self._t = self._update_interval + 1
for _ in range(self._update_interval,
self._maxt, self._update_interval):
self._update()
if final_frame_name is not None:
self.f.savefig(final_frame_name, dpi=dpi)
if self.out_filename:
self._close()
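    # Example (illustrative, following the class-level example above): render
    # only the final frame by disabling periodic updates, as described in the
    # Note in the docstring:
    #     V.update_interval = None
    #     V.run(final_frame_name='final.png', dpi=150)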
def _set_wrapper(self, obj, name, value):
name = name.lower()
func = getattr(obj, 'set_'+name, None)
if func:
try:
func(value, fontsize=self._fontsize, weight='bold')
except:
try:
func(value)
except:
pass
def _initialize(self):
# Count number of plots to create:
num_plots = 0
for config in self._config.itervalues():
num_plots += len(config)
# Set default grid of plot positions:
if not self._rows*self._cols == num_plots:
self._cols = int(np.ceil(np.sqrt(num_plots)))
self._rows = int(np.ceil(num_plots/float(self._cols)))
self.f, self.axarr = plt.subplots(self._rows, self._cols,
figsize=self._figsize)
# Remove unused subplots:
for i in xrange(num_plots, self._rows*self._cols):
plt.delaxes(self.axarr[np.unravel_index(i, (self._rows, self._cols))])
cnt = 0
self.handles = []
self.types = []
keywds = ['handle', 'ydata', 'fmt', 'type', 'ids', 'shape', 'norm']
# TODO: Irregular grid in U will make the plot better
U, V = np.mgrid[0:np.pi/2:complex(0, 60),
0:2*np.pi:complex(0, 60)]
X = np.cos(V)*np.sin(U)
Y = np.sin(V)*np.sin(U)
Z = np.cos(U)
self._dome_pos_flat = (X.flatten(), Y.flatten(), Z.flatten())
self._dome_pos = (X, Y, Z)
self._dome_arr_shape = X.shape
if not isinstance(self.axarr, np.ndarray):
self.axarr = np.asarray([self.axarr])
for LPU, configs in self._config.iteritems():
for plt_id, config in enumerate(configs):
ind = np.unravel_index(cnt, self.axarr.shape)
cnt+=1
# Some plot types require specific numbers of
# neuron ID arrays:
if 'type' in config:
if config['type'] == 'quiver':
assert len(config['ids'])==2
config['type'] = 0
elif config['type'] == 'hsv':
assert len(config['ids'])==2
config['type'] = 1
elif config['type'] == 'image':
assert len(config['ids'])==1
config['type'] = 2
elif config['type'] == 'waveform':
config['type'] = 3
elif config['type'] == 'raster':
config['type'] = 4
elif config['type'] == 'rate':
config['type'] = 5
elif config['type'] == 'dome':
config['type'] = 6
else:
raise ValueError('Plot type not supported')
else:
if str(LPU).startswith('input') and not self._graph[LPU].node[str(config['ids'][0][0])]['spiking']:
config['type'] = 2
else:
config['type'] = 4
if config['type'] < 3:
if not 'shape' in config:
# XXX This can cause problems when the number
# of neurons is not equal to
# np.prod(config['shape'])
num_neurons = len(config['ids'][0])
config['shape'] = [int(np.ceil(np.sqrt(num_neurons)))]
config['shape'].append(int(np.ceil(num_neurons/float(config['shape'][0]))))
if config['type'] == 0:
config['handle'] = self.axarr[ind].quiver(\
np.reshape(self._data[LPU][config['ids'][0],0],config['shape']),\
np.reshape(self._data[LPU][config['ids'][1],0],config['shape']))
elif config['type'] == 1:
X = np.reshape(self._data[LPU][config['ids'][0],0],config['shape'])
Y = np.reshape(self._data[LPU][config['ids'][1],0],config['shape'])
V = (X**2 + Y**2)**0.5
H = (np.arctan2(X,Y)+np.pi)/(2*np.pi)
S = np.ones_like(V)
HSV = np.dstack((H,S,V))
RGB = hsv_to_rgb(HSV)
config['handle'] = self.axarr[ind].imshow(RGB)
elif config['type'] == 2:
if 'trans' in config:
if config['trans'] is True:
to_transpose = True
else:
to_transpose = False
else:
to_transpose = False
config['trans'] = False
if to_transpose:
temp = self.axarr[ind].imshow(np.transpose(np.reshape(\
self._data[LPU][config['ids'][0],0], config['shape'])))
else:
temp = self.axarr[ind].imshow(np.reshape(\
self._data[LPU][config['ids'][0],0], config['shape']))
temp.set_clim(self._imlim)
temp.set_cmap(plt.cm.gist_gray)
config['handle'] = temp
elif config['type'] == 3:
fmt = config['fmt'] if 'fmt' in config else ''
self.axarr[ind].set_xlim(self._xlim)
self.axarr[ind].set_ylim(self._ylim)
if len(config['ids'][0])==1:
config['handle'] = self.axarr[ind].plot([0], \
[self._data[LPU][config['ids'][0][0],0]], fmt)[0]
config['ydata'] = [self._data[LPU][config['ids'][0][0],0]]
else:
config['handle'] = self.axarr[ind].plot(self._data[LPU][config['ids'][0],0])[0]
elif config['type'] == 4:
config['handle'] = self.axarr[ind]
config['handle'].vlines(0, 0, 0.01)
config['handle'].set_ylim([.5, len(config['ids'][0]) + .5])
config['handle'].set_ylabel('Neurons',
fontsize=self._fontsize-1, weight='bold')
config['handle'].set_xlabel('Time (s)',fontsize=self._fontsize-1, weight='bold')
min_id = min(self._id_to_data_idx[LPU].keys())
min_idx = self._id_to_data_idx[LPU][min_id]
config['handle'].set_xlim([0,len(self._data[LPU][min_idx,:])*self._dt])
config['handle'].axes.set_yticks([])
config['handle'].axes.set_xticks([])
elif config['type'] == 6:
self.axarr[ind].axes.set_yticks([])
self.axarr[ind].axes.set_xticks([])
self.axarr[ind] = self.f.add_subplot(self._rows,
self._cols,
cnt,
projection='3d')
config['handle' ] = self.axarr[ind]
config['handle'].axes.set_yticks([])
config['handle'].axes.set_xticks([])
config['handle'].xaxis.set_ticks([])
config['handle'].yaxis.set_ticks([])
config['handle'].zaxis.set_ticks([])
if 'norm' not in config.keys():
config['norm'] = Normalize(vmin=-70, vmax=0, clip=True)
elif config['norm'] == 'auto':
if self._data[LPU].shape[1] > 100:
config['norm'] = Normalize(vmin = np.min(self._data[LPU][config['ids'][0],100:]),
vmax = np.max(self._data[LPU][config['ids'][0],100:]),
clip = True)
else:
config['norm'] = Normalize(vmin = np.min(self._data[LPU][config['ids'][0],:]),
vmax = np.max(self._data[LPU][config['ids'][0],:]),
clip = True)
node_dict = self._graph[LPU].node
if str(LPU).startswith('input'):
latpositions = np.asarray([ node_dict[str(nid)]['lat'] \
for nid in range(len(node_dict)) \
if node_dict[str(nid)]['extern'] ])
longpositions = np.asarray([ node_dict[str(nid)]['long'] \
for nid in range(len(node_dict)) \
if node_dict[str(nid)]['extern'] ])
else:
latpositions = np.asarray([ node_dict[str(nid)]['lat']
for nid in config['ids'][0] ])
longpositions = np.asarray([ node_dict[str(nid)]['long']
for nid in config['ids'][0] ])
xx = np.cos(longpositions) * np.sin(latpositions)
yy = np.sin(longpositions) * np.sin(latpositions)
zz = np.cos(latpositions)
config['positions'] = (xx, yy, zz)
colors = griddata(config['positions'], self._data[LPU][config['ids'][0],0],
self._dome_pos_flat, 'nearest').reshape(self._dome_arr_shape)
colors = config['norm'](colors).data
colors = np.tile(np.reshape(colors,
[self._dome_arr_shape[0],self._dome_arr_shape[1],1])
,[1,1,4])
colors[:,:,3] = 1.0
config['handle'].plot_surface(self._dome_pos[0], self._dome_pos[1],
self._dome_pos[2], rstride=1, cstride=1,
facecolors=colors, antialiased=False,
shade=False)
for key in config.iterkeys():
if key not in keywds:
try:
self._set_wrapper(self.axarr[ind],key, config[key])
except:
pass
try:
self._set_wrapper(config['handle'],key, config[key])
except:
pass
if config['type']<3:
config['handle'].axes.set_xticks([])
config['handle'].axes.set_yticks([])
if self.suptitle is not None:
self.f.suptitle(self._title, fontsize=self._fontsize+1, x=0.5,y=0.03, weight='bold')
plt.tight_layout()
if self.out_filename:
if self.FFMpeg is None:
if which(matplotlib.rcParams['animation.ffmpeg_path']):
self.writer = FFMpegFileWriter(fps=self.fps, codec=self.codec)
elif which(matplotlib.rcParams['animation.avconv_path']):
self.writer = AVConvFileWriter(fps=self.fps, codec=self.codec)
else:
raise RuntimeError('cannot find ffmpeg or avconv')
elif self.FFMpeg:
if which(matplotlib.rcParams['animation.ffmpeg_path']):
self.writer = FFMpegFileWriter(fps=self.fps, codec=self.codec)
else:
raise RuntimeError('cannot find ffmpeg')
else:
if which(matplotlib.rcParams['animation.avconv_path']):
self.writer = AVConvFileWriter(fps=self.fps, codec=self.codec)
else:
raise RuntimeError('cannot find avconv')
# Use the output file to determine the name of the temporary frame
# files so that two concurrently run visualizations don't clobber
# each other's frames:
self.writer.setup(self.f, self.out_filename, dpi=80,
frame_prefix=os.path.splitext(self.out_filename)[0]+'_')
self.writer.frame_format = 'png'
self.writer.grab_frame()
else:
self.f.show()
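    # Overview of the update step below: each call advances the animation by
    # update_interval time steps, appending new samples to waveform plots,
    # drawing spike markers on raster plots, refreshing image/quiver/hsv/dome
    # artists from the data at the current time step, and grabbing a frame
    # when writing to a video file.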
def _update(self):
dt = self._dt
t = self._t
for key, configs in self._config.iteritems():
data = self._data[key]
for config in configs:
if config['type'] == 3:
if len(config['ids'][0])==1:
config['ydata'].extend(np.reshape(np.double(\
data[config['ids'][0], \
max(0,t-self._update_interval):t]),(-1,)))
config['handle'].set_xdata(dt*np.arange(0, t))
config['handle'].set_ydata(np.asarray(config['ydata']))
else:
config['handle'].set_ydata(\
data[config['ids'][0], t])
elif config['type']==4:
for j, id in enumerate(config['ids'][0]):
# Convert neuron id to index into array of generated outputs:
try:
idx = self._id_to_data_idx[key][id]
except:
continue
else:
for time in np.where(data[idx, max(0,t-self._update_interval):t])[0]:
config['handle'].vlines(float(t-time)*self._dt,j+0.75, j+1.25)
elif config['type'] == 0:
shape = config['shape']
ids = config['ids']
config['handle'].U = np.reshape(data[ids[0], t],shape)
config['handle'].V = np.reshape(data[ids[1], t],shape)
elif config['type']==1:
shape = config['shape']
ids = config['ids']
X = np.reshape(data[ids[0], t],shape)
Y = np.reshape(data[ids[1], t],shape)
V = (X**2 + Y**2)**0.5
H = (np.arctan2(X,Y)+np.pi)/(2*np.pi)
S = np.ones_like(V)
HSV = np.dstack((H,S,V))
RGB = hsv_to_rgb(HSV)
config['handle'].set_data(RGB)
elif config['type'] == 2:
ids = config['ids']
if config['trans']:
config['handle'].set_data(
np.transpose(np.reshape(data[ids[0], t], config['shape'
])))
else:
config['handle'].set_data(
np.reshape(data[ids[0], t], config['shape']))
elif config['type'] == 6:
ids = config['ids']
d = data[ids[0], t]
colors = griddata(config['positions'], d,
self._dome_pos_flat, 'nearest').reshape(self._dome_arr_shape)
colors = config['norm'](colors).data
colors = np.tile(np.reshape(colors,
[self._dome_arr_shape[0],self._dome_arr_shape[1],1])
,[1,1,4])
colors[:,:,3] = 1.0
config['handle'].clear()
config['handle'].xaxis.set_ticks([])
config['handle'].yaxis.set_ticks([])
config['handle'].zaxis.set_ticks([])
config['handle'].plot_surface(self._dome_pos[0], self._dome_pos[1],
self._dome_pos[2], rstride=1, cstride=1,
facecolors=colors, antialiased=False,
shade=False)
keywds = ['handle', 'ydata', 'fmt', 'type', 'ids', 'shape', 'norm']
for key in config.iterkeys():
if key not in keywds:
try:
self._set_wrapper(self.axarr[ind],key, config[key])
except:
pass
try:
self._set_wrapper(config['handle'],key, config[key])
except:
pass
self.f.canvas.draw()
if self.out_filename:
self.writer.grab_frame()
self._t+=self._update_interval
def add_plot(self, config_dict, LPU, names=[''], shift=0):
'''
Add a plot to the visualizer
Parameters
----------
config_dict: dict
A dictionary specifying the plot attributes. The attribute
names should be the keys.
            The following are the plot attributes that can be specified using
this dict.
type - str
This specifies the type of the plot. Has to be one of
['waveform', 'raster', 'image','hsv','quiver', 'dome']
For plots of type 'dome', lat and long are required
to be specified in the gexf file.
ids - dict with either 1 or 2 entries
Specifies the neuron ids from the associated LPU.
The keys should be in [0,1] and the values
should be a list of ids.
For example::
{'ids':{0:[1,2]}}
will plot neurons with ids 1 and 2.
Two entries in the dictionary are needed if the plot is
of type 'hsv' or 'quiver'
For example::
                {'ids':{0:range(0,768),1:range(768,1536)},'type':'hsv'}
            can be used to generate an HSV plot where the hue channel is
            controlled by the angle of the vector defined by the membrane
            potentials of the neurons with ids 0-767 and 768-1535, and
            the value will be the magnitude of the same vector.
This parameter is optional for the following cases::
1) The plot is associated with input signals.
2) The names parameter is specified.
If the above doesn't hold, this attribute needs to be specified.
shape - list or tuple with two entries
This attribute specifies the dimensions for plots of type image,
hsv or quiver.
title - str
Optional. Can be used to control the title of the plot.
            In addition to the above, any parameter supported by matplotlib
for the particular type of plot can be specified.
For example - 'imlim','clim','xlim','ylim' etc.
LPU: str
            The name of the LPU associated with this plot.
names: list
Optional. A list of str specifying the neurons
to plot. Can be used instead of specifying ids in the
config_dict. The gexf file of the LPU needs to have
the name attribute in order for this to be used.
'''
config = config_dict.copy()
if not isinstance(names, list):
names = [names]
if not LPU in self._config:
self._config[LPU] = []
if 'ids' in config:
# XXX should check whether the specified ids are within range
self._config[LPU].append(config)
elif str(LPU).startswith('input'):
config['ids'] = [range(0, self._data[LPU].shape[0])]
self._config[LPU].append(config)
else:
config['ids'] = {}
for i,name in enumerate(names):
config['ids'][i]=[]
for id in range(len(self._graph[LPU].node)):
if self._graph[LPU].node[str(id)]['name'] == name:
config['ids'][i].append(id-shift)
self._config[LPU].append(config)
if not 'title' in config:
if names[0]:
config['title'] = "{0} - {1}".format(str(LPU),str(names[0]))
else:
if str(LPU).startswith('input_'):
config['title'] = LPU.split('_',1)[1] + ' - ' + 'Input'
else:
config['title'] = str(LPU)
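    # Hedged examples of add_plot usage; LPU names and id ranges are
    # illustrative, and the raster example assumes 'lamina' was added via
    # add_LPU with a gexf file so that spiking neuron ids can be resolved:
    #
    #   V.add_plot({'type': 'raster', 'ids': {0: range(0, 100)}}, LPU='lamina')
    #   V.add_plot({'type': 'image', 'shape': [24, 32], 'clim': [-0.5, 0.5],
    #               'title': 'Retina input'}, LPU='input_vision')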
def _close(self):
self.writer.finish()
plt.close(self.f)
@property
def xlim(self):
'''
Get or set the limits of the x-axis for all the raster and waveform plots.
        Can be overridden for individual plots by specifying xlim in the config_dict
        for that plot.
See also
--------
add_plot
'''
return self._xlim
@xlim.setter
def xlim(self, value):
self._xlim = value
@property
def ylim(self):
'''
Get or set the limits of the y-axis for all the raster and waveform plots.
        Can be overridden for individual plots by specifying ylim in the config_dict
        for that plot.
See also
--------
add_plot
'''
return self._ylim
@ylim.setter
def ylim(self, value):
self._ylim = value
@property
def FFMpeg(self): return self._FFMpeg
@FFMpeg.setter
def FFMpeg(self, value):
self._FFMpeg = value
@property
def imlim(self): return self._imlim
@imlim.setter
def imlim(self, value):
self._imlim = value
@property
def out_filename(self): return self._out_file
@out_filename.setter
def out_filename(self, value):
assert(isinstance(value, str))
self._out_file = value
@property
def fps(self): return self._fps
@fps.setter
def fps(self, value):
assert(isinstance(value, int))
self._fps = value
@property
def codec(self): return self._codec
@codec.setter
def codec(self, value):
assert(isinstance(value, str))
self._codec = value
@property
def rows(self): return self._rows
@rows.setter
def rows(self, value):
self._rows = value
@property
def cols(self): return self._cols
@cols.setter
def cols(self, value):
self._cols = value
@property
def dt(self): return self._dt
@dt.setter
def dt(self, value):
self._dt = value
@property
def figsize(self): return self._figsize
@figsize.setter
def figsize(self, value):
assert(isinstance(value, tuple) and len(value)==2)
self._figsize = value
@property
def fontsize(self): return self._fontsize
@fontsize.setter
def fontsize(self, value):
self._fontsize = value
@property
def suptitle(self): return self._title
@suptitle.setter
def suptitle(self, value):
self._title = value
@property
def update_interval(self):
"""
        Gets or sets the update interval (in terms of time steps) for the animation.
If value is 0 or None, update_interval will be set to the index of the
final step. As a consequence, only the final frame will be generated.
"""
return self._update_interval
@update_interval.setter
def update_interval(self, value):
self._update_interval = value
| bsd-3-clause |
etuna-SBF-kog/Stadsparken | env/lib/python2.7/site-packages/django/contrib/gis/utils/wkt.py | 419 | 1846 | """
Utilities for manipulating Geometry WKT.
"""
def precision_wkt(geom, prec):
"""
Returns WKT text of the geometry according to the given precision (an
    integer or a string). If the precision is an integer, the coordinates in
    the output WKT will be truncated to that number of decimal places:
>>> pnt = Point(5, 23)
>>> pnt.wkt
'POINT (5.0000000000000000 23.0000000000000000)'
    >>> precision_wkt(pnt, 1)
'POINT (5.0 23.0)'
    If the precision is a string, it must be a valid Python format string
(e.g., '%20.7f') -- thus, you should know what you're doing.
"""
if isinstance(prec, int):
num_fmt = '%%.%df' % prec
elif isinstance(prec, basestring):
num_fmt = prec
else:
raise TypeError
# TODO: Support 3D geometries.
coord_fmt = ' '.join([num_fmt, num_fmt])
def formatted_coords(coords):
return ','.join([coord_fmt % c[:2] for c in coords])
def formatted_poly(poly):
return ','.join(['(%s)' % formatted_coords(r) for r in poly])
def formatted_geom(g):
gtype = str(g.geom_type).upper()
yield '%s(' % gtype
if gtype == 'POINT':
yield formatted_coords((g.coords,))
elif gtype in ('LINESTRING', 'LINEARRING'):
yield formatted_coords(g.coords)
elif gtype in ('POLYGON', 'MULTILINESTRING'):
yield formatted_poly(g)
elif gtype == 'MULTIPOINT':
yield formatted_coords(g.coords)
elif gtype == 'MULTIPOLYGON':
yield ','.join(['(%s)' % formatted_poly(p) for p in g])
elif gtype == 'GEOMETRYCOLLECTION':
yield ','.join([''.join([wkt for wkt in formatted_geom(child)]) for child in g])
else:
raise TypeError
yield ')'
return ''.join([wkt for wkt in formatted_geom(geom)])
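# A hedged usage sketch (requires a GEOS-enabled build; Point comes from
# django.contrib.gis.geos and is not imported by this module):
#
#   >>> from django.contrib.gis.geos import Point
#   >>> precision_wkt(Point(5, 23), 1)
#   'POINT(5.0 23.0)'
#   >>> precision_wkt(Point(5, 23), '%.3f')
#   'POINT(5.000 23.000)'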
| gpl-3.0 |
goshow-jp/Kraken | Python/kraken/core/objects/object_3d.py | 1 | 33360 | """Kraken - objects.object_3d module.
Classes:
Object3D - Base Object3D Object.
"""
import re
import logging
from kraken.log import getLogger
from kraken.core.configs.config import Config
from kraken.core.objects.scene_item import SceneItem
from kraken.core.maths.xfo import Xfo
from kraken.core.maths.rotation_order import RotationOrder
from kraken.core.objects.attributes.attribute_group import AttributeGroup
from kraken.core.objects.attributes.bool_attribute import BoolAttribute
from kraken.core.objects.constraints.constraint import Constraint
from kraken.core.objects.constraints.orientation_constraint import OrientationConstraint
from kraken.core.objects.constraints.pose_constraint import PoseConstraint
from kraken.core.objects.constraints.position_constraint import PositionConstraint
from kraken.core.objects.constraints.scale_constraint import ScaleConstraint
from kraken.core.objects.operators.operator import Operator
logger = getLogger('kraken')
logger.setLevel(logging.INFO)
class Object3D(SceneItem):
"""Kraken base object type for any 3D object."""
def __init__(self, name, parent=None, flags=None):
super(Object3D, self).__init__(name, parent)
self._children = []
self._flags = {}
self._attributeGroups = []
self._constraints = []
self._xfo = Xfo()
self._ro = RotationOrder()
self._color = None
self._implicitAttrGrp = AttributeGroup("implicitAttrGrp", self)
self._visibility = BoolAttribute('visibility',
True,
self._implicitAttrGrp)
self._shapeVisibility = BoolAttribute('ShapeVisibility',
True,
self._implicitAttrGrp)
if parent is not None:
parent.addChild(self)
if flags is not None:
assert type(flags) is str, "Flags argument must be a comma separated string."
for flag in flags.replace(' ', '').split(','):
if not re.match("[\w]*$", flag):
msg = "{} '{}' {} ({}: {}) {}\n".format("Invalid flag", flag, "set on", self.getName(), self.getPath(), ". Alphanumeric and underscores only!")
logger.warn(msg)
continue
#
self.setFlag(flag)
# ==================
# Property Methods
# ==================
@property
def xfo(self):
"""Gets xfo property of this Object3D.
Returns:
Xfo: Xfo property of this Object3D.
"""
return self._xfo
@xfo.setter
def xfo(self, value):
"""Sets xfo of this Object3D.
Note:
In Python, objects are always referenced, meaning to get a unique
instance, an explicit clone is required. In KL, structs are passed
by value, meaning that every assignment of a struct causes a clone.
This means that in KL it is impossible for 2 objects to reference
the same KL math object. This is an important performance feature
of KL.
The members of the KL Math objects have this property. 2 Xfos
            cannot share the same tr value. Here we implicitly clone the math
object to ensure the same behavior as in KL.
Args:
value (Xfo): Vector to set the xfo by.
Returns:
bool: True if successful.
"""
self._xfo = value.clone()
return True
@property
def ro(self):
"""Gets Rotation Order property of this Object3D.
Returns:
RotationOrder: Rotation Order property of this Object3D.
"""
return self._ro
@ro.setter
def ro(self, value):
"""Sets Rotation Order of this Object3D.
Note:
In Python, objects are always referenced, meaning to get a unique
instance, an explicit clone is required. In KL, structs are passed
by value, meaning that every assignment of a struct causes a clone.
This means that in KL it is impossible for 2 objects to reference
the same KL math object. This is an important performance feature
of KL.
The members of the KL Math objects have this property. 2 Xfos
            cannot share the same tr value. Here we implicitly clone the math
object to ensure the same behavior as in KL.
Args:
value (RotationOrder): New rotation order.
Returns:
bool: True if successful.
"""
self._ro = value.clone()
return True
@property
def localXfo(self):
"""Gets local transform of this Object3D
Returns:
Xfo: Local Xfo of the object.
"""
globalXfo = self.globalXfo
parent = self.getParent()
if not isinstance(parent, SceneItem):
return globalXfo
parentXfo = parent.globalXfo
return parentXfo.inverse().multiply(globalXfo)
@property
def globalXfo(self):
"""Gets global transform of this Object3D
Returns:
Xfo: Global Xfo
"""
for source in self.getSources():
if isinstance(source, Object3D):
continue
if isinstance(source, Constraint):
return source.compute()
if isinstance(source, Operator):
source.evaluate()
break
return self._xfo
# =============
# Name Methods
# =============
def getBuildName(self):
"""Returns the build name for the object.
Returns:
str: Name to be used in the DCC.
"""
typeNameHierarchy = self.getTypeHierarchyNames()
config = Config.getInstance()
# If flag is set on object to use explicit name, return it.
if config.getExplicitNaming() is True or \
self.testFlag('EXPLICIT_NAME'):
return self.getName()
nameTemplate = config.getNameTemplate()
# Get the token list for this type of object
format = None
for typeName in nameTemplate['formats'].keys():
if typeName in typeNameHierarchy:
format = nameTemplate['formats'][typeName]
break
if format is None:
format = nameTemplate['formats']['default']
objectType = None
for eachType in typeNameHierarchy:
if eachType in nameTemplate['types'].keys():
objectType = eachType
break
if objectType is None:
objectType = 'default'
# Generate a name by concatenating the resolved tokens together.
builtName = ""
skipSep = False
for token in format:
if token is 'sep':
if not skipSep:
builtName += nameTemplate['separator']
elif token is 'location':
if self.isTypeOf('Component'):
location = self.getLocation()
else:
location = self.getComponent().getLocation()
if location not in nameTemplate['locations']:
msg = "Invalid location on '{}'. Location: {}. Valid locations: {}".format(self.getPath(), location, nameTemplate['locations'])
raise ValueError(msg)
builtName += location
elif token is 'type':
if objectType == 'Locator' and self.testFlag('inputObject'):
objectType = 'ComponentInput'
elif objectType == 'Locator' and self.testFlag('outputObject'):
objectType = 'ComponentOutput'
builtName += nameTemplate['types'][objectType]
elif token is 'name':
builtName += self.getName()
elif token is 'component':
if self.getComponent() is None:
skipSep = True
continue
builtName += self.getComponent().getName()
elif token is 'container':
if self.getContainer() is None:
skipSep = True
continue
builtName += self.getContainer().getName()
else:
raise ValueError("Unresolvabled token '" + token +
"' used on: " + self.getPath())
return builtName
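    # Hedged illustration of the token concatenation above: with a name
    # template format such as ['location', 'sep', 'component', 'sep', 'type',
    # 'sep', 'name'] and separator '_', a control named 'ik' in component
    # 'arm' at location 'L' would build a name of the form 'L_arm_<type>_ik',
    # where the type token is looked up in the active Config's name template.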
def setName(self, name):
"""Sets the name of the object with a string.
Args:
name (str): The new name for the item.
Returns:
bool: True if successful.
"""
# check for name collision and adjust the name if they exist
if self.getParent() is not None:
# Increment name if it already exists
initName = name
suffix = 1
collision = True
while collision:
child = self.getParent().getChildByDecoratedName(name + self.getNameDecoration())
collision = child is not None and child is not self
if not collision:
break
result = re.split(r"(\d+)$", initName, 1)
if len(result) > 1:
initName = result[0]
suffix = int(result[1])
name = initName + str(suffix).zfill(2)
suffix += 1
super(Object3D, self).setName(name)
return True
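    # Note on the collision handling above: when a sibling with the same
    # decorated name already exists, a zero-padded numeric suffix is appended
    # and incremented ('ctrl' -> 'ctrl01' -> 'ctrl02' -> ...) until a free
    # name is found.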
# ==================
# Hierarchy Methods
# ==================
def getContainer(self):
"""Returns the Container the object belongs to.
Returns:
Object: Container.
"""
parent = self.getParent()
while (parent is not None and 'Container' not in
parent.getTypeHierarchyNames()):
parent = parent.getParent()
return parent
def getLayer(self):
"""Returns the Layer the object belongs to.
Returns:
Object: Layer this object belongs to.
"""
parent = self.getParent()
while (parent is not None and not parent.isTypeOf('Layer')):
parent = parent.getParent()
return parent
# ==============
# Child Methods
# ==============
def hasChild(self, child):
"""Checks the supplied item is a child
Args:
child (Object): Object to check if is is a child of this object.
"""
for i, eachChild in enumerate(self.getChildren()):
if eachChild == child:
return True
return False
def _checkChildIndex(self, index):
"""Checks the supplied index is valid.
Args:
index (int): Child index to check.
"""
if index > len(self.getChildren()):
raise IndexError("'" + str(index) +
"' is out of the range of the 'children' array.")
return True
def addChild(self, child):
"""Adds a child to this object.
Note:
We allow for duplicate child names as long as the types differ.
Args:
child (Object): Object that will be a child of this object.
Returns:
bool: True if successful.
"""
SceneItem.setParent(child, self)
if child.getParent() is not None:
parent = child.getParent()
if child in parent.getChildren():
parent.getChildren().remove(child)
child.setName(child.getName())
self.getChildren().append(child)
# Assign the child the same component.
if self._component is not None:
child.setComponent(self._component)
return True
def setParent(self, parent):
"""Sets the parent of this object.
        Args:
parent (Object): Object that is the parent of this one.
Returns:
bool: True if successful.
"""
if parent:
parent.addChild(self)
else:
if self._parent is not None:
parent.removeChild(self)
SceneItem.setParent(self, None)
return True
def removeChildByIndex(self, index):
"""Removes a child from this object by index.
Args:
index (int): Index of child to remove.
Returns:
bool: True if successful.
"""
if self._checkChildIndex(index) is not True:
return False
self.removeChild(self.getChildren()[index])
return True
def removeChildByName(self, name):
"""Removes a child from this object by name.
Args:
name (str): Name of child to remove.
Returns:
bool: True if successful.
"""
removeIndex = None
for i, eachChild in enumerate(self.getChildren()):
if eachChild.getName() == name:
removeIndex = i
if removeIndex is None:
raise ValueError("'" + name +
"' is not a valid child of this object.")
self.removeChildByIndex(removeIndex)
return True
def removeChild(self, child):
"""Removed the child as an child item of this object.
Returns:
bool: True if successful.
"""
try:
self._children.remove(child)
except:
names = []
for c in self._children:
names.append(c.getName())
raise Exception("Object '" + self.getPath() +
"' does not have child:" + child.getPath() +
". it does have:" + str(names))
SceneItem.setParent(child, None)
# Un-assign the child the component.
if self._component is not None:
child.setComponent(None)
return True
def getDescendents(self, nodeList=None, classType=None, inheritedClass=False):
"""Gets the children of this object.
Args:
nodeList: (list): optional list to append children to
classType (str): Name of the type of class to limit the search to
inheritedClass (bool): Match nodes that is a sub-class of type.
Returns:
list: Child objects.
"""
if nodeList is None:
nodeList = []
for child in self._children:
if classType is not None:
if inheritedClass is not None and child.isTypeOf(classType):
nodeList.append(child)
elif child.getTypeName() == classType:
nodeList.append(child)
else:
nodeList.append(child)
child.getDescendents(classType=classType,
nodeList=nodeList,
inheritedClass=inheritedClass)
return nodeList
def getChildren(self):
"""Gets the children of this object.
Returns:
list: Child objects.
"""
return self._children
def getNumChildren(self):
"""Returns the number of children this object has.
Returns:
int: Number of children of this object.
"""
return len(self.getChildren())
def getChildByIndex(self, index):
"""Returns the child object at specified index.
Args:
index (int): Index of the child to find.
Returns:
Object: Child object at specified index.
"""
if self._checkChildIndex(index) is not True:
return False
return self.getChildren()[index]
def getChildByName(self, name):
"""Returns the child object with the specified name.
Args:
name (str): Name of the child to return.
Returns:
Object: Object if found.
"""
for eachChild in self.getChildren():
if eachChild.getName() == name:
return eachChild
return None
def getChildByDecoratedName(self, decoratedName):
"""Returns the child object with the specified name.
Args:
decoratedName (str): Decorated name of the child to find.
Returns:
Object: Object if found.
"""
for eachChild in self.getChildren():
if eachChild.getDecoratedName() == decoratedName:
return eachChild
return None
def getChildrenByType(self, childType):
"""Returns all children that are of the specified type.
Args:
childType (str): Type of children to find.
Returns:
list: Array of child objects of the specified type.
"""
childrenOfType = []
for eachChild in self.getChildren():
if eachChild.isTypeOf(childType):
childrenOfType.append(eachChild)
return childrenOfType
# =============
# Flag Methods
# =============
def setFlag(self, name):
"""Sets the flag of the specified name.
Returns:
bool: True if successful.
"""
self._flags[name] = True
return True
def testFlag(self, name):
"""Tests if the specified flag is set.
Args:
name (str): Name of the flag to test.
Returns:
bool: True if flag is set.
"""
return name in self._flags
def clearFlag(self, name):
"""Clears the flag of the specified name.
Args:
name (str): Name of the flag to clear.
Returns:
bool: True if successful.
"""
if name in self._flags:
del self._flags[name]
return True
return False
def getFlags(self):
"""Returns all flags set on this object.
Returns:
list: Flags set on this object.
"""
return self._flags.keys()
# ========================
# Attribute Group Methods
# ========================
def _checkAttributeGroupIndex(self, index):
"""Checks the supplied index is valid.
Args:
index (int): Attribute index to check.
Returns:
bool: True if successful.
"""
if index > len(self._attributeGroups):
raise IndexError("'" + str(index) +
"' is out of the range of 'attributeGroups' array.")
return True
def addAttributeGroup(self, attributeGroup):
"""Adds an attributeGroup to this object.
Args:
attributeGroup (Object): Attribute Group object to add to this
object.
Returns:
bool: True if successful.
"""
if attributeGroup.getName() in [x.getName() for x in self._attributeGroups]:
raise IndexError("Child with " + attributeGroup.getName() +
" already exists as a attributeGroup.")
self._attributeGroups.append(attributeGroup)
attributeGroup.setParent(self)
return True
def removeAttributeGroupByIndex(self, index):
"""Removes attribute at specified index.
Args:
index (int): Index of attribute to remove.
Returns:
bool: True if successful.
"""
if self._checkAttributeGroupIndex(index) is not True:
return False
del self._attributeGroups[index]
return True
def removeAttributeGroupByName(self, name):
"""Removes the attribute with the specified name.
Args:
name (str): Name of the attribute to remove.
Returns:
bool: True if successful.
"""
removeIndex = None
for i, eachAttributeGroup in enumerate(self._attributeGroups):
if eachAttributeGroup.getName() == name:
removeIndex = i
if removeIndex is None:
return False
self.removeAttributeGroupByIndex(removeIndex)
return True
def getNumAttributeGroups(self):
"""Returns the number of attributeGroups as an integer.
Returns:
int: Number of attributeGroups on this object.
"""
return len(self._attributeGroups)
def getAttributeGroupByIndex(self, index):
"""Returns the attribute at the specified index.
Args:
index (int): Index of the attribute to return.
Returns:
AttributeGroup: Attribute Group at the specified index.
"""
if self._checkAttributeGroupIndex(index) is not True:
return False
return self._attributeGroups[index]
def getAttributeGroupByName(self, name):
"""Return the attribute group with the specified name.
Args:
name (str): Name of the attribute group to return.
Returns:
            AttributeGroup: Attribute group with the specified name.
"""
for eachAttributeGroup in self._attributeGroups:
if eachAttributeGroup.getName() == name:
return eachAttributeGroup
return None
# ===================
# Constraint Methods
# ===================
def checkConstraintIndex(self, index):
"""Checks the supplied index is valid.
Args:
index (int): Constraint index to check.
Returns:
bool: True if successful.
"""
if index > len(self._constraints):
raise IndexError("'" + str(index) +
"' is out of the range of 'constraints' array.")
return True
def constrainTo(self, constrainers, constraintType="Pose", maintainOffset=False, name=None):
"""Adds an constraint to this object.
Args:
constrainers (Object or Object list): Constraint object to add to
this object or objects.
constraintType (str): String name of the constraint type.
maintainOffset (bool): Sets the constraint to maintain offset when
creating the constraint.
name (str): Name of the constraint. If set to None, a name is
automatically generated.
Returns:
string: Constraint object
"""
if name is None:
constraintName = ""
if hasattr(constrainers, '__iter__'):
constraintName = '_'.join([self.getName(), 'To', constrainers[0].getName(), constraintType + 'Constraint'])
else:
constraintName = '_'.join([self.getName(), 'To', constrainers.getName(), constraintType + 'Constraint'])
else:
constraintName = name
constraint = None
if constraintType == "Orientation":
constraint = OrientationConstraint(constraintName)
elif constraintType == "Pose":
constraint = PoseConstraint(constraintName)
elif constraintType == "Position":
constraint = PositionConstraint(constraintName)
elif constraintType == "Scale":
constraint = ScaleConstraint(constraintName)
else:
raise ValueError("'" + constraintType +
"' is not a valid constraint type. Valid types are Orientation, Pose, Position, or Scale")
# Accept a single object or a list of objects
if hasattr(constrainers, '__iter__'):
pass
else:
constrainers = [constrainers]
for constrainer in constrainers:
constraint.addConstrainer(constrainer)
constraint.setMaintainOffset(maintainOffset)
self.addConstraint(constraint)
return constraint
def addConstraint(self, constraint):
"""Adds an constraint to this object.
Args:
constraint (Object): Constraint object to add to this object.
Returns:
bool: True if successful.
"""
if constraint.getName() in [x.getName() for x in self._constraints]:
raise IndexError("Constraint with name '" + constraint.getName() +
"'' already exists as a constraint.")
self._constraints.append(constraint)
constraint.setParent(self)
constraint.setConstrainee(self)
return True
def removeConstraintByIndex(self, index):
"""Removes constraint at specified index.
Args:
index (int): Index of constraint to remove.
Returns:
bool: True if successful.
"""
if self.checkConstraintIndex(index) is not True:
return False
del self._constraints[index]
return True
def removeConstraintByName(self, name):
"""Removes the constraint with the specified name.
Args:
name (str): Name of the constraint to remove.
Returns:
bool: True if successful.
"""
removeIndex = None
for i, eachConstraint in enumerate(self._constraints):
if eachConstraint.getName() == name:
removeIndex = i
if removeIndex is None:
return False
self.removeConstraintByIndex(removeIndex)
return True
def removeAllConstraints(self):
"""Removes all of the constraints for this object.
Returns:
bool: True if successful.
"""
del self._constraints[:]
return True
def getNumConstraints(self):
"""Returns the number of constraints as an integer.
Returns:
int: Number of constraints on this object.
"""
return len(self._constraints)
def getConstraintByIndex(self, index):
"""Returns the constraint at the specified index.
Args:
index (int): Index of the constraint to return.
Returns:
Constraint: Constraint at the specified index.
"""
if self.checkConstraintIndex(index) is not True:
return False
return self._constraints[index]
def getConstraintByName(self, name):
"""Return the constraint group with the specified name.
Args:
name (str): Name of the constraint group to return.
Returns:
Attribute: Attribute with the specified name.
"""
for eachConstraint in self._constraints:
if eachConstraint.getName() == name:
return eachConstraint
return None
# ===================
# Visibility Methods
# ===================
def getVisibilityAttr(self):
"""Returns the Visibility attribute object.
Returns:
BoolAttribute: Attribute that holds the value of the visibility.
"""
return self._visibility
def getVisibility(self):
"""Returns the visibility status of the scene item.
Returns:
bool: Visible or not.
"""
return self._visibility.getValue()
def setVisibility(self, value):
"""Sets the visibility of the scene object.
Args:
value (bool): value of the visibility of the object.
Returns:
bool: True if successful.
"""
self._visibility.setValue(value)
return True
def getShapeVisibilityAttr(self):
"""Returns the Shape Visibility attribute object.
Returns:
BoolAttribute: Attribute that holds the value of the shape
visibility.
"""
return self._shapeVisibility
def getShapeVisibility(self):
"""Returns the shape visibility status of the scene item.
Returns:
bool: Visible or not.
"""
return self._shapeVisibility.getValue()
def setShapeVisibility(self, value):
"""Sets the shape visibility of the scene object.
Args:
value (bool): Value of the visibility of the object.
Returns:
bool: True if successful.
"""
self._shapeVisibility.setValue(value)
return True
# ================
# Display Methods
# ================
def setColor(self, color):
"""Sets the color of this object.
Args:
color (str, Color): Name of the color from the Config or a Color() object.
Returns:
bool: True if successful.
"""
assert type(color).__name__ in ('str', 'Color'), self.getPath() + \
".setColor(), 'color' argument type is not of type 'str' or 'Color'."
self._color = color
return True
def getColor(self):
"""Returns the color of the object.
Returns:
str: Color of the object.
"""
return self._color
# ==========================
# Parameter Locking Methods
# ==========================
def lockRotation(self, x=False, y=False, z=False):
"""Sets flags for locking rotation parameters.
Args:
x (bool): Lock x axis.
y (bool): Lock y axis.
z (bool): Lock z axis.
Returns:
bool: True if successful.
"""
if x is True:
self.setFlag("lockXRotation")
if y is True:
self.setFlag("lockYRotation")
if z is True:
self.setFlag("lockZRotation")
return True
def lockScale(self, x=False, y=False, z=False):
"""Sets flags for locking scale parameters.
Args:
x (bool): Lock x axis.
y (bool): Lock y axis.
z (bool): Lock z axis.
Returns:
bool: True if successful.
"""
if x is True:
self.setFlag("lockXScale")
if y is True:
self.setFlag("lockYScale")
if z is True:
self.setFlag("lockZScale")
return True
def lockTranslation(self, x=False, y=False, z=False):
"""Sets flags for locking translation parameters.
Args:
x (bool): Lock x axis.
            y (bool): Lock y axis.
            z (bool): Lock z axis.
Returns:
bool: True if successful.
"""
if x is True:
self.setFlag("lockXTranslation")
if y is True:
self.setFlag("lockYTranslation")
if z is True:
self.setFlag("lockZTranslation")
return True
# ====================
# Persistence Methods
# ====================
def jsonEncode(self, saver):
"""Encodes the object to a JSON structure.
Args:
saver (Object): saver object.
Returns:
Dict: A JSON structure containing the data for this SceneItem.
"""
classHierarchy = self.getTypeHierarchyNames()
jsonData = {
'__typeHierarchy__': classHierarchy,
'name': self.getName(),
'parent': None,
'children': [],
'flags': self._flags,
'attributeGroups': [],
'constraints': [],
'xfo': self.xfo.jsonEncode(),
'color': self.getColor(),
'visibility': self._visibility,
'shapeVisibility': self._shapeVisibility,
}
if self.getParent() is not None:
jsonData['parent'] = self.getParent().getName()
if self.getColor() is not None:
jsonData['color'] = saver.encodeValue(self.getColor())
for child in self.getChildren():
jsonData['children'].append(child.jsonEncode(saver))
for attrGroup in self._attributeGroups:
jsonData['attributeGroups'].append(attrGroup.jsonEncode(saver))
for constr in self._constraints:
jsonData['constraints'].append(constr.jsonEncode(saver))
return jsonData
def jsonDecode(self, loader, jsonData):
"""Returns the color of the object..
Args:
loader (Object): Loader object.
jsonData (Dict): JSON object structure.
Returns:
bool: True if successful.
"""
self._flags = jsonData['flags']
self.xfo = loader.decodeValue(jsonData['xfo'])
if 'color' in jsonData and jsonData['color'] is not None:
self.setColor(loader.decodeValue(jsonData['color']))
self._visibility = jsonData['visibility']
self._shapeVisibility = jsonData['shapeVisibility']
for child in jsonData['children']:
self.addChild(loader.construct(child))
for attrGroup in jsonData['attributeGroups']:
# There is one default attribute group assigned to each scene item.
# Load data into the existing item instead of constructing a new
# one.
if attrGroup['name'] == '':
loader.registerItem(self._attributeGroups[0])
self._attributeGroups[0].jsonDecode(loader, attrGroup)
else:
self.addAttributeGroup(loader.construct(attrGroup))
for constr in jsonData['constraints']:
self.addConstraint(loader.construct(constr))
return True
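# A minimal usage sketch of Object3D; names are illustrative and only the
# methods defined above are used:
#
#   root = Object3D('root')
#   ctrl = Object3D('ctrl', parent=root, flags='EXPLICIT_NAME')
#   ctrl.xfo = Xfo()                  # the setter stores a clone of the Xfo
#   ctrl.lockScale(x=True, y=True, z=True)
#   cns = ctrl.constrainTo(root, constraintType='Pose', maintainOffset=True)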
| bsd-3-clause |
1n5aN1aC/Octoprint-MQTT-Progress | setup.py | 1 | 3941 | # coding=utf-8
########################################################################################################################
### Do not forget to adjust the following variables to your own plugin.
# The plugin's identifier, has to be unique
plugin_identifier = "mqttprogress"
# The plugin's python package, should be "octoprint_<plugin identifier>", has to be unique
plugin_package = "Octoprint_MQTT_Progress"
# The plugin's human readable name. Can be overwritten within OctoPrint's internal data via __plugin_name__ in the
# plugin module
plugin_name = "OctoPrint-MQTT-Progress"
# The plugin's version. Can be overwritten within OctoPrint's internal data via __plugin_version__ in the plugin module
plugin_version = "0.2.0"
# The plugin's description. Can be overwritten within OctoPrint's internal data via __plugin_description__ in the plugin
# module
plugin_description = """Publishes printer progress via MQTT"""
# The plugin's author. Can be overwritten within OctoPrint's internal data via __plugin_author__ in the plugin module
plugin_author = "1n5aN1aC (Joshua Villwock)"
# The plugin's author's mail address.
plugin_author_email = "[email protected]"
# The plugin's homepage URL. Can be overwritten within OctoPrint's internal data via __plugin_url__ in the plugin module
plugin_url = "https://github.com/1n5aN1aC/OctoPrint-MQTT-Progress"
# The plugin's license. Can be overwritten within OctoPrint's internal data via __plugin_license__ in the plugin module
plugin_license = "AGPLv3"
### --------------------------------------------------------------------------------------------------------------------
### More advanced options that you usually shouldn't have to touch follow after this point
### --------------------------------------------------------------------------------------------------------------------
# Any additional requirements besides OctoPrint should be listed here
plugin_requires = ["OctoPrint-MQTT >= 0.2"]
# Additional package data to install for this plugin. The subfolders "templates", "static" and "translations" will
# already be installed automatically if they exist.
plugin_additional_data = []
# Any additional python packages you need to install with your plugin that are not contained in <plugin_package>.*
plugin_additional_packages = []
# Any python packages within <plugin_package>.* you do NOT want to install with your plugin
plugin_ignored_packages = []
# Additional parameters for the call to setuptools.setup. If your plugin wants to register additional entry points,
# define dependency links or other things like that, this is the place to go. Will be merged recursively with the
# default setup parameters as provided by octoprint_setuptools.create_plugin_setup_parameters using
# octoprint.util.dict_merge.
#
additional_setup_parameters = {"dependency_links": ["https://github.com/OctoPrint/OctoPrint-MQTT/archive/master.zip"]}
########################################################################################################################
from setuptools import setup
try:
import octoprint_setuptools
except:
print("Could not import OctoPrint's setuptools, are you sure you are running that under "
"the same python installation that OctoPrint is installed under?")
import sys
sys.exit(-1)
setup_parameters = octoprint_setuptools.create_plugin_setup_parameters(
identifier=plugin_identifier,
package=plugin_package,
name=plugin_name,
version=plugin_version,
description=plugin_description,
author=plugin_author,
mail=plugin_author_email,
url=plugin_url,
license=plugin_license,
requires=plugin_requires,
additional_packages=plugin_additional_packages,
ignored_packages=plugin_ignored_packages,
additional_data=plugin_additional_data
)
if len(additional_setup_parameters):
from octoprint.util import dict_merge
setup_parameters = dict_merge(setup_parameters, additional_setup_parameters)
setup(**setup_parameters)
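# Hedged install note (the archive URL pattern is assumed from plugin_url
# above): the plugin can typically be installed into OctoPrint's virtualenv
# with something like
#
#   pip install https://github.com/1n5aN1aC/OctoPrint-MQTT-Progress/archive/master.zip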
| agpl-3.0 |
tswast/google-cloud-python | error_reporting/google/cloud/errorreporting_v1beta1/proto/error_stats_service_pb2.py | 2 | 56991 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/devtools/clouderrorreporting_v1beta1/proto/error_stats_service.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.errorreporting_v1beta1.proto import (
common_pb2 as google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_common__pb2,
)
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/devtools/clouderrorreporting_v1beta1/proto/error_stats_service.proto",
package="google.devtools.clouderrorreporting.v1beta1",
syntax="proto3",
serialized_options=_b(
"\n/com.google.devtools.clouderrorreporting.v1beta1B\026ErrorStatsServiceProtoP\001Z^google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1;clouderrorreporting\252\002#Google.Cloud.ErrorReporting.V1Beta1\312\002#Google\\Cloud\\ErrorReporting\\V1beta1"
),
serialized_pb=_b(
'\nKgoogle/devtools/clouderrorreporting_v1beta1/proto/error_stats_service.proto\x12+google.devtools.clouderrorreporting.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a>google/devtools/clouderrorreporting_v1beta1/proto/common.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xa1\x04\n\x15ListGroupStatsRequest\x12\x14\n\x0cproject_name\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x03(\t\x12Y\n\x0eservice_filter\x18\x03 \x01(\x0b\x32\x41.google.devtools.clouderrorreporting.v1beta1.ServiceContextFilter\x12O\n\ntime_range\x18\x05 \x01(\x0b\x32;.google.devtools.clouderrorreporting.v1beta1.QueryTimeRange\x12\x37\n\x14timed_count_duration\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12S\n\talignment\x18\x07 \x01(\x0e\x32@.google.devtools.clouderrorreporting.v1beta1.TimedCountAlignment\x12\x32\n\x0e\x61lignment_time\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12K\n\x05order\x18\t \x01(\x0e\x32<.google.devtools.clouderrorreporting.v1beta1.ErrorGroupOrder\x12\x11\n\tpage_size\x18\x0b \x01(\x05\x12\x12\n\npage_token\x18\x0c \x01(\t"\xc0\x01\n\x16ListGroupStatsResponse\x12W\n\x11\x65rror_group_stats\x18\x01 \x03(\x0b\x32<.google.devtools.clouderrorreporting.v1beta1.ErrorGroupStats\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x34\n\x10time_range_begin\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x86\x04\n\x0f\x45rrorGroupStats\x12\x46\n\x05group\x18\x01 \x01(\x0b\x32\x37.google.devtools.clouderrorreporting.v1beta1.ErrorGroup\x12\r\n\x05\x63ount\x18\x02 \x01(\x03\x12\x1c\n\x14\x61\x66\x66\x65\x63ted_users_count\x18\x03 \x01(\x03\x12M\n\x0ctimed_counts\x18\x04 \x03(\x0b\x32\x37.google.devtools.clouderrorreporting.v1beta1.TimedCount\x12\x33\n\x0f\x66irst_seen_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elast_seen_time\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12V\n\x11\x61\x66\x66\x65\x63ted_services\x18\x07 \x03(\x0b\x32;.google.devtools.clouderrorreporting.v1beta1.ServiceContext\x12\x1d\n\x15num_affected_services\x18\x08 \x01(\x05\x12O\n\x0erepresentative\x18\t \x01(\x0b\x32\x37.google.devtools.clouderrorreporting.v1beta1.ErrorEvent"y\n\nTimedCount\x12\r\n\x05\x63ount\x18\x01 \x01(\x03\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x8e\x02\n\x11ListEventsRequest\x12\x14\n\x0cproject_name\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12Y\n\x0eservice_filter\x18\x03 \x01(\x0b\x32\x41.google.devtools.clouderrorreporting.v1beta1.ServiceContextFilter\x12O\n\ntime_range\x18\x04 \x01(\x0b\x32;.google.devtools.clouderrorreporting.v1beta1.QueryTimeRange\x12\x11\n\tpage_size\x18\x06 \x01(\x05\x12\x12\n\npage_token\x18\x07 \x01(\t"\xb2\x01\n\x12ListEventsResponse\x12M\n\x0c\x65rror_events\x18\x01 \x03(\x0b\x32\x37.google.devtools.clouderrorreporting.v1beta1.ErrorEvent\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x34\n\x10time_range_begin\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xe7\x01\n\x0eQueryTimeRange\x12R\n\x06period\x18\x01 \x01(\x0e\x32\x42.google.devtools.clouderrorreporting.v1beta1.QueryTimeRange.Period"\x80\x01\n\x06Period\x12\x16\n\x12PERIOD_UNSPECIFIED\x10\x00\x12\x11\n\rPERIOD_1_HOUR\x10\x01\x12\x12\n\x0ePERIOD_6_HOURS\x10\x02\x12\x10\n\x0cPERIOD_1_DAY\x10\x03\x12\x11\n\rPERIOD_1_WEEK\x10\x04\x12\x12\n\x0ePERIOD_30_DAYS\x10\x05"O\n\x14ServiceContextFilter\x12\x0f\n\x07service\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x15\n\rresource_type\x18\x04 
\x01(\t"+\n\x13\x44\x65leteEventsRequest\x12\x14\n\x0cproject_name\x18\x01 \x01(\t"\x16\n\x14\x44\x65leteEventsResponse*u\n\x13TimedCountAlignment\x12%\n!ERROR_COUNT_ALIGNMENT_UNSPECIFIED\x10\x00\x12\x1b\n\x17\x41LIGNMENT_EQUAL_ROUNDED\x10\x01\x12\x1a\n\x16\x41LIGNMENT_EQUAL_AT_END\x10\x02*}\n\x0f\x45rrorGroupOrder\x12\x1b\n\x17GROUP_ORDER_UNSPECIFIED\x10\x00\x12\x0e\n\nCOUNT_DESC\x10\x01\x12\x12\n\x0eLAST_SEEN_DESC\x10\x02\x12\x10\n\x0c\x43REATED_DESC\x10\x03\x12\x17\n\x13\x41\x46\x46\x45\x43TED_USERS_DESC\x10\x04\x32\xf2\x04\n\x11\x45rrorStatsService\x12\xd0\x01\n\x0eListGroupStats\x12\x42.google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest\x1a\x43.google.devtools.clouderrorreporting.v1beta1.ListGroupStatsResponse"5\x82\xd3\xe4\x93\x02/\x12-/v1beta1/{project_name=projects/*}/groupStats\x12\xc0\x01\n\nListEvents\x12>.google.devtools.clouderrorreporting.v1beta1.ListEventsRequest\x1a?.google.devtools.clouderrorreporting.v1beta1.ListEventsResponse"1\x82\xd3\xe4\x93\x02+\x12)/v1beta1/{project_name=projects/*}/events\x12\xc6\x01\n\x0c\x44\x65leteEvents\x12@.google.devtools.clouderrorreporting.v1beta1.DeleteEventsRequest\x1a\x41.google.devtools.clouderrorreporting.v1beta1.DeleteEventsResponse"1\x82\xd3\xe4\x93\x02+*)/v1beta1/{project_name=projects/*}/eventsB\xf7\x01\n/com.google.devtools.clouderrorreporting.v1beta1B\x16\x45rrorStatsServiceProtoP\x01Z^google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1;clouderrorreporting\xaa\x02#Google.Cloud.ErrorReporting.V1Beta1\xca\x02#Google\\Cloud\\ErrorReporting\\V1beta1b\x06proto3'
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_common__pb2.DESCRIPTOR,
google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
],
)
_TIMEDCOUNTALIGNMENT = _descriptor.EnumDescriptor(
name="TimedCountAlignment",
full_name="google.devtools.clouderrorreporting.v1beta1.TimedCountAlignment",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="ERROR_COUNT_ALIGNMENT_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ALIGNMENT_EQUAL_ROUNDED",
index=1,
number=1,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ALIGNMENT_EQUAL_AT_END",
index=2,
number=2,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=2508,
serialized_end=2625,
)
_sym_db.RegisterEnumDescriptor(_TIMEDCOUNTALIGNMENT)
TimedCountAlignment = enum_type_wrapper.EnumTypeWrapper(_TIMEDCOUNTALIGNMENT)
_ERRORGROUPORDER = _descriptor.EnumDescriptor(
name="ErrorGroupOrder",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorGroupOrder",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="GROUP_ORDER_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="COUNT_DESC", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="LAST_SEEN_DESC", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CREATED_DESC", index=3, number=3, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="AFFECTED_USERS_DESC",
index=4,
number=4,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=2627,
serialized_end=2752,
)
_sym_db.RegisterEnumDescriptor(_ERRORGROUPORDER)
ErrorGroupOrder = enum_type_wrapper.EnumTypeWrapper(_ERRORGROUPORDER)
ERROR_COUNT_ALIGNMENT_UNSPECIFIED = 0
ALIGNMENT_EQUAL_ROUNDED = 1
ALIGNMENT_EQUAL_AT_END = 2
GROUP_ORDER_UNSPECIFIED = 0
COUNT_DESC = 1
LAST_SEEN_DESC = 2
CREATED_DESC = 3
AFFECTED_USERS_DESC = 4
_QUERYTIMERANGE_PERIOD = _descriptor.EnumDescriptor(
name="Period",
full_name="google.devtools.clouderrorreporting.v1beta1.QueryTimeRange.Period",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="PERIOD_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="PERIOD_1_HOUR", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="PERIOD_6_HOURS", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="PERIOD_1_DAY", index=3, number=3, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="PERIOD_1_WEEK", index=4, number=4, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="PERIOD_30_DAYS", index=5, number=5, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=2228,
serialized_end=2356,
)
_sym_db.RegisterEnumDescriptor(_QUERYTIMERANGE_PERIOD)
_LISTGROUPSTATSREQUEST = _descriptor.Descriptor(
name="ListGroupStatsRequest",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="project_name",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest.project_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="group_id",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest.group_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="service_filter",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest.service_filter",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="time_range",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest.time_range",
index=3,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="timed_count_duration",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest.timed_count_duration",
index=4,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="alignment",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest.alignment",
index=5,
number=7,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="alignment_time",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest.alignment_time",
index=6,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="order",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest.order",
index=7,
number=9,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest.page_size",
index=8,
number=11,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest.page_token",
index=9,
number=12,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=284,
serialized_end=829,
)
_LISTGROUPSTATSRESPONSE = _descriptor.Descriptor(
name="ListGroupStatsResponse",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="error_group_stats",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsResponse.error_group_stats",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsResponse.next_page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="time_range_begin",
full_name="google.devtools.clouderrorreporting.v1beta1.ListGroupStatsResponse.time_range_begin",
index=2,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=832,
serialized_end=1024,
)
_ERRORGROUPSTATS = _descriptor.Descriptor(
name="ErrorGroupStats",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorGroupStats",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="group",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorGroupStats.group",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="count",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorGroupStats.count",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="affected_users_count",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorGroupStats.affected_users_count",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="timed_counts",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorGroupStats.timed_counts",
index=3,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="first_seen_time",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorGroupStats.first_seen_time",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_seen_time",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorGroupStats.last_seen_time",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="affected_services",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorGroupStats.affected_services",
index=6,
number=7,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="num_affected_services",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorGroupStats.num_affected_services",
index=7,
number=8,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="representative",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorGroupStats.representative",
index=8,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1027,
serialized_end=1545,
)
_TIMEDCOUNT = _descriptor.Descriptor(
name="TimedCount",
full_name="google.devtools.clouderrorreporting.v1beta1.TimedCount",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="count",
full_name="google.devtools.clouderrorreporting.v1beta1.TimedCount.count",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.devtools.clouderrorreporting.v1beta1.TimedCount.start_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.devtools.clouderrorreporting.v1beta1.TimedCount.end_time",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1547,
serialized_end=1668,
)
_LISTEVENTSREQUEST = _descriptor.Descriptor(
name="ListEventsRequest",
full_name="google.devtools.clouderrorreporting.v1beta1.ListEventsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="project_name",
full_name="google.devtools.clouderrorreporting.v1beta1.ListEventsRequest.project_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="group_id",
full_name="google.devtools.clouderrorreporting.v1beta1.ListEventsRequest.group_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="service_filter",
full_name="google.devtools.clouderrorreporting.v1beta1.ListEventsRequest.service_filter",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="time_range",
full_name="google.devtools.clouderrorreporting.v1beta1.ListEventsRequest.time_range",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.devtools.clouderrorreporting.v1beta1.ListEventsRequest.page_size",
index=4,
number=6,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.devtools.clouderrorreporting.v1beta1.ListEventsRequest.page_token",
index=5,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1671,
serialized_end=1941,
)
_LISTEVENTSRESPONSE = _descriptor.Descriptor(
name="ListEventsResponse",
full_name="google.devtools.clouderrorreporting.v1beta1.ListEventsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="error_events",
full_name="google.devtools.clouderrorreporting.v1beta1.ListEventsResponse.error_events",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.devtools.clouderrorreporting.v1beta1.ListEventsResponse.next_page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="time_range_begin",
full_name="google.devtools.clouderrorreporting.v1beta1.ListEventsResponse.time_range_begin",
index=2,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1944,
serialized_end=2122,
)
_QUERYTIMERANGE = _descriptor.Descriptor(
name="QueryTimeRange",
full_name="google.devtools.clouderrorreporting.v1beta1.QueryTimeRange",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="period",
full_name="google.devtools.clouderrorreporting.v1beta1.QueryTimeRange.period",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[_QUERYTIMERANGE_PERIOD],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2125,
serialized_end=2356,
)
_SERVICECONTEXTFILTER = _descriptor.Descriptor(
name="ServiceContextFilter",
full_name="google.devtools.clouderrorreporting.v1beta1.ServiceContextFilter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="service",
full_name="google.devtools.clouderrorreporting.v1beta1.ServiceContextFilter.service",
index=0,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="version",
full_name="google.devtools.clouderrorreporting.v1beta1.ServiceContextFilter.version",
index=1,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="resource_type",
full_name="google.devtools.clouderrorreporting.v1beta1.ServiceContextFilter.resource_type",
index=2,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2358,
serialized_end=2437,
)
_DELETEEVENTSREQUEST = _descriptor.Descriptor(
name="DeleteEventsRequest",
full_name="google.devtools.clouderrorreporting.v1beta1.DeleteEventsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="project_name",
full_name="google.devtools.clouderrorreporting.v1beta1.DeleteEventsRequest.project_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2439,
serialized_end=2482,
)
_DELETEEVENTSRESPONSE = _descriptor.Descriptor(
name="DeleteEventsResponse",
full_name="google.devtools.clouderrorreporting.v1beta1.DeleteEventsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2484,
serialized_end=2506,
)
_LISTGROUPSTATSREQUEST.fields_by_name[
"service_filter"
].message_type = _SERVICECONTEXTFILTER
_LISTGROUPSTATSREQUEST.fields_by_name["time_range"].message_type = _QUERYTIMERANGE
_LISTGROUPSTATSREQUEST.fields_by_name[
"timed_count_duration"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_LISTGROUPSTATSREQUEST.fields_by_name["alignment"].enum_type = _TIMEDCOUNTALIGNMENT
_LISTGROUPSTATSREQUEST.fields_by_name[
"alignment_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTGROUPSTATSREQUEST.fields_by_name["order"].enum_type = _ERRORGROUPORDER
_LISTGROUPSTATSRESPONSE.fields_by_name[
"error_group_stats"
].message_type = _ERRORGROUPSTATS
_LISTGROUPSTATSRESPONSE.fields_by_name[
"time_range_begin"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ERRORGROUPSTATS.fields_by_name[
"group"
].message_type = (
google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_common__pb2._ERRORGROUP
)
_ERRORGROUPSTATS.fields_by_name["timed_counts"].message_type = _TIMEDCOUNT
_ERRORGROUPSTATS.fields_by_name[
"first_seen_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ERRORGROUPSTATS.fields_by_name[
"last_seen_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ERRORGROUPSTATS.fields_by_name[
"affected_services"
].message_type = (
google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_common__pb2._SERVICECONTEXT
)
_ERRORGROUPSTATS.fields_by_name[
"representative"
].message_type = (
google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_common__pb2._ERROREVENT
)
_TIMEDCOUNT.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TIMEDCOUNT.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTEVENTSREQUEST.fields_by_name["service_filter"].message_type = _SERVICECONTEXTFILTER
_LISTEVENTSREQUEST.fields_by_name["time_range"].message_type = _QUERYTIMERANGE
_LISTEVENTSRESPONSE.fields_by_name[
"error_events"
].message_type = (
google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_common__pb2._ERROREVENT
)
_LISTEVENTSRESPONSE.fields_by_name[
"time_range_begin"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_QUERYTIMERANGE.fields_by_name["period"].enum_type = _QUERYTIMERANGE_PERIOD
_QUERYTIMERANGE_PERIOD.containing_type = _QUERYTIMERANGE
DESCRIPTOR.message_types_by_name["ListGroupStatsRequest"] = _LISTGROUPSTATSREQUEST
DESCRIPTOR.message_types_by_name["ListGroupStatsResponse"] = _LISTGROUPSTATSRESPONSE
DESCRIPTOR.message_types_by_name["ErrorGroupStats"] = _ERRORGROUPSTATS
DESCRIPTOR.message_types_by_name["TimedCount"] = _TIMEDCOUNT
DESCRIPTOR.message_types_by_name["ListEventsRequest"] = _LISTEVENTSREQUEST
DESCRIPTOR.message_types_by_name["ListEventsResponse"] = _LISTEVENTSRESPONSE
DESCRIPTOR.message_types_by_name["QueryTimeRange"] = _QUERYTIMERANGE
DESCRIPTOR.message_types_by_name["ServiceContextFilter"] = _SERVICECONTEXTFILTER
DESCRIPTOR.message_types_by_name["DeleteEventsRequest"] = _DELETEEVENTSREQUEST
DESCRIPTOR.message_types_by_name["DeleteEventsResponse"] = _DELETEEVENTSRESPONSE
DESCRIPTOR.enum_types_by_name["TimedCountAlignment"] = _TIMEDCOUNTALIGNMENT
DESCRIPTOR.enum_types_by_name["ErrorGroupOrder"] = _ERRORGROUPORDER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListGroupStatsRequest = _reflection.GeneratedProtocolMessageType(
"ListGroupStatsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTGROUPSTATSREQUEST,
__module__="google.devtools.clouderrorreporting_v1beta1.proto.error_stats_service_pb2",
__doc__="""Specifies a set of ``ErrorGroupStats`` to return.
Attributes:
project_name:
[Required] The resource name of the Google Cloud Platform
project. Written as projects/ plus the Google Cloud Platform
project ID. Example: projects/my-project-123.
group_id:
[Optional] List all ErrorGroupStats with these IDs.
service_filter:
[Optional] List only ErrorGroupStats which belong to a service
context that matches the filter. Data for all service contexts
is returned if this field is not specified.
time_range:
[Optional] List data for the given time range. If not set a
default time range is used. The field time\_range\_begin in
the response will specify the beginning of this time range.
Only ErrorGroupStats with a non-zero count in the given time
range are returned, unless the request contains an explicit
group\_id list. If a group\_id list is given, also
ErrorGroupStats with zero occurrences are returned.
timed_count_duration:
[Optional] The preferred duration for a single returned
``TimedCount``. If not set, no timed counts are returned.
alignment:
[Optional] The alignment of the timed counts to be returned.
Default is ``ALIGNMENT_EQUAL_AT_END``.
alignment_time:
[Optional] Time where the timed counts shall be aligned if
rounded alignment is chosen. Default is 00:00 UTC.
order:
[Optional] The sort order in which the results are returned.
Default is ``COUNT_DESC``.
page_size:
[Optional] The maximum number of results to return per
response. Default is 20.
page_token:
[Optional] A ``next_page_token`` provided by a previous
response. To view additional results, pass this token along
with the identical query parameters as the first request.
""",
# @@protoc_insertion_point(class_scope:google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest)
),
)
_sym_db.RegisterMessage(ListGroupStatsRequest)
ListGroupStatsResponse = _reflection.GeneratedProtocolMessageType(
"ListGroupStatsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTGROUPSTATSRESPONSE,
__module__="google.devtools.clouderrorreporting_v1beta1.proto.error_stats_service_pb2",
__doc__="""Contains a set of requested error group stats.
Attributes:
error_group_stats:
The error group stats which match the given request.
next_page_token:
If non-empty, more results are available. Pass this token,
along with the same query parameters as the first request, to
view the next page of results.
time_range_begin:
The timestamp specifies the start time to which the request
was restricted. The start time is set based on the requested
time range. It may be adjusted to a later time if a project
has exceeded the storage quota and older data has been
deleted.
""",
# @@protoc_insertion_point(class_scope:google.devtools.clouderrorreporting.v1beta1.ListGroupStatsResponse)
),
)
_sym_db.RegisterMessage(ListGroupStatsResponse)
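# --------------------------------------------------------------------------
# Illustrative pagination sketch (not emitted by protoc). It shows how the
# ``next_page_token`` behaviour documented above might be consumed; ``stub``
# stands for an assumed ErrorStatsService client and is not defined in this
# module.
#   request = ListGroupStatsRequest(project_name="projects/my-project-123")
#   while True:
#       response = stub.ListGroupStats(request)
#       for stats in response.error_group_stats:
#           pass  # process each ErrorGroupStats entry
#       if not response.next_page_token:
#           break
#       request.page_token = response.next_page_token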
ErrorGroupStats = _reflection.GeneratedProtocolMessageType(
"ErrorGroupStats",
(_message.Message,),
dict(
DESCRIPTOR=_ERRORGROUPSTATS,
__module__="google.devtools.clouderrorreporting_v1beta1.proto.error_stats_service_pb2",
__doc__="""Data extracted for a specific group based on certain filter criteria,
such as a given time period and/or service filter.
Attributes:
group:
Group data that is independent of the filter criteria.
count:
Approximate total number of events in the given group that
match the filter criteria.
affected_users_count:
Approximate number of affected users in the given group that
match the filter criteria. Users are distinguished by data in
the ``ErrorContext`` of the individual error events, such as
their login name or their remote IP address in case of HTTP
requests. The number of affected users can be zero even if the
number of errors is non-zero if no data was provided from
which the affected user could be deduced. Users are counted
based on data in the request context that was provided in the
error report. If more users are implicitly affected, such as
due to a crash of the whole service, this is not reflected
here.
timed_counts:
Approximate number of occurrences over time. Timed counts
returned by ListGroups are guaranteed to be: - Inside the
requested time interval - Non-overlapping, and - Ordered by
ascending time.
first_seen_time:
Approximate first occurrence that was ever seen for this group
and which matches the given filter criteria, ignoring the
time\_range that was specified in the request.
last_seen_time:
Approximate last occurrence that was ever seen for this group
and which matches the given filter criteria, ignoring the
time\_range that was specified in the request.
affected_services:
Service contexts with a non-zero error count for the given
filter criteria. This list can be truncated if multiple
services are affected. Refer to ``num_affected_services`` for
the total count.
num_affected_services:
The total number of services with a non-zero error count for
the given filter criteria.
representative:
An arbitrary event that is chosen as representative for the
whole group. The representative event is intended to be used
as a quick preview for the whole group. Events in the group
are usually sufficiently similar to each other such that
showing an arbitrary representative provides insight into the
characteristics of the group as a whole.
""",
# @@protoc_insertion_point(class_scope:google.devtools.clouderrorreporting.v1beta1.ErrorGroupStats)
),
)
_sym_db.RegisterMessage(ErrorGroupStats)
TimedCount = _reflection.GeneratedProtocolMessageType(
"TimedCount",
(_message.Message,),
dict(
DESCRIPTOR=_TIMEDCOUNT,
__module__="google.devtools.clouderrorreporting_v1beta1.proto.error_stats_service_pb2",
__doc__="""The number of errors in a given time period. All numbers are approximate
since the error events are sampled before counting them.
Attributes:
count:
Approximate number of occurrences in the given time period.
start_time:
Start of the time period to which ``count`` refers (included).
end_time:
End of the time period to which ``count`` refers (excluded).
""",
# @@protoc_insertion_point(class_scope:google.devtools.clouderrorreporting.v1beta1.TimedCount)
),
)
_sym_db.RegisterMessage(TimedCount)
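# Illustrative note (not emitted by protoc): per the field documentation
# above, a TimedCount interval includes its start_time and excludes its
# end_time; the timed_counts returned in ErrorGroupStats are documented as
# non-overlapping and ordered by ascending time.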
ListEventsRequest = _reflection.GeneratedProtocolMessageType(
"ListEventsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTEVENTSREQUEST,
__module__="google.devtools.clouderrorreporting_v1beta1.proto.error_stats_service_pb2",
__doc__="""Specifies a set of error events to return.
Attributes:
project_name:
[Required] The resource name of the Google Cloud Platform
project. Written as ``projects/`` plus the `Google Cloud
Platform project ID
<https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
group_id:
[Required] The group for which events shall be returned.
service_filter:
[Optional] List only ErrorGroups which belong to a service
context that matches the filter. Data for all service contexts
is returned if this field is not specified.
time_range:
[Optional] List only data for the given time range. If not set
a default time range is used. The field time\_range\_begin in
the response will specify the beginning of this time range.
page_size:
[Optional] The maximum number of results to return per
response.
page_token:
[Optional] A ``next_page_token`` provided by a previous
response.
""",
# @@protoc_insertion_point(class_scope:google.devtools.clouderrorreporting.v1beta1.ListEventsRequest)
),
)
_sym_db.RegisterMessage(ListEventsRequest)
ListEventsResponse = _reflection.GeneratedProtocolMessageType(
"ListEventsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTEVENTSRESPONSE,
__module__="google.devtools.clouderrorreporting_v1beta1.proto.error_stats_service_pb2",
__doc__="""Contains a set of requested error events.
Attributes:
error_events:
The error events which match the given request.
next_page_token:
If non-empty, more results are available. Pass this token,
along with the same query parameters as the first request, to
view the next page of results.
time_range_begin:
The timestamp specifies the start time to which the request
was restricted.
""",
# @@protoc_insertion_point(class_scope:google.devtools.clouderrorreporting.v1beta1.ListEventsResponse)
),
)
_sym_db.RegisterMessage(ListEventsResponse)
QueryTimeRange = _reflection.GeneratedProtocolMessageType(
"QueryTimeRange",
(_message.Message,),
dict(
DESCRIPTOR=_QUERYTIMERANGE,
__module__="google.devtools.clouderrorreporting_v1beta1.proto.error_stats_service_pb2",
__doc__="""Requests might be rejected or the resulting timed count durations might
be adjusted for lower durations.
Attributes:
period:
Restricts the query to the specified time range.
""",
# @@protoc_insertion_point(class_scope:google.devtools.clouderrorreporting.v1beta1.QueryTimeRange)
),
)
_sym_db.RegisterMessage(QueryTimeRange)
ServiceContextFilter = _reflection.GeneratedProtocolMessageType(
"ServiceContextFilter",
(_message.Message,),
dict(
DESCRIPTOR=_SERVICECONTEXTFILTER,
__module__="google.devtools.clouderrorreporting_v1beta1.proto.error_stats_service_pb2",
__doc__="""Specifies criteria for filtering a subset of service contexts. The
fields in the filter correspond to the fields in ``ServiceContext``.
Only exact, case-sensitive matches are supported. If a field is unset or
empty, it matches arbitrary values.
Attributes:
service:
[Optional] The exact value to match against
```ServiceContext.service`` </error-reporting/reference/rest/v
1beta1/ServiceContext#FIELDS.service>`__.
version:
[Optional] The exact value to match against
```ServiceContext.version`` </error-reporting/reference/rest/v
1beta1/ServiceContext#FIELDS.version>`__.
resource_type:
[Optional] The exact value to match against
```ServiceContext.resource_type`` </error-reporting/reference/
rest/v1beta1/ServiceContext#FIELDS.resource_type>`__.
""",
# @@protoc_insertion_point(class_scope:google.devtools.clouderrorreporting.v1beta1.ServiceContextFilter)
),
)
_sym_db.RegisterMessage(ServiceContextFilter)
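# Illustrative sketch (not emitted by protoc): matches are exact and
# case-sensitive as documented above, e.g.
#   ListGroupStatsRequest(
#       project_name="projects/my-project-123",
#       service_filter=ServiceContextFilter(service="default", version="v1"),
#   )
# where the service and version strings are assumed example values.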
DeleteEventsRequest = _reflection.GeneratedProtocolMessageType(
"DeleteEventsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_DELETEEVENTSREQUEST,
__module__="google.devtools.clouderrorreporting_v1beta1.proto.error_stats_service_pb2",
__doc__="""Deletes all events in the project.
Attributes:
project_name:
[Required] The resource name of the Google Cloud Platform
project. Written as ``projects/`` plus the `Google Cloud
Platform project ID
<https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
""",
# @@protoc_insertion_point(class_scope:google.devtools.clouderrorreporting.v1beta1.DeleteEventsRequest)
),
)
_sym_db.RegisterMessage(DeleteEventsRequest)
DeleteEventsResponse = _reflection.GeneratedProtocolMessageType(
"DeleteEventsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_DELETEEVENTSRESPONSE,
__module__="google.devtools.clouderrorreporting_v1beta1.proto.error_stats_service_pb2",
__doc__="""Response message for deleting error events.
""",
# @@protoc_insertion_point(class_scope:google.devtools.clouderrorreporting.v1beta1.DeleteEventsResponse)
),
)
_sym_db.RegisterMessage(DeleteEventsResponse)
DESCRIPTOR._options = None
_ERRORSTATSSERVICE = _descriptor.ServiceDescriptor(
name="ErrorStatsService",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorStatsService",
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=2755,
serialized_end=3381,
methods=[
_descriptor.MethodDescriptor(
name="ListGroupStats",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorStatsService.ListGroupStats",
index=0,
containing_service=None,
input_type=_LISTGROUPSTATSREQUEST,
output_type=_LISTGROUPSTATSRESPONSE,
serialized_options=_b(
"\202\323\344\223\002/\022-/v1beta1/{project_name=projects/*}/groupStats"
),
),
_descriptor.MethodDescriptor(
name="ListEvents",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorStatsService.ListEvents",
index=1,
containing_service=None,
input_type=_LISTEVENTSREQUEST,
output_type=_LISTEVENTSRESPONSE,
serialized_options=_b(
"\202\323\344\223\002+\022)/v1beta1/{project_name=projects/*}/events"
),
),
_descriptor.MethodDescriptor(
name="DeleteEvents",
full_name="google.devtools.clouderrorreporting.v1beta1.ErrorStatsService.DeleteEvents",
index=2,
containing_service=None,
input_type=_DELETEEVENTSREQUEST,
output_type=_DELETEEVENTSRESPONSE,
serialized_options=_b(
"\202\323\344\223\002+*)/v1beta1/{project_name=projects/*}/events"
),
),
],
)
_sym_db.RegisterServiceDescriptor(_ERRORSTATSSERVICE)
DESCRIPTOR.services_by_name["ErrorStatsService"] = _ERRORSTATSSERVICE
# @@protoc_insertion_point(module_scope)
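# --------------------------------------------------------------------------
# Hypothetical usage sketch (not emitted by protoc): builds one of the request
# messages registered above. The field and enum names come from the
# descriptors in this module; the project name, period and page size are
# assumed example values.
if __name__ == "__main__":
    _example_request = ListGroupStatsRequest(
        project_name="projects/my-project-123",
        time_range=QueryTimeRange(period=QueryTimeRange.PERIOD_30_DAYS),
        order=COUNT_DESC,
        page_size=20,
    )
    print(_example_request)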
| apache-2.0 |
aESeguridad/GERE | venv/lib/python2.7/site-packages/wtforms/ext/appengine/db.py | 228 | 18588 | """
Form generation utilities for App Engine's ``db.Model`` class.
The goal of ``model_form()`` is to provide a clean, explicit and predictable
way to create forms based on ``db.Model`` classes. No malabarism or black
magic should be necessary to generate a form for models, and to add custom
non-model related fields: ``model_form()`` simply generates a form class
that can be used as it is, or that can be extended directly or even be used
to create other forms using ``model_form()``.
Example usage:
.. code-block:: python
from google.appengine.ext import db
from tipfy.ext.model.form import model_form
# Define an example model and add a record.
class Contact(db.Model):
name = db.StringProperty(required=True)
city = db.StringProperty()
age = db.IntegerProperty(required=True)
is_admin = db.BooleanProperty(default=False)
new_entity = Contact(key_name='test', name='Test Name', age=17)
new_entity.put()
# Generate a form based on the model.
ContactForm = model_form(Contact)
# Get a form populated with entity data.
entity = Contact.get_by_key_name('test')
form = ContactForm(obj=entity)
Properties from the model can be excluded from the generated form, or it can
include just a set of properties. For example:
.. code-block:: python
# Generate a form based on the model, excluding 'city' and 'is_admin'.
ContactForm = model_form(Contact, exclude=('city', 'is_admin'))
# or...
# Generate a form based on the model, only including 'name' and 'age'.
ContactForm = model_form(Contact, only=('name', 'age'))
The form can be generated by setting field arguments:
.. code-block:: python
ContactForm = model_form(Contact, only=('name', 'age'), field_args={
'name': {
'label': 'Full name',
'description': 'Your name',
},
'age': {
'label': 'Age',
'validators': [validators.NumberRange(min=14, max=99)],
}
})
The class returned by ``model_form()`` can be used as a base class for forms
mixing non-model fields and/or other model forms. For example:
.. code-block:: python
# Generate a form based on the model.
BaseContactForm = model_form(Contact)
# Generate a form based on other model.
ExtraContactForm = model_form(MyOtherModel)
class ContactForm(BaseContactForm):
# Add an extra, non-model related field.
subscribe_to_news = f.BooleanField()
# Add the other model form as a subform.
extra = f.FormField(ExtraContactForm)
The class returned by ``model_form()`` can also extend an existing form
class:
.. code-block:: python
class BaseContactForm(Form):
# Add an extra, non-model related field.
subscribe_to_news = f.BooleanField()
# Generate a form based on the model.
ContactForm = model_form(Contact, base_class=BaseContactForm)
"""
from wtforms import Form, validators, widgets, fields as f
from wtforms.compat import iteritems
from wtforms.ext.appengine.fields import GeoPtPropertyField, ReferencePropertyField, StringListPropertyField
def get_TextField(kwargs):
"""
Returns a ``TextField``, applying the ``db.StringProperty`` length limit
of 500 bytes.
"""
kwargs['validators'].append(validators.length(max=500))
return f.TextField(**kwargs)
def get_IntegerField(kwargs):
"""
Returns an ``IntegerField``, applying the ``db.IntegerProperty`` range
limits.
"""
v = validators.NumberRange(min=-0x8000000000000000, max=0x7fffffffffffffff)
kwargs['validators'].append(v)
return f.IntegerField(**kwargs)
def convert_StringProperty(model, prop, kwargs):
"""Returns a form field for a ``db.StringProperty``."""
if prop.multiline:
kwargs['validators'].append(validators.length(max=500))
return f.TextAreaField(**kwargs)
else:
return get_TextField(kwargs)
def convert_ByteStringProperty(model, prop, kwargs):
"""Returns a form field for a ``db.ByteStringProperty``."""
return get_TextField(kwargs)
def convert_BooleanProperty(model, prop, kwargs):
"""Returns a form field for a ``db.BooleanProperty``."""
return f.BooleanField(**kwargs)
def convert_IntegerProperty(model, prop, kwargs):
"""Returns a form field for a ``db.IntegerProperty``."""
return get_IntegerField(kwargs)
def convert_FloatProperty(model, prop, kwargs):
"""Returns a form field for a ``db.FloatProperty``."""
return f.FloatField(**kwargs)
def convert_DateTimeProperty(model, prop, kwargs):
"""Returns a form field for a ``db.DateTimeProperty``."""
if prop.auto_now or prop.auto_now_add:
return None
kwargs.setdefault('format', '%Y-%m-%d %H:%M:%S')
return f.DateTimeField(**kwargs)
def convert_DateProperty(model, prop, kwargs):
"""Returns a form field for a ``db.DateProperty``."""
if prop.auto_now or prop.auto_now_add:
return None
kwargs.setdefault('format', '%Y-%m-%d')
return f.DateField(**kwargs)
def convert_TimeProperty(model, prop, kwargs):
"""Returns a form field for a ``db.TimeProperty``."""
if prop.auto_now or prop.auto_now_add:
return None
kwargs.setdefault('format', '%H:%M:%S')
return f.DateTimeField(**kwargs)
def convert_ListProperty(model, prop, kwargs):
"""Returns a form field for a ``db.ListProperty``."""
return None
def convert_StringListProperty(model, prop, kwargs):
"""Returns a form field for a ``db.StringListProperty``."""
return StringListPropertyField(**kwargs)
def convert_ReferenceProperty(model, prop, kwargs):
"""Returns a form field for a ``db.ReferenceProperty``."""
kwargs['reference_class'] = prop.reference_class
kwargs.setdefault('allow_blank', not prop.required)
return ReferencePropertyField(**kwargs)
def convert_SelfReferenceProperty(model, prop, kwargs):
"""Returns a form field for a ``db.SelfReferenceProperty``."""
return None
def convert_UserProperty(model, prop, kwargs):
"""Returns a form field for a ``db.UserProperty``."""
return None
def convert_BlobProperty(model, prop, kwargs):
"""Returns a form field for a ``db.BlobProperty``."""
return f.FileField(**kwargs)
def convert_TextProperty(model, prop, kwargs):
"""Returns a form field for a ``db.TextProperty``."""
return f.TextAreaField(**kwargs)
def convert_CategoryProperty(model, prop, kwargs):
"""Returns a form field for a ``db.CategoryProperty``."""
return get_TextField(kwargs)
def convert_LinkProperty(model, prop, kwargs):
"""Returns a form field for a ``db.LinkProperty``."""
kwargs['validators'].append(validators.url())
return get_TextField(kwargs)
def convert_EmailProperty(model, prop, kwargs):
"""Returns a form field for a ``db.EmailProperty``."""
kwargs['validators'].append(validators.email())
return get_TextField(kwargs)
def convert_GeoPtProperty(model, prop, kwargs):
"""Returns a form field for a ``db.GeoPtProperty``."""
return GeoPtPropertyField(**kwargs)
def convert_IMProperty(model, prop, kwargs):
"""Returns a form field for a ``db.IMProperty``."""
return None
def convert_PhoneNumberProperty(model, prop, kwargs):
"""Returns a form field for a ``db.PhoneNumberProperty``."""
return get_TextField(kwargs)
def convert_PostalAddressProperty(model, prop, kwargs):
"""Returns a form field for a ``db.PostalAddressProperty``."""
return get_TextField(kwargs)
def convert_RatingProperty(model, prop, kwargs):
"""Returns a form field for a ``db.RatingProperty``."""
kwargs['validators'].append(validators.NumberRange(min=0, max=100))
return f.IntegerField(**kwargs)
class ModelConverter(object):
"""
Converts properties from a ``db.Model`` class to form fields.
Default conversions between properties and fields:
+====================+===================+==============+==================+
| Property subclass | Field subclass | datatype | notes |
+====================+===================+==============+==================+
| StringProperty | TextField | unicode | TextArea |
| | | | if multiline |
+--------------------+-------------------+--------------+------------------+
| ByteStringProperty | TextField | str | |
+--------------------+-------------------+--------------+------------------+
| BooleanProperty | BooleanField | bool | |
+--------------------+-------------------+--------------+------------------+
| IntegerProperty | IntegerField | int or long | |
+--------------------+-------------------+--------------+------------------+
    | FloatProperty     | FloatField       | float       |                 |
+--------------------+-------------------+--------------+------------------+
| DateTimeProperty | DateTimeField | datetime | skipped if |
| | | | auto_now[_add] |
+--------------------+-------------------+--------------+------------------+
| DateProperty | DateField | date | skipped if |
| | | | auto_now[_add] |
+--------------------+-------------------+--------------+------------------+
| TimeProperty | DateTimeField | time | skipped if |
| | | | auto_now[_add] |
+--------------------+-------------------+--------------+------------------+
| ListProperty | None | list | always skipped |
+--------------------+-------------------+--------------+------------------+
| StringListProperty | TextAreaField | list of str | |
+--------------------+-------------------+--------------+------------------+
| ReferenceProperty | ReferencePropertyF| db.Model | |
+--------------------+-------------------+--------------+------------------+
| SelfReferenceP. | ReferencePropertyF| db.Model | |
+--------------------+-------------------+--------------+------------------+
| UserProperty | None | users.User | always skipped |
+--------------------+-------------------+--------------+------------------+
| BlobProperty | FileField | str | |
+--------------------+-------------------+--------------+------------------+
| TextProperty | TextAreaField | unicode | |
+--------------------+-------------------+--------------+------------------+
| CategoryProperty | TextField | unicode | |
+--------------------+-------------------+--------------+------------------+
| LinkProperty | TextField | unicode | |
+--------------------+-------------------+--------------+------------------+
| EmailProperty | TextField | unicode | |
+--------------------+-------------------+--------------+------------------+
| GeoPtProperty | TextField | db.GeoPt | |
+--------------------+-------------------+--------------+------------------+
| IMProperty | None | db.IM | always skipped |
+--------------------+-------------------+--------------+------------------+
| PhoneNumberProperty| TextField | unicode | |
+--------------------+-------------------+--------------+------------------+
| PostalAddressP. | TextField | unicode | |
+--------------------+-------------------+--------------+------------------+
| RatingProperty | IntegerField | int or long | |
+--------------------+-------------------+--------------+------------------+
| _ReverseReferenceP.| None | <iterable> | always skipped |
+====================+===================+==============+==================+
"""
default_converters = {
'StringProperty': convert_StringProperty,
'ByteStringProperty': convert_ByteStringProperty,
'BooleanProperty': convert_BooleanProperty,
'IntegerProperty': convert_IntegerProperty,
'FloatProperty': convert_FloatProperty,
'DateTimeProperty': convert_DateTimeProperty,
'DateProperty': convert_DateProperty,
'TimeProperty': convert_TimeProperty,
'ListProperty': convert_ListProperty,
'StringListProperty': convert_StringListProperty,
'ReferenceProperty': convert_ReferenceProperty,
'SelfReferenceProperty': convert_SelfReferenceProperty,
'UserProperty': convert_UserProperty,
'BlobProperty': convert_BlobProperty,
'TextProperty': convert_TextProperty,
'CategoryProperty': convert_CategoryProperty,
'LinkProperty': convert_LinkProperty,
'EmailProperty': convert_EmailProperty,
'GeoPtProperty': convert_GeoPtProperty,
'IMProperty': convert_IMProperty,
'PhoneNumberProperty': convert_PhoneNumberProperty,
'PostalAddressProperty': convert_PostalAddressProperty,
'RatingProperty': convert_RatingProperty,
}
# Don't automatically add a required validator for these properties
NO_AUTO_REQUIRED = frozenset(['ListProperty', 'StringListProperty', 'BooleanProperty'])
def __init__(self, converters=None):
"""
Constructs the converter, setting the converter callables.
:param converters:
A dictionary of converter callables for each property type. The
callable must accept the arguments (model, prop, kwargs).
"""
self.converters = converters or self.default_converters
def convert(self, model, prop, field_args):
"""
Returns a form field for a single model property.
:param model:
The ``db.Model`` class that contains the property.
:param prop:
The model property: a ``db.Property`` instance.
:param field_args:
Optional keyword arguments to construct the field.
"""
prop_type_name = type(prop).__name__
kwargs = {
'label': prop.name.replace('_', ' ').title(),
'default': prop.default_value(),
'validators': [],
}
if field_args:
kwargs.update(field_args)
if prop.required and prop_type_name not in self.NO_AUTO_REQUIRED:
kwargs['validators'].append(validators.required())
if prop.choices:
# Use choices in a select field if it was not provided in field_args
if 'choices' not in kwargs:
kwargs['choices'] = [(v, v) for v in prop.choices]
return f.SelectField(**kwargs)
else:
converter = self.converters.get(prop_type_name, None)
if converter is not None:
return converter(model, prop, kwargs)
def model_fields(model, only=None, exclude=None, field_args=None,
converter=None):
"""
Extracts and returns a dictionary of form fields for a given
``db.Model`` class.
:param model:
The ``db.Model`` class to extract fields from.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to a keyword arguments
used to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
"""
converter = converter or ModelConverter()
field_args = field_args or {}
# Get the field names we want to include or exclude, starting with the
# full list of model properties.
props = model.properties()
sorted_props = sorted(iteritems(props), key=lambda prop: prop[1].creation_counter)
field_names = list(x[0] for x in sorted_props)
if only:
field_names = list(f for f in only if f in field_names)
elif exclude:
field_names = list(f for f in field_names if f not in exclude)
# Create all fields.
field_dict = {}
for name in field_names:
field = converter.convert(model, props[name], field_args.get(name))
if field is not None:
field_dict[name] = field
return field_dict
def model_form(model, base_class=Form, only=None, exclude=None, field_args=None,
converter=None):
"""
Creates and returns a dynamic ``wtforms.Form`` class for a given
``db.Model`` class. The form class can be used as it is or serve as a base
for extended form classes, which can then mix non-model related fields,
subforms with other model forms, among other possibilities.
:param model:
The ``db.Model`` class to generate a form for.
:param base_class:
Base form class to extend from. Must be a ``wtforms.Form`` subclass.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to keyword arguments
used to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
"""
# Extract the fields from the model.
field_dict = model_fields(model, only, exclude, field_args, converter)
# Return a dynamically created form class, extending from base_class and
# including the created fields as properties.
return type(model.kind() + 'Form', (base_class,), field_dict)
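# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module): demonstrates
# the ``converter`` argument documented in ``model_form()`` by swapping the
# default ``StringProperty`` conversion for the textarea-based converter
# defined above. The class name is invented for illustration only.
class _ExampleTextAreaStringConverter(ModelConverter):
    """Example converter rendering every ``db.StringProperty`` as a textarea."""
    def __init__(self):
        converters = dict(self.default_converters)
        converters['StringProperty'] = convert_TextProperty
        super(_ExampleTextAreaStringConverter, self).__init__(converters)
# A form could then be generated with, e.g. (``Contact`` being the example
# model from the module docstring):
#   ContactForm = model_form(Contact, converter=_ExampleTextAreaStringConverter())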
| gpl-3.0 |
skyostil/tracy | src/analyzer/plugins/vg/__init__.py | 1 | 1176 | # Copyright (c) 2011 Nokia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""VG plugin"""
import VgPlugin
requiredLibrary = "vg"
plugins = [
VgPlugin.VgPlugin
]
| mit |
madmachinations/discordbot | plugins/stats/__init__.py | 2 | 24166 | #!/usr/bin/python
import __main__
import json
#===================================================================================================================
#PLUGIN CALLS
async def help_menu():
help_info = {}
help_info['title'] = 'Server statistics'
help_info['description'] = 'Track channel activity on this server.'
return help_info
async def help_section():
help_info = {}
cmd_name = 'stats'
help_info[cmd_name] = []
help_entry = {}
help_entry['command'] = 'server'
help_entry['args'] = 'mmm yyyy'
    help_entry['description'] = 'View the number of posts made in text channels across the server for month mmm and year yyyy. If month and year are not specified, the current month and year will be used instead.'
help_entry['perm_name'] = 'view_stats'
help_info[cmd_name].append(help_entry)
help_entry = {}
help_entry['command'] = ''
help_entry['args'] = 'channel_name mmm yyyy'
    help_entry['description'] = 'View the number of posts made in channel_name for month mmm and year yyyy. If month and year are not specified, the current month and year will be used instead.'
help_entry['perm_name'] = 'view_stats'
help_info[cmd_name].append(help_entry)
return help_info
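# Example invocations implied by the help entries above (illustrative only,
# assuming the bot command character is '!'; the channel name "general" is an
# assumed example):
#   !stats server           -> post counts for every text channel this month
#   !stats server jan 2018  -> post counts for every text channel in Jan 2018
#   !stats general jan 2018 -> monthly post history for the channel "general"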
async def plugin_permissions():
perm_info = {}
this_perm = 'view_stats'
perm_info[this_perm] = {}
perm_info[this_perm]['groups'] = [] #members/admins/owner
perm_info[this_perm]['groups'].append('owner')
return perm_info
async def server_setup_wizard():
return True
#===================================================================================================================
#SERVER EVENTS
async def server_join(server): pass
async def server_remove(server): pass
async def server_update(before,after): pass
async def server_connected(server): pass
#===================================================================================================================
#MESSAGE EVENTS
async def stats_next_key(date_key=None,date_dir=None):
ret_str = False
if(date_key != None and date_dir != None):
date_key = date_key.lower()
dk_split = date_key.split(" ")
dk_split[1] = int(dk_split[1])
if(date_dir == "next"):
if(dk_split[0] == "jan"): dk_split[0] = 'feb'
elif(dk_split[0] == "feb"): dk_split[0] = 'mar'
elif(dk_split[0] == "mar"): dk_split[0] = 'apr'
elif(dk_split[0] == "apr"): dk_split[0] = 'may'
elif(dk_split[0] == "may"): dk_split[0] = 'jun'
elif(dk_split[0] == "jun"): dk_split[0] = 'jul'
elif(dk_split[0] == "jul"): dk_split[0] = 'aug'
elif(dk_split[0] == "aug"): dk_split[0] = 'sep'
elif(dk_split[0] == "sep"): dk_split[0] = 'oct'
elif(dk_split[0] == "oct"): dk_split[0] = 'nov'
elif(dk_split[0] == "nov"): dk_split[0] = 'dec'
elif(dk_split[0] == "dec"):
dk_split[0] = 'jan'
dk_split[1] = dk_split[1] + 1
else:
if(dk_split[0] == "dec"): dk_split[0] = 'nov'
elif(dk_split[0] == "nov"): dk_split[0] = 'oct'
elif(dk_split[0] == "oct"): dk_split[0] = 'sep'
elif(dk_split[0] == "sep"): dk_split[0] = 'aug'
elif(dk_split[0] == "aug"): dk_split[0] = 'jul'
elif(dk_split[0] == "jul"): dk_split[0] = 'jun'
elif(dk_split[0] == "jun"): dk_split[0] = 'may'
elif(dk_split[0] == "may"): dk_split[0] = 'apr'
elif(dk_split[0] == "apr"): dk_split[0] = 'mar'
elif(dk_split[0] == "mar"): dk_split[0] = 'feb'
elif(dk_split[0] == "feb"): dk_split[0] = 'jan'
elif(dk_split[0] == "jan"):
dk_split[0] = 'dec'
dk_split[1] = dk_split[1] - 1
ret_str = dk_split[0]+' '+str(dk_split[1])
return ret_str
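# Example behaviour of stats_next_key (illustrative only): ('dec 2017', 'next')
# gives 'jan 2018' and ('jan 2018', 'prev') gives 'dec 2017'; other months
# simply step forwards or backwards within the same year.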
async def message_process(message):
bot_cmd_char = await __main__.get_cmd_char(message.server)
if(message.content.startswith(bot_cmd_char+'stats')):
chk_user_perm = await __main__.has_perm_to_run(message.server,message,message.author.id,'stats','view_stats',True)
if(chk_user_perm == True):
proc_msg = await __main__.get_cmd_args(message.content)
proc_msg_length = len(proc_msg)
if(proc_msg_length >= 2):
time_now = await __main__.current_timestamp()
use_server_id = await __main__.hash_server_id(message.server.id)
if(proc_msg[1] != "server"):
use_channel = await __main__.find_channel_arg(message.server,proc_msg[1],True)
else: use_channel = False
if(proc_msg_length == 4):
date_key = proc_msg[2]+' '+proc_msg[3]
else:
date_key = __main__.datetime.datetime.fromtimestamp(int(time_now)).strftime('%b %Y')
date_key = date_key.lower()
icon_url = __main__.client.user.avatar_url
if(icon_url == None or icon_url == ""): icon_url = __main__.client.user.default_avatar_url
if(proc_msg[1] == "server"):
list_title = 'Statistics for server'
                elif(use_channel != False): list_title = 'Statistics for channel '+use_channel.name
                else: list_title = ''
list_descript = ''
list_title = list_title+' **('+date_key.title()+')**'
if(proc_msg[1] == "server"):
prev_date_key = await stats_next_key(date_key,'prev')
total_posts = 0
chan_data = []
get_stats = __main__.db.cursor()
get_stats.execute("SELECT * FROM channels WHERE server_id=? AND deleted='0'",(use_server_id,))
for row in get_stats:
get_count = await __main__.decrypt_data(row['post_count'])
if(get_count == False or get_count == None or get_count == ''):
get_count = {}
else: get_count = json.loads(get_count)
if(date_key in get_count):
curr_count = int(get_count[date_key]['count'])
else: curr_count = 0
if(curr_count > 0):
curr_appd = curr_count / 30
else: curr_appd = 0
curr_appd = round(curr_appd,2)
if(prev_date_key in get_count):
prev_count = int(get_count[prev_date_key]['count'])
else: prev_count = 0
prev_diff = curr_count - prev_count
if(prev_diff >= 0): prev_diff = '+'+str(prev_diff)
use_channel_name = await __main__.decrypt_data(row['name'])
total_posts = total_posts + curr_count
#if(curr_count > 0): list_descript = list_descript+'**'+use_channel_name+' - **'+str(curr_count)+' posts ('+str(prev_diff)+')\n'
add_chan_data = {}
add_chan_data['name'] = use_channel_name
add_chan_data['posts'] = curr_count
add_chan_data['prev_diff'] = prev_diff
chan_data.append(add_chan_data)
sort_chan_data = []
done_data = []
while(len(done_data) < len(chan_data)):
last_highest = False
for data in chan_data:
if((last_highest == False or last_highest['posts'] <= data['posts']) and data not in done_data):
last_highest = data
done_data.append(last_highest)
sort_chan_data.append(last_highest)
for data in sort_chan_data:
list_descript = list_descript+'**'+data['name']+' - **'+str(data['posts'])+' posts ('+str(data['prev_diff'])+')\n'
if(len(list_descript) >= 1600):
em = __main__.discord.Embed(title=list_title, description=list_descript, colour=3447003)
em.set_author(name=__main__.client.user.display_name, icon_url=icon_url)
await __main__.client.send_message(message.channel, embed=em)
list_descript = ''
total_appd = total_posts / 30
total_appd = round(total_appd,2)
list_descript = list_descript+'\n**TOTAL POSTS - **'+str(total_posts)
if(len(list_descript) > 1):
em = __main__.discord.Embed(title=list_title, description=list_descript, colour=3447003)
em.set_author(name=__main__.client.user.display_name, icon_url=icon_url)
await __main__.client.send_message(message.channel, embed=em)
else:
if(use_channel != False):
use_channel_id = await __main__.hash_member_id(message.server.id,use_channel.id)
get_stats = __main__.db.cursor()
get_stats.execute("SELECT * FROM channels WHERE server_id=? AND deleted='0' AND channel_id=?",(use_server_id,use_channel_id,))
for row in get_stats:
get_count = await __main__.decrypt_data(row['post_count'])
if(get_count == False or get_count == None or get_count == ''):
get_count = {}
else: get_count = json.loads(get_count)
use_date_key = date_key
run_count = 0
while(run_count <= 12):
prev_date_key = await stats_next_key(use_date_key,'prev')
if(use_date_key in get_count):
curr_count = int(get_count[use_date_key]['count'])
else: curr_count = 0
if(curr_count > 0):
curr_appd = curr_count / 30
else: curr_appd = 0
curr_appd = round(curr_appd,2)
if(prev_date_key in get_count):
prev_count = int(get_count[prev_date_key]['count'])
else: prev_count = 0
prev_diff = curr_count - prev_count
if(prev_diff >= 0): prev_diff = '+'+str(prev_diff)
if(curr_count > 0): list_descript = list_descript+'**'+use_date_key.title()+' - **'+str(curr_count)+' posts ('+str(prev_diff)+')\n'
use_date_key = await stats_next_key(use_date_key,'prev')
run_count = run_count + 1
if(list_descript == ''): list_descript = 'There are no stats for this channel to display.'
em = __main__.discord.Embed(title=list_title, description=list_descript, colour=3447003)
em.set_author(name=__main__.client.user.display_name, icon_url=icon_url)
#em.set_thumbnail(url=icon_url)
#em.set_image(url)
await __main__.client.send_message(message.channel, embed=em)
else: await __main__.client.send_message(message.channel,'Sorry <@'+message.author.id+'>, I couldn\'t find a channel called "'+proc_msg[1]+'"')
else: await __main__.client.send_message(message.channel,'Sorry <@'+message.author.id+'>, you must at least specify a place, which can be either "server" or a channel name.')
async def message_new(message):
if(message.channel.is_private == False):
time_now = await __main__.current_timestamp()
use_server_id = await __main__.hash_server_id(message.server.id)
use_channel_id = await __main__.hash_member_id(message.server.id,message.channel.id)
day_key = __main__.datetime.datetime.fromtimestamp(int(time_now)).strftime('%a')
day_key = day_key.lower()
hour_key = __main__.datetime.datetime.fromtimestamp(int(time_now)).strftime('%H')
date_key = __main__.datetime.datetime.fromtimestamp(int(time_now)).strftime('%b %Y')
date_key = date_key.lower()
#adjust channel activity count
get_mets = __main__.db.cursor()
get_mets.execute("SELECT * FROM channels WHERE server_id=? AND channel_id=?",(use_server_id,use_channel_id,))
found_mets = False
for row in get_mets:
found_mets = await __main__.decrypt_data(row['post_count'])
if(found_mets == None or found_mets == False or found_mets == ""):
found_mets = {}
else: found_mets = json.loads(found_mets)
if(date_key not in found_mets):
found_mets[date_key] = {}
found_mets[date_key]['count'] = 0
found_mets[date_key]['count'] = found_mets[date_key]['count'] + 1
found_mets = await __main__.encrypt_data(json.dumps(found_mets))
save_mets = __main__.db.cursor()
save_mets.execute("UPDATE channels SET post_count=? WHERE server_id=? AND channel_id=?",(found_mets,use_server_id,use_channel_id,))
__main__.db.commit()
#adjust user activity counts
if(message.author.id != __main__.client.user.id):
use_member_id = await __main__.hash_member_id(message.server.id,message.author.id)
get_mets = __main__.db.cursor()
get_mets.execute("SELECT * FROM users WHERE user_id=? AND server_id=?",(use_member_id,use_server_id))
for row in get_mets:
post_count = await __main__.decrypt_data(row['post_count'])
if(post_count != False and post_count != None and post_count != ""):
post_count = json.loads(post_count)
else: post_count = {}
if(date_key not in post_count):
post_count[date_key] = {}
post_count[date_key]['total'] = 0
post_count[date_key]['days'] = {}
post_count[date_key]['channels'] = {}
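				#illustrative shape of the stored counters (values made up):
				#  {"jan 2018": {"total": 5,
				#                "days": {"mon": {"total": 2, "hours": {"09": 1}}},
				#                "channels": {"123456789012345678": 3}}}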
post_count[date_key]['total'] = post_count[date_key]['total'] + 1
if(day_key not in post_count[date_key]['days']):
post_count[date_key]['days'][day_key] = {}
post_count[date_key]['days'][day_key]['total'] = 0
post_count[date_key]['days'][day_key]['hours'] = {}
post_count[date_key]['days'][day_key]['total'] = post_count[date_key]['days'][day_key]['total'] + 1
if(hour_key not in post_count[date_key]['days'][day_key]['hours']): post_count[date_key]['days'][day_key]['hours'][hour_key] = 0
post_count[date_key]['days'][day_key]['hours'][hour_key] = post_count[date_key]['days'][day_key]['hours'][hour_key] + 1
if(message.channel.id not in post_count[date_key]['channels']): post_count[date_key]['channels'][message.channel.id] = 0
post_count[date_key]['channels'][message.channel.id] = post_count[date_key]['channels'][message.channel.id] + 1
post_count = await __main__.encrypt_data(json.dumps(post_count))
save_mets.execute("UPDATE users SET post_count=? WHERE user_id=? AND server_id=?",(post_count,use_member_id,use_server_id))
__main__.db.commit()
async def user_info_data(server,user):
ret_data = []
use_server_id = await __main__.hash_server_id(server.id)
use_member_id = await __main__.hash_member_id(server.id,user.id)
last_seen_timestamp = False
used_code = __main__.db.cursor()
used_code.execute("SELECT * FROM users WHERE server_id=? AND user_id=?",(use_server_id,use_member_id,))
for row in used_code:
post_count = await __main__.decrypt_data(row['post_count'])
if(row['last_seen'] != None and row['last_seen'] != ""): last_seen_timestamp = int(row['last_seen'])
if(post_count != False and post_count != None and post_count != ""):
post_count = json.loads(post_count)
if(len(post_count) > 0):
time_now = await __main__.current_timestamp()
date_key = __main__.datetime.datetime.fromtimestamp(int(time_now)).strftime('%b %Y')
date_key = date_key.lower()
found_key = False
while(found_key == False):
if(date_key not in post_count):
						date_key = await stats_next_key(date_key,'prev')
else:
found_key = True
prev_date_key = await stats_next_key(date_key,'prev')
count_total = 0
if(date_key in post_count): count_total = count_total + post_count[date_key]['total']
if(prev_date_key in post_count): count_total = count_total + post_count[prev_date_key]['total']
#active days
				weekdays_str = ''
if(last_seen_timestamp != False): weekdays_str = '__Last seen__\n'+str(await __main__.timestamp_to_date(last_seen_timestamp))
weekdays_run = ['mon','tue','wed','thu','fri','sat','sun']
for weekday in weekdays_run:
day_total = 0
if(date_key in post_count and weekday in post_count[date_key]['days']): day_total = day_total + post_count[date_key]['days'][weekday]['total']
if(prev_date_key in post_count and weekday in post_count[prev_date_key]['days']): day_total = day_total + post_count[prev_date_key]['days'][weekday]['total']
day_percent = round((day_total / count_total) * 100,0)
if(day_total > 0):
hour_run = 0
hour_block = ''
hour_block_total = 0
hours_out = ''
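						#group consecutive active hours into ranges such as "09:00 - 12:00 (40.0% / 12)" before appending them to hours_out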
while(hour_run <= 23):
use_hour_key = hour_run
if(use_hour_key <= 9):
use_hour_key = '0'+str(use_hour_key)
else: use_hour_key = str(use_hour_key)
hour_total = 0
if(date_key in post_count and weekday in post_count[date_key]['days'] and use_hour_key in post_count[date_key]['days'][weekday]['hours']):
hour_total = hour_total + post_count[date_key]['days'][weekday]['hours'][use_hour_key]
if(prev_date_key in post_count and weekday in post_count[prev_date_key]['days'] and use_hour_key in post_count[prev_date_key]['days'][weekday]['hours']):
hour_total = hour_total + post_count[prev_date_key]['days'][weekday]['hours'][use_hour_key]
if(hour_total > 0): hour_block_total = hour_block_total + hour_total
if(hour_block == '' and hour_run != 23):
if(hour_total > 0): hour_block = use_hour_key+':00 - '
else:
if(hour_block == '' and hour_run == 23):
if(hour_total > 0): hour_block = use_hour_key+':00 - '
if(hour_total == 0 or hour_run == 23):
hour_percent = round((hour_block_total / day_total) * 100,0)
if(hour_run == 23):
last_time = '23:59'
else: last_time = use_hour_key+':00'
if(hour_block_total > 0):
hour_block = hour_block+last_time+' ('+str(hour_percent)+'% / '+str(hour_block_total)+')'
if(hours_out != ''): hours_out = hours_out+', '
hours_out = hours_out+hour_block
hour_block = ''
hour_block_total = 0
hour_run = hour_run + 1
weekdays_str = weekdays_str+'\n\n__'+weekday.capitalize()+' ('+str(day_percent)+'% / '+str(day_total)+')__\n'+hours_out
if(weekdays_str != ""):
b_field = {}
b_field['name'] = 'Active times (UTC)'
b_field['value'] = weekdays_str
b_field['inline'] = True
ret_data.append(b_field)
#active channels
use_channels = []
if(date_key in post_count):
for this_channel in post_count[date_key]['channels']:
if(this_channel not in use_channels): use_channels.append(this_channel)
if(prev_date_key in post_count):
for this_channel in post_count[prev_date_key]['channels']:
if(this_channel not in use_channels): use_channels.append(this_channel)
channels_str = ''
for this_channel in use_channels:
this_channel_total = 0
if(date_key in post_count and this_channel in post_count[date_key]['channels']):
this_channel_total = this_channel_total + post_count[date_key]['channels'][this_channel]
if(prev_date_key in post_count and this_channel in post_count[prev_date_key]['channels']):
this_channel_total = this_channel_total + post_count[prev_date_key]['channels'][this_channel]
if(this_channel_total > 0):
channel_obj = await __main__.find_channel_arg(server,'<#'+this_channel+'>',True)
if(channel_obj != False):
this_channel_percent = round((this_channel_total / count_total) * 100,0)
channels_str = channels_str+'\n**-** '+channel_obj.name+' ('+str(this_channel_percent)+'% / '+str(this_channel_total)+')'
if(channels_str != ""):
b_field = {}
b_field['name'] = 'Active channels'
b_field['value'] = channels_str
b_field['inline'] = True
ret_data.append(b_field)
if(len(ret_data) == 0): ret_data = False
return ret_data
async def message_edit(before,after): pass
async def message_delete(message): pass
async def message_typing(channel,user,datestamp): pass
#===================================================================================================================
#MESSAGE REACTION EVENTS
async def reaction_add(reaction,user): pass
async def reaction_remove(reaction,user): pass
#===================================================================================================================
#CHANNEL EVENTS
async def channel_create(channel): pass
async def channel_delete(channel): pass
async def channel_update(before,after): pass
#===================================================================================================================
#MEMBER EVENTS
async def member_join(member): pass
async def member_remove(member): pass
async def member_update(before,after): pass
async def member_voice_update(before,after): pass
async def member_ban(member): pass
async def member_unban(server,user): pass
#===================================================================================================================
#ROLE EVENTS
async def role_create(role): pass
async def role_delete(role): pass
async def role_update(before,after): pass
#===================================================================================================================
#EMOJI LIST EVENTS
async def emoji_list_update(before,after): pass
#===================================================================================================================
#GROUP CHAT EVENTS
async def group_join(channel,user): pass
async def group_remove(channel,user): pass
| gpl-3.0 |
JioCloud/nova | nova/virt/xenapi/client/objects.py | 97 | 4588 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import utils
class XenAPISessionObject(object):
"""Wrapper to make calling and mocking the session easier
The XenAPI protocol is an XML RPC API that is based around the
XenAPI database, and operations you can do on each of the objects
stored in the database, such as VM, SR, VDI, etc.
For more details see the XenAPI docs:
http://docs.vmd.citrix.com/XenServer/6.2.0/1.0/en_gb/api/
    Most objects, like VM, SR, VDI, etc., share a common set of methods:
* vm_ref = session.VM.create(vm_rec)
* vm_ref = session.VM.get_by_uuid(uuid)
* session.VM.destroy(vm_ref)
* vm_refs = session.VM.get_all()
Each object also has specific messages, or functions, such as:
* session.VM.clean_reboot(vm_ref)
Each object has fields, like "VBDs" that can be fetched like this:
* vbd_refs = session.VM.get_VBDs(vm_ref)
You can get all the fields by fetching the full record.
    However, please note this is much more expensive than just
fetching the field you require:
* vm_rec = session.VM.get_record(vm_ref)
When searching for particular objects, you may be tempted
to use get_all(), but this often leads to races as objects
get deleted under your feet. It is preferable to use the undocumented:
* vms = session.VM.get_all_records_where(
'field "is_control_domain"="true"')
"""
def __init__(self, session, name):
self.session = session
self.name = name
def _call_method(self, method_name, *args):
call = "%s.%s" % (self.name, method_name)
return self.session.call_xenapi(call, *args)
def __getattr__(self, method_name):
return lambda *params: self._call_method(method_name, *params)
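# NOTE(editor): hedged usage sketch, not part of the upstream module. It only
# illustrates how the __getattr__ proxy above forwards attribute access to
# session.call_xenapi(); the session object and uuid value are assumed to exist.
def _example_vm_lookup(session, uuid):
    vm = XenAPISessionObject(session, "VM")
    # resolved via __getattr__ -> _call_method -> session.call_xenapi("VM.get_by_uuid", uuid)
    vm_ref = vm.get_by_uuid(uuid)
    return vm.get_record(vm_ref)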
class VM(XenAPISessionObject):
"""Virtual Machine."""
def __init__(self, session):
super(VM, self).__init__(session, "VM")
class VBD(XenAPISessionObject):
"""Virtual block device."""
def __init__(self, session):
super(VBD, self).__init__(session, "VBD")
def plug(self, vbd_ref, vm_ref):
@utils.synchronized('xenapi-vbd-' + vm_ref)
def synchronized_plug():
self._call_method("plug", vbd_ref)
# NOTE(johngarbutt) we need to ensure there is only ever one
# VBD.unplug or VBD.plug happening at once per VM
# due to a bug in XenServer 6.1 and 6.2
synchronized_plug()
def unplug(self, vbd_ref, vm_ref):
@utils.synchronized('xenapi-vbd-' + vm_ref)
def synchronized_unplug():
self._call_method("unplug", vbd_ref)
# NOTE(johngarbutt) we need to ensure there is only ever one
# VBD.unplug or VBD.plug happening at once per VM
# due to a bug in XenServer 6.1 and 6.2
synchronized_unplug()
class VDI(XenAPISessionObject):
"""Virtual disk image."""
def __init__(self, session):
super(VDI, self).__init__(session, "VDI")
class SR(XenAPISessionObject):
"""Storage Repository."""
def __init__(self, session):
super(SR, self).__init__(session, "SR")
class PBD(XenAPISessionObject):
"""Physical block device."""
def __init__(self, session):
super(PBD, self).__init__(session, "PBD")
class PIF(XenAPISessionObject):
"""Physical Network Interface."""
def __init__(self, session):
super(PIF, self).__init__(session, "PIF")
class VLAN(XenAPISessionObject):
"""VLAN."""
def __init__(self, session):
super(VLAN, self).__init__(session, "VLAN")
class Host(XenAPISessionObject):
"""XenServer hosts."""
def __init__(self, session):
super(Host, self).__init__(session, "host")
class Network(XenAPISessionObject):
"""Networks that VIFs are attached to."""
def __init__(self, session):
super(Network, self).__init__(session, "network")
class Pool(XenAPISessionObject):
"""Pool of hosts."""
def __init__(self, session):
super(Pool, self).__init__(session, "pool")
| apache-2.0 |
jawaid/django-organice | tests/utils.py | 1 | 2663 | """
Helper functions for tests
"""
from organice.management.settings import DjangoSettingsManager
def probe_values_in_tuple(content, tuple_key, required_values):
"""
Test a tuple for required values, extracting the tuple beforehand.
:param content: content string containing the tuple attribute (e.g. Django settings)
:param tuple_key: attribute name of the tuple
:param required_values: list or tuple of values for testing the tuple
:return: None (asserts in case of failure)
"""
try:
start_pos = content.find("%s = (\n" % tuple_key)
assert start_pos != -1, "Tuple not found: %s" % tuple_key
stop_pos = 1 + content.find("\n)\n", start_pos)
assert stop_pos > start_pos, "End of tuple not found: %s" % tuple_key
tuple = content[start_pos:stop_pos]
for val in required_values:
val_line = (" '%s',\n" % val)
assert val_line in tuple, "Not found in tuple %s: %s" % (tuple_key, val)
return True
except AssertionError as ae:
print(ae.message)
return False
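# Hedged usage sketch (editor's addition, not part of the original helpers):
# the setting name and app names below are placeholders only.
def _example_probe_installed_apps(settings_text):
    return probe_values_in_tuple(settings_text, 'INSTALLED_APPS',
                                 ['django.contrib.admin', 'organice'])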
def probe_values_in_list(content, settings_path, required_values):
"""
Test a list for required values, extracting the list beforehand.
:param content: content string containing the list attribute (e.g. Django settings)
:param settings_path: attribute hierarchy list to find the list
:param required_values: list or tuple of values for testing the list
    :return: True if all required values were found, False otherwise
"""
last_index = len(settings_path) - 1
indentation = DjangoSettingsManager._indentation_by(last_index)
try:
start, stop = DjangoSettingsManager._find_block(content, settings_path)
block = content[start:stop]
for val in required_values:
val_line = ("%s'%s',\n" % (indentation, val))
assert val_line in block, "Not found in block %s: %s" % \
(settings_path[last_index], val)
return True
except AssertionError as ae:
print(ae.message)
return False
def pytest_generate_tests(metafunc):
"""
A test scenarios implementation for py.test, as found at
http://pytest.org/latest/example/parametrize.html#a-quick-port-of-testscenarios
Picks up a ``scenarios`` class variable to parametrize all test function calls.
"""
idlist = []
argvalues = []
for scenario in metafunc.cls.scenarios:
idlist.append(scenario[0])
items = scenario[1].items()
argnames = [x[0] for x in items]
argvalues.append(([x[1] for x in items]))
metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")
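# Hedged illustration (editor's addition): a test class as consumed by
# pytest_generate_tests() above; the scenario ids and attributes are
# placeholders, not taken from the project.
class _ExampleScenarios(object):
    scenarios = [
        ('lower', {'value': 'abc', 'expected': 'ABC'}),
        ('mixed', {'value': 'aBc', 'expected': 'ABC'}),
    ]
    def test_upper(self, value, expected):
        assert value.upper() == expected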
| apache-2.0 |
mcrowson/django | tests/generic_relations/tests.py | 35 | 28530 | from __future__ import unicode_literals
from django import forms
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import IntegrityError
from django.db.models import Q
from django.test import SimpleTestCase, TestCase
from django.utils import six
from .models import (
AllowsNullGFK, Animal, Comparison, ConcreteRelatedModel,
ForConcreteModelModel, ForProxyModelModel, Gecko, ManualPK, Mineral,
ProxyRelatedModel, Rock, TaggedItem, ValuableTaggedItem, Vegetable,
)
class GenericRelationsTests(TestCase):
def setUp(self):
self.lion = Animal.objects.create(
common_name="Lion", latin_name="Panthera leo")
self.platypus = Animal.objects.create(
common_name="Platypus", latin_name="Ornithorhynchus anatinus")
Vegetable.objects.create(name="Eggplant", is_yucky=True)
self.bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
self.quartz = Mineral.objects.create(name="Quartz", hardness=7)
# Tagging stuff.
self.bacon.tags.create(tag="fatty")
self.bacon.tags.create(tag="salty")
self.lion.tags.create(tag="yellow")
self.lion.tags.create(tag="hairy")
# Original list of tags:
self.comp_func = lambda obj: (
obj.tag, obj.content_type.model_class(), obj.object_id
)
def test_generic_update_or_create_when_created(self):
"""
Should be able to use update_or_create from the generic related manager
to create a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag, created = self.bacon.tags.update_or_create(tag='stinky')
self.assertTrue(created)
self.assertEqual(count + 1, self.bacon.tags.count())
def test_generic_update_or_create_when_updated(self):
"""
Should be able to use update_or_create from the generic related manager
to update a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag = self.bacon.tags.create(tag='stinky')
self.assertEqual(count + 1, self.bacon.tags.count())
tag, created = self.bacon.tags.update_or_create(defaults={'tag': 'juicy'}, id=tag.id)
self.assertFalse(created)
self.assertEqual(count + 1, self.bacon.tags.count())
self.assertEqual(tag.tag, 'juicy')
def test_generic_get_or_create_when_created(self):
"""
Should be able to use get_or_create from the generic related manager
to create a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag, created = self.bacon.tags.get_or_create(tag='stinky')
self.assertTrue(created)
self.assertEqual(count + 1, self.bacon.tags.count())
def test_generic_get_or_create_when_exists(self):
"""
Should be able to use get_or_create from the generic related manager
to get a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag = self.bacon.tags.create(tag="stinky")
self.assertEqual(count + 1, self.bacon.tags.count())
tag, created = self.bacon.tags.get_or_create(id=tag.id, defaults={'tag': 'juicy'})
self.assertFalse(created)
self.assertEqual(count + 1, self.bacon.tags.count())
        # shouldn't have changed the tag
self.assertEqual(tag.tag, 'stinky')
def test_generic_relations_m2m_mimic(self):
"""
Objects with declared GenericRelations can be tagged directly -- the
API mimics the many-to-many API.
"""
self.assertQuerysetEqual(self.lion.tags.all(), [
"<TaggedItem: hairy>",
"<TaggedItem: yellow>"
])
self.assertQuerysetEqual(self.bacon.tags.all(), [
"<TaggedItem: fatty>",
"<TaggedItem: salty>"
])
def test_access_content_object(self):
"""
Test accessing the content object like a foreign key.
"""
tagged_item = TaggedItem.objects.get(tag="salty")
self.assertEqual(tagged_item.content_object, self.bacon)
def test_query_content_object(self):
qs = TaggedItem.objects.filter(
animal__isnull=False).order_by('animal__common_name', 'tag')
self.assertQuerysetEqual(
qs, ["<TaggedItem: hairy>", "<TaggedItem: yellow>"]
)
mpk = ManualPK.objects.create(id=1)
mpk.tags.create(tag='mpk')
qs = TaggedItem.objects.filter(
Q(animal__isnull=False) | Q(manualpk__id=1)).order_by('tag')
self.assertQuerysetEqual(
qs, ["hairy", "mpk", "yellow"], lambda x: x.tag)
def test_exclude_generic_relations(self):
"""
Test lookups over an object without GenericRelations.
"""
# Recall that the Mineral class doesn't have an explicit GenericRelation
# defined. That's OK, because you can create TaggedItems explicitly.
# However, excluding GenericRelations means your lookups have to be a
# bit more explicit.
TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
ctype = ContentType.objects.get_for_model(self.quartz)
q = TaggedItem.objects.filter(
content_type__pk=ctype.id, object_id=self.quartz.id
)
self.assertQuerysetEqual(q, [
"<TaggedItem: clearish>",
"<TaggedItem: shiny>"
])
def test_access_via_content_type(self):
"""
Test lookups through content type.
"""
self.lion.delete()
self.platypus.tags.create(tag="fatty")
ctype = ContentType.objects.get_for_model(self.platypus)
self.assertQuerysetEqual(
Animal.objects.filter(tags__content_type=ctype),
["<Animal: Platypus>"])
def test_set_foreign_key(self):
"""
You can set a generic foreign key in the way you'd expect.
"""
tag1 = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
tag1.content_object = self.platypus
tag1.save()
self.assertQuerysetEqual(
self.platypus.tags.all(),
["<TaggedItem: shiny>"])
def test_queries_across_generic_relations(self):
"""
Queries across generic relations respect the content types. Even though
there are two TaggedItems with a tag of "fatty", this query only pulls
out the one with the content type related to Animals.
"""
self.assertQuerysetEqual(Animal.objects.order_by('common_name'), [
"<Animal: Lion>",
"<Animal: Platypus>"
])
def test_queries_content_type_restriction(self):
"""
Create another fatty tagged instance with different PK to ensure there
is a content type restriction in the generated queries below.
"""
mpk = ManualPK.objects.create(id=self.lion.pk)
mpk.tags.create(tag="fatty")
self.platypus.tags.create(tag="fatty")
self.assertQuerysetEqual(
Animal.objects.filter(tags__tag='fatty'), ["<Animal: Platypus>"])
self.assertQuerysetEqual(
Animal.objects.exclude(tags__tag='fatty'), ["<Animal: Lion>"])
def test_object_deletion_with_generic_relation(self):
"""
If you delete an object with an explicit Generic relation, the related
objects are deleted when the source object is deleted.
"""
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('fatty', Vegetable, self.bacon.pk),
('hairy', Animal, self.lion.pk),
('salty', Vegetable, self.bacon.pk),
('yellow', Animal, self.lion.pk)
],
self.comp_func
)
self.lion.delete()
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('fatty', Vegetable, self.bacon.pk),
('salty', Vegetable, self.bacon.pk),
],
self.comp_func
)
def test_object_deletion_without_generic_relation(self):
"""
If Generic Relation is not explicitly defined, any related objects
remain after deletion of the source object.
"""
TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
quartz_pk = self.quartz.pk
self.quartz.delete()
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('clearish', Mineral, quartz_pk),
('fatty', Vegetable, self.bacon.pk),
('hairy', Animal, self.lion.pk),
('salty', Vegetable, self.bacon.pk),
('yellow', Animal, self.lion.pk),
],
self.comp_func
)
def test_tag_deletion_related_objects_unaffected(self):
"""
If you delete a tag, the objects using the tag are unaffected (other
than losing a tag).
"""
ctype = ContentType.objects.get_for_model(self.lion)
tag = TaggedItem.objects.get(
content_type__pk=ctype.id, object_id=self.lion.id, tag="hairy")
tag.delete()
self.assertQuerysetEqual(self.lion.tags.all(), ["<TaggedItem: yellow>"])
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('fatty', Vegetable, self.bacon.pk),
('salty', Vegetable, self.bacon.pk),
('yellow', Animal, self.lion.pk)
],
self.comp_func
)
def test_add_bulk(self):
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
t1 = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
t2 = TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
# One update() query.
with self.assertNumQueries(1):
bacon.tags.add(t1, t2)
self.assertEqual(t1.content_object, bacon)
self.assertEqual(t2.content_object, bacon)
def test_add_bulk_false(self):
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
t1 = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
t2 = TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
# One save() for each object.
with self.assertNumQueries(2):
bacon.tags.add(t1, t2, bulk=False)
self.assertEqual(t1.content_object, bacon)
self.assertEqual(t2.content_object, bacon)
def test_add_rejects_unsaved_objects(self):
t1 = TaggedItem(content_object=self.quartz, tag="shiny")
msg = "<TaggedItem: shiny> instance isn't saved. Use bulk=False or save the object first."
with self.assertRaisesMessage(ValueError, msg):
self.bacon.tags.add(t1)
def test_set(self):
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
fatty = bacon.tags.create(tag="fatty")
salty = bacon.tags.create(tag="salty")
bacon.tags.set([fatty, salty])
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
"<TaggedItem: salty>",
])
bacon.tags.set([fatty])
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
])
bacon.tags.set([])
self.assertQuerysetEqual(bacon.tags.all(), [])
bacon.tags.set([fatty, salty], bulk=False, clear=True)
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
"<TaggedItem: salty>",
])
bacon.tags.set([fatty], bulk=False, clear=True)
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
])
bacon.tags.set([], clear=True)
self.assertQuerysetEqual(bacon.tags.all(), [])
def test_assign(self):
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
fatty = bacon.tags.create(tag="fatty")
salty = bacon.tags.create(tag="salty")
bacon.tags = [fatty, salty]
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
"<TaggedItem: salty>",
])
bacon.tags = [fatty]
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
])
bacon.tags = []
self.assertQuerysetEqual(bacon.tags.all(), [])
def test_assign_with_queryset(self):
# Ensure that querysets used in reverse GFK assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
# ManyRelatedObjectsDescriptor.__set__. Refs #19816.
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
bacon.tags.create(tag="fatty")
bacon.tags.create(tag="salty")
self.assertEqual(2, bacon.tags.count())
qs = bacon.tags.filter(tag="fatty")
bacon.tags = qs
self.assertEqual(1, bacon.tags.count())
self.assertEqual(1, qs.count())
def test_generic_relation_related_name_default(self):
# Test that GenericRelation by default isn't usable from
# the reverse side.
with self.assertRaises(FieldError):
TaggedItem.objects.filter(vegetable__isnull=True)
def test_multiple_gfk(self):
# Simple tests for multiple GenericForeignKeys
# only uses one model, since the above tests should be sufficient.
tiger = Animal.objects.create(common_name="tiger")
cheetah = Animal.objects.create(common_name="cheetah")
bear = Animal.objects.create(common_name="bear")
# Create directly
Comparison.objects.create(
first_obj=cheetah, other_obj=tiger, comparative="faster"
)
Comparison.objects.create(
first_obj=tiger, other_obj=cheetah, comparative="cooler"
)
# Create using GenericRelation
tiger.comparisons.create(other_obj=bear, comparative="cooler")
tiger.comparisons.create(other_obj=cheetah, comparative="stronger")
self.assertQuerysetEqual(cheetah.comparisons.all(), [
"<Comparison: cheetah is faster than tiger>"
])
# Filtering works
self.assertQuerysetEqual(tiger.comparisons.filter(comparative="cooler"), [
"<Comparison: tiger is cooler than cheetah>",
"<Comparison: tiger is cooler than bear>",
], ordered=False)
# Filtering and deleting works
subjective = ["cooler"]
tiger.comparisons.filter(comparative__in=subjective).delete()
self.assertQuerysetEqual(Comparison.objects.all(), [
"<Comparison: cheetah is faster than tiger>",
"<Comparison: tiger is stronger than cheetah>"
], ordered=False)
# If we delete cheetah, Comparisons with cheetah as 'first_obj' will be
# deleted since Animal has an explicit GenericRelation to Comparison
# through first_obj. Comparisons with cheetah as 'other_obj' will not
# be deleted.
cheetah.delete()
self.assertQuerysetEqual(Comparison.objects.all(), [
"<Comparison: tiger is stronger than None>"
])
def test_gfk_subclasses(self):
# GenericForeignKey should work with subclasses (see #8309)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
valuedtag = ValuableTaggedItem.objects.create(
content_object=quartz, tag="shiny", value=10
)
self.assertEqual(valuedtag.content_object, quartz)
def test_generic_inline_formsets(self):
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet()
self.assertHTMLEqual(
''.join(form.as_p() for form in formset.forms),
"""<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">
Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" />
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>"""
)
formset = GenericFormSet(instance=Animal())
self.assertHTMLEqual(
''.join(form.as_p() for form in formset.forms),
"""<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">
Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag"
type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /><input type="hidden"
name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>"""
)
platypus = Animal.objects.create(
common_name="Platypus", latin_name="Ornithorhynchus anatinus"
)
platypus.tags.create(tag="shiny")
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet(instance=platypus)
tagged_item_id = TaggedItem.objects.get(
tag='shiny', object_id=platypus.id
).id
self.assertHTMLEqual(
''.join(form.as_p() for form in formset.forms),
"""<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag" value="shiny" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" />
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id"
value="%s" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-1-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-1-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-1-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-1-DELETE" />
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-1-id"
id="id_generic_relations-taggeditem-content_type-object_id-1-id" /></p>""" % tagged_item_id
)
lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo")
formset = GenericFormSet(instance=lion, prefix='x')
self.assertHTMLEqual(
''.join(form.as_p() for form in formset.forms),
"""<p><label for="id_x-0-tag">Tag:</label>
<input id="id_x-0-tag" type="text" name="x-0-tag" maxlength="50" /></p>
<p><label for="id_x-0-DELETE">Delete:</label> <input type="checkbox" name="x-0-DELETE" id="id_x-0-DELETE" />
<input type="hidden" name="x-0-id" id="id_x-0-id" /></p>"""
)
def test_gfk_manager(self):
# GenericForeignKey should not use the default manager (which may filter objects) #16048
tailless = Gecko.objects.create(has_tail=False)
tag = TaggedItem.objects.create(content_object=tailless, tag="lizard")
self.assertEqual(tag.content_object, tailless)
def test_subclasses_with_gen_rel(self):
"""
Test that concrete model subclasses with generic relations work
correctly (ticket 11263).
"""
granite = Rock.objects.create(name='granite', hardness=5)
TaggedItem.objects.create(content_object=granite, tag="countertop")
self.assertEqual(Rock.objects.filter(tags__tag="countertop").count(), 1)
def test_generic_inline_formsets_initial(self):
"""
Test for #17927 Initial values support for BaseGenericInlineFormSet.
"""
quartz = Mineral.objects.create(name="Quartz", hardness=7)
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
ctype = ContentType.objects.get_for_model(quartz)
initial_data = [{
'tag': 'lizard',
'content_type': ctype.pk,
'object_id': quartz.pk,
}]
formset = GenericFormSet(initial=initial_data)
self.assertEqual(formset.forms[0].initial, initial_data[0])
def test_get_or_create(self):
# get_or_create should work with virtual fields (content_object)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
tag, created = TaggedItem.objects.get_or_create(tag="shiny",
defaults={'content_object': quartz})
self.assertTrue(created)
self.assertEqual(tag.tag, "shiny")
self.assertEqual(tag.content_object.id, quartz.id)
def test_update_or_create_defaults(self):
# update_or_create should work with virtual fields (content_object)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
diamond = Mineral.objects.create(name="Diamond", hardness=7)
tag, created = TaggedItem.objects.update_or_create(tag="shiny",
defaults={'content_object': quartz})
self.assertTrue(created)
self.assertEqual(tag.content_object.id, quartz.id)
tag, created = TaggedItem.objects.update_or_create(tag="shiny",
defaults={'content_object': diamond})
self.assertFalse(created)
self.assertEqual(tag.content_object.id, diamond.id)
def test_query_content_type(self):
msg = "Field 'content_object' does not generate an automatic reverse relation"
with self.assertRaisesMessage(FieldError, msg):
TaggedItem.objects.get(content_object='')
def test_unsaved_instance_on_generic_foreign_key(self):
"""
Assigning an unsaved object to GenericForeignKey should raise an
exception on model.save().
"""
quartz = Mineral(name="Quartz", hardness=7)
with self.assertRaises(IntegrityError):
TaggedItem.objects.create(tag="shiny", content_object=quartz)
class CustomWidget(forms.TextInput):
pass
class TaggedItemForm(forms.ModelForm):
class Meta:
model = TaggedItem
fields = '__all__'
widgets = {'tag': CustomWidget}
class GenericInlineFormsetTest(TestCase):
def test_generic_inlineformset_factory(self):
"""
Regression for #14572: Using base forms with widgets
defined in Meta should not raise errors.
"""
Formset = generic_inlineformset_factory(TaggedItem, TaggedItemForm)
form = Formset().forms[0]
self.assertIsInstance(form['tag'].field.widget, CustomWidget)
def test_save_new_uses_form_save(self):
"""
Regression for #16260: save_new should call form.save()
"""
class SaveTestForm(forms.ModelForm):
def save(self, *args, **kwargs):
self.instance.saved_by = "custom method"
return super(SaveTestForm, self).save(*args, **kwargs)
Formset = generic_inlineformset_factory(
ForProxyModelModel, fields='__all__', form=SaveTestForm)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj = formset.save()[0]
self.assertEqual(new_obj.saved_by, "custom method")
def test_save_new_for_proxy(self):
Formset = generic_inlineformset_factory(ForProxyModelModel,
fields='__all__', for_concrete_model=False)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj, = formset.save()
self.assertEqual(new_obj.obj, instance)
def test_save_new_for_concrete(self):
Formset = generic_inlineformset_factory(ForProxyModelModel,
fields='__all__', for_concrete_model=True)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj, = formset.save()
self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel)
class ProxyRelatedModelTest(TestCase):
def test_default_behavior(self):
"""
The default for for_concrete_model should be True
"""
base = ForConcreteModelModel()
base.obj = rel = ProxyRelatedModel.objects.create()
base.save()
base = ForConcreteModelModel.objects.get(pk=base.pk)
rel = ConcreteRelatedModel.objects.get(pk=rel.pk)
self.assertEqual(base.obj, rel)
def test_works_normally(self):
"""
When for_concrete_model is False, we should still be able to get
an instance of the concrete class.
"""
base = ForProxyModelModel()
base.obj = rel = ConcreteRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
self.assertEqual(base.obj, rel)
def test_proxy_is_returned(self):
"""
Instances of the proxy should be returned when
for_concrete_model is False.
"""
base = ForProxyModelModel()
base.obj = ProxyRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
self.assertIsInstance(base.obj, ProxyRelatedModel)
def test_query(self):
base = ForProxyModelModel()
base.obj = rel = ConcreteRelatedModel.objects.create()
base.save()
self.assertEqual(rel, ConcreteRelatedModel.objects.get(bases__id=base.id))
def test_query_proxy(self):
base = ForProxyModelModel()
base.obj = rel = ProxyRelatedModel.objects.create()
base.save()
self.assertEqual(rel, ProxyRelatedModel.objects.get(bases__id=base.id))
def test_generic_relation(self):
base = ForProxyModelModel()
base.obj = ProxyRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
rel = ProxyRelatedModel.objects.get(pk=base.obj.pk)
self.assertEqual(base, rel.bases.get())
def test_generic_relation_set(self):
base = ForProxyModelModel()
base.obj = ConcreteRelatedModel.objects.create()
base.save()
newrel = ConcreteRelatedModel.objects.create()
newrel.bases = [base]
newrel = ConcreteRelatedModel.objects.get(pk=newrel.pk)
self.assertEqual(base, newrel.bases.get())
class TestInitWithNoneArgument(SimpleTestCase):
def test_none_not_allowed(self):
# TaggedItem requires a content_type, initializing with None should
# raise a ValueError.
with six.assertRaisesRegex(self, ValueError,
'Cannot assign None: "TaggedItem.content_type" does not allow null values'):
TaggedItem(content_object=None)
def test_none_allowed(self):
# AllowsNullGFK doesn't require a content_type, so None argument should
# also be allowed.
AllowsNullGFK(content_object=None)
| bsd-3-clause |
rlindner81/pyload | module/PluginThread.py | 40 | 21373 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: RaNaN
"""
from Queue import Queue
from threading import Thread
from os import listdir, stat
from os.path import join
from time import sleep, time, strftime, gmtime
from traceback import print_exc, format_exc
from pprint import pformat
from sys import exc_info, exc_clear
from copy import copy
from types import MethodType
from pycurl import error
from PyFile import PyFile
from plugins.Plugin import Abort, Fail, Reconnect, Retry, SkipDownload
from common.packagetools import parseNames
from utils import save_join
from Api import OnlineStatus
class PluginThread(Thread):
"""abstract base class for thread types"""
#----------------------------------------------------------------------
def __init__(self, manager):
"""Constructor"""
Thread.__init__(self)
self.setDaemon(True)
self.m = manager #thread manager
def writeDebugReport(self, pyfile):
""" writes a
:return:
"""
dump_name = "debug_%s_%s.zip" % (pyfile.pluginname, strftime("%d-%m-%Y_%H-%M-%S"))
dump = self.getDebugDump(pyfile)
try:
import zipfile
zip = zipfile.ZipFile(dump_name, "w")
for f in listdir(join("tmp", pyfile.pluginname)):
try:
# avoid encoding errors
zip.write(join("tmp", pyfile.pluginname, f), save_join(pyfile.pluginname, f))
except:
pass
info = zipfile.ZipInfo(save_join(pyfile.pluginname, "debug_Report.txt"), gmtime())
info.external_attr = 0644 << 16L # change permissions
zip.writestr(info, dump)
zip.close()
if not stat(dump_name).st_size:
raise Exception("Empty Zipfile")
except Exception, e:
self.m.log.debug("Error creating zip file: %s" % e)
dump_name = dump_name.replace(".zip", ".txt")
f = open(dump_name, "wb")
f.write(dump)
f.close()
self.m.core.log.info("Debug Report written to %s" % dump_name)
def getDebugDump(self, pyfile):
dump = "pyLoad %s Debug Report of %s %s \n\nTRACEBACK:\n %s \n\nFRAMESTACK:\n" % (
self.m.core.api.getServerVersion(), pyfile.pluginname, pyfile.plugin.__version__, format_exc())
tb = exc_info()[2]
stack = []
while tb:
stack.append(tb.tb_frame)
tb = tb.tb_next
for frame in stack[1:]:
dump += "\nFrame %s in %s at line %s\n" % (frame.f_code.co_name,
frame.f_code.co_filename,
frame.f_lineno)
for key, value in frame.f_locals.items():
dump += "\t%20s = " % key
try:
dump += pformat(value) + "\n"
except Exception, e:
dump += "<ERROR WHILE PRINTING VALUE> " + str(e) + "\n"
del frame
del stack #delete it just to be sure...
dump += "\n\nPLUGIN OBJECT DUMP: \n\n"
for name in dir(pyfile.plugin):
attr = getattr(pyfile.plugin, name)
if not name.endswith("__") and type(attr) != MethodType:
dump += "\t%20s = " % name
try:
dump += pformat(attr) + "\n"
except Exception, e:
dump += "<ERROR WHILE PRINTING VALUE> " + str(e) + "\n"
dump += "\nPYFILE OBJECT DUMP: \n\n"
for name in dir(pyfile):
attr = getattr(pyfile, name)
if not name.endswith("__") and type(attr) != MethodType:
dump += "\t%20s = " % name
try:
dump += pformat(attr) + "\n"
except Exception, e:
dump += "<ERROR WHILE PRINTING VALUE> " + str(e) + "\n"
if pyfile.pluginname in self.m.core.config.plugin:
dump += "\n\nCONFIG: \n\n"
dump += pformat(self.m.core.config.plugin[pyfile.pluginname]) + "\n"
return dump
def clean(self, pyfile):
""" set thread unactive and release pyfile """
self.active = False
pyfile.release()
class DownloadThread(PluginThread):
"""thread for downloading files from 'real' hoster plugins"""
#----------------------------------------------------------------------
def __init__(self, manager):
"""Constructor"""
PluginThread.__init__(self, manager)
self.queue = Queue() # job queue
self.active = False
self.start()
#----------------------------------------------------------------------
def run(self):
"""run method"""
pyfile = None
while True:
del pyfile
self.active = self.queue.get()
pyfile = self.active
if self.active == "quit":
self.active = False
self.m.threads.remove(self)
return True
try:
if not pyfile.hasPlugin(): continue
#this pyfile was deleted while queueing
pyfile.plugin.checkForSameFiles(starting=True)
self.m.log.info(_("Download starts: %s" % pyfile.name))
# start download
self.m.core.hookManager.downloadPreparing(pyfile)
pyfile.plugin.preprocessing(self)
self.m.log.info(_("Download finished: %s") % pyfile.name)
self.m.core.hookManager.downloadFinished(pyfile)
self.m.core.files.checkPackageFinished(pyfile)
except NotImplementedError:
self.m.log.error(_("Plugin %s is missing a function.") % pyfile.pluginname)
pyfile.setStatus("failed")
pyfile.error = "Plugin does not work"
self.clean(pyfile)
continue
except Abort:
try:
self.m.log.info(_("Download aborted: %s") % pyfile.name)
except:
pass
pyfile.setStatus("aborted")
self.clean(pyfile)
continue
except Reconnect:
self.queue.put(pyfile)
#pyfile.req.clearCookies()
while self.m.reconnecting.isSet():
sleep(0.5)
continue
except Retry, e:
reason = e.args[0]
self.m.log.info(_("Download restarted: %(name)s | %(msg)s") % {"name": pyfile.name, "msg": reason})
self.queue.put(pyfile)
continue
except Fail, e:
msg = e.args[0]
if msg == "offline":
pyfile.setStatus("offline")
self.m.log.warning(_("Download is offline: %s") % pyfile.name)
elif msg == "temp. offline":
pyfile.setStatus("temp. offline")
self.m.log.warning(_("Download is temporary offline: %s") % pyfile.name)
else:
pyfile.setStatus("failed")
self.m.log.warning(_("Download failed: %(name)s | %(msg)s") % {"name": pyfile.name, "msg": msg})
pyfile.error = msg
self.m.core.hookManager.downloadFailed(pyfile)
self.clean(pyfile)
continue
except error, e:
if len(e.args) == 2:
code, msg = e.args
else:
code = 0
msg = e.args
self.m.log.debug("pycurl exception %s: %s" % (code, msg))
if code in (7, 18, 28, 52, 56):
self.m.log.warning(_("Couldn't connect to host or connection reset, waiting 1 minute and retry."))
wait = time() + 60
pyfile.waitUntil = wait
pyfile.setStatus("waiting")
while time() < wait:
sleep(1)
if pyfile.abort:
break
if pyfile.abort:
self.m.log.info(_("Download aborted: %s") % pyfile.name)
pyfile.setStatus("aborted")
self.clean(pyfile)
else:
self.queue.put(pyfile)
continue
else:
pyfile.setStatus("failed")
self.m.log.error("pycurl error %s: %s" % (code, msg))
if self.m.core.debug:
print_exc()
self.writeDebugReport(pyfile)
self.m.core.hookManager.downloadFailed(pyfile)
self.clean(pyfile)
continue
except SkipDownload, e:
pyfile.setStatus("skipped")
self.m.log.info(
_("Download skipped: %(name)s due to %(plugin)s") % {"name": pyfile.name, "plugin": e.message})
self.clean(pyfile)
self.m.core.files.checkPackageFinished(pyfile)
self.active = False
self.m.core.files.save()
continue
except Exception, e:
pyfile.setStatus("failed")
self.m.log.warning(_("Download failed: %(name)s | %(msg)s") % {"name": pyfile.name, "msg": str(e)})
pyfile.error = str(e)
if self.m.core.debug:
print_exc()
self.writeDebugReport(pyfile)
self.m.core.hookManager.downloadFailed(pyfile)
self.clean(pyfile)
continue
finally:
self.m.core.files.save()
pyfile.checkIfProcessed()
exc_clear()
#pyfile.plugin.req.clean()
self.active = False
pyfile.finishIfDone()
self.m.core.files.save()
def put(self, job):
"""assing job to thread"""
self.queue.put(job)
def stop(self):
"""stops the thread"""
self.put("quit")
class DecrypterThread(PluginThread):
"""thread for decrypting"""
def __init__(self, manager, pyfile):
"""constructor"""
PluginThread.__init__(self, manager)
self.active = pyfile
manager.localThreads.append(self)
pyfile.setStatus("decrypting")
self.start()
def getActiveFiles(self):
return [self.active]
def run(self):
"""run method"""
pyfile = self.active
retry = False
try:
self.m.log.info(_("Decrypting starts: %s") % self.active.name)
self.active.plugin.preprocessing(self)
except NotImplementedError:
self.m.log.error(_("Plugin %s is missing a function.") % self.active.pluginname)
return
except Fail, e:
msg = e.args[0]
if msg == "offline":
self.active.setStatus("offline")
self.m.log.warning(_("Download is offline: %s") % self.active.name)
else:
self.active.setStatus("failed")
self.m.log.error(_("Decrypting failed: %(name)s | %(msg)s") % {"name": self.active.name, "msg": msg})
self.active.error = msg
return
except Abort:
self.m.log.info(_("Download aborted: %s") % pyfile.name)
pyfile.setStatus("aborted")
return
except Retry:
self.m.log.info(_("Retrying %s") % self.active.name)
retry = True
return self.run()
except Exception, e:
self.active.setStatus("failed")
self.m.log.error(_("Decrypting failed: %(name)s | %(msg)s") % {"name": self.active.name, "msg": str(e)})
self.active.error = str(e)
if self.m.core.debug:
print_exc()
self.writeDebugReport(pyfile)
return
finally:
if not retry:
self.active.release()
self.active = False
self.m.core.files.save()
self.m.localThreads.remove(self)
exc_clear()
#self.m.core.hookManager.downloadFinished(pyfile)
#self.m.localThreads.remove(self)
#self.active.finishIfDone()
if not retry:
pyfile.delete()
class HookThread(PluginThread):
"""thread for hooks"""
#----------------------------------------------------------------------
def __init__(self, m, function, args, kwargs):
"""Constructor"""
PluginThread.__init__(self, m)
self.f = function
self.args = args
self.kwargs = kwargs
self.active = []
m.localThreads.append(self)
self.start()
def getActiveFiles(self):
return self.active
def addActive(self, pyfile):
""" Adds a pyfile to active list and thus will be displayed on overview"""
if pyfile not in self.active:
self.active.append(pyfile)
def finishFile(self, pyfile):
if pyfile in self.active:
self.active.remove(pyfile)
pyfile.finishIfDone()
def run(self):
try:
try:
self.kwargs["thread"] = self
self.f(*self.args, **self.kwargs)
except TypeError, e:
#dirty method to filter out exceptions
if "unexpected keyword argument 'thread'" not in e.args[0]:
raise
del self.kwargs["thread"]
self.f(*self.args, **self.kwargs)
finally:
local = copy(self.active)
for x in local:
self.finishFile(x)
self.m.localThreads.remove(self)
class InfoThread(PluginThread):
def __init__(self, manager, data, pid=-1, rid=-1, add=False):
"""Constructor"""
PluginThread.__init__(self, manager)
self.data = data
self.pid = pid # package id
# [ .. (name, plugin) .. ]
self.rid = rid #result id
self.add = add #add packages instead of return result
self.cache = [] #accumulated data
self.start()
def run(self):
"""run method"""
plugins = {}
container = []
for url, plugin in self.data:
if plugin in plugins:
plugins[plugin].append(url)
else:
plugins[plugin] = [url]
# filter out container plugins
for name in self.m.core.pluginManager.containerPlugins:
if name in plugins:
container.extend([(name, url) for url in plugins[name]])
del plugins[name]
#directly write to database
if self.pid > -1:
for pluginname, urls in plugins.iteritems():
plugin = self.m.core.pluginManager.getPlugin(pluginname, True)
if hasattr(plugin, "getInfo"):
self.fetchForPlugin(pluginname, plugin, urls, self.updateDB)
self.m.core.files.save()
elif self.add:
for pluginname, urls in plugins.iteritems():
plugin = self.m.core.pluginManager.getPlugin(pluginname, True)
if hasattr(plugin, "getInfo"):
self.fetchForPlugin(pluginname, plugin, urls, self.updateCache, True)
else:
#generate default result
result = [(url, 0, 3, url) for url in urls]
self.updateCache(pluginname, result)
packs = parseNames([(name, url) for name, x, y, url in self.cache])
self.m.log.debug("Fetched and generated %d packages" % len(packs))
for k, v in packs:
self.m.core.api.addPackage(k, v)
#empty cache
del self.cache[:]
else: #post the results
for name, url in container:
#attach container content
try:
data = self.decryptContainer(name, url)
except:
print_exc()
self.m.log.error("Could not decrypt container.")
data = []
for url, plugin in data:
if plugin in plugins:
plugins[plugin].append(url)
else:
plugins[plugin] = [url]
self.m.infoResults[self.rid] = {}
for pluginname, urls in plugins.iteritems():
plugin = self.m.core.pluginManager.getPlugin(pluginname, True)
if hasattr(plugin, "getInfo"):
self.fetchForPlugin(pluginname, plugin, urls, self.updateResult, True)
#force to process cache
if self.cache:
self.updateResult(pluginname, [], True)
else:
#generate default result
result = [(url, 0, 3, url) for url in urls]
self.updateResult(pluginname, result, True)
self.m.infoResults[self.rid]["ALL_INFO_FETCHED"] = {}
self.m.timestamp = time() + 5 * 60
def updateDB(self, plugin, result):
self.m.core.files.updateFileInfo(result, self.pid)
def updateResult(self, plugin, result, force=False):
#parse package name and generate result
#accumulate results
self.cache.extend(result)
if len(self.cache) >= 20 or force:
#used for package generating
tmp = [(name, (url, OnlineStatus(name, plugin, "unknown", status, int(size))))
for name, size, status, url in self.cache]
data = parseNames(tmp)
result = {}
for k, v in data.iteritems():
for url, status in v:
status.packagename = k
result[url] = status
self.m.setInfoResults(self.rid, result)
self.cache = []
def updateCache(self, plugin, result):
self.cache.extend(result)
def fetchForPlugin(self, pluginname, plugin, urls, cb, err=None):
try:
result = [] #result loaded from cache
process = [] #urls to process
for url in urls:
if url in self.m.infoCache:
result.append(self.m.infoCache[url])
else:
process.append(url)
if result:
self.m.log.debug("Fetched %d values from cache for %s" % (len(result), pluginname))
cb(pluginname, result)
if process:
self.m.log.debug("Run Info Fetching for %s" % pluginname)
for result in plugin.getInfo(process):
#result = [ .. (name, size, status, url) .. ]
if not type(result) == list: result = [result]
for res in result:
self.m.infoCache[res[3]] = res
cb(pluginname, result)
self.m.log.debug("Finished Info Fetching for %s" % pluginname)
except Exception, e:
self.m.log.warning(_("Info Fetching for %(name)s failed | %(err)s") %
{"name": pluginname, "err": str(e)})
if self.m.core.debug:
print_exc()
# generate default results
if err:
result = [(url, 0, 3, url) for url in urls]
cb(pluginname, result)
def decryptContainer(self, plugin, url):
data = []
# only works on container plugins
self.m.log.debug("Pre decrypting %s with %s" % (url, plugin))
# dummy pyfile
pyfile = PyFile(self.m.core.files, -1, url, url, 0, 0, "", plugin, -1, -1)
pyfile.initPlugin()
# little plugin lifecycle
try:
pyfile.plugin.setup()
pyfile.plugin.loadToDisk()
pyfile.plugin.decrypt(pyfile)
pyfile.plugin.deleteTmp()
for pack in pyfile.plugin.packages:
pyfile.plugin.urls.extend(pack[1])
data = self.m.core.pluginManager.parseUrls(pyfile.plugin.urls)
self.m.log.debug("Got %d links." % len(data))
except Exception, e:
self.m.log.debug("Pre decrypting error: %s" % str(e))
finally:
pyfile.release()
return data
| gpl-3.0 |
afloren/nipype | nipype/interfaces/nitime/analysis.py | 14 | 10004 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Interfaces to functionality from nitime for time-series analysis of fmri data
- nitime.analysis.CoherenceAnalyzer: Coherence/y
- nitime.fmri.io:
- nitime.viz.drawmatrix_channels
"""
import warnings
import numpy as np
import tempfile
from ...utils.misc import package_check
from ..base import (TraitedSpec, File, Undefined, traits,
BaseInterface, isdefined, BaseInterfaceInputSpec)
from ...utils.filemanip import fname_presuffix
have_nitime = True
try:
package_check('nitime')
except Exception, e:
have_nitime = False
else:
import nitime.analysis as nta
from nitime.timeseries import TimeSeries
import nitime.viz as viz
class CoherenceAnalyzerInputSpec(BaseInterfaceInputSpec):
#Input either csv file, or time-series object and use _xor_inputs to
#discriminate
_xor_inputs = ('in_file', 'in_TS')
in_file = File(desc=('csv file with ROIs on the columns and '
'time-points on the rows. ROI names at the top row'),
exists=True,
requires=('TR',))
#If you gave just a file name, you need to specify the sampling_rate:
TR = traits.Float(desc=('The TR used to collect the data'
'in your csv file <in_file>'))
in_TS = traits.Any(desc='a nitime TimeSeries object')
NFFT = traits.Range(low=32, value=64, usedefault=True,
desc=('This is the size of the window used for '
'the spectral estimation. Use values between '
'32 and the number of samples in your time-series.'
'(Defaults to 64.)'))
n_overlap = traits.Range(low=0, value=0, usedefault=True,
desc=('The number of samples which overlap'
'between subsequent windows.(Defaults to 0)'))
frequency_range = traits.List(value=[0.02, 0.15], usedefault=True,
minlen=2,
maxlen=2,
desc=('The range of frequencies over'
'which the analysis will average.'
'[low,high] (Default [0.02,0.15]'))
output_csv_file = File(desc='File to write outputs (coherence,time-delay) with file-names: file_name_ {coherence,timedelay}')
output_figure_file = File(desc='File to write output figures (coherence,time-delay) with file-names: file_name_{coherence,timedelay}. Possible formats: .png,.svg,.pdf,.jpg,...')
figure_type = traits.Enum('matrix', 'network', usedefault=True,
desc=("The type of plot to generate, where "
"'matrix' denotes a matrix image and"
"'network' denotes a graph representation."
" Default: 'matrix'"))
class CoherenceAnalyzerOutputSpec(TraitedSpec):
coherence_array = traits.Array(desc=('The pairwise coherence values'
'between the ROIs'))
timedelay_array = traits.Array(desc=('The pairwise time delays between the'
'ROIs (in seconds)'))
coherence_csv = File(desc=('A csv file containing the pairwise '
'coherence values'))
timedelay_csv = File(desc=('A csv file containing the pairwise '
'time delay values'))
coherence_fig = File(desc=('Figure representing coherence values'))
timedelay_fig = File(desc=('Figure representing coherence values'))
class CoherenceAnalyzer(BaseInterface):
input_spec = CoherenceAnalyzerInputSpec
output_spec = CoherenceAnalyzerOutputSpec
def _read_csv(self):
"""
Read from csv in_file and return an array and ROI names
The input file should have a first row containing the names of the
ROIs (strings)
the rest of the data will be read in and transposed so that the rows
(TRs) will become the second (and last) dimension of the array
"""
#Check that input conforms to expectations:
first_row = open(self.inputs.in_file).readline()
if not first_row[1].isalpha():
raise ValueError("First row of in_file should contain ROI names as strings of characters")
roi_names = open(self.inputs.in_file).readline().replace('\"', '').strip('\n').split(',')
#Transpose, so that the time is the last dimension:
data = np.loadtxt(self.inputs.in_file, skiprows=1, delimiter=',').T
return data, roi_names
def _csv2ts(self):
""" Read data from the in_file and generate a nitime TimeSeries object"""
data, roi_names = self._read_csv()
TS = TimeSeries(data=data,
sampling_interval=self.inputs.TR,
time_unit='s')
TS.metadata = dict(ROIs=roi_names)
return TS
#Rewrite _run_interface, but not run
def _run_interface(self, runtime):
lb, ub = self.inputs.frequency_range
if self.inputs.in_TS is Undefined:
# get TS form csv and inputs.TR
TS = self._csv2ts()
else:
# get TS from inputs.in_TS
TS = self.inputs.in_TS
# deal with creating or storing ROI names:
if not TS.metadata.has_key('ROIs'):
self.ROIs = ['roi_%d' % x for x, _ in enumerate(TS.data)]
else:
self.ROIs = TS.metadata['ROIs']
A = nta.CoherenceAnalyzer(TS,
method=dict(this_method='welch',
NFFT=self.inputs.NFFT,
n_overlap=self.inputs.n_overlap))
freq_idx = np.where((A.frequencies > self.inputs.frequency_range[0]) *
(A.frequencies < self.inputs.frequency_range[1]))[0]
#Get the coherence matrix from the analyzer, averaging on the last
#(frequency) dimension: (roi X roi array)
self.coherence = np.mean(A.coherence[:, :, freq_idx], -1)
# Get the time delay from analyzer, (roi X roi array)
self.delay = np.mean(A.delay[:, :, freq_idx], -1)
return runtime
#Rewrite _list_outputs (look at BET)
def _list_outputs(self):
outputs = self.output_spec().get()
#if isdefined(self.inputs.output_csv_file):
#write to a csv file and assign a value to self.coherence_file (a
#file name + path)
#Always defined (the arrays):
outputs['coherence_array'] = self.coherence
outputs['timedelay_array'] = self.delay
#Conditional
if isdefined(self.inputs.output_csv_file) and hasattr(self, 'coherence'):
# we need to make a function that we call here that writes the
# coherence values to this file "coherence_csv" and makes the
# time_delay csv file??
self._make_output_files()
outputs['coherence_csv'] = fname_presuffix(self.inputs.output_csv_file, suffix='_coherence')
outputs['timedelay_csv'] = fname_presuffix(self.inputs.output_csv_file, suffix='_delay')
if isdefined(self.inputs.output_figure_file) and hasattr(self,
'coherence'):
self._make_output_figures()
outputs['coherence_fig'] = fname_presuffix(self.inputs.output_figure_file, suffix='_coherence')
outputs['timedelay_fig'] = fname_presuffix(self.inputs.output_figure_file, suffix='_delay')
return outputs
def _make_output_files(self):
"""
Generate the output csv files.
"""
for this in zip([self.coherence, self.delay], ['coherence', 'delay']):
tmp_f = tempfile.mkstemp()[1]
np.savetxt(tmp_f, this[0], delimiter=',')
fid = open(fname_presuffix(self.inputs.output_csv_file,
suffix='_%s' % this[1]), 'w+')
# this writes ROIs as header line
fid.write(',' + ','.join(self.ROIs) + '\n')
# this writes ROI and data to a line
for r, line in zip(self.ROIs, open(tmp_f)):
fid.write('%s,%s' % (r, line))
fid.close()
def _make_output_figures(self):
"""
Generate the desired figure and save the files according to
self.inputs.output_figure_file
"""
if self.inputs.figure_type == 'matrix':
fig_coh = viz.drawmatrix_channels(self.coherence,
channel_names=self.ROIs,
color_anchor=0)
fig_coh.savefig(fname_presuffix(self.inputs.output_figure_file,
suffix='_coherence'))
fig_dt = viz.drawmatrix_channels(self.delay,
channel_names=self.ROIs,
color_anchor=0)
fig_dt.savefig(fname_presuffix(self.inputs.output_figure_file,
suffix='_delay'))
else:
fig_coh = viz.drawgraph_channels(self.coherence,
channel_names=self.ROIs)
fig_coh.savefig(fname_presuffix(self.inputs.output_figure_file,
suffix='_coherence'))
fig_dt = viz.drawgraph_channels(self.delay,
channel_names=self.ROIs)
fig_dt.savefig(fname_presuffix(self.inputs.output_figure_file,
suffix='_delay'))
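# --- Illustrative usage sketch; not part of the original interface code. ---
# Typical nipype-style invocation on a hypothetical csv file whose first row
# holds ROI names and whose remaining rows hold one TR each; the file name,
# TR value and output path are assumptions made only for this example.
def _coherence_analyzer_example():  # pragma: no cover
    analyzer = CoherenceAnalyzer()
    analyzer.inputs.in_file = 'rois_by_tr.csv'  # hypothetical path, must exist
    analyzer.inputs.TR = 2.0
    analyzer.inputs.output_csv_file = 'coherence.csv'
    return analyzer.run()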
class GetTimeSeriesInputSpec():
pass
class GetTimeSeriesOutputSpec():
pass
class GetTimeSeries():
# getting time series data from nifti files and ROIs
pass
| bsd-3-clause |
mahak/keystone | keystone/tests/unit/test_app_config.py | 2 | 6285 | # encoding: utf-8
#
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from keystone.server.flask import core as server_flask
from keystone.tests import unit
class AppConfigTest(unit.TestCase):
default_config_file = 'keystone.conf'
custom_config_dir = '/etc/kst/'
custom_config_files = ['kst.conf', 'kst2.conf']
def test_config_files_have_default_values_when_envars_not_set(self):
config_files = server_flask._get_config_files()
config_files.sort()
expected_config_files = []
self.assertListEqual(config_files, expected_config_files)
def test_config_files_have_default_values_with_empty_envars(self):
env = {'OS_KEYSTONE_CONFIG_FILES': '',
'OS_KEYSTONE_CONFIG_DIR': ''}
config_files = server_flask._get_config_files(env)
config_files.sort()
expected_config_files = []
self.assertListEqual(config_files, expected_config_files)
def test_can_use_single_config_file_under_default_config_dir(self):
cfg = self.custom_config_files[0]
env = {'OS_KEYSTONE_CONFIG_FILES': cfg}
config_files = server_flask._get_config_files(env)
expected_config_files = [cfg]
self.assertListEqual(config_files, expected_config_files)
def test_can_use_multiple_config_files_under_default_config_dir(self):
env = {'OS_KEYSTONE_CONFIG_FILES': ';'.join(self.custom_config_files)}
config_files = server_flask._get_config_files(env)
config_files.sort()
expected_config_files = self.custom_config_files
self.assertListEqual(config_files, expected_config_files)
config_with_empty_strings = self.custom_config_files + ['', ' ']
env = {'OS_KEYSTONE_CONFIG_FILES': ';'.join(config_with_empty_strings)}
config_files = server_flask._get_config_files(env)
config_files.sort()
self.assertListEqual(config_files, expected_config_files)
def test_can_use_single_absolute_path_config_file(self):
cfg = self.custom_config_files[0]
cfgpath = os.path.join(self.custom_config_dir, cfg)
env = {'OS_KEYSTONE_CONFIG_FILES': cfgpath}
config_files = server_flask._get_config_files(env)
self.assertListEqual(config_files, [cfgpath])
def test_can_use_multiple_absolute_path_config_files(self):
cfgpaths = [os.path.join(self.custom_config_dir, cfg)
for cfg in self.custom_config_files]
cfgpaths.sort()
env = {'OS_KEYSTONE_CONFIG_FILES': ';'.join(cfgpaths)}
config_files = server_flask._get_config_files(env)
config_files.sort()
self.assertListEqual(config_files, cfgpaths)
env = {'OS_KEYSTONE_CONFIG_FILES': ';'.join(cfgpaths + ['', ' '])}
config_files = server_flask._get_config_files(env)
config_files.sort()
self.assertListEqual(config_files, cfgpaths)
def test_can_use_default_config_files_with_custom_config_dir(self):
env = {'OS_KEYSTONE_CONFIG_DIR': self.custom_config_dir}
config_files = server_flask._get_config_files(env)
config_files.sort()
expected_config_files = [os.path.join(self.custom_config_dir,
self.default_config_file)]
self.assertListEqual(config_files, expected_config_files)
def test_can_use_single_config_file_under_custom_config_dir(self):
cfg = self.custom_config_files[0]
env = {'OS_KEYSTONE_CONFIG_DIR': self.custom_config_dir,
'OS_KEYSTONE_CONFIG_FILES': cfg}
config_files = server_flask._get_config_files(env)
config_files.sort()
expected_config_files = [os.path.join(self.custom_config_dir, cfg)]
self.assertListEqual(config_files, expected_config_files)
def test_can_use_multiple_config_files_under_custom_config_dir(self):
env = {'OS_KEYSTONE_CONFIG_DIR': self.custom_config_dir,
'OS_KEYSTONE_CONFIG_FILES': ';'.join(self.custom_config_files)}
config_files = server_flask._get_config_files(env)
config_files.sort()
expected_config_files = [os.path.join(self.custom_config_dir, s)
for s in self.custom_config_files]
expected_config_files.sort()
self.assertListEqual(config_files, expected_config_files)
config_with_empty_strings = self.custom_config_files + ['', ' ']
env = {'OS_KEYSTONE_CONFIG_DIR': self.custom_config_dir,
'OS_KEYSTONE_CONFIG_FILES': ';'.join(config_with_empty_strings)}
config_files = server_flask._get_config_files(env)
config_files.sort()
self.assertListEqual(config_files, expected_config_files)
def test_can_mix_relative_and_absolute_paths_config_file(self):
cfg0 = self.custom_config_files[0]
cfgpath0 = os.path.join(self.custom_config_dir,
self.custom_config_files[0])
cfgpath1 = os.path.join(self.custom_config_dir,
self.custom_config_files[1])
env = {'OS_KEYSTONE_CONFIG_DIR': self.custom_config_dir,
'OS_KEYSTONE_CONFIG_FILES': ';'.join([cfg0, cfgpath1])}
config_files = server_flask._get_config_files(env)
config_files.sort()
expected_config_files = [cfgpath0, cfgpath1]
expected_config_files.sort()
self.assertListEqual(config_files, expected_config_files)
env = {'OS_KEYSTONE_CONFIG_FILES': ';'.join([cfg0, cfgpath1])}
config_files = server_flask._get_config_files(env)
config_files.sort()
expected_config_files = [cfg0, cfgpath1]
expected_config_files.sort()
self.assertListEqual(config_files, expected_config_files)
| apache-2.0 |
dan1/horizon-proto | openstack_dashboard/dashboards/admin/volumes/volumes/forms.py | 6 | 9763 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.forms import ValidationError # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators as utils_validators
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.project.volumes.volumes \
import forms as project_forms
class ManageVolume(forms.SelfHandlingForm):
identifier = forms.CharField(
max_length=255,
label=_("Identifier"),
help_text=_("Name or other identifier for existing volume"))
id_type = forms.ChoiceField(
label=_("Identifier Type"),
help_text=_("Type of backend device identifier provided"))
host = forms.CharField(
max_length=255,
label=_("Host"),
help_text=_("Cinder host on which the existing volume resides; "
"takes the form: host@backend-name#pool"))
name = forms.CharField(
max_length=255,
label=_("Volume Name"),
required=False,
help_text=_("Volume name to be assigned"))
description = forms.CharField(max_length=255, widget=forms.Textarea(
attrs={'class': 'modal-body-fixed-width', 'rows': 4}),
label=_("Description"), required=False)
metadata = forms.CharField(max_length=255, widget=forms.Textarea(
attrs={'class': 'modal-body-fixed-width', 'rows': 2}),
label=_("Metadata"), required=False,
help_text=_("Comma-separated key=value pairs"),
validators=[utils_validators.validate_metadata])
volume_type = forms.ChoiceField(
label=_("Volume Type"),
required=False)
availability_zone = forms.ChoiceField(
label=_("Availability Zone"),
required=False)
bootable = forms.BooleanField(
label=_("Bootable"),
required=False,
help_text=_("Specifies that the newly created volume "
"should be marked as bootable"))
def __init__(self, request, *args, **kwargs):
super(ManageVolume, self).__init__(request, *args, **kwargs)
self.fields['id_type'].choices = [("source-name", _("Name"))] + \
[("source-id", _("ID"))]
volume_types = cinder.volume_type_list(request)
self.fields['volume_type'].choices = [("", _("No volume type"))] + \
[(type.name, type.name)
for type in volume_types]
self.fields['availability_zone'].choices = \
project_forms.availability_zones(request)
def handle(self, request, data):
try:
az = data.get('availability_zone')
# assume user enters metadata with "key1=val1,key2=val2"
# convert to dictionary
metadataDict = {}
metadata = data.get('metadata')
if metadata:
metadata = metadata.replace(" ", "")
for item in metadata.split(','):
key, value = item.split('=')
metadataDict[key] = value
cinder.volume_manage(request,
host=data['host'],
identifier=data['identifier'],
id_type=data['id_type'],
name=data['name'],
description=data['description'],
volume_type=data['volume_type'],
availability_zone=az,
metadata=metadataDict,
bootable=data['bootable'])
# for success message, use identifier if user does not
# provide a volume name
volume_name = data['name']
if not volume_name:
volume_name = data['identifier']
messages.success(
request,
_('Successfully sent the request to manage volume: %s')
% volume_name)
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request, _("Unable to manage volume."),
redirect=redirect)
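# --- Illustrative sketch; not part of the original form code. ---
# The same "key1=val1,key2=val2" to dict conversion that ManageVolume.handle()
# performs inline, extracted here only to show the expected metadata format.
def _parse_metadata_example(metadata="size=10,type=ssd"):  # pragma: no cover
    metadata_dict = {}
    for item in metadata.replace(" ", "").split(','):
        key, value = item.split('=')
        metadata_dict[key] = value
    return metadata_dict  # {'size': '10', 'type': 'ssd'}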
class UnmanageVolume(forms.SelfHandlingForm):
name = forms.CharField(label=_("Volume Name"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
host = forms.CharField(label=_("Host"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
volume_id = forms.CharField(label=_("ID"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
def handle(self, request, data):
try:
cinder.volume_unmanage(request, self.initial['volume_id'])
messages.success(
request,
_('Successfully sent the request to unmanage volume: %s')
% data['name'])
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request, _("Unable to unmanage volume."),
redirect=redirect)
class CreateVolumeType(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Name"))
def clean_name(self):
cleaned_name = self.cleaned_data['name']
if len(cleaned_name.strip()) == 0:
raise ValidationError(_('Volume type name can not be empty.'))
return cleaned_name
def handle(self, request, data):
try:
# Remove any new lines in the public key
volume_type = cinder.volume_type_create(request,
data['name'])
messages.success(request, _('Successfully created volume type: %s')
% data['name'])
return volume_type
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request,
_('Unable to create volume type.'),
redirect=redirect)
class UpdateStatus(forms.SelfHandlingForm):
status = forms.ChoiceField(label=_("Status"))
def __init__(self, request, *args, **kwargs):
super(UpdateStatus, self).__init__(request, *args, **kwargs)
# This set of states was culled from cinder's admin_actions.py
self.fields['status'].choices = (
('attaching', _('Attaching')),
('available', _('Available')),
('creating', _('Creating')),
('deleting', _('Deleting')),
('detaching', _('Detaching')),
('error', _('Error')),
('error_deleting', _('Error Deleting')),
('in-use', _('In Use')),
)
def handle(self, request, data):
# Obtain the localized status for including in the message
for choice in self.fields['status'].choices:
if choice[0] == data['status']:
new_status = choice[1]
break
else:
new_status = data['status']
try:
cinder.volume_reset_state(request,
self.initial['volume_id'],
data['status'])
messages.success(request,
_('Successfully updated volume status to "%s".') %
new_status)
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request,
_('Unable to update volume status to "%s".') %
new_status, redirect=redirect)
class CreateQosSpec(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Name"))
consumer = forms.ChoiceField(label=_("Consumer"),
choices=cinder.CONSUMER_CHOICES)
def handle(self, request, data):
try:
qos_spec = cinder.qos_spec_create(request,
data['name'],
{'consumer': data['consumer']})
messages.success(request,
_('Successfully created QoS Spec: %s')
% data['name'])
return qos_spec
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request,
_('Unable to create QoS Spec.'),
redirect=redirect)
| apache-2.0 |
40323250/bg9_cdw11 | static/plugin/liquid_tags/soundcloud.py | 273 | 1523 | """
Soundcloud Tag
--------------
This implements a Liquid-style soundcloud tag for Pelican.
It asks the official Soundcloud-API for the widget html code.
Syntax
------
`{% soundcloud track_url %}`
Example
-------
`{% soundcloud https://soundcloud.com/luftmentsh/hakotel %}`
Output
------
`<iframe width="100%" height="400" scrolling="no" frameborder="no" src="https://w.soundcloud.com/player/?visual=true&url=http%3A%2F%2Fapi.soundcloud.com%2Ftracks%2F33875102&show_artwork=true"></iframe>`
"""
from .mdx_liquid_tags import LiquidTags
import re
import json
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
SYNTAX = '{% soundcloud track_url %}'
PARSE_SYNTAX = re.compile(r'(?P<track_url>https?://soundcloud.com/[\S]+)')
def get_widget(track_url):
r = urlopen(
'http://soundcloud.com/oembed',
data='format=json&url={}'.format(track_url).encode('utf-8'))
return json.loads(r.read().decode('utf-8'))['html']
def match_it(markup):
match = PARSE_SYNTAX.search(markup)
if match:
return match.groupdict()
else:
raise ValueError('Error processing input. '
'Expected syntax: {}'.format(SYNTAX))
@LiquidTags.register('soundcloud')
def soundcloud(preprocessor, tag, markup):
track_url = match_it(markup)['track_url']
return get_widget(track_url)
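# --- Illustrative sketch; not part of the original plugin. ---
# match_it() pulls the track URL out of the tag markup via PARSE_SYNTAX; the
# URL below is hypothetical and is never fetched here.
def _match_it_example():  # pragma: no cover
    parsed = match_it('https://soundcloud.com/artist/track')
    assert parsed['track_url'] == 'https://soundcloud.com/artist/track'
    return parsed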
# ---------------------------------------------------
# This import allows image tag to be a Pelican plugin
from liquid_tags import register
| agpl-3.0 |
grepme/CMPUT410Lab01 | virt_env/virt1/lib/python2.7/site-packages/Paste-1.7.5.1-py2.7.egg/paste/util/template.py | 28 | 24295 | """
A small templating language
This implements a small templating language for use internally in
Paste and Paste Script. This language implements if/elif/else,
for/continue/break, expressions, and blocks of Python code. The
syntax is::
{{any expression (function calls etc)}}
{{any expression | filter}}
{{for x in y}}...{{endfor}}
{{if x}}x{{elif y}}y{{else}}z{{endif}}
{{py:x=1}}
{{py:
def foo(bar):
return 'baz'
}}
{{default var = default_value}}
{{# comment}}
You use this with the ``Template`` class or the ``sub`` shortcut.
The ``Template`` class takes the template string and the name of
the template (for errors) and a default namespace. Then (like
``string.Template``) you can call the ``tmpl.substitute(**kw)``
method to make a substitution (or ``tmpl.substitute(a_dict)``).
``sub(content, **kw)`` substitutes the template immediately. You
can use ``__name='tmpl.html'`` to set the name of the template.
If there are syntax errors ``TemplateError`` will be raised.
"""
import re
import sys
import cgi
import urllib
from paste.util.looper import looper
__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
'sub_html', 'html', 'bunch']
token_re = re.compile(r'\{\{|\}\}')
in_re = re.compile(r'\s+in\s+')
var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
class TemplateError(Exception):
"""Exception raised while parsing a template
"""
def __init__(self, message, position, name=None):
self.message = message
self.position = position
self.name = name
def __str__(self):
msg = '%s at line %s column %s' % (
self.message, self.position[0], self.position[1])
if self.name:
msg += ' in %s' % self.name
return msg
class _TemplateContinue(Exception):
pass
class _TemplateBreak(Exception):
pass
class Template(object):
default_namespace = {
'start_braces': '{{',
'end_braces': '}}',
'looper': looper,
}
default_encoding = 'utf8'
def __init__(self, content, name=None, namespace=None):
self.content = content
self._unicode = isinstance(content, unicode)
self.name = name
self._parsed = parse(content, name=name)
if namespace is None:
namespace = {}
self.namespace = namespace
def from_filename(cls, filename, namespace=None, encoding=None):
f = open(filename, 'rb')
c = f.read()
f.close()
if encoding:
c = c.decode(encoding)
return cls(content=c, name=filename, namespace=namespace)
from_filename = classmethod(from_filename)
def __repr__(self):
return '<%s %s name=%r>' % (
self.__class__.__name__,
hex(id(self))[2:], self.name)
def substitute(self, *args, **kw):
if args:
if kw:
raise TypeError(
"You can only give positional *or* keyword arguments")
if len(args) > 1:
raise TypeError(
"You can only give on positional argument")
kw = args[0]
ns = self.default_namespace.copy()
ns.update(self.namespace)
ns.update(kw)
result = self._interpret(ns)
return result
def _interpret(self, ns):
__traceback_hide__ = True
parts = []
self._interpret_codes(self._parsed, ns, out=parts)
return ''.join(parts)
def _interpret_codes(self, codes, ns, out):
__traceback_hide__ = True
for item in codes:
if isinstance(item, basestring):
out.append(item)
else:
self._interpret_code(item, ns, out)
def _interpret_code(self, code, ns, out):
__traceback_hide__ = True
name, pos = code[0], code[1]
if name == 'py':
self._exec(code[2], ns, pos)
elif name == 'continue':
raise _TemplateContinue()
elif name == 'break':
raise _TemplateBreak()
elif name == 'for':
vars, expr, content = code[2], code[3], code[4]
expr = self._eval(expr, ns, pos)
self._interpret_for(vars, expr, content, ns, out)
elif name == 'cond':
parts = code[2:]
self._interpret_if(parts, ns, out)
elif name == 'expr':
parts = code[2].split('|')
base = self._eval(parts[0], ns, pos)
for part in parts[1:]:
func = self._eval(part, ns, pos)
base = func(base)
out.append(self._repr(base, pos))
elif name == 'default':
var, expr = code[2], code[3]
if var not in ns:
result = self._eval(expr, ns, pos)
ns[var] = result
elif name == 'comment':
return
else:
assert 0, "Unknown code: %r" % name
def _interpret_for(self, vars, expr, content, ns, out):
__traceback_hide__ = True
for item in expr:
if len(vars) == 1:
ns[vars[0]] = item
else:
if len(vars) != len(item):
raise ValueError(
'Need %i items to unpack (got %i items)'
% (len(vars), len(item)))
for name, value in zip(vars, item):
ns[name] = value
try:
self._interpret_codes(content, ns, out)
except _TemplateContinue:
continue
except _TemplateBreak:
break
def _interpret_if(self, parts, ns, out):
__traceback_hide__ = True
# @@: if/else/else gets through
for part in parts:
assert not isinstance(part, basestring)
name, pos = part[0], part[1]
if name == 'else':
result = True
else:
result = self._eval(part[2], ns, pos)
if result:
self._interpret_codes(part[3], ns, out)
break
def _eval(self, code, ns, pos):
__traceback_hide__ = True
try:
value = eval(code, ns)
return value
except:
exc_info = sys.exc_info()
e = exc_info[1]
if getattr(e, 'args'):
arg0 = e.args[0]
else:
arg0 = str(e)
e.args = (self._add_line_info(arg0, pos),)
raise exc_info[0], e, exc_info[2]
def _exec(self, code, ns, pos):
__traceback_hide__ = True
try:
exec code in ns
except:
exc_info = sys.exc_info()
e = exc_info[1]
e.args = (self._add_line_info(e.args[0], pos),)
raise exc_info[0], e, exc_info[2]
def _repr(self, value, pos):
__traceback_hide__ = True
try:
if value is None:
return ''
if self._unicode:
try:
value = unicode(value)
except UnicodeDecodeError:
value = str(value)
else:
value = str(value)
except:
exc_info = sys.exc_info()
e = exc_info[1]
e.args = (self._add_line_info(e.args[0], pos),)
raise exc_info[0], e, exc_info[2]
else:
if self._unicode and isinstance(value, str):
if not self.decode_encoding:
raise UnicodeDecodeError(
'Cannot decode str value %r into unicode '
'(no default_encoding provided)' % value)
value = value.decode(self.default_encoding)
elif not self._unicode and isinstance(value, unicode):
if not self.decode_encoding:
raise UnicodeEncodeError(
'Cannot encode unicode value %r into str '
'(no default_encoding provided)' % value)
value = value.encode(self.default_encoding)
return value
def _add_line_info(self, msg, pos):
msg = "%s at line %s column %s" % (
msg, pos[0], pos[1])
if self.name:
msg += " in file %s" % self.name
return msg
def sub(content, **kw):
name = kw.get('__name')
tmpl = Template(content, name=name)
return tmpl.substitute(kw)
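# --- Illustrative usage sketch; not part of the original Paste source. ---
# It shows the calling pattern described in the module docstring. The template
# strings, variable names and the helper name below are hypothetical.
def _template_usage_example():  # pragma: no cover
    tmpl = Template('Hello {{name | upper}}!', name='greeting.tmpl')
    # filters after "|" are plain callables looked up in the namespace
    assert tmpl.substitute(name='world', upper=str.upper) == 'Hello WORLD!'
    # sub() builds the Template and substitutes in a single call
    assert sub('{{if x}}yes{{else}}no{{endif}}', x=0) == 'no'
    return tmpl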
def paste_script_template_renderer(content, vars, filename=None):
tmpl = Template(content, name=filename)
return tmpl.substitute(vars)
class bunch(dict):
def __init__(self, **kw):
for name, value in kw.items():
setattr(self, name, value)
def __setattr__(self, name, value):
self[name] = value
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, key):
if 'default' in self:
try:
return dict.__getitem__(self, key)
except KeyError:
return dict.__getitem__(self, 'default')
else:
return dict.__getitem__(self, key)
def __repr__(self):
items = [
(k, v) for k, v in self.items()]
items.sort()
return '<%s %s>' % (
self.__class__.__name__,
' '.join(['%s=%r' % (k, v) for k, v in items]))
############################################################
## HTML Templating
############################################################
class html(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__, self.value)
def html_quote(value):
if value is None:
return ''
if not isinstance(value, basestring):
if hasattr(value, '__unicode__'):
value = unicode(value)
else:
value = str(value)
value = cgi.escape(value, 1)
if isinstance(value, unicode):
value = value.encode('ascii', 'xmlcharrefreplace')
return value
def url(v):
if not isinstance(v, basestring):
if hasattr(v, '__unicode__'):
v = unicode(v)
else:
v = str(v)
if isinstance(v, unicode):
v = v.encode('utf8')
return urllib.quote(v)
def attr(**kw):
kw = kw.items()
kw.sort()
parts = []
for name, value in kw:
if value is None:
continue
if name.endswith('_'):
name = name[:-1]
parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
return html(' '.join(parts))
class HTMLTemplate(Template):
default_namespace = Template.default_namespace.copy()
default_namespace.update(dict(
html=html,
attr=attr,
url=url,
))
def _repr(self, value, pos):
plain = Template._repr(self, value, pos)
if isinstance(value, html):
return plain
else:
return html_quote(plain)
def sub_html(content, **kw):
name = kw.get('__name')
tmpl = HTMLTemplate(content, name=name)
return tmpl.substitute(kw)
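# --- Illustrative usage sketch; not part of the original Paste source. ---
# HTMLTemplate escapes substituted values unless they are wrapped in html().
# The markup and values below are hypothetical.
def _html_template_usage_example():  # pragma: no cover
    escaped = sub_html('<p>{{content}}</p>', content='<b>&</b>')
    assert escaped == '<p>&lt;b&gt;&amp;&lt;/b&gt;</p>'
    raw = sub_html('<p>{{content}}</p>', content=html('<b>ok</b>'))
    assert raw == '<p><b>ok</b></p>'
    return escaped, raw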
############################################################
## Lexing and Parsing
############################################################
def lex(s, name=None, trim_whitespace=True):
"""
Lex a string into chunks:
>>> lex('hey')
['hey']
>>> lex('hey {{you}}')
['hey ', ('you', (1, 7))]
>>> lex('hey {{')
Traceback (most recent call last):
...
TemplateError: No }} to finish last expression at line 1 column 7
>>> lex('hey }}')
Traceback (most recent call last):
...
TemplateError: }} outside expression at line 1 column 7
>>> lex('hey {{ {{')
Traceback (most recent call last):
...
TemplateError: {{ inside expression at line 1 column 10
"""
in_expr = False
chunks = []
last = 0
last_pos = (1, 1)
for match in token_re.finditer(s):
expr = match.group(0)
pos = find_position(s, match.end())
if expr == '{{' and in_expr:
raise TemplateError('{{ inside expression', position=pos,
name=name)
elif expr == '}}' and not in_expr:
raise TemplateError('}} outside expression', position=pos,
name=name)
if expr == '{{':
part = s[last:match.start()]
if part:
chunks.append(part)
in_expr = True
else:
chunks.append((s[last:match.start()], last_pos))
in_expr = False
last = match.end()
last_pos = pos
if in_expr:
raise TemplateError('No }} to finish last expression',
name=name, position=last_pos)
part = s[last:]
if part:
chunks.append(part)
if trim_whitespace:
chunks = trim_lex(chunks)
return chunks
statement_re = re.compile(r'^(?:if |elif |else |for |py:)')
single_statements = ['endif', 'endfor', 'continue', 'break']
trail_whitespace_re = re.compile(r'\n[\t ]*$')
lead_whitespace_re = re.compile(r'^[\t ]*\n')
def trim_lex(tokens):
r"""
Takes a lexed set of tokens, and removes whitespace when there is
a directive on a line by itself:
>>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
>>> tokens
[('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
>>> trim_lex(tokens)
[('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
"""
for i in range(len(tokens)):
current = tokens[i]
if isinstance(tokens[i], basestring):
# we don't trim this
continue
item = current[0]
if not statement_re.search(item) and item not in single_statements:
continue
if not i:
prev = ''
else:
prev = tokens[i-1]
if i+1 >= len(tokens):
next = ''
else:
next = tokens[i+1]
if (not isinstance(next, basestring)
or not isinstance(prev, basestring)):
continue
if ((not prev or trail_whitespace_re.search(prev))
and (not next or lead_whitespace_re.search(next))):
if prev:
m = trail_whitespace_re.search(prev)
# +1 to leave the leading \n on:
prev = prev[:m.start()+1]
tokens[i-1] = prev
if next:
m = lead_whitespace_re.search(next)
next = next[m.end():]
tokens[i+1] = next
return tokens
def find_position(string, index):
"""Given a string and index, return (line, column)"""
leading = string[:index].splitlines()
return (len(leading), len(leading[-1])+1)
def parse(s, name=None):
r"""
Parses a string into a kind of AST
>>> parse('{{x}}')
[('expr', (1, 3), 'x')]
>>> parse('foo')
['foo']
>>> parse('{{if x}}test{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
>>> parse('series->{{for x in y}}x={{x}}{{endfor}}')
['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
>>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
[('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
>>> parse('{{py:x=1}}')
[('py', (1, 3), 'x=1')]
>>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
Some exceptions::
>>> parse('{{continue}}')
Traceback (most recent call last):
...
TemplateError: continue outside of for loop at line 1 column 3
>>> parse('{{if x}}foo')
Traceback (most recent call last):
...
TemplateError: No {{endif}} at line 1 column 3
>>> parse('{{else}}')
Traceback (most recent call last):
...
TemplateError: else outside of an if block at line 1 column 3
>>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Unexpected endif at line 1 column 25
>>> parse('{{if}}{{endif}}')
Traceback (most recent call last):
...
TemplateError: if with no expression at line 1 column 3
>>> parse('{{for x y}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
>>> parse('{{py:x=1\ny=2}}')
Traceback (most recent call last):
...
TemplateError: Multi-line py blocks must start with a newline at line 1 column 3
"""
tokens = lex(s, name=name)
result = []
while tokens:
next, tokens = parse_expr(tokens, name)
result.append(next)
return result
def parse_expr(tokens, name, context=()):
if isinstance(tokens[0], basestring):
return tokens[0], tokens[1:]
expr, pos = tokens[0]
expr = expr.strip()
if expr.startswith('py:'):
expr = expr[3:].lstrip(' \t')
if expr.startswith('\n'):
expr = expr[1:]
else:
if '\n' in expr:
raise TemplateError(
'Multi-line py blocks must start with a newline',
position=pos, name=name)
return ('py', pos, expr), tokens[1:]
elif expr in ('continue', 'break'):
if 'for' not in context:
raise TemplateError(
'continue outside of for loop',
position=pos, name=name)
return (expr, pos), tokens[1:]
elif expr.startswith('if '):
return parse_cond(tokens, name, context)
elif (expr.startswith('elif ')
or expr == 'else'):
raise TemplateError(
'%s outside of an if block' % expr.split()[0],
position=pos, name=name)
elif expr in ('if', 'elif', 'for'):
raise TemplateError(
'%s with no expression' % expr,
position=pos, name=name)
elif expr in ('endif', 'endfor'):
raise TemplateError(
'Unexpected %s' % expr,
position=pos, name=name)
elif expr.startswith('for '):
return parse_for(tokens, name, context)
elif expr.startswith('default '):
return parse_default(tokens, name, context)
elif expr.startswith('#'):
return ('comment', pos, tokens[0][0]), tokens[1:]
return ('expr', pos, tokens[0][0]), tokens[1:]
def parse_cond(tokens, name, context):
start = tokens[0][1]
pieces = []
context = context + ('if',)
while 1:
if not tokens:
raise TemplateError(
'Missing {{endif}}',
position=start, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'endif'):
return ('cond', start) + tuple(pieces), tokens[1:]
next, tokens = parse_one_cond(tokens, name, context)
pieces.append(next)
def parse_one_cond(tokens, name, context):
(first, pos), tokens = tokens[0], tokens[1:]
content = []
if first.endswith(':'):
first = first[:-1]
if first.startswith('if '):
part = ('if', pos, first[3:].lstrip(), content)
elif first.startswith('elif '):
part = ('elif', pos, first[5:].lstrip(), content)
elif first == 'else':
part = ('else', pos, None, content)
else:
assert 0, "Unexpected token %r at %s" % (first, pos)
while 1:
if not tokens:
raise TemplateError(
'No {{endif}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple)
and (tokens[0][0] == 'endif'
or tokens[0][0].startswith('elif ')
or tokens[0][0] == 'else')):
return part, tokens
next, tokens = parse_expr(tokens, name, context)
content.append(next)
def parse_for(tokens, name, context):
first, pos = tokens[0]
tokens = tokens[1:]
context = ('for',) + context
content = []
assert first.startswith('for ')
if first.endswith(':'):
first = first[:-1]
first = first[3:].strip()
match = in_re.search(first)
if not match:
raise TemplateError(
'Bad for (no "in") in %r' % first,
position=pos, name=name)
vars = first[:match.start()]
if '(' in vars:
raise TemplateError(
'You cannot have () in the variable section of a for loop (%r)'
% vars, position=pos, name=name)
vars = tuple([
v.strip() for v in first[:match.start()].split(',')
if v.strip()])
expr = first[match.end():]
while 1:
if not tokens:
raise TemplateError(
'No {{endfor}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'endfor'):
return ('for', pos, vars, expr, content), tokens[1:]
next, tokens = parse_expr(tokens, name, context)
content.append(next)
def parse_default(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('default ')
first = first.split(None, 1)[1]
parts = first.split('=', 1)
if len(parts) == 1:
raise TemplateError(
"Expression must be {{default var=value}}; no = found in %r" % first,
position=pos, name=name)
var = parts[0].strip()
if ',' in var:
raise TemplateError(
"{{default x, y = ...}} is not supported",
position=pos, name=name)
if not var_re.search(var):
raise TemplateError(
"Not a valid variable name for {{default}}: %r"
% var, position=pos, name=name)
expr = parts[1].strip()
return ('default', pos, var, expr), tokens[1:]
_fill_command_usage = """\
%prog [OPTIONS] TEMPLATE arg=value
Use py:arg=value to set a Python value; otherwise all values are
strings.
"""
def fill_command(args=None):
import sys, optparse, pkg_resources, os
if args is None:
args = sys.argv[1:]
dist = pkg_resources.get_distribution('Paste')
parser = optparse.OptionParser(
version=str(dist),
usage=_fill_command_usage)
parser.add_option(
'-o', '--output',
dest='output',
metavar="FILENAME",
help="File to write output to (default stdout)")
parser.add_option(
'--html',
dest='use_html',
action='store_true',
help="Use HTML style filling (including automatic HTML quoting)")
parser.add_option(
'--env',
dest='use_env',
action='store_true',
help="Put the environment in as top-level variables")
options, args = parser.parse_args(args)
if len(args) < 1:
print 'You must give a template filename'
print dir(parser)
assert 0
template_name = args[0]
args = args[1:]
vars = {}
if options.use_env:
vars.update(os.environ)
for value in args:
if '=' not in value:
print 'Bad argument: %r' % value
sys.exit(2)
name, value = value.split('=', 1)
if name.startswith('py:'):
name = name[3:]
value = eval(value)
vars[name] = value
if template_name == '-':
template_content = sys.stdin.read()
template_name = '<stdin>'
else:
f = open(template_name, 'rb')
template_content = f.read()
f.close()
if options.use_html:
TemplateClass = HTMLTemplate
else:
TemplateClass = Template
template = TemplateClass(template_content, name=template_name)
result = template.substitute(vars)
if options.output:
f = open(options.output, 'wb')
f.write(result)
f.close()
else:
sys.stdout.write(result)
if __name__ == '__main__':
from paste.util.template import fill_command
fill_command()
| apache-2.0 |
maxwell-lv/MyQuant | vip.py | 1 | 2247 | from xlrd import open_workbook
from xlwt import Workbook
import pandas as pd
import sys
loan_file = 'd:\\projects\\vip\\vip_loan.xls'
profit_file = 'd:\\projects\\vip\\vip_profit.xls'
repay_file = 'd:\\projects\\vip\\vip_repay.xls'
loan_book = open_workbook(loan_file)
profit_book = open_workbook(profit_file)
repay_book = open_workbook(repay_file)
ploan = pd.read_excel(loan_file, '项目信息')
pprofit = pd.read_excel(profit_file, '每日收益信息')
prepay = pd.read_excel(repay_file, '项目结算信息')
loan_sheet = loan_book.sheet_by_index(0)
profit_sheet = profit_book.sheet_by_index(0)
repay_sheet = repay_book.sheet_by_index(0)
def get_project_number(project_name):
global loan_sheet
for row in range(loan_sheet.nrows):
if loan_sheet.cell(row, 3).value == project_name:
return str(int(loan_sheet.cell(row, 2).value))
print('can\'t find %s' % project_name)
return ''
def profit():
wb = Workbook()
sprofit = wb.add_sheet('每日收益信息')
for row in range(1, profit_sheet.nrows):
project_name = profit_sheet.cell(row, 1).value
project_number = get_project_number(project_name)
sprofit.write(row, 0, project_number)
for i in range(1, 8):
sprofit.write(row, i, profit_sheet.cell(row, i).value)
wb.save('test.xls')
def repay():
global repay_sheet
wb = Workbook()
srepay = wb.add_sheet('项目结算信息')
for row in range(1, repay_sheet.nrows):
project_name = repay_sheet.cell(row, 1).value
project_number = get_project_number(project_name)
srepay.write(row, 0, project_number)
for i in range(1, 7):
srepay.write(row, i, repay_sheet.cell(row, i).value)
wb.save('repay.xls')
def format_date():
global repay_sheet
wb = Workbook()
srepay = wb.add_sheet('项目结算信息')
from math import floor
for row in range(1, repay_sheet.nrows):
raw_date = int(repay_sheet.cell(row, 2).value)
month = floor(raw_date / 100)
day = raw_date % 100
year = 2016 if raw_date > 323 else 2017
date_str = '%d-%02d-%02d' % (year, month, day)
srepay.write(row, 0, date_str)
wb.save('repay.xls')
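# --- Illustrative sketch; not part of the original script. ---
# format_date() above decodes dates stored as (M)MDD integers; the sample
# values below are hypothetical.
def _decode_raw_date_example():  # pragma: no cover
    from math import floor
    for raw_date in (323, 1231, 101):
        month, day = floor(raw_date / 100), raw_date % 100
        year = 2016 if raw_date > 323 else 2017
        print('%d-%02d-%02d' % (year, month, day))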
format_date() | gpl-3.0 |
tedelhourani/ansible | lib/ansible/modules/packaging/os/zypper_repository.py | 29 | 13978 | #!/usr/bin/python
# encoding: utf-8
# (c) 2013, Matthias Vogelgesang <[email protected]>
# (c) 2014, Justin Lecher <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zypper_repository
author: "Matthias Vogelgesang (@matze)"
version_added: "1.4"
short_description: Add and remove Zypper repositories
description:
- Add or remove Zypper repositories on SUSE and openSUSE
options:
name:
required: false
default: none
description:
- A name for the repository. Not required when adding repofiles.
repo:
required: false
default: none
description:
- URI of the repository or .repo file. Required when state=present.
state:
required: false
choices: [ "absent", "present" ]
default: "present"
description:
- A source string state.
description:
required: false
default: none
description:
- A description of the repository
disable_gpg_check:
description:
- Whether to disable GPG signature checking of
all packages. Has an effect only if state is
I(present).
- Needs zypper version >= 1.6.2.
required: false
default: "no"
choices: [ "yes", "no" ]
autorefresh:
description:
- Enable autorefresh of the repository.
required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: [ "refresh" ]
priority:
description:
- Set priority of repository. Packages will always be installed
from the repository with the smallest priority number.
- Needs zypper version >= 1.12.25.
required: false
version_added: "2.1"
overwrite_multiple:
description:
- Overwrite multiple repository entries, if repositories with both name and
URL already exist.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.1"
auto_import_keys:
description:
- Automatically import the gpg signing key of the new or changed repository.
- Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent).
- Implies runrefresh.
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.2"
runrefresh:
description:
- Refresh the package list of the given repository.
- Can be used with repo=* to refresh all repositories.
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.2"
enabled:
description:
- Set repository to enabled (or disabled).
required: false
default: "yes"
choices: ["yes", "no"]
version_added: "2.2"
requirements:
- "zypper >= 1.0 # included in openSuSE >= 11.1 or SuSE Linux Enterprise Server/Desktop >= 11.0"
- python-xml
'''
EXAMPLES = '''
# Add NVIDIA repository for graphics drivers
- zypper_repository:
name: nvidia-repo
repo: 'ftp://download.nvidia.com/opensuse/12.2'
state: present
# Remove NVIDIA repository
- zypper_repository:
name: nvidia-repo
repo: 'ftp://download.nvidia.com/opensuse/12.2'
state: absent
# Add python development repository
- zypper_repository:
repo: 'http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo'
# Refresh all repos
- zypper_repository:
repo: '*'
runrefresh: yes
# Add a repo and import its GPG key
- zypper_repository:
repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/'
auto_import_keys: yes
# Force refresh of a repository
- zypper_repository:
repo: 'http://my_internal_ci_repo/repo'
name: my_ci_repo
state: present
runrefresh: yes
'''
REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
from distutils.version import LooseVersion
def _get_cmd(*args):
"""Combines the non-interactive zypper command with arguments/subcommands"""
cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive']
cmd.extend(args)
return cmd
def _parse_repos(module):
"""parses the output of zypper --xmlout repos and return a parse repo dictionary"""
cmd = _get_cmd('--xmlout', 'repos')
from xml.dom.minidom import parseString as parseXML
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
repos = []
dom = parseXML(stdout)
repo_list = dom.getElementsByTagName('repo')
for repo in repo_list:
opts = {}
for o in REPO_OPTS:
opts[o] = repo.getAttribute(o)
opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
# A repo can be uniquely identified by an alias + url
repos.append(opts)
return repos
# exit code 6 is ZYPPER_EXIT_NO_REPOS (no repositories defined)
elif rc == 6:
return []
else:
module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
def _repo_changes(realrepo, repocmp):
"Check whether the 2 given repos have different settings."
for k in repocmp:
if repocmp[k] and k not in realrepo:
return True
for k, v in realrepo.items():
if k in repocmp and repocmp[k]:
valold = str(repocmp[k] or "")
valnew = v or ""
if k == "url":
valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
if valold != valnew:
return True
return False
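# --- Illustrative sketch; not part of the original Ansible module. ---
# _repo_changes() ignores a trailing slash on the URL but flags any other
# differing repo option; the repo data below is hypothetical.
def _repo_changes_example():  # pragma: no cover
    real = {'url': 'http://example.com/repo/', 'enabled': '1'}
    assert not _repo_changes(real, {'url': 'http://example.com/repo', 'enabled': '1'})
    assert _repo_changes(real, {'url': 'http://example.com/repo', 'enabled': '0'})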
def repo_exists(module, repodata, overwrite_multiple):
"""Check whether the repository already exists.
returns (exists, mod, old_repos)
exists: whether a matching (name, URL) repo exists
mod: whether there are changes compared to the existing repo
old_repos: list of matching repos
"""
existing_repos = _parse_repos(module)
# look for repos that have matching alias or url to the one searched
repos = []
for kw in ['alias', 'url']:
name = repodata[kw]
for oldr in existing_repos:
if repodata[kw] == oldr[kw] and oldr not in repos:
repos.append(oldr)
if len(repos) == 0:
# Repo does not exist yet
return (False, False, None)
elif len(repos) == 1:
# Found an existing repo, look for changes
has_changes = _repo_changes(repos[0], repodata)
return (True, has_changes, repos)
elif len(repos) >= 2:
if overwrite_multiple:
# Found two repos and want to overwrite_multiple
return (True, True, repos)
else:
errmsg = 'More than one repo matched "%s": "%s".' % (name, repos)
errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten'
module.fail_json(msg=errmsg)
def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
"Adds the repo, removes old repos before, that would conflict."
repo = repodata['url']
cmd = _get_cmd('addrepo', '--check')
if repodata['name']:
cmd.extend(['--name', repodata['name']])
# priority on addrepo available since 1.12.25
# https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336
if repodata['priority']:
if zypper_version >= LooseVersion('1.12.25'):
cmd.extend(['--priority', str(repodata['priority'])])
else:
warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.")
if repodata['enabled'] == '0':
cmd.append('--disable')
# gpgcheck available since 1.6.2
# https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449
# the default changed in the past, so don't assume a default here and show warning for old zypper versions
if zypper_version >= LooseVersion('1.6.2'):
if repodata['gpgcheck'] == '1':
cmd.append('--gpgcheck')
else:
cmd.append('--no-gpgcheck')
else:
warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.")
if repodata['autorefresh'] == '1':
cmd.append('--refresh')
cmd.append(repo)
if not repo.endswith('.repo'):
cmd.append(repodata['alias'])
if old_repos is not None:
for oldrepo in old_repos:
remove_repo(module, oldrepo['url'])
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
return rc, stdout, stderr
def remove_repo(module, repo):
"Removes the repo."
cmd = _get_cmd('removerepo', repo)
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
return rc, stdout, stderr
def get_zypper_version(module):
rc, stdout, stderr = module.run_command(['/usr/bin/zypper', '--version'])
if rc != 0 or not stdout.startswith('zypper '):
return LooseVersion('1.0')
return LooseVersion(stdout.split()[1])
def runrefreshrepo(module, auto_import_keys=False, shortname=None):
"Forces zypper to refresh repo metadata."
if auto_import_keys:
cmd = _get_cmd('--gpg-auto-import-keys', 'refresh', '--force')
else:
cmd = _get_cmd('refresh', '--force')
if shortname is not None:
cmd.extend(['-r', shortname])
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
return rc, stdout, stderr
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=False),
repo=dict(required=False),
state=dict(choices=['present', 'absent'], default='present'),
runrefresh=dict(required=False, default='no', type='bool'),
description=dict(required=False),
disable_gpg_check = dict(required=False, default=False, type='bool'),
autorefresh = dict(required=False, default=True, type='bool', aliases=['refresh']),
priority = dict(required=False, type='int'),
enabled = dict(required=False, default=True, type='bool'),
overwrite_multiple = dict(required=False, default=False, type='bool'),
auto_import_keys = dict(required=False, default=False, type='bool'),
),
supports_check_mode=False,
required_one_of = [['state','runrefresh']],
)
repo = module.params['repo']
alias = module.params['name']
state = module.params['state']
overwrite_multiple = module.params['overwrite_multiple']
auto_import_keys = module.params['auto_import_keys']
runrefresh = module.params['runrefresh']
zypper_version = get_zypper_version(module)
warnings = [] # collect warning messages for final output
repodata = {
'url': repo,
'alias': alias,
'name': module.params['description'],
'priority': module.params['priority'],
}
# rewrite bools in the language that zypper lr -x provides for easier comparison
if module.params['enabled']:
repodata['enabled'] = '1'
else:
repodata['enabled'] = '0'
if module.params['disable_gpg_check']:
repodata['gpgcheck'] = '0'
else:
repodata['gpgcheck'] = '1'
if module.params['autorefresh']:
repodata['autorefresh'] = '1'
else:
repodata['autorefresh'] = '0'
def exit_unchanged():
module.exit_json(changed=False, repodata=repodata, state=state)
# Check run-time module parameters
if repo == '*' or alias == '*':
if runrefresh:
runrefreshrepo(module, auto_import_keys)
module.exit_json(changed=False, runrefresh=True)
else:
module.fail_json(msg='repo=* can only be used with the runrefresh option.')
if state == 'present' and not repo:
module.fail_json(msg='Module option state=present requires repo')
if state == 'absent' and not repo and not alias:
module.fail_json(msg='Alias or repo parameter required when state=absent')
if repo and repo.endswith('.repo'):
if alias:
module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files')
else:
if not alias and state == "present":
module.fail_json(msg='Name required when adding non-repo files.')
exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)
if repo:
shortname = repo
else:
shortname = alias
if state == 'present':
if exists and not mod:
if runrefresh:
runrefreshrepo(module, auto_import_keys, shortname)
exit_unchanged()
rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings)
if rc == 0 and (runrefresh or auto_import_keys):
runrefreshrepo(module, auto_import_keys, shortname)
elif state == 'absent':
if not exists:
exit_unchanged()
rc, stdout, stderr = remove_repo(module, shortname)
if rc == 0:
module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings)
else:
module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
abhiQmar/servo | tests/wpt/web-platform-tests/tools/manifest/XMLParser.py | 97 | 4413 | from os.path import dirname, join
from collections import OrderedDict
from xml.parsers import expat
import xml.etree.ElementTree as etree
_catalog = join(dirname(__file__), "catalog")
def _wrap_error(e):
err = etree.ParseError(e)
err.code = e.code
err.position = e.lineno, e.offset
raise err
_names = {}
def _fixname(key):
try:
name = _names[key]
except KeyError:
name = key
if "}" in name:
name = "{" + name
_names[key] = name
return name
class XMLParser(object):
"""
An XML parser with support for XHTML DTDs and all Python-supported encodings
This implements the API defined by
xml.etree.ElementTree.XMLParser, but supports XHTML DTDs
(therefore allowing XHTML entities) and supports all encodings
Python does, rather than just those supported by expat.
"""
def __init__(self, encoding=None):
self._parser = expat.ParserCreate(encoding, "}")
self._target = etree.TreeBuilder()
# parser settings
self._parser.buffer_text = 1
self._parser.ordered_attributes = 1
self._parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
# parser callbacks
self._parser.XmlDeclHandler = self._xml_decl
self._parser.StartElementHandler = self._start
self._parser.EndElementHandler = self._end
self._parser.CharacterDataHandler = self._data
self._parser.ExternalEntityRefHandler = self._external
self._parser.SkippedEntityHandler = self._skipped
# used for our horrible re-encoding hack
self._fed_data = []
self._read_encoding = None
def _xml_decl(self, version, encoding, standalone):
self._read_encoding = encoding
def _start(self, tag, attrib_in):
self._fed_data = None
tag = _fixname(tag)
attrib = OrderedDict()
if attrib_in:
for i in range(0, len(attrib_in), 2):
attrib[_fixname(attrib_in[i])] = attrib_in[i+1]
return self._target.start(tag, attrib)
def _data(self, text):
return self._target.data(text)
def _end(self, tag):
return self._target.end(_fixname(tag))
def _external(self, context, base, systemId, publicId):
if publicId in {
"-//W3C//DTD XHTML 1.0 Transitional//EN",
"-//W3C//DTD XHTML 1.1//EN",
"-//W3C//DTD XHTML 1.0 Strict//EN",
"-//W3C//DTD XHTML 1.0 Frameset//EN",
"-//W3C//DTD XHTML Basic 1.0//EN",
"-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN",
"-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN",
"-//W3C//DTD MathML 2.0//EN",
"-//WAPFORUM//DTD XHTML Mobile 1.0//EN"
}:
parser = self._parser.ExternalEntityParserCreate(context)
with open(join(_catalog, "xhtml.dtd"), "rb") as fp:
try:
parser.ParseFile(fp)
except expat.error:
return False
return True
def _skipped(self, name, is_parameter_entity):
err = expat.error("undefined entity %s: line %d, column %d" %
(name, self._parser.ErrorLineNumber,
self._parser.ErrorColumnNumber))
err.code = expat.errors.XML_ERROR_UNDEFINED_ENTITY
err.lineno = self._parser.ErrorLineNumber
err.offset = self._parser.ErrorColumnNumber
raise err
def feed(self, data):
if self._fed_data is not None:
self._fed_data.append(data)
try:
self._parser.Parse(data, False)
except expat.error as v:
_wrap_error(v)
except ValueError as e:
if e.args[0] == 'multi-byte encodings are not supported':
assert self._read_encoding is not None
xml = b"".join(self._fed_data).decode(self._read_encoding).encode("utf-8")
new_parser = XMLParser("utf-8")
self._parser = new_parser._parser
self._target = new_parser._target
self._fed_data = None
self.feed(xml)
def close(self):
try:
self._parser.Parse("", True)
except expat.error as v:
_wrap_error(v)
tree = self._target.close()
return tree
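# A minimal usage sketch (not part of the upstream module), assuming the bundled
# catalog/xhtml.dtd sits next to this file: the class mirrors the
# xml.etree.ElementTree.XMLParser feed()/close() API, and the XHTML entity below
# is resolved from the local DTD instead of failing inside expat.
if __name__ == "__main__":
    demo_parser = XMLParser(encoding="utf-8")
    demo_parser.feed('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" '
                     '"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
                     '<html xmlns="http://www.w3.org/1999/xhtml">'
                     '<body>&nbsp;</body></html>')
    demo_root = demo_parser.close()
    # tags carry the namespace via the "}" separator plus _fixname
    print(demo_root.tag)  # "{http://www.w3.org/1999/xhtml}html"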
| mpl-2.0 |
vijayendrabvs/hap | neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py | 9 | 7125 | # Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import utils
from neutron.db import agents_db
from neutron.openstack.common import timeutils
from neutron.tests import base
class TestDhcpAgentNotifyAPI(base.BaseTestCase):
def setUp(self):
super(TestDhcpAgentNotifyAPI, self).setUp()
self.notifier = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI(plugin=mock.Mock()))
mock_util_p = mock.patch.object(utils, 'is_extension_supported')
mock_log_p = mock.patch.object(dhcp_rpc_agent_api, 'LOG')
mock_fanout_p = mock.patch.object(self.notifier, '_fanout_message')
mock_cast_p = mock.patch.object(self.notifier, '_cast_message')
self.mock_util = mock_util_p.start()
self.mock_log = mock_log_p.start()
self.mock_fanout = mock_fanout_p.start()
self.mock_cast = mock_cast_p.start()
def _test__schedule_network(self, network,
new_agents=None, existing_agents=None,
expected_casts=0, expected_warnings=0):
self.notifier.plugin.schedule_network.return_value = new_agents
agents = self.notifier._schedule_network(
mock.ANY, network, existing_agents)
if new_agents is None:
new_agents = []
self.assertEqual(new_agents + existing_agents, agents)
self.assertEqual(expected_casts, self.mock_cast.call_count)
self.assertEqual(expected_warnings, self.mock_log.warn.call_count)
def test__schedule_network(self):
agent = agents_db.Agent()
agent.admin_state_up = True
agent.heartbeat_timestamp = timeutils.utcnow()
network = {'id': 'foo_net_id'}
self._test__schedule_network(network,
new_agents=[agent], existing_agents=[],
expected_casts=1, expected_warnings=0)
def test__schedule_network_no_existing_agents(self):
agent = agents_db.Agent()
agent.admin_state_up = True
agent.heartbeat_timestamp = timeutils.utcnow()
network = {'id': 'foo_net_id'}
self._test__schedule_network(network,
new_agents=None, existing_agents=[agent],
expected_casts=0, expected_warnings=0)
def test__schedule_network_no_new_agents(self):
network = {'id': 'foo_net_id'}
self._test__schedule_network(network,
new_agents=None, existing_agents=[],
expected_casts=0, expected_warnings=1)
def _test__get_enabled_agents(self, network,
agents=None, port_count=0,
expected_warnings=0, expected_errors=0):
self.notifier.plugin.get_ports_count.return_value = port_count
enabled_agents = self.notifier._get_enabled_agents(
mock.ANY, network, agents, mock.ANY, mock.ANY)
self.assertEqual(agents, enabled_agents)
self.assertEqual(expected_warnings, self.mock_log.warn.call_count)
self.assertEqual(expected_errors, self.mock_log.error.call_count)
def test__get_enabled_agents(self):
agent = agents_db.Agent()
agent.admin_state_up = True
agent.heartbeat_timestamp = timeutils.utcnow()
network = {'id': 'foo_network_id'}
self._test__get_enabled_agents(network, agents=[agent])
def test__get_enabled_agents_with_inactive_ones(self):
agent1 = agents_db.Agent()
agent1.admin_state_up = True
agent1.heartbeat_timestamp = timeutils.utcnow()
agent2 = agents_db.Agent()
agent2.admin_state_up = True
# This is effectively an inactive agent
agent2.heartbeat_timestamp = datetime.datetime(2000, 1, 1, 0, 0)
network = {'id': 'foo_network_id'}
self._test__get_enabled_agents(network,
agents=[agent1, agent2],
expected_warnings=1, expected_errors=0)
def test__get_enabled_agents_with_notification_required(self):
network = {'id': 'foo_network_id', 'subnets': ['foo_subnet_id']}
self._test__get_enabled_agents(network, [], port_count=20,
expected_warnings=0, expected_errors=1)
def test__notify_agents_fanout_required(self):
self.notifier._notify_agents(mock.ANY,
'network_delete_end',
mock.ANY, 'foo_network_id')
self.assertEqual(1, self.mock_fanout.call_count)
def _test__notify_agents(self, method,
expected_scheduling=0, expected_casts=0):
with mock.patch.object(self.notifier, '_schedule_network') as f:
with mock.patch.object(self.notifier, '_get_enabled_agents') as g:
agent = agents_db.Agent()
agent.admin_state_up = True
agent.heartbeat_timestamp = timeutils.utcnow()
g.return_value = [agent]
self.notifier._notify_agents(mock.Mock(), method,
mock.ANY, 'foo_network_id')
self.assertEqual(expected_scheduling, f.call_count)
self.assertEqual(expected_casts, self.mock_cast.call_count)
def test__notify_agents_cast_required_with_scheduling(self):
self._test__notify_agents('port_create_end',
expected_scheduling=1, expected_casts=1)
def test__notify_agents_cast_required_wo_scheduling_on_port_update(self):
self._test__notify_agents('port_update_end',
expected_scheduling=0, expected_casts=1)
def test__notify_agents_cast_required_wo_scheduling_on_subnet_create(self):
self._test__notify_agents('subnet_create_end',
expected_scheduling=0, expected_casts=1)
def test__notify_agents_no_action(self):
self._test__notify_agents('network_create_end',
expected_scheduling=0, expected_casts=0)
def test__fanout_message(self):
self.notifier._fanout_message(mock.ANY, mock.ANY, mock.ANY)
self.assertEqual(1, self.mock_fanout.call_count)
def test__cast_message(self):
self.notifier._cast_message(mock.ANY, mock.ANY, mock.ANY)
self.assertEqual(1, self.mock_cast.call_count)
| apache-2.0 |
wangcaihua/splearn | SPlearn/splearn/test/test_NGSLM.py | 1 | 1035 | from splearn import loadata, accmse, NGSLM
NonGroupedLoss = ['squarederror', #regression
'huberloss', #regression
'logit', #classification
'modifiedhuberloss', #classification
'huberizedhinge', #classification
'squaredhinge' #classification
]
NonGroupedRegu = ['lasso',
'elasticnet',
'fusedlasso']
########################## Parameters #################################
loss = 'huberloss'
delta = 10
regu = 'lasso'
reargs = {'lam':30}
########################################################################
if loss in ['squarederror', 'huberloss']:
fname = 'dataset/regudata'
calmod = 'mse'
else:
fname = 'dataset/clasdata'
calmod = 'acc'
########################################################################
# load data set
data, target = loadata(fname)
# learning and predict ...
LS = NGSLM(loss=loss, delta=delta, regu=regu, reargs=reargs)
LS.fit(data, target)
pred = LS.predict(data)
res = accmse(target, pred, calmod=calmod)
print LS.coeff_
print res
| gpl-2.0 |
mujiansu/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32/lib/sspicon.py | 21 | 16178 | # Generated by h2py from c:\microsoft sdk\include\sspi.h
ISSP_LEVEL = 32
ISSP_MODE = 1
ISSP_LEVEL = 32
ISSP_MODE = 0
ISSP_LEVEL = 32
ISSP_MODE = 1
def SEC_SUCCESS(Status): return ((Status) >= 0)
SECPKG_FLAG_INTEGRITY = 1
SECPKG_FLAG_PRIVACY = 2
SECPKG_FLAG_TOKEN_ONLY = 4
SECPKG_FLAG_DATAGRAM = 8
SECPKG_FLAG_CONNECTION = 16
SECPKG_FLAG_MULTI_REQUIRED = 32
SECPKG_FLAG_CLIENT_ONLY = 64
SECPKG_FLAG_EXTENDED_ERROR = 128
SECPKG_FLAG_IMPERSONATION = 256
SECPKG_FLAG_ACCEPT_WIN32_NAME = 512
SECPKG_FLAG_STREAM = 1024
SECPKG_FLAG_NEGOTIABLE = 2048
SECPKG_FLAG_GSS_COMPATIBLE = 4096
SECPKG_FLAG_LOGON = 8192
SECPKG_FLAG_ASCII_BUFFERS = 16384
SECPKG_FLAG_FRAGMENT = 32768
SECPKG_FLAG_MUTUAL_AUTH = 65536
SECPKG_FLAG_DELEGATION = 131072
SECPKG_FLAG_READONLY_WITH_CHECKSUM = 262144
SECPKG_ID_NONE = 65535
SECBUFFER_VERSION = 0
SECBUFFER_EMPTY = 0
SECBUFFER_DATA = 1
SECBUFFER_TOKEN = 2
SECBUFFER_PKG_PARAMS = 3
SECBUFFER_MISSING = 4
SECBUFFER_EXTRA = 5
SECBUFFER_STREAM_TRAILER = 6
SECBUFFER_STREAM_HEADER = 7
SECBUFFER_NEGOTIATION_INFO = 8
SECBUFFER_PADDING = 9
SECBUFFER_STREAM = 10
SECBUFFER_MECHLIST = 11
SECBUFFER_MECHLIST_SIGNATURE = 12
SECBUFFER_TARGET = 13
SECBUFFER_CHANNEL_BINDINGS = 14
SECBUFFER_ATTRMASK = (-268435456)
SECBUFFER_READONLY = (-2147483648)
SECBUFFER_READONLY_WITH_CHECKSUM = 268435456
SECBUFFER_RESERVED = 1610612736
SECURITY_NATIVE_DREP = 16
SECURITY_NETWORK_DREP = 0
SECPKG_CRED_INBOUND = 1
SECPKG_CRED_OUTBOUND = 2
SECPKG_CRED_BOTH = 3
SECPKG_CRED_DEFAULT = 4
SECPKG_CRED_RESERVED = -268435456
ISC_REQ_DELEGATE = 1
ISC_REQ_MUTUAL_AUTH = 2
ISC_REQ_REPLAY_DETECT = 4
ISC_REQ_SEQUENCE_DETECT = 8
ISC_REQ_CONFIDENTIALITY = 16
ISC_REQ_USE_SESSION_KEY = 32
ISC_REQ_PROMPT_FOR_CREDS = 64
ISC_REQ_USE_SUPPLIED_CREDS = 128
ISC_REQ_ALLOCATE_MEMORY = 256
ISC_REQ_USE_DCE_STYLE = 512
ISC_REQ_DATAGRAM = 1024
ISC_REQ_CONNECTION = 2048
ISC_REQ_CALL_LEVEL = 4096
ISC_REQ_FRAGMENT_SUPPLIED = 8192
ISC_REQ_EXTENDED_ERROR = 16384
ISC_REQ_STREAM = 32768
ISC_REQ_INTEGRITY = 65536
ISC_REQ_IDENTIFY = 131072
ISC_REQ_NULL_SESSION = 262144
ISC_REQ_MANUAL_CRED_VALIDATION = 524288
ISC_REQ_RESERVED1 = 1048576
ISC_REQ_FRAGMENT_TO_FIT = 2097152
ISC_REQ_HTTP = 0x10000000
ISC_RET_DELEGATE = 1
ISC_RET_MUTUAL_AUTH = 2
ISC_RET_REPLAY_DETECT = 4
ISC_RET_SEQUENCE_DETECT = 8
ISC_RET_CONFIDENTIALITY = 16
ISC_RET_USE_SESSION_KEY = 32
ISC_RET_USED_COLLECTED_CREDS = 64
ISC_RET_USED_SUPPLIED_CREDS = 128
ISC_RET_ALLOCATED_MEMORY = 256
ISC_RET_USED_DCE_STYLE = 512
ISC_RET_DATAGRAM = 1024
ISC_RET_CONNECTION = 2048
ISC_RET_INTERMEDIATE_RETURN = 4096
ISC_RET_CALL_LEVEL = 8192
ISC_RET_EXTENDED_ERROR = 16384
ISC_RET_STREAM = 32768
ISC_RET_INTEGRITY = 65536
ISC_RET_IDENTIFY = 131072
ISC_RET_NULL_SESSION = 262144
ISC_RET_MANUAL_CRED_VALIDATION = 524288
ISC_RET_RESERVED1 = 1048576
ISC_RET_FRAGMENT_ONLY = 2097152
ASC_REQ_DELEGATE = 1
ASC_REQ_MUTUAL_AUTH = 2
ASC_REQ_REPLAY_DETECT = 4
ASC_REQ_SEQUENCE_DETECT = 8
ASC_REQ_CONFIDENTIALITY = 16
ASC_REQ_USE_SESSION_KEY = 32
ASC_REQ_ALLOCATE_MEMORY = 256
ASC_REQ_USE_DCE_STYLE = 512
ASC_REQ_DATAGRAM = 1024
ASC_REQ_CONNECTION = 2048
ASC_REQ_CALL_LEVEL = 4096
ASC_REQ_EXTENDED_ERROR = 32768
ASC_REQ_STREAM = 65536
ASC_REQ_INTEGRITY = 131072
ASC_REQ_LICENSING = 262144
ASC_REQ_IDENTIFY = 524288
ASC_REQ_ALLOW_NULL_SESSION = 1048576
ASC_REQ_ALLOW_NON_USER_LOGONS = 2097152
ASC_REQ_ALLOW_CONTEXT_REPLAY = 4194304
ASC_REQ_FRAGMENT_TO_FIT = 8388608
ASC_REQ_FRAGMENT_SUPPLIED = 8192
ASC_REQ_NO_TOKEN = 16777216
ASC_RET_DELEGATE = 1
ASC_RET_MUTUAL_AUTH = 2
ASC_RET_REPLAY_DETECT = 4
ASC_RET_SEQUENCE_DETECT = 8
ASC_RET_CONFIDENTIALITY = 16
ASC_RET_USE_SESSION_KEY = 32
ASC_RET_ALLOCATED_MEMORY = 256
ASC_RET_USED_DCE_STYLE = 512
ASC_RET_DATAGRAM = 1024
ASC_RET_CONNECTION = 2048
ASC_RET_CALL_LEVEL = 8192
ASC_RET_THIRD_LEG_FAILED = 16384
ASC_RET_EXTENDED_ERROR = 32768
ASC_RET_STREAM = 65536
ASC_RET_INTEGRITY = 131072
ASC_RET_LICENSING = 262144
ASC_RET_IDENTIFY = 524288
ASC_RET_NULL_SESSION = 1048576
ASC_RET_ALLOW_NON_USER_LOGONS = 2097152
ASC_RET_ALLOW_CONTEXT_REPLAY = 4194304
ASC_RET_FRAGMENT_ONLY = 8388608
SECPKG_CRED_ATTR_NAMES = 1
SECPKG_ATTR_SIZES = 0
SECPKG_ATTR_NAMES = 1
SECPKG_ATTR_LIFESPAN = 2
SECPKG_ATTR_DCE_INFO = 3
SECPKG_ATTR_STREAM_SIZES = 4
SECPKG_ATTR_KEY_INFO = 5
SECPKG_ATTR_AUTHORITY = 6
SECPKG_ATTR_PROTO_INFO = 7
SECPKG_ATTR_PASSWORD_EXPIRY = 8
SECPKG_ATTR_SESSION_KEY = 9
SECPKG_ATTR_PACKAGE_INFO = 10
SECPKG_ATTR_USER_FLAGS = 11
SECPKG_ATTR_NEGOTIATION_INFO = 12
SECPKG_ATTR_NATIVE_NAMES = 13
SECPKG_ATTR_FLAGS = 14
SECPKG_ATTR_USE_VALIDATED = 15
SECPKG_ATTR_CREDENTIAL_NAME = 16
SECPKG_ATTR_TARGET_INFORMATION = 17
SECPKG_ATTR_ACCESS_TOKEN = 18
SECPKG_ATTR_TARGET = 19
SECPKG_ATTR_AUTHENTICATION_ID = 20
## attributes from schannel.h
SECPKG_ATTR_REMOTE_CERT_CONTEXT = 83
SECPKG_ATTR_LOCAL_CERT_CONTEXT = 84
SECPKG_ATTR_ROOT_STORE = 85
SECPKG_ATTR_SUPPORTED_ALGS = 86
SECPKG_ATTR_CIPHER_STRENGTHS = 87
SECPKG_ATTR_SUPPORTED_PROTOCOLS = 88
SECPKG_ATTR_ISSUER_LIST_EX = 89
SECPKG_ATTR_CONNECTION_INFO = 90
SECPKG_ATTR_EAP_KEY_BLOCK = 91
SECPKG_ATTR_MAPPED_CRED_ATTR = 92
SECPKG_ATTR_SESSION_INFO = 93
SECPKG_ATTR_APP_DATA = 94
SECPKG_NEGOTIATION_COMPLETE = 0
SECPKG_NEGOTIATION_OPTIMISTIC = 1
SECPKG_NEGOTIATION_IN_PROGRESS = 2
SECPKG_NEGOTIATION_DIRECT = 3
SECPKG_NEGOTIATION_TRY_MULTICRED = 4
SECPKG_CONTEXT_EXPORT_RESET_NEW = 1
SECPKG_CONTEXT_EXPORT_DELETE_OLD = 2
SECQOP_WRAP_NO_ENCRYPT = (-2147483647)
SECURITY_ENTRYPOINT_ANSIW = "InitSecurityInterfaceW"
SECURITY_ENTRYPOINT_ANSIA = "InitSecurityInterfaceA"
SECURITY_ENTRYPOINT16 = "INITSECURITYINTERFACEA"
SECURITY_ENTRYPOINT_ANSI = SECURITY_ENTRYPOINT_ANSIW
SECURITY_ENTRYPOINT_ANSI = SECURITY_ENTRYPOINT_ANSIA
SECURITY_ENTRYPOINT = SECURITY_ENTRYPOINT16
SECURITY_ENTRYPOINT_ANSI = SECURITY_ENTRYPOINT16
SECURITY_SUPPORT_PROVIDER_INTERFACE_VERSION = 1
SECURITY_SUPPORT_PROVIDER_INTERFACE_VERSION_2 = 2
SASL_OPTION_SEND_SIZE = 1
SASL_OPTION_RECV_SIZE = 2
SASL_OPTION_AUTHZ_STRING = 3
SASL_OPTION_AUTHZ_PROCESSING = 4
SEC_WINNT_AUTH_IDENTITY_ANSI = 1
SEC_WINNT_AUTH_IDENTITY_UNICODE = 2
SEC_WINNT_AUTH_IDENTITY_VERSION = 512
SEC_WINNT_AUTH_IDENTITY_MARSHALLED = 4
SEC_WINNT_AUTH_IDENTITY_ONLY = 8
SECPKG_OPTIONS_TYPE_UNKNOWN = 0
SECPKG_OPTIONS_TYPE_LSA = 1
SECPKG_OPTIONS_TYPE_SSPI = 2
SECPKG_OPTIONS_PERMANENT = 1
SEC_E_INSUFFICIENT_MEMORY = -2146893056
SEC_E_INVALID_HANDLE = -2146893055
SEC_E_UNSUPPORTED_FUNCTION = -2146893054
SEC_E_TARGET_UNKNOWN = -2146893053
SEC_E_INTERNAL_ERROR = -2146893052
SEC_E_SECPKG_NOT_FOUND = -2146893051
SEC_E_NOT_OWNER = -2146893050
SEC_E_CANNOT_INSTALL = -2146893049
SEC_E_INVALID_TOKEN = -2146893048
SEC_E_CANNOT_PACK = -2146893047
SEC_E_QOP_NOT_SUPPORTED = -2146893046
SEC_E_NO_IMPERSONATION = -2146893045
SEC_E_LOGON_DENIED = -2146893044
SEC_E_UNKNOWN_CREDENTIALS = -2146893043
SEC_E_NO_CREDENTIALS = -2146893042
SEC_E_MESSAGE_ALTERED = -2146893041
SEC_E_OUT_OF_SEQUENCE = -2146893040
SEC_E_NO_AUTHENTICATING_AUTHORITY = -2146893039
SEC_I_CONTINUE_NEEDED = 590610
SEC_I_COMPLETE_NEEDED = 590611
SEC_I_COMPLETE_AND_CONTINUE = 590612
SEC_I_LOCAL_LOGON = 590613
SEC_E_BAD_PKGID = -2146893034
SEC_E_CONTEXT_EXPIRED = -2146893033
SEC_I_CONTEXT_EXPIRED = 590615
SEC_E_INCOMPLETE_MESSAGE = -2146893032
SEC_E_INCOMPLETE_CREDENTIALS = -2146893024
SEC_E_BUFFER_TOO_SMALL = -2146893023
SEC_I_INCOMPLETE_CREDENTIALS = 590624
SEC_I_RENEGOTIATE = 590625
SEC_E_WRONG_PRINCIPAL = -2146893022
SEC_I_NO_LSA_CONTEXT = 590627
SEC_E_TIME_SKEW = -2146893020
SEC_E_UNTRUSTED_ROOT = -2146893019
SEC_E_ILLEGAL_MESSAGE = -2146893018
SEC_E_CERT_UNKNOWN = -2146893017
SEC_E_CERT_EXPIRED = -2146893016
SEC_E_ENCRYPT_FAILURE = -2146893015
SEC_E_DECRYPT_FAILURE = -2146893008
SEC_E_ALGORITHM_MISMATCH = -2146893007
SEC_E_SECURITY_QOS_FAILED = -2146893006
SEC_E_UNFINISHED_CONTEXT_DELETED = -2146893005
SEC_E_NO_TGT_REPLY = -2146893004
SEC_E_NO_IP_ADDRESSES = -2146893003
SEC_E_WRONG_CREDENTIAL_HANDLE = -2146893002
SEC_E_CRYPTO_SYSTEM_INVALID = -2146893001
SEC_E_MAX_REFERRALS_EXCEEDED = -2146893000
SEC_E_MUST_BE_KDC = -2146892999
SEC_E_STRONG_CRYPTO_NOT_SUPPORTED = -2146892998
SEC_E_TOO_MANY_PRINCIPALS = -2146892997
SEC_E_NO_PA_DATA = -2146892996
SEC_E_PKINIT_NAME_MISMATCH = -2146892995
SEC_E_SMARTCARD_LOGON_REQUIRED = -2146892994
SEC_E_SHUTDOWN_IN_PROGRESS = -2146892993
SEC_E_KDC_INVALID_REQUEST = -2146892992
SEC_E_KDC_UNABLE_TO_REFER = -2146892991
SEC_E_KDC_UNKNOWN_ETYPE = -2146892990
SEC_E_UNSUPPORTED_PREAUTH = -2146892989
SEC_E_DELEGATION_REQUIRED = -2146892987
SEC_E_BAD_BINDINGS = -2146892986
SEC_E_MULTIPLE_ACCOUNTS = -2146892985
SEC_E_NO_KERB_KEY = -2146892984
ERROR_IPSEC_QM_POLICY_EXISTS = 13000L
ERROR_IPSEC_QM_POLICY_NOT_FOUND = 13001L
ERROR_IPSEC_QM_POLICY_IN_USE = 13002L
ERROR_IPSEC_MM_POLICY_EXISTS = 13003L
ERROR_IPSEC_MM_POLICY_NOT_FOUND = 13004L
ERROR_IPSEC_MM_POLICY_IN_USE = 13005L
ERROR_IPSEC_MM_FILTER_EXISTS = 13006L
ERROR_IPSEC_MM_FILTER_NOT_FOUND = 13007L
ERROR_IPSEC_TRANSPORT_FILTER_EXISTS = 13008L
ERROR_IPSEC_TRANSPORT_FILTER_NOT_FOUND = 13009L
ERROR_IPSEC_MM_AUTH_EXISTS = 13010L
ERROR_IPSEC_MM_AUTH_NOT_FOUND = 13011L
ERROR_IPSEC_MM_AUTH_IN_USE = 13012L
ERROR_IPSEC_DEFAULT_MM_POLICY_NOT_FOUND = 13013L
ERROR_IPSEC_DEFAULT_MM_AUTH_NOT_FOUND = 13014L
ERROR_IPSEC_DEFAULT_QM_POLICY_NOT_FOUND = 13015L
ERROR_IPSEC_TUNNEL_FILTER_EXISTS = 13016L
ERROR_IPSEC_TUNNEL_FILTER_NOT_FOUND = 13017L
ERROR_IPSEC_MM_FILTER_PENDING_DELETION = 13018L
ERROR_IPSEC_TRANSPORT_FILTER_PENDING_DELETION = 13019L
ERROR_IPSEC_TUNNEL_FILTER_PENDING_DELETION = 13020L
ERROR_IPSEC_MM_POLICY_PENDING_DELETION = 13021L
ERROR_IPSEC_MM_AUTH_PENDING_DELETION = 13022L
ERROR_IPSEC_QM_POLICY_PENDING_DELETION = 13023L
WARNING_IPSEC_MM_POLICY_PRUNED = 13024L
WARNING_IPSEC_QM_POLICY_PRUNED = 13025L
ERROR_IPSEC_IKE_NEG_STATUS_BEGIN = 13800L
ERROR_IPSEC_IKE_AUTH_FAIL = 13801L
ERROR_IPSEC_IKE_ATTRIB_FAIL = 13802L
ERROR_IPSEC_IKE_NEGOTIATION_PENDING = 13803L
ERROR_IPSEC_IKE_GENERAL_PROCESSING_ERROR = 13804L
ERROR_IPSEC_IKE_TIMED_OUT = 13805L
ERROR_IPSEC_IKE_NO_CERT = 13806L
ERROR_IPSEC_IKE_SA_DELETED = 13807L
ERROR_IPSEC_IKE_SA_REAPED = 13808L
ERROR_IPSEC_IKE_MM_ACQUIRE_DROP = 13809L
ERROR_IPSEC_IKE_QM_ACQUIRE_DROP = 13810L
ERROR_IPSEC_IKE_QUEUE_DROP_MM = 13811L
ERROR_IPSEC_IKE_QUEUE_DROP_NO_MM = 13812L
ERROR_IPSEC_IKE_DROP_NO_RESPONSE = 13813L
ERROR_IPSEC_IKE_MM_DELAY_DROP = 13814L
ERROR_IPSEC_IKE_QM_DELAY_DROP = 13815L
ERROR_IPSEC_IKE_ERROR = 13816L
ERROR_IPSEC_IKE_CRL_FAILED = 13817L
ERROR_IPSEC_IKE_INVALID_KEY_USAGE = 13818L
ERROR_IPSEC_IKE_INVALID_CERT_TYPE = 13819L
ERROR_IPSEC_IKE_NO_PRIVATE_KEY = 13820L
ERROR_IPSEC_IKE_DH_FAIL = 13822L
ERROR_IPSEC_IKE_INVALID_HEADER = 13824L
ERROR_IPSEC_IKE_NO_POLICY = 13825L
ERROR_IPSEC_IKE_INVALID_SIGNATURE = 13826L
ERROR_IPSEC_IKE_KERBEROS_ERROR = 13827L
ERROR_IPSEC_IKE_NO_PUBLIC_KEY = 13828L
ERROR_IPSEC_IKE_PROCESS_ERR = 13829L
ERROR_IPSEC_IKE_PROCESS_ERR_SA = 13830L
ERROR_IPSEC_IKE_PROCESS_ERR_PROP = 13831L
ERROR_IPSEC_IKE_PROCESS_ERR_TRANS = 13832L
ERROR_IPSEC_IKE_PROCESS_ERR_KE = 13833L
ERROR_IPSEC_IKE_PROCESS_ERR_ID = 13834L
ERROR_IPSEC_IKE_PROCESS_ERR_CERT = 13835L
ERROR_IPSEC_IKE_PROCESS_ERR_CERT_REQ = 13836L
ERROR_IPSEC_IKE_PROCESS_ERR_HASH = 13837L
ERROR_IPSEC_IKE_PROCESS_ERR_SIG = 13838L
ERROR_IPSEC_IKE_PROCESS_ERR_NONCE = 13839L
ERROR_IPSEC_IKE_PROCESS_ERR_NOTIFY = 13840L
ERROR_IPSEC_IKE_PROCESS_ERR_DELETE = 13841L
ERROR_IPSEC_IKE_PROCESS_ERR_VENDOR = 13842L
ERROR_IPSEC_IKE_INVALID_PAYLOAD = 13843L
ERROR_IPSEC_IKE_LOAD_SOFT_SA = 13844L
ERROR_IPSEC_IKE_SOFT_SA_TORN_DOWN = 13845L
ERROR_IPSEC_IKE_INVALID_COOKIE = 13846L
ERROR_IPSEC_IKE_NO_PEER_CERT = 13847L
ERROR_IPSEC_IKE_PEER_CRL_FAILED = 13848L
ERROR_IPSEC_IKE_POLICY_CHANGE = 13849L
ERROR_IPSEC_IKE_NO_MM_POLICY = 13850L
ERROR_IPSEC_IKE_NOTCBPRIV = 13851L
ERROR_IPSEC_IKE_SECLOADFAIL = 13852L
ERROR_IPSEC_IKE_FAILSSPINIT = 13853L
ERROR_IPSEC_IKE_FAILQUERYSSP = 13854L
ERROR_IPSEC_IKE_SRVACQFAIL = 13855L
ERROR_IPSEC_IKE_SRVQUERYCRED = 13856L
ERROR_IPSEC_IKE_GETSPIFAIL = 13857L
ERROR_IPSEC_IKE_INVALID_FILTER = 13858L
ERROR_IPSEC_IKE_OUT_OF_MEMORY = 13859L
ERROR_IPSEC_IKE_ADD_UPDATE_KEY_FAILED = 13860L
ERROR_IPSEC_IKE_INVALID_POLICY = 13861L
ERROR_IPSEC_IKE_UNKNOWN_DOI = 13862L
ERROR_IPSEC_IKE_INVALID_SITUATION = 13863L
ERROR_IPSEC_IKE_DH_FAILURE = 13864L
ERROR_IPSEC_IKE_INVALID_GROUP = 13865L
ERROR_IPSEC_IKE_ENCRYPT = 13866L
ERROR_IPSEC_IKE_DECRYPT = 13867L
ERROR_IPSEC_IKE_POLICY_MATCH = 13868L
ERROR_IPSEC_IKE_UNSUPPORTED_ID = 13869L
ERROR_IPSEC_IKE_INVALID_HASH = 13870L
ERROR_IPSEC_IKE_INVALID_HASH_ALG = 13871L
ERROR_IPSEC_IKE_INVALID_HASH_SIZE = 13872L
ERROR_IPSEC_IKE_INVALID_ENCRYPT_ALG = 13873L
ERROR_IPSEC_IKE_INVALID_AUTH_ALG = 13874L
ERROR_IPSEC_IKE_INVALID_SIG = 13875L
ERROR_IPSEC_IKE_LOAD_FAILED = 13876L
ERROR_IPSEC_IKE_RPC_DELETE = 13877L
ERROR_IPSEC_IKE_BENIGN_REINIT = 13878L
ERROR_IPSEC_IKE_INVALID_RESPONDER_LIFETIME_NOTIFY = 13879L
ERROR_IPSEC_IKE_INVALID_CERT_KEYLEN = 13881L
ERROR_IPSEC_IKE_MM_LIMIT = 13882L
ERROR_IPSEC_IKE_NEGOTIATION_DISABLED = 13883L
ERROR_IPSEC_IKE_NEG_STATUS_END = 13884L
CRYPT_E_MSG_ERROR = ((-2146889727))
CRYPT_E_UNKNOWN_ALGO = ((-2146889726))
CRYPT_E_OID_FORMAT = ((-2146889725))
CRYPT_E_INVALID_MSG_TYPE = ((-2146889724))
CRYPT_E_UNEXPECTED_ENCODING = ((-2146889723))
CRYPT_E_AUTH_ATTR_MISSING = ((-2146889722))
CRYPT_E_HASH_VALUE = ((-2146889721))
CRYPT_E_INVALID_INDEX = ((-2146889720))
CRYPT_E_ALREADY_DECRYPTED = ((-2146889719))
CRYPT_E_NOT_DECRYPTED = ((-2146889718))
CRYPT_E_RECIPIENT_NOT_FOUND = ((-2146889717))
CRYPT_E_CONTROL_TYPE = ((-2146889716))
CRYPT_E_ISSUER_SERIALNUMBER = ((-2146889715))
CRYPT_E_SIGNER_NOT_FOUND = ((-2146889714))
CRYPT_E_ATTRIBUTES_MISSING = ((-2146889713))
CRYPT_E_STREAM_MSG_NOT_READY = ((-2146889712))
CRYPT_E_STREAM_INSUFFICIENT_DATA = ((-2146889711))
CRYPT_I_NEW_PROTECTION_REQUIRED = (593938)
CRYPT_E_BAD_LEN = ((-2146885631))
CRYPT_E_BAD_ENCODE = ((-2146885630))
CRYPT_E_FILE_ERROR = ((-2146885629))
CRYPT_E_NOT_FOUND = ((-2146885628))
CRYPT_E_EXISTS = ((-2146885627))
CRYPT_E_NO_PROVIDER = ((-2146885626))
CRYPT_E_SELF_SIGNED = ((-2146885625))
CRYPT_E_DELETED_PREV = ((-2146885624))
CRYPT_E_NO_MATCH = ((-2146885623))
CRYPT_E_UNEXPECTED_MSG_TYPE = ((-2146885622))
CRYPT_E_NO_KEY_PROPERTY = ((-2146885621))
CRYPT_E_NO_DECRYPT_CERT = ((-2146885620))
CRYPT_E_BAD_MSG = ((-2146885619))
CRYPT_E_NO_SIGNER = ((-2146885618))
CRYPT_E_PENDING_CLOSE = ((-2146885617))
CRYPT_E_REVOKED = ((-2146885616))
CRYPT_E_NO_REVOCATION_DLL = ((-2146885615))
CRYPT_E_NO_REVOCATION_CHECK = ((-2146885614))
CRYPT_E_REVOCATION_OFFLINE = ((-2146885613))
CRYPT_E_NOT_IN_REVOCATION_DATABASE = ((-2146885612))
CRYPT_E_INVALID_NUMERIC_STRING = ((-2146885600))
CRYPT_E_INVALID_PRINTABLE_STRING = ((-2146885599))
CRYPT_E_INVALID_IA5_STRING = ((-2146885598))
CRYPT_E_INVALID_X500_STRING = ((-2146885597))
CRYPT_E_NOT_CHAR_STRING = ((-2146885596))
CRYPT_E_FILERESIZED = ((-2146885595))
CRYPT_E_SECURITY_SETTINGS = ((-2146885594))
CRYPT_E_NO_VERIFY_USAGE_DLL = ((-2146885593))
CRYPT_E_NO_VERIFY_USAGE_CHECK = ((-2146885592))
CRYPT_E_VERIFY_USAGE_OFFLINE = ((-2146885591))
CRYPT_E_NOT_IN_CTL = ((-2146885590))
CRYPT_E_NO_TRUSTED_SIGNER = ((-2146885589))
CRYPT_E_MISSING_PUBKEY_PARA = ((-2146885588))
CRYPT_E_OSS_ERROR = ((-2146881536))
## Kerberos message types for LsaCallAuthenticationPackage (from ntsecapi.h)
KerbDebugRequestMessage = 0
KerbQueryTicketCacheMessage = 1
KerbChangeMachinePasswordMessage = 2
KerbVerifyPacMessage = 3
KerbRetrieveTicketMessage = 4
KerbUpdateAddressesMessage = 5
KerbPurgeTicketCacheMessage = 6
KerbChangePasswordMessage = 7
KerbRetrieveEncodedTicketMessage = 8
KerbDecryptDataMessage = 9
KerbAddBindingCacheEntryMessage = 10
KerbSetPasswordMessage = 11
KerbSetPasswordExMessage = 12
KerbVerifyCredentialsMessage = 13
KerbQueryTicketCacheExMessage = 14
KerbPurgeTicketCacheExMessage = 15
KerbRefreshSmartcardCredentialsMessage = 16
KerbAddExtraCredentialsMessage = 17
KerbQuerySupplementalCredentialsMessage = 18
## messages used with msv1_0 from ntsecapi.h
MsV1_0Lm20ChallengeRequest = 0
MsV1_0Lm20GetChallengeResponse = 1
MsV1_0EnumerateUsers = 2
MsV1_0GetUserInfo = 3
MsV1_0ReLogonUsers = 4
MsV1_0ChangePassword = 5
MsV1_0ChangeCachedPassword = 6
MsV1_0GenericPassthrough = 7
MsV1_0CacheLogon = 8
MsV1_0SubAuth = 9
MsV1_0DeriveCredential = 10
MsV1_0CacheLookup = 11
MsV1_0SetProcessOption = 12
SEC_E_OK = 0
| apache-2.0 |
drewtalati/talaticoin | qa/rpc-tests/rest.py | 128 | 3258 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework import BitcoinTestFramework
from util import *
import json
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
def http_get_call(host, port, path, response_object = 0):
conn = httplib.HTTPConnection(host, port)
conn.request('GET', path)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
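# Illustrative note (not part of the original test): the helper above performs a
# bare HTTP GET against bitcoind's REST interface, where the suffix after the
# FORMAT_SEPARATOR selects the encoding. For example (host and port are
# hypothetical here):
#   http_get_call('127.0.0.1', 18332, '/rest/block/' + some_hash + '.json')
# returns the JSON-encoded block as a string, while a '.bin' or '.hex' suffix
# returns the serialized block in binary or hex form.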
class RESTTest (BitcoinTestFramework):
FORMAT_SEPARATOR = "."
def run_test(self):
url = urlparse.urlparse(self.nodes[0].url)
bb_hash = self.nodes[0].getbestblockhash()
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check json format
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['hash'], bb_hash)
# do tx test
tx_hash = json_obj['tx'][0]['txid'];
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
response = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# now mine the transactions
newblockhash = self.nodes[1].setgenerate(True, 1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
if __name__ == '__main__':
RESTTest ().main ()
| mit |
mozilla/build-relengapi | relengapi/lib/aws.py | 3 | 5194 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import importlib
import json
import logging
import threading
import time
import boto
import structlog
import wsme.rest.json
from boto.sqs import message as sqs_message
logger = structlog.get_logger()
class _StopListening(Exception):
pass
class AWS(object):
def __init__(self, config):
self.config = config
self._connections = {}
self._queues = {}
self._listeners = []
def connect_to(self, service_name, region_name):
key = service_name, region_name
if key in self._connections:
return self._connections[key]
# handle special cases
try:
fn = getattr(self, 'connect_to_' + service_name)
except AttributeError:
fn = self.connect_to_default
conn = fn(service_name, region_name)
self._connections[key] = conn
return conn
def connect_to_default(self, service_name, region_name):
# for the service, import 'boto.$service'
service = importlib.import_module('boto.' + service_name)
for region in service.regions():
if region.name == region_name:
break
else:
raise RuntimeError("invalid region %r" % (region_name,))
connect_fn = getattr(boto, 'connect_' + service_name)
return connect_fn(
aws_access_key_id=self.config.get('access_key_id'),
aws_secret_access_key=self.config.get('secret_access_key'),
region=region)
def connect_to_s3(self, service_name, region_name):
# special case for S3, which boto does differently than
# the other services
import boto.s3
return boto.s3.connect_to_region(region_name=region_name,
aws_access_key_id=self.config.get('access_key_id'),
aws_secret_access_key=self.config.get('secret_access_key'))
def get_sqs_queue(self, region_name, queue_name):
key = (region_name, queue_name)
if key in self._queues:
return self._queues[key]
sqs = self.connect_to('sqs', region_name)
queue = sqs.get_queue(queue_name)
if not queue:
raise RuntimeError("no such queue %r in %s" %
(queue_name, region_name))
self._queues[key] = queue
return queue
def sqs_write(self, region_name, queue_name, body):
body = wsme.rest.json.tojson(type(body), body)
queue = self.get_sqs_queue(region_name, queue_name)
m = sqs_message.Message(body=json.dumps(body))
queue.write(m)
def sqs_listen(self, region_name, queue_name, read_args=None):
def decorate(func):
self._listeners.append(
(region_name, queue_name, read_args or {}, func))
return func
return decorate
def _listen_thd(self, region_name, queue_name, read_args, listener):
logger.info(
"Listening to SQS queue %r in region %s", queue_name, region_name)
try:
queue = self.get_sqs_queue(region_name, queue_name)
except Exception:
logger.exception("While getting queue %r in region %s; listening cancelled",
queue_name, region_name)
return
while True:
msg = queue.read(wait_time_seconds=20, **read_args)
if msg:
try:
listener(msg)
except _StopListening: # for tests
break
except Exception:
logger.exception("while invoking %r", listener)
# note that we do nothing with the message; it will
# remain invisible for a while, then reappear and maybe
# cause another exception
continue
msg.delete()
def _spawn_sqs_listeners(self, _testing=False):
# launch a listening thread for each SQS queue
threads = []
for region_name, queue_name, read_args, listener in self._listeners:
thd = threading.Thread(
name="%s/%r -> %r" % (region_name, queue_name, listener),
target=self._listen_thd,
args=(region_name, queue_name, read_args, listener))
# set the thread to daemon so that SIGINT will kill the process
thd.daemon = True
thd.start()
threads.append(thd)
# sleep forever, or until we get a SIGINT, at which point the remaining
# threads will be killed during process shutdown
if not _testing: # pragma: no cover
while True:
time.sleep(2 ** 31)
return threads
def init_app(app):
app.aws = AWS(app.config.get('AWS', {}))
# disable boto debug logging unless DEBUG = True
if not app.debug:
logging.getLogger('boto').setLevel(logging.INFO)
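# A hypothetical usage sketch (not part of the original module), assuming a Flask
# app whose config carries an 'AWS' section and on which init_app() above has run:
#
#     @app.aws.sqs_listen('us-west-2', 'my-queue')
#     def handle_message(msg):
#         payload = json.loads(msg.get_body())
#         ...  # act on the payload
#
# A handler that raises leaves its message on the queue (it becomes visible again
# after the SQS visibility timeout); a handler that returns normally lets
# _listen_thd delete the message. _spawn_sqs_listeners() starts one daemon thread
# per registered (region, queue) pair.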
| mpl-2.0 |
nmittler/grpc | src/python/interop/setup.py | 1 | 2029 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A setup module for the GRPC Python interop testing package."""
from distutils import core as _core
_PACKAGES = (
'interop',
)
_PACKAGE_DIRECTORIES = {
'interop': 'interop',
}
_PACKAGE_DATA = {
'interop': ['credentials/server1.key', 'credentials/server1.pem',]
}
_INSTALL_REQUIRES = ['grpc-2015>=0.0.1']
_core.setup(
name='interop', version='0.0.1', packages=_PACKAGES,
package_dir=_PACKAGE_DIRECTORIES, package_data=_PACKAGE_DATA,
install_requires=_INSTALL_REQUIRES)
| bsd-3-clause |
JianyuWang/nova | nova/tests/functional/api_sample_tests/test_config_drive.py | 21 | 3002 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import test_servers
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class ConfigDriveSampleJsonTest(test_servers.ServersSampleBase):
extension_name = 'os-config-drive'
def _get_flags(self):
f = super(ConfigDriveSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.config_drive.Config_drive')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.keypairs.Keypairs')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.extended_ips.Extended_ips')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.extended_ips_mac.'
'Extended_ips_mac')
return f
def setUp(self):
super(ConfigDriveSampleJsonTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fake.stub_out_image_service(self.stubs)
def test_config_drive_show(self):
uuid = self._post_server(use_common_server_api_samples=False)
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
# config drive can be a string for True or empty value for False
subs['cdrive'] = '.*'
subs['access_ip_v4'] = '1.2.3.4'
subs['access_ip_v6'] = '80fe::'
self._verify_response('server-config-drive-get-resp', subs,
response, 200)
def test_config_drive_detail(self):
self._post_server(use_common_server_api_samples=False)
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
# config drive can be a string for True or empty value for False
subs['cdrive'] = '.*'
subs['access_ip_v4'] = '1.2.3.4'
subs['access_ip_v6'] = '80fe::'
self._verify_response('servers-config-drive-details-resp',
subs, response, 200)
| apache-2.0 |
haya14busa/alc-etm-searcher | nltk-3.0a3/build/lib/nltk/corpus/reader/wordnet.py | 2 | 69558 | # Natural Language Toolkit: WordNet
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Steven Bethard <[email protected]>
# Steven Bird <[email protected]>
# Edward Loper <[email protected]>
# Nitin Madnani <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals
import math
import re
from itertools import islice, chain
from operator import itemgetter, attrgetter
from collections import defaultdict
from nltk.corpus.reader import CorpusReader
from nltk.util import binary_search_file as _binary_search_file
from nltk.probability import FreqDist
from nltk.compat import xrange, python_2_unicode_compatible, total_ordering
######################################################################
## Table of Contents
######################################################################
## - Constants
## - Data Classes
## - WordNetError
## - Lemma
## - Synset
## - WordNet Corpus Reader
## - WordNet Information Content Corpus Reader
## - Similarity Metrics
## - Demo
######################################################################
## Constants
######################################################################
#: Positive infinity (for similarity functions)
_INF = 1e300
#{ Part-of-speech constants
ADJ, ADJ_SAT, ADV, NOUN, VERB = 'a', 's', 'r', 'n', 'v'
#}
POS_LIST = [NOUN, VERB, ADJ, ADV]
#: A table of strings that are used to express verb frames.
VERB_FRAME_STRINGS = (
None,
"Something %s",
"Somebody %s",
"It is %sing",
"Something is %sing PP",
"Something %s something Adjective/Noun",
"Something %s Adjective/Noun",
"Somebody %s Adjective",
"Somebody %s something",
"Somebody %s somebody",
"Something %s somebody",
"Something %s something",
"Something %s to somebody",
"Somebody %s on something",
"Somebody %s somebody something",
"Somebody %s something to somebody",
"Somebody %s something from somebody",
"Somebody %s somebody with something",
"Somebody %s somebody of something",
"Somebody %s something on somebody",
"Somebody %s somebody PP",
"Somebody %s something PP",
"Somebody %s PP",
"Somebody's (body part) %s",
"Somebody %s somebody to INFINITIVE",
"Somebody %s somebody INFINITIVE",
"Somebody %s that CLAUSE",
"Somebody %s to somebody",
"Somebody %s to INFINITIVE",
"Somebody %s whether INFINITIVE",
"Somebody %s somebody into V-ing something",
"Somebody %s something with something",
"Somebody %s INFINITIVE",
"Somebody %s VERB-ing",
"It %s that CLAUSE",
"Something %s INFINITIVE")
SENSENUM_RE = re.compile(r'\.\d\d\.')
######################################################################
## Data Classes
######################################################################
class WordNetError(Exception):
"""An exception class for wordnet-related errors."""
@total_ordering
class _WordNetObject(object):
"""A common base class for lemmas and synsets."""
def hypernyms(self):
return self._related('@')
def instance_hypernyms(self):
return self._related('@i')
def hyponyms(self):
return self._related('~')
def instance_hyponyms(self):
return self._related('~i')
def member_holonyms(self):
return self._related('#m')
def substance_holonyms(self):
return self._related('#s')
def part_holonyms(self):
return self._related('#p')
def member_meronyms(self):
return self._related('%m')
def substance_meronyms(self):
return self._related('%s')
def part_meronyms(self):
return self._related('%p')
def topic_domains(self):
return self._related(';c')
def region_domains(self):
return self._related(';r')
def usage_domains(self):
return self._related(';u')
def attributes(self):
return self._related('=')
def entailments(self):
return self._related('*')
def causes(self):
return self._related('>')
def also_sees(self):
return self._related('^')
def verb_groups(self):
return self._related('$')
def similar_tos(self):
return self._related('&')
def __hash__(self):
return hash(self._name)
def __eq__(self, other):
return self._name == other._name
def __ne__(self, other):
return self._name != other._name
def __lt__(self, other):
return self._name < other._name
@python_2_unicode_compatible
class Lemma(_WordNetObject):
"""
The lexical entry for a single morphological form of a
sense-disambiguated word.
Create a Lemma from a "<word>.<pos>.<number>.<lemma>" string where:
<word> is the morphological stem identifying the synset
<pos> is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB
<number> is the sense number, counting from 0.
<lemma> is the morphological form of interest
Note that <word> and <lemma> can be different, e.g. the Synset
'salt.n.03' has the Lemmas 'salt.n.03.salt', 'salt.n.03.saltiness' and
'salt.n.03.salinity'.
Lemma attributes:
- name: The canonical name of this lemma.
- synset: The synset that this lemma belongs to.
- syntactic_marker: For adjectives, the WordNet string identifying the
syntactic position relative to the modified noun. See:
http://wordnet.princeton.edu/man/wninput.5WN.html#sect10
For all other parts of speech, this attribute is None.
Lemma methods:
Lemmas have the following methods for retrieving related Lemmas. They
correspond to the names for the pointer symbols defined here:
http://wordnet.princeton.edu/man/wninput.5WN.html#sect3
These methods all return lists of Lemmas:
- antonyms
- hypernyms, instance_hypernyms
- hyponyms, instance_hyponyms
- member_holonyms, substance_holonyms, part_holonyms
- member_meronyms, substance_meronyms, part_meronyms
- topic_domains, region_domains, usage_domains
- attributes
- derivationally_related_forms
- entailments
- causes
- also_sees
- verb_groups
- similar_tos
- pertainyms
"""
__slots__ = ['_wordnet_corpus_reader', '_name', '_syntactic_marker',
'_synset', '_frame_strings', '_frame_ids',
'_lexname_index', '_lex_id', '_key']
def __init__(self, wordnet_corpus_reader, synset, name,
lexname_index, lex_id, syntactic_marker):
self._wordnet_corpus_reader = wordnet_corpus_reader
self._name = name
self._syntactic_marker = syntactic_marker
self._synset = synset
self._frame_strings = []
self._frame_ids = []
self._lexname_index = lexname_index
self._lex_id = lex_id
self._key = None # gets set later.
def name(self):
return self._name
def syntactic_marker(self):
return self._syntactic_marker
def synset(self):
return self._synset
def frame_strings(self):
return self._frame_strings
def frame_ids(self):
return self._frame_ids
def key(self):
return self._key
def __repr__(self):
tup = type(self).__name__, self._synset._name, self._name
return "%s('%s.%s')" % tup
def _related(self, relation_symbol):
get_synset = self._wordnet_corpus_reader._synset_from_pos_and_offset
return sorted([get_synset(pos, offset)._lemmas[lemma_index]
for pos, offset, lemma_index
in self._synset._lemma_pointers[self._name, relation_symbol]])
def count(self):
"""Return the frequency count for this Lemma"""
return self._wordnet_corpus_reader.lemma_count(self)
def antonyms(self):
return self._related('!')
def derivationally_related_forms(self):
return self._related('+')
def pertainyms(self):
return self._related('\\')
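# Illustrative note (not part of the original module): given the corpus reader
# bound as `wn`, the dotted form described in the class docstring is what
# wn.lemma() parses, e.g. wn.lemma('salt.n.03.salinity') names the form
# 'salinity' of the third noun sense of 'salt'.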
@python_2_unicode_compatible
class Synset(_WordNetObject):
"""Create a Synset from a "<lemma>.<pos>.<number>" string where:
<lemma> is the word's morphological stem
<pos> is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB
<number> is the sense number, counting from 0.
Synset attributes:
- name: The canonical name of this synset, formed using the first lemma
of this synset. Note that this may be different from the name
passed to the constructor if that string used a different lemma to
identify the synset.
- pos: The synset's part of speech, matching one of the module level
attributes ADJ, ADJ_SAT, ADV, NOUN or VERB.
- lemmas: A list of the Lemma objects for this synset.
- definition: The definition for this synset.
- examples: A list of example strings for this synset.
- offset: The offset in the WordNet dict file of this synset.
- lexname: The name of the lexicographer file containing this synset.
Synset methods:
Synsets have the following methods for retrieving related Synsets.
They correspond to the names for the pointer symbols defined here:
http://wordnet.princeton.edu/man/wninput.5WN.html#sect3
These methods all return lists of Synsets.
- hypernyms, instance_hypernyms
- hyponyms, instance_hyponyms
- member_holonyms, substance_holonyms, part_holonyms
- member_meronyms, substance_meronyms, part_meronyms
- attributes
- entailments
- causes
- also_sees
- verb_groups
- similar_tos
Additionally, Synsets support the following methods specific to the
hypernym relation:
- root_hypernyms
- common_hypernyms
- lowest_common_hypernyms
Note that Synsets do not support the following relations because
these are defined by WordNet as lexical relations:
- antonyms
- derivationally_related_forms
- pertainyms
"""
__slots__ = ['_pos', '_offset', '_name', '_frame_ids',
'_lemmas', '_lemma_names',
'_definition', '_examples', '_lexname',
'_pointers', '_lemma_pointers', '_max_depth',
'_min_depth', ]
def __init__(self, wordnet_corpus_reader):
self._wordnet_corpus_reader = wordnet_corpus_reader
# All of these attributes get initialized by
# WordNetCorpusReader._synset_from_pos_and_line()
self._pos = None
self._offset = None
self._name = None
self._frame_ids = []
self._lemmas = []
self._lemma_names = []
self._definition = None
self._examples = []
self._lexname = None # lexicographer name
self._pointers = defaultdict(set)
self._lemma_pointers = defaultdict(set)
def pos(self):
return self._pos
def offset(self):
return self._offset
def name(self):
return self._name
def frame_ids(self):
return self._frame_ids
def lemmas(self):
return self._lemmas
def lemma_names(self):
return self._lemma_names
def definition(self):
return self._definition
def examples(self):
return self._examples
def lexname(self):
return self._lexname
def _needs_root(self):
if self._pos == NOUN:
if self._wordnet_corpus_reader.get_version() == '1.6':
return True
else:
return False
elif self._pos == VERB:
return True
def root_hypernyms(self):
"""Get the topmost hypernyms of this synset in WordNet."""
result = []
seen = set()
todo = [self]
while todo:
next_synset = todo.pop()
if next_synset not in seen:
seen.add(next_synset)
next_hypernyms = next_synset.hypernyms() + \
next_synset.instance_hypernyms()
if not next_hypernyms:
result.append(next_synset)
else:
todo.extend(next_hypernyms)
return result
# Simpler implementation which makes incorrect assumption that
# hypernym hierarchy is acyclic:
#
# if not self.hypernyms():
# return [self]
# else:
# return list(set(root for h in self.hypernyms()
# for root in h.root_hypernyms()))
def max_depth(self):
"""
:return: The length of the longest hypernym path from this
synset to the root.
"""
if "_max_depth" not in self.__dict__:
hypernyms = self.hypernyms() + self.instance_hypernyms()
if not hypernyms:
self._max_depth = 0
else:
self._max_depth = 1 + max(h.max_depth() for h in hypernyms)
return self._max_depth
def min_depth(self):
"""
:return: The length of the shortest hypernym path from this
synset to the root.
"""
if "_min_depth" not in self.__dict__:
hypernyms = self.hypernyms() + self.instance_hypernyms()
if not hypernyms:
self._min_depth = 0
else:
self._min_depth = 1 + min(h.min_depth() for h in hypernyms)
return self._min_depth
def closure(self, rel, depth=-1):
"""Return the transitive closure of source under the rel
relationship, breadth-first
>>> from nltk.corpus import wordnet as wn
>>> dog = wn.synset('dog.n.01')
>>> hyp = lambda s:s.hypernyms()
>>> list(dog.closure(hyp))
[Synset('canine.n.02'), Synset('domestic_animal.n.01'),
Synset('carnivore.n.01'), Synset('animal.n.01'),
Synset('placental.n.01'), Synset('organism.n.01'),
Synset('mammal.n.01'), Synset('living_thing.n.01'),
Synset('vertebrate.n.01'), Synset('whole.n.02'),
Synset('chordate.n.01'), Synset('object.n.01'),
Synset('physical_entity.n.01'), Synset('entity.n.01')]
"""
from nltk.util import breadth_first
synset_offsets = []
for synset in breadth_first(self, rel, depth):
if synset._offset != self._offset:
if synset._offset not in synset_offsets:
synset_offsets.append(synset._offset)
yield synset
def hypernym_paths(self):
"""
Get the path(s) from this synset to the root, where each path is a
list of the synset nodes traversed on the way to the root.
:return: A list of lists, where each list gives the node sequence
connecting the initial ``Synset`` node and a root node.
"""
paths = []
hypernyms = self.hypernyms() + self.instance_hypernyms()
if len(hypernyms) == 0:
paths = [[self]]
for hypernym in hypernyms:
for ancestor_list in hypernym.hypernym_paths():
ancestor_list.append(self)
paths.append(ancestor_list)
return paths
def common_hypernyms(self, other):
"""
Find all synsets that are hypernyms of this synset and the
other synset.
:type other: Synset
:param other: other input synset.
:return: The synsets that are hypernyms of both synsets.
"""
self_synsets = set(self_synset
for self_synsets in self._iter_hypernym_lists()
for self_synset in self_synsets)
other_synsets = set(other_synset
for other_synsets in other._iter_hypernym_lists()
for other_synset in other_synsets)
return list(self_synsets.intersection(other_synsets))
def lowest_common_hypernyms(self, other, simulate_root=False, use_min_depth=False):
"""
Get a list of lowest synset(s) that both synsets have as a hypernym.
When `use_min_depth == False` this means that the synset which appears as a
hypernym of both `self` and `other` with the lowest maximum depth is returned
or, if there are multiple such synsets at the same depth, they are all returned.
However, if `use_min_depth == True` then the synset(s) which has/have the lowest
minimum depth and appear(s) in both paths is/are returned.
By setting the use_min_depth flag to True, the behavior of NLTK2 can be preserved.
This was changed in NLTK3 to give more accurate results in a small set of cases,
generally with synsets concerning people. (eg: 'chef.n.01', 'fireman.n.01', etc.)
This method is an implementation of Ted Pedersen's "Lowest Common Subsumer" method
from the Perl Wordnet module. It can return either "self" or "other" if they are a
hypernym of the other.
:type other: Synset
:param other: other input synset
:type simulate_root: bool
:param simulate_root: The various verb taxonomies do not
share a single root which disallows this metric from working for
synsets that are not connected. This flag (False by default)
creates a fake root that connects all the taxonomies. Set it
to True to enable this behavior. For the noun taxonomy,
there is usually a default root except for WordNet version 1.6.
If you are using wordnet 1.6, a fake root will need to be added
for nouns as well.
:type use_min_depth: bool
:param use_min_depth: This setting mimics older (v2) behavior of NLTK wordnet
If True, will use the min_depth function to calculate the lowest common
hypernyms. This is known to give strange results for some synset pairs
(eg: 'chef.n.01', 'fireman.n.01') but is retained for backwards compatibility
:return: The synsets that are the lowest common hypernyms of both synsets
"""
fake_synset = Synset(None)
fake_synset._name = '*ROOT*'
fake_synset.hypernyms = lambda: []
fake_synset.instance_hypernyms = lambda: []
if simulate_root:
self_hypernyms = chain(self._iter_hypernym_lists(), [[fake_synset]])
other_hypernyms = chain(other._iter_hypernym_lists(), [[fake_synset]])
else:
self_hypernyms = self._iter_hypernym_lists()
other_hypernyms = other._iter_hypernym_lists()
synsets = set(s for synsets in self_hypernyms for s in synsets)
others = set(s for synsets in other_hypernyms for s in synsets)
synsets.intersection_update(others)
try:
if use_min_depth:
max_depth = max(s.min_depth() for s in synsets)
unsorted_lch = [s for s in synsets if s.min_depth() == max_depth]
else:
max_depth = max(s.max_depth() for s in synsets)
unsorted_lch = [s for s in synsets if s.max_depth() == max_depth]
return sorted(unsorted_lch)
except ValueError:
return []
def hypernym_distances(self, distance=0, simulate_root=False):
"""
Get the path(s) from this synset to the root, counting the distance
of each node from the initial node on the way. A set of
(synset, distance) tuples is returned.
:type distance: int
:param distance: the distance (number of edges) from this hypernym to
the original hypernym ``Synset`` on which this method was called.
:return: A set of ``(Synset, int)`` tuples where each ``Synset`` is
a hypernym of the first ``Synset``.
"""
distances = set([(self, distance)])
for hypernym in self.hypernyms() + self.instance_hypernyms():
distances |= hypernym.hypernym_distances(distance+1, simulate_root=False)
if simulate_root:
fake_synset = Synset(None)
fake_synset._name = '*ROOT*'
fake_synset_distance = max(distances, key=itemgetter(1))[1]
distances.add((fake_synset, fake_synset_distance+1))
return distances
def shortest_path_distance(self, other, simulate_root=False):
"""
Returns the distance of the shortest path linking the two synsets (if
one exists). For each synset, all the ancestor nodes and their
distances are recorded and compared. The ancestor node common to both
synsets that can be reached with the minimum number of traversals is
used. If no ancestor nodes are common, None is returned. If a node is
compared with itself 0 is returned.
:type other: Synset
:param other: The Synset to which the shortest path will be found.
:return: The number of edges in the shortest path connecting the two
nodes, or None if no path exists.
"""
if self == other:
return 0
path_distance = None
dist_list1 = self.hypernym_distances(simulate_root=simulate_root)
dist_dict1 = {}
dist_list2 = other.hypernym_distances(simulate_root=simulate_root)
dist_dict2 = {}
# Transform each distance list into a dictionary. In cases where
# there are duplicate nodes in the list (due to there being multiple
# paths to the root) the duplicate with the shortest distance from
# the original node is entered.
for (l, d) in [(dist_list1, dist_dict1), (dist_list2, dist_dict2)]:
for (key, value) in l:
if key in d:
if value < d[key]:
d[key] = value
else:
d[key] = value
# For each ancestor synset common to both subject synsets, find the
# connecting path length. Return the shortest of these.
for synset1 in dist_dict1.keys():
for synset2 in dist_dict2.keys():
if synset1 == synset2:
new_distance = dist_dict1[synset1] + dist_dict2[synset2]
if path_distance is None or path_distance < 0 or new_distance < path_distance:
path_distance = new_distance
return path_distance
def tree(self, rel, depth=-1, cut_mark=None):
"""
>>> from nltk.corpus import wordnet as wn
>>> dog = wn.synset('dog.n.01')
>>> hyp = lambda s:s.hypernyms()
>>> from pprint import pprint
>>> pprint(dog.tree(hyp))
[Synset('dog.n.01'),
[Synset('canine.n.02'),
[Synset('carnivore.n.01'),
[Synset('placental.n.01'),
[Synset('mammal.n.01'),
[Synset('vertebrate.n.01'),
[Synset('chordate.n.01'),
[Synset('animal.n.01'),
[Synset('organism.n.01'),
[Synset('living_thing.n.01'),
[Synset('whole.n.02'),
[Synset('object.n.01'),
[Synset('physical_entity.n.01'),
[Synset('entity.n.01')]]]]]]]]]]]]],
[Synset('domestic_animal.n.01'),
[Synset('animal.n.01'),
[Synset('organism.n.01'),
[Synset('living_thing.n.01'),
[Synset('whole.n.02'),
[Synset('object.n.01'),
[Synset('physical_entity.n.01'), [Synset('entity.n.01')]]]]]]]]]
"""
tree = [self]
if depth != 0:
tree += [x.tree(rel, depth-1, cut_mark) for x in rel(self)]
elif cut_mark:
tree += [cut_mark]
return tree
# interface to similarity methods
def path_similarity(self, other, verbose=False, simulate_root=True):
"""
Path Distance Similarity:
Return a score denoting how similar two word senses are, based on the
shortest path that connects the senses in the is-a (hypernym/hyponym)
taxonomy. The score is in the range 0 to 1, except in those cases where
a path cannot be found (will only be true for verbs as there are many
distinct verb taxonomies), in which case None is returned. A score of
1 represents identity i.e. comparing a sense with itself will return 1.
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type simulate_root: bool
:param simulate_root: The various verb taxonomies do not
share a single root which disallows this metric from working for
synsets that are not connected. This flag (True by default)
creates a fake root that connects all the taxonomies. Set it
to false to disable this behavior. For the noun taxonomy,
there is usually a default root except for WordNet version 1.6.
If you are using wordnet 1.6, a fake root will be added for nouns
as well.
:return: A score denoting the similarity of the two ``Synset`` objects,
normally between 0 and 1. None is returned if no connecting path
could be found. 1 is returned if a ``Synset`` is compared with
itself.
"""
distance = self.shortest_path_distance(other, simulate_root=simulate_root and self._needs_root())
if distance is None or distance < 0:
return None
return 1.0 / (distance + 1)
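# Worked example of the formula above (illustrative only): a shortest hypernym
# path of length 4 between two synsets yields 1.0 / (4 + 1) = 0.2, while
# comparing a synset with itself (distance 0) yields 1.0.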
def lch_similarity(self, other, verbose=False, simulate_root=True):
"""
Leacock Chodorow Similarity:
Return a score denoting how similar two word senses are, based on the
shortest path that connects the senses (as above) and the maximum depth
of the taxonomy in which the senses occur. The relationship is given as
-log(p/2d) where p is the shortest path length and d is the taxonomy
depth.
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type simulate_root: bool
:param simulate_root: The various verb taxonomies do not
share a single root which disallows this metric from working for
synsets that are not connected. This flag (True by default)
creates a fake root that connects all the taxonomies. Set it
to false to disable this behavior. For the noun taxonomy,
there is usually a default root except for WordNet version 1.6.
If you are using wordnet 1.6, a fake root will be added for nouns
as well.
:return: A score denoting the similarity of the two ``Synset`` objects,
normally greater than 0. None is returned if no connecting path
could be found. If a ``Synset`` is compared with itself, the
maximum score is returned, which varies depending on the taxonomy
depth.
"""
if self._pos != other._pos:
raise WordNetError('Computing the lch similarity requires ' + \
'%s and %s to have the same part of speech.' % \
(self, other))
need_root = self._needs_root()
if self._pos not in self._wordnet_corpus_reader._max_depth:
self._wordnet_corpus_reader._compute_max_depth(self._pos, need_root)
depth = self._wordnet_corpus_reader._max_depth[self._pos]
distance = self.shortest_path_distance(other, simulate_root=simulate_root and need_root)
if distance is None or distance < 0 or depth == 0:
return None
return -math.log((distance + 1) / (2.0 * depth))
def wup_similarity(self, other, verbose=False, simulate_root=True):
"""
Wu-Palmer Similarity:
Return a score denoting how similar two word senses are, based on the
depth of the two senses in the taxonomy and that of their Least Common
Subsumer (most specific ancestor node). Previously, the scores computed
by this implementation did _not_ always agree with those given by
Pedersen's Perl implementation of WordNet Similarity. However, with
the addition of the simulate_root flag (see below), the scores for
verbs now almost always agree, although not always for nouns.
The LCS does not necessarily feature in the shortest path connecting
the two senses, as it is by definition the common ancestor deepest in
the taxonomy, not closest to the two senses. Typically, however, it
will so feature. Where multiple candidates for the LCS exist, that
whose shortest path to the root node is the longest will be selected.
Where the LCS has multiple paths to the root, the longer path is used
for the purposes of the calculation.
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type simulate_root: bool
:param simulate_root: The various verb taxonomies do not
share a single root which disallows this metric from working for
synsets that are not connected. This flag (True by default)
creates a fake root that connects all the taxonomies. Set it
to false to disable this behavior. For the noun taxonomy,
there is usually a default root except for WordNet version 1.6.
If you are using wordnet 1.6, a fake root will be added for nouns
as well.
:return: A float score denoting the similarity of the two ``Synset`` objects,
normally greater than zero. If no connecting path between the two
senses can be found, None is returned.
"""
need_root = self._needs_root()
# Note that to preserve behavior from NLTK2 we set use_min_depth=True
# It is possible that more accurate results could be obtained by
# removing this setting and it should be tested later on
subsumers = self.lowest_common_hypernyms(other, simulate_root=simulate_root and need_root, use_min_depth=True)
# If no LCS was found return None
if len(subsumers) == 0:
return None
subsumer = subsumers[0]
# Get the longest path from the LCS to the root,
# including a correction:
# - add one because the calculations include both the start and end
# nodes
depth = subsumer.max_depth() + 1
# Note: No need for an additional add-one correction for non-nouns
# to account for an imaginary root node because that is now automatically
# handled by simulate_root
# if subsumer._pos != NOUN:
# depth += 1
# Get the shortest path from the LCS to each of the synsets it is
# subsuming. Add this to the LCS path length to get the path
# length from each synset to the root.
len1 = self.shortest_path_distance(subsumer, simulate_root=simulate_root and need_root)
len2 = other.shortest_path_distance(subsumer, simulate_root=simulate_root and need_root)
if len1 is None or len2 is None:
return None
len1 += depth
len2 += depth
return (2.0 * depth) / (len1 + len2)
def res_similarity(self, other, ic, verbose=False):
"""
Resnik Similarity:
Return a score denoting how similar two word senses are, based on the
Information Content (IC) of the Least Common Subsumer (most specific
ancestor node).
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type ic: dict
:param ic: an information content object (as returned by ``nltk.corpus.wordnet_ic.ic()``).
:return: A float score denoting the similarity of the two ``Synset`` objects.
Synsets whose LCS is the root node of the taxonomy will have a
score of 0 (e.g. N['dog'][0] and N['table'][0]).
"""
ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
return lcs_ic
def jcn_similarity(self, other, ic, verbose=False):
"""
Jiang-Conrath Similarity:
Return a score denoting how similar two word senses are, based on the
Information Content (IC) of the Least Common Subsumer (most specific
ancestor node) and that of the two input Synsets. The relationship is
given by the equation 1 / (IC(s1) + IC(s2) - 2 * IC(lcs)).
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type ic: dict
:param ic: an information content object (as returned by ``nltk.corpus.wordnet_ic.ic()``).
:return: A float score denoting the similarity of the two ``Synset`` objects.
"""
if self == other:
return _INF
ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
# If either of the input synsets is the root synset, or has a
# frequency of 0 (sparse data problem), return 0.
if ic1 == 0 or ic2 == 0:
return 0
ic_difference = ic1 + ic2 - 2 * lcs_ic
if ic_difference == 0:
return _INF
return 1 / ic_difference
def lin_similarity(self, other, ic, verbose=False):
"""
Lin Similarity:
Return a score denoting how similar two word senses are, based on the
Information Content (IC) of the Least Common Subsumer (most specific
ancestor node) and that of the two input Synsets. The relationship is
given by the equation 2 * IC(lcs) / (IC(s1) + IC(s2)).
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type ic: dict
:param ic: an information content object (as returned by ``nltk.corpus.wordnet_ic.ic()``).
:return: A float score denoting the similarity of the two ``Synset`` objects,
in the range 0 to 1.
"""
ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
return (2.0 * lcs_ic) / (ic1 + ic2)
def _iter_hypernym_lists(self):
"""
:return: An iterator over ``Synset`` objects that are either proper
hypernyms or instance of hypernyms of the synset.
"""
todo = [self]
seen = set()
while todo:
for synset in todo:
seen.add(synset)
yield todo
todo = [hypernym
for synset in todo
for hypernym in (synset.hypernyms() + \
synset.instance_hypernyms())
if hypernym not in seen]
def __repr__(self):
return "%s('%s')" % (type(self).__name__, self._name)
def _related(self, relation_symbol):
get_synset = self._wordnet_corpus_reader._synset_from_pos_and_offset
pointer_tuples = self._pointers[relation_symbol]
return sorted([get_synset(pos, offset) for pos, offset in pointer_tuples])
######################################################################
## WordNet Corpus Reader
######################################################################
class WordNetCorpusReader(CorpusReader):
"""
A corpus reader used to access wordnet or its variants.
"""
_ENCODING = 'utf8'
#{ Part-of-speech constants
ADJ, ADJ_SAT, ADV, NOUN, VERB = 'a', 's', 'r', 'n', 'v'
#}
#{ Filename constants
_FILEMAP = {ADJ: 'adj', ADV: 'adv', NOUN: 'noun', VERB: 'verb'}
#}
#{ Part of speech constants
_pos_numbers = {NOUN: 1, VERB: 2, ADJ: 3, ADV: 4, ADJ_SAT: 5}
_pos_names = dict(tup[::-1] for tup in _pos_numbers.items())
#}
#: A list of file identifiers for all the fileids used by this
#: corpus reader.
_FILES = ('cntlist.rev', 'lexnames', 'index.sense',
'index.adj', 'index.adv', 'index.noun', 'index.verb',
'data.adj', 'data.adv', 'data.noun', 'data.verb',
'adj.exc', 'adv.exc', 'noun.exc', 'verb.exc', )
def __init__(self, root):
"""
Construct a new wordnet corpus reader, with the given root
directory.
"""
super(WordNetCorpusReader, self).__init__(root, self._FILES,
encoding=self._ENCODING)
self._lemma_pos_offset_map = defaultdict(dict)
"""A index that provides the file offset
Map from lemma -> pos -> synset_index -> offset"""
self._synset_offset_cache = defaultdict(dict)
"""A cache so we don't have to reconstuct synsets
Map from pos -> offset -> synset"""
self._max_depth = defaultdict(dict)
"""A lookup for the maximum depth of each part of speech. Useful for
the lch similarity metric.
"""
self._data_file_map = {}
self._exception_map = {}
self._lexnames = []
self._key_count_file = None
self._key_synset_file = None
# Load the lexnames
for i, line in enumerate(self.open('lexnames')):
index, lexname, _ = line.split()
assert int(index) == i
self._lexnames.append(lexname)
# Load the indices for lemmas and synset offsets
self._load_lemma_pos_offset_map()
# load the exception file data into memory
self._load_exception_map()
def _load_lemma_pos_offset_map(self):
for suffix in self._FILEMAP.values():
# parse each line of the file (ignoring comment lines)
for i, line in enumerate(self.open('index.%s' % suffix)):
if line.startswith(' '):
continue
_iter = iter(line.split())
_next_token = lambda: next(_iter)
try:
# get the lemma and part-of-speech
lemma = _next_token()
pos = _next_token()
# get the number of synsets for this lemma
n_synsets = int(_next_token())
assert n_synsets > 0
# get the pointer symbols for all synsets of this lemma
n_pointers = int(_next_token())
_ = [_next_token() for _ in xrange(n_pointers)]
# same as number of synsets
n_senses = int(_next_token())
assert n_synsets == n_senses
# get number of senses ranked according to frequency
_ = int(_next_token())
# get synset offsets
synset_offsets = [int(_next_token()) for _ in xrange(n_synsets)]
# raise more informative error with file name and line number
except (AssertionError, ValueError) as e:
tup = ('index.%s' % suffix), (i + 1), e
raise WordNetError('file %s, line %i: %s' % tup)
# map lemmas and parts of speech to synsets
self._lemma_pos_offset_map[lemma][pos] = synset_offsets
if pos == ADJ:
self._lemma_pos_offset_map[lemma][ADJ_SAT] = synset_offsets
def _load_exception_map(self):
# load the exception file data into memory
for pos, suffix in self._FILEMAP.items():
self._exception_map[pos] = {}
for line in self.open('%s.exc' % suffix):
terms = line.split()
self._exception_map[pos][terms[0]] = terms[1:]
self._exception_map[ADJ_SAT] = self._exception_map[ADJ]
def _compute_max_depth(self, pos, simulate_root):
"""
Compute the max depth for the given part of speech. This is
used by the lch similarity metric.
"""
depth = 0
for ii in self.all_synsets(pos):
try:
depth = max(depth, ii.max_depth())
except RuntimeError:
print(ii)
if simulate_root:
depth += 1
self._max_depth[pos] = depth
def get_version(self):
fh = self._data_file(ADJ)
for line in fh:
match = re.search(r'WordNet (\d+\.\d+) Copyright', line)
if match is not None:
version = match.group(1)
fh.seek(0)
return version
#////////////////////////////////////////////////////////////
# Loading Lemmas
#////////////////////////////////////////////////////////////
def lemma(self, name):
# e.g.: '.45_caliber.a.01..45_caliber'
separator = SENSENUM_RE.search(name).start()
synset_name, lemma_name = name[:separator+3], name[separator+4:]
synset = self.synset(synset_name)
for lemma in synset._lemmas:
if lemma._name == lemma_name:
return lemma
raise WordNetError('no lemma %r in %r' % (lemma_name, synset_name))
def lemma_from_key(self, key):
# Sense keys are stored in lower-case, so normalize the input key
key = key.lower()
lemma_name, lex_sense = key.split('%')
pos_number, lexname_index, lex_id, _, _ = lex_sense.split(':')
pos = self._pos_names[int(pos_number)]
# open the key -> synset file if necessary
if self._key_synset_file is None:
self._key_synset_file = self.open('index.sense')
# Find the synset for the lemma.
synset_line = _binary_search_file(self._key_synset_file, key)
if not synset_line:
raise WordNetError("No synset found for key %r" % key)
offset = int(synset_line.split()[1])
synset = self._synset_from_pos_and_offset(pos, offset)
# return the corresponding lemma
for lemma in synset._lemmas:
if lemma._key == key:
return lemma
raise WordNetError("No lemma found for for key %r" % key)
#////////////////////////////////////////////////////////////
# Loading Synsets
#////////////////////////////////////////////////////////////
def synset(self, name):
# split name into lemma, part of speech and synset number
lemma, pos, synset_index_str = name.lower().rsplit('.', 2)
synset_index = int(synset_index_str) - 1
# get the offset for this synset
try:
offset = self._lemma_pos_offset_map[lemma][pos][synset_index]
except KeyError:
message = 'no lemma %r with part of speech %r'
raise WordNetError(message % (lemma, pos))
except IndexError:
n_senses = len(self._lemma_pos_offset_map[lemma][pos])
message = "lemma %r with part of speech %r has only %i %s"
if n_senses == 1:
tup = lemma, pos, n_senses, "sense"
else:
tup = lemma, pos, n_senses, "senses"
raise WordNetError(message % tup)
# load synset information from the appropriate file
synset = self._synset_from_pos_and_offset(pos, offset)
# some basic sanity checks on loaded attributes
if pos == 's' and synset._pos == 'a':
message = ('adjective satellite requested but only plain '
'adjective found for lemma %r')
raise WordNetError(message % lemma)
assert synset._pos == pos or (pos == 'a' and synset._pos == 's')
# Return the synset object.
return synset
def _data_file(self, pos):
"""
Return an open file pointer for the data file for the given
part of speech.
"""
if pos == ADJ_SAT:
pos = ADJ
if self._data_file_map.get(pos) is None:
fileid = 'data.%s' % self._FILEMAP[pos]
self._data_file_map[pos] = self.open(fileid)
return self._data_file_map[pos]
def _synset_from_pos_and_offset(self, pos, offset):
# Check to see if the synset is in the cache
if offset in self._synset_offset_cache[pos]:
return self._synset_offset_cache[pos][offset]
data_file = self._data_file(pos)
data_file.seek(offset)
data_file_line = data_file.readline()
synset = self._synset_from_pos_and_line(pos, data_file_line)
assert synset._offset == offset
self._synset_offset_cache[pos][offset] = synset
return synset
def _synset_from_pos_and_line(self, pos, data_file_line):
# Construct a new (empty) synset.
synset = Synset(self)
# parse the entry for this synset
try:
# parse out the definitions and examples from the gloss
columns_str, gloss = data_file_line.split('|')
gloss = gloss.strip()
definitions = []
for gloss_part in gloss.split(';'):
gloss_part = gloss_part.strip()
if gloss_part.startswith('"'):
synset._examples.append(gloss_part.strip('"'))
else:
definitions.append(gloss_part)
synset._definition = '; '.join(definitions)
# split the other info into fields
_iter = iter(columns_str.split())
_next_token = lambda: next(_iter)
# get the offset
synset._offset = int(_next_token())
# determine the lexicographer file name
lexname_index = int(_next_token())
synset._lexname = self._lexnames[lexname_index]
# get the part of speech
synset._pos = _next_token()
# create Lemma objects for each lemma
n_lemmas = int(_next_token(), 16)
for _ in xrange(n_lemmas):
# get the lemma name
lemma_name = _next_token()
# get the lex_id (used for sense_keys)
lex_id = int(_next_token(), 16)
# If the lemma has a syntactic marker, extract it.
m = re.match(r'(.*?)(\(.*\))?$', lemma_name)
lemma_name, syn_mark = m.groups()
# create the lemma object
lemma = Lemma(self, synset, lemma_name, lexname_index,
lex_id, syn_mark)
synset._lemmas.append(lemma)
synset._lemma_names.append(lemma._name)
# collect the pointer tuples
n_pointers = int(_next_token())
for _ in xrange(n_pointers):
symbol = _next_token()
offset = int(_next_token())
pos = _next_token()
lemma_ids_str = _next_token()
if lemma_ids_str == '0000':
synset._pointers[symbol].add((pos, offset))
else:
source_index = int(lemma_ids_str[:2], 16) - 1
target_index = int(lemma_ids_str[2:], 16) - 1
source_lemma_name = synset._lemmas[source_index]._name
lemma_pointers = synset._lemma_pointers
tups = lemma_pointers[source_lemma_name, symbol]
tups.add((pos, offset, target_index))
# read the verb frames
try:
frame_count = int(_next_token())
except StopIteration:
pass
else:
for _ in xrange(frame_count):
# read the plus sign
plus = _next_token()
assert plus == '+'
# read the frame and lemma number
frame_number = int(_next_token())
frame_string_fmt = VERB_FRAME_STRINGS[frame_number]
lemma_number = int(_next_token(), 16)
# lemma number of 00 means all words in the synset
if lemma_number == 0:
synset._frame_ids.append(frame_number)
for lemma in synset._lemmas:
lemma._frame_ids.append(frame_number)
lemma._frame_strings.append(frame_string_fmt %
lemma._name)
# only a specific word in the synset
else:
lemma = synset._lemmas[lemma_number - 1]
lemma._frame_ids.append(frame_number)
lemma._frame_strings.append(frame_string_fmt %
lemma._name)
# raise a more informative error with line text
except ValueError as e:
raise WordNetError('line %r: %s' % (data_file_line, e))
# set sense keys for Lemma objects - note that this has to be
# done afterwards so that the relations are available
for lemma in synset._lemmas:
if synset._pos == ADJ_SAT:
head_lemma = synset.similar_tos()[0]._lemmas[0]
head_name = head_lemma._name
head_id = '%02d' % head_lemma._lex_id
else:
head_name = head_id = ''
tup = (lemma._name, WordNetCorpusReader._pos_numbers[synset._pos],
lemma._lexname_index, lemma._lex_id, head_name, head_id)
lemma._key = ('%s%%%d:%02d:%02d:%s:%s' % tup).lower()
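# The pattern above produces sense keys of the form 'dog%1:05:00::'
# (lemma, pos number, lexname index, lex_id, and head word/id fields that
# are empty except for adjective satellites). The concrete key shown here
# is only an illustration.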
# the canonical name is based on the first lemma
lemma_name = synset._lemmas[0]._name.lower()
offsets = self._lemma_pos_offset_map[lemma_name][synset._pos]
sense_index = offsets.index(synset._offset)
tup = lemma_name, synset._pos, sense_index + 1
synset._name = '%s.%s.%02i' % tup
return synset
#////////////////////////////////////////////////////////////
# Retrieve synsets and lemmas.
#////////////////////////////////////////////////////////////
def synsets(self, lemma, pos=None):
"""Load all synsets with a given lemma and part of speech tag.
If no pos is specified, all synsets for all parts of speech
will be loaded.
"""
lemma = lemma.lower()
get_synset = self._synset_from_pos_and_offset
index = self._lemma_pos_offset_map
if pos is None:
pos = POS_LIST
return [get_synset(p, offset)
for p in pos
for form in self._morphy(lemma, p)
for offset in index[form].get(p, [])]
def lemmas(self, lemma, pos=None):
"""Return all Lemma objects with a name matching the specified lemma
name and part of speech tag. Matches any part of speech tag if none is
specified."""
lemma = lemma.lower()
return [lemma_obj
for synset in self.synsets(lemma, pos)
for lemma_obj in synset._lemmas
if lemma_obj._name.lower() == lemma]
def all_lemma_names(self, pos=None):
"""Return all lemma names for all synsets for the given
part of speech tag. If pos is not specified, all synsets
for all parts of speech will be used.
"""
if pos is None:
return iter(self._lemma_pos_offset_map)
else:
return (lemma
for lemma in self._lemma_pos_offset_map
if pos in self._lemma_pos_offset_map[lemma])
def all_synsets(self, pos=None):
"""Iterate over all synsets with a given part of speech tag.
If no pos is specified, all synsets for all parts of speech
will be loaded.
"""
if pos is None:
pos_tags = self._FILEMAP.keys()
else:
pos_tags = [pos]
cache = self._synset_offset_cache
from_pos_and_line = self._synset_from_pos_and_line
# generate all synsets for each part of speech
for pos_tag in pos_tags:
# Open the file for reading. Note that we can not re-use
# the file pointers from self._data_file_map here, because
# we're defining an iterator, and those file pointers might
# be moved while we're not looking.
if pos_tag == ADJ_SAT:
pos_tag = ADJ
fileid = 'data.%s' % self._FILEMAP[pos_tag]
data_file = self.open(fileid)
try:
# generate synsets for each line in the POS file
offset = data_file.tell()
line = data_file.readline()
while line:
if not line[0].isspace():
if offset in cache[pos_tag]:
# See if the synset is cached
synset = cache[pos_tag][offset]
else:
# Otherwise, parse the line
synset = from_pos_and_line(pos_tag, line)
cache[pos_tag][offset] = synset
# adjective satellites are in the same file as
# adjectives so only yield the synset if it's actually
# a satellite
if pos_tag == ADJ_SAT:
if synset._pos == pos_tag:
yield synset
# for all other POS tags, yield all synsets (this means
# that adjectives also include adjective satellites)
else:
yield synset
offset = data_file.tell()
line = data_file.readline()
# close the extra file handle we opened
except:
data_file.close()
raise
else:
data_file.close()
#////////////////////////////////////////////////////////////
# Misc
#////////////////////////////////////////////////////////////
def lemma_count(self, lemma):
"""Return the frequency count for this Lemma"""
# open the count file if we haven't already
if self._key_count_file is None:
self._key_count_file = self.open('cntlist.rev')
# find the key in the counts file and return the count
line = _binary_search_file(self._key_count_file, lemma._key)
if line:
return int(line.rsplit(' ', 1)[-1])
else:
return 0
def path_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
return synset1.path_similarity(synset2, verbose, simulate_root)
path_similarity.__doc__ = Synset.path_similarity.__doc__
def lch_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
return synset1.lch_similarity(synset2, verbose, simulate_root)
lch_similarity.__doc__ = Synset.lch_similarity.__doc__
def wup_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
return synset1.wup_similarity(synset2, verbose, simulate_root)
wup_similarity.__doc__ = Synset.wup_similarity.__doc__
def res_similarity(self, synset1, synset2, ic, verbose=False):
return synset1.res_similarity(synset2, ic, verbose)
res_similarity.__doc__ = Synset.res_similarity.__doc__
def jcn_similarity(self, synset1, synset2, ic, verbose=False):
return synset1.jcn_similarity(synset2, ic, verbose)
jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__
def lin_similarity(self, synset1, synset2, ic, verbose=False):
return synset1.lin_similarity(synset2, ic, verbose)
lin_similarity.__doc__ = Synset.lin_similarity.__doc__
#////////////////////////////////////////////////////////////
# Morphy
#////////////////////////////////////////////////////////////
# Morphy, adapted from Oliver Steele's pywordnet
def morphy(self, form, pos=None):
"""
Find a possible base form for the given form, with the given
part of speech, by checking WordNet's list of exceptional
forms, and by recursively stripping affixes for this part of
speech until a form in WordNet is found.
>>> from nltk.corpus import wordnet as wn
>>> print(wn.morphy('dogs'))
dog
>>> print(wn.morphy('churches'))
church
>>> print(wn.morphy('aardwolves'))
aardwolf
>>> print(wn.morphy('abaci'))
abacus
>>> wn.morphy('hardrock', wn.ADV)
>>> print(wn.morphy('book', wn.NOUN))
book
>>> wn.morphy('book', wn.ADJ)
"""
if pos is None:
morphy = self._morphy
analyses = chain(a for p in POS_LIST for a in morphy(form, p))
else:
analyses = self._morphy(form, pos)
# get the first one we find
first = list(islice(analyses, 1))
if len(first) == 1:
return first[0]
else:
return None
MORPHOLOGICAL_SUBSTITUTIONS = {
NOUN: [('s', ''), ('ses', 's'), ('ves', 'f'), ('xes', 'x'),
('zes', 'z'), ('ches', 'ch'), ('shes', 'sh'),
('men', 'man'), ('ies', 'y')],
VERB: [('s', ''), ('ies', 'y'), ('es', 'e'), ('es', ''),
('ed', 'e'), ('ed', ''), ('ing', 'e'), ('ing', '')],
ADJ: [('er', ''), ('est', ''), ('er', 'e'), ('est', 'e')],
ADV: []}
def _morphy(self, form, pos):
# from jordanbg:
# Given an original string x
# 1. Apply rules once to the input to get y1, y2, y3, etc.
# 2. Return all that are in the database
# 3. If there are no matches, keep applying rules until you either
# find a match or you can't go any further
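# Worked example (illustrative, assuming 'churches' is not in the noun
# exception list): for form = 'churches' with pos = NOUN, applying the
# rules once yields 'churche' (via 's' -> '') and 'church' (via
# 'ches' -> 'ch'); filtering against the lemma index keeps only 'church',
# which is returned at step 2.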
exceptions = self._exception_map[pos]
substitutions = self.MORPHOLOGICAL_SUBSTITUTIONS[pos]
def apply_rules(forms):
return [form[:-len(old)] + new
for form in forms
for old, new in substitutions
if form.endswith(old)]
def filter_forms(forms):
result = []
seen = set()
for form in forms:
if form in self._lemma_pos_offset_map:
if pos in self._lemma_pos_offset_map[form]:
if form not in seen:
result.append(form)
seen.add(form)
return result
# 0. Check the exception lists
if form in exceptions:
return filter_forms([form] + exceptions[form])
# 1. Apply rules once to the input to get y1, y2, y3, etc.
forms = apply_rules([form])
# 2. Return all that are in the database (and check the original too)
results = filter_forms([form] + forms)
if results:
return results
# 3. If there are no matches, keep applying rules until we find a match
while forms:
forms = apply_rules(forms)
results = filter_forms(forms)
if results:
return results
# Return an empty list if we can't find anything
return []
#////////////////////////////////////////////////////////////
# Create information content from corpus
#////////////////////////////////////////////////////////////
def ic(self, corpus, weight_senses_equally = False, smoothing = 1.0):
"""
Creates an information content lookup dictionary from a corpus.
:type corpus: CorpusReader
:param corpus: The corpus from which we create an information
content dictionary.
:type weight_senses_equally: bool
:param weight_senses_equally: If this is True, gives all
possible senses equal weight rather than dividing by the
number of possible senses. (If a word has 3 synsets, each
sense gets 0.3333 per appearance when this is False, and 1.0 when
it is True.)
:param smoothing: How much do we smooth synset counts (default is 1.0)
:type smoothing: float
:return: An information content dictionary
"""
counts = FreqDist()
for ww in corpus.words():
counts.inc(ww)
ic = {}
for pp in POS_LIST:
ic[pp] = defaultdict(float)
# Initialize the counts with the smoothing value
if smoothing > 0.0:
for ss in self.all_synsets():
pos = ss._pos
if pos == ADJ_SAT:
pos = ADJ
ic[pos][ss._offset] = smoothing
for ww in counts:
possible_synsets = self.synsets(ww)
if len(possible_synsets) == 0:
continue
# Distribute weight among possible synsets
weight = float(counts[ww])
if not weight_senses_equally:
weight /= float(len(possible_synsets))
for ss in possible_synsets:
pos = ss._pos
if pos == ADJ_SAT:
pos = ADJ
for level in ss._iter_hypernym_lists():
for hh in level:
ic[pos][hh._offset] += weight
# Add the weight to the root
ic[pos][0] += weight
return ic
######################################################################
## WordNet Information Content Corpus Reader
######################################################################
class WordNetICCorpusReader(CorpusReader):
"""
A corpus reader for the WordNet information content corpus.
"""
def __init__(self, root, fileids):
CorpusReader.__init__(self, root, fileids, encoding='utf8')
# this load function would be more efficient if the data was pickled
# Note that we can't use NLTK's frequency distributions because
# synsets are overlapping (each instance of a synset also counts
# as an instance of its hypernyms)
def ic(self, icfile):
"""
Load an information content file from the wordnet_ic corpus
and return a dictionary. This dictionary has just two keys,
NOUN and VERB, whose values are dictionaries that map from
synsets to information content values.
:type icfile: str
:param icfile: The name of the wordnet_ic file (e.g. "ic-brown.dat")
:return: An information content dictionary
"""
ic = {}
ic[NOUN] = defaultdict(float)
ic[VERB] = defaultdict(float)
for num, line in enumerate(self.open(icfile)):
if num == 0: # skip the header
continue
fields = line.split()
offset = int(fields[0][:-1])
value = float(fields[1])
pos = _get_pos(fields[0])
if len(fields) == 3 and fields[2] == "ROOT":
# Store root count.
ic[pos][0] += value
if value != 0:
ic[pos][offset] = value
return ic
######################################################################
# Similarity metrics
######################################################################
# TODO: Add in the option to manually add a new root node; this will be
# useful for verb similarity as there exist multiple verb taxonomies.
# More information about the metrics is available at
# http://marimba.d.umn.edu/similarity/measures.html
def path_similarity(synset1, synset2, verbose=False, simulate_root=True):
return synset1.path_similarity(synset2, verbose, simulate_root)
path_similarity.__doc__ = Synset.path_similarity.__doc__
def lch_similarity(synset1, synset2, verbose=False, simulate_root=True):
return synset1.lch_similarity(synset2, verbose, simulate_root)
lch_similarity.__doc__ = Synset.lch_similarity.__doc__
def wup_similarity(synset1, synset2, verbose=False, simulate_root=True):
return synset1.wup_similarity(synset2, verbose, simulate_root)
wup_similarity.__doc__ = Synset.wup_similarity.__doc__
def res_similarity(synset1, synset2, ic, verbose=False):
return synset1.res_similarity(synset2, ic, verbose)
res_similarity.__doc__ = Synset.res_similarity.__doc__
def jcn_similarity(synset1, synset2, ic, verbose=False):
return synset1.jcn_similarity(synset2, ic, verbose)
jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__
def lin_similarity(synset1, synset2, ic, verbose=False):
return synset1.lin_similarity(synset2, ic, verbose)
lin_similarity.__doc__ = Synset.lin_similarity.__doc__
def _lcs_ic(synset1, synset2, ic, verbose=False):
"""
Get the information content of the least common subsumer that has
the highest information content value. If two nodes have no
explicit common subsumer, assume that they share an artificial
root node that is the hypernym of all explicit roots.
:type synset1: Synset
:param synset1: First input synset.
:type synset2: Synset
:param synset2: Second input synset. Must be the same part of
speech as the first synset.
:type ic: dict
:param ic: an information content object (as returned by ``load_ic()``).
:return: The information content of the two synsets and their most
informative subsumer
"""
if synset1._pos != synset2._pos:
raise WordNetError('Computing the least common subsumer requires ' + \
'%s and %s to have the same part of speech.' % \
(synset1, synset2))
ic1 = information_content(synset1, ic)
ic2 = information_content(synset2, ic)
subsumers = synset1.common_hypernyms(synset2)
if len(subsumers) == 0:
subsumer_ic = 0
else:
subsumer_ic = max(information_content(s, ic) for s in subsumers)
if verbose:
print("> LCS Subsumer by content:", subsumer_ic)
return ic1, ic2, subsumer_ic
# Utility functions
def information_content(synset, ic):
try:
icpos = ic[synset._pos]
except KeyError:
msg = 'Information content file has no entries for part-of-speech: %s'
raise WordNetError(msg % synset._pos)
counts = icpos[synset._offset]
if counts == 0:
return _INF
else:
return -math.log(counts / icpos[0])
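# Illustrative arithmetic (assumed counts): if a synset has a smoothed
# count of 100 and the root count icpos[0] is 1000000.0, the information
# content is -log(100 / 1000000.0), roughly 9.21.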
# get the part of speech (NOUN or VERB) from the information content record
# (each identifier has a 'n' or 'v' suffix)
def _get_pos(field):
if field[-1] == 'n':
return NOUN
elif field[-1] == 'v':
return VERB
else:
msg = "Unidentified part of speech in WordNet Information Content file for field %s" % field
raise ValueError(msg)
# unload corpus after tests
def teardown_module(module=None):
from nltk.corpus import wordnet
wordnet._unload()
######################################################################
# Demo
######################################################################
def demo():
import nltk
print('loading wordnet')
wn = WordNetCorpusReader(nltk.data.find('corpora/wordnet'))
print('done loading')
S = wn.synset
L = wn.lemma
print('getting a synset for go')
move_synset = S('go.v.21')
print(move_synset.name(), move_synset.pos(), move_synset.lexname())
print(move_synset.lemma_names())
print(move_synset.definition())
print(move_synset.examples())
zap_n = ['zap.n.01']
zap_v = ['zap.v.01', 'zap.v.02', 'nuke.v.01', 'microwave.v.01']
def _get_synsets(synset_strings):
return [S(synset) for synset in synset_strings]
zap_n_synsets = _get_synsets(zap_n)
zap_v_synsets = _get_synsets(zap_v)
zap_synsets = set(zap_n_synsets + zap_v_synsets)
print(zap_n_synsets)
print(zap_v_synsets)
print("Navigations:")
print(S('travel.v.01').hypernyms())
print(S('travel.v.02').hypernyms())
print(S('travel.v.03').hypernyms())
print(L('zap.v.03.nuke').derivationally_related_forms())
print(L('zap.v.03.atomize').derivationally_related_forms())
print(L('zap.v.03.atomise').derivationally_related_forms())
print(L('zap.v.03.zap').derivationally_related_forms())
print(S('dog.n.01').member_holonyms())
print(S('dog.n.01').part_meronyms())
print(S('breakfast.n.1').hypernyms())
print(S('meal.n.1').hyponyms())
print(S('Austen.n.1').instance_hypernyms())
print(S('composer.n.1').instance_hyponyms())
print(S('faculty.n.2').member_meronyms())
print(S('copilot.n.1').member_holonyms())
print(S('table.n.2').part_meronyms())
print(S('course.n.7').part_holonyms())
print(S('water.n.1').substance_meronyms())
print(S('gin.n.1').substance_holonyms())
print(L('leader.n.1.leader').antonyms())
print(L('increase.v.1.increase').antonyms())
print(S('snore.v.1').entailments())
print(S('heavy.a.1').similar_tos())
print(S('light.a.1').attributes())
print(S('heavy.a.1').attributes())
print(L('English.a.1.English').pertainyms())
print(S('person.n.01').root_hypernyms())
print(S('sail.v.01').root_hypernyms())
print(S('fall.v.12').root_hypernyms())
print(S('person.n.01').lowest_common_hypernyms(S('dog.n.01')))
print(S('woman.n.01').lowest_common_hypernyms(S('girlfriend.n.02')))
print(S('dog.n.01').path_similarity(S('cat.n.01')))
print(S('dog.n.01').lch_similarity(S('cat.n.01')))
print(S('dog.n.01').wup_similarity(S('cat.n.01')))
wnic = WordNetICCorpusReader(nltk.data.find('corpora/wordnet_ic'),
'.*\.dat')
ic = wnic.ic('ic-brown.dat')
print(S('dog.n.01').jcn_similarity(S('cat.n.01'), ic))
ic = wnic.ic('ic-semcor.dat')
print(S('dog.n.01').lin_similarity(S('cat.n.01'), ic))
print(S('code.n.03').topic_domains())
print(S('pukka.a.01').region_domains())
print(S('freaky.a.01').usage_domains())
if __name__ == '__main__':
demo()
| mit |
avsd/django-allauth | allauth/socialaccount/providers/edmodo/tests.py | 37 | 1168 | from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import EdmodoProvider
class EdmodoTests(create_oauth2_tests(registry.by_id(EdmodoProvider.id))):
def get_mocked_response(self):
return MockedResponse(200, """
{
"url": "https://api.edmodo.com/users/74721257",
"id": 74721257,
"type": "teacher",
"username": "getacclaim-teacher1",
"user_title": null,
"first_name": "Edmodo Test",
"last_name": "Teacher",
"time_zone": "America/New_York",
"utc_offset": -18000,
"locale": "en",
"gender": null,
"start_level": null,
"end_level": null,
"about": null,
"premium": false,
"school": {"url": "https://api.edmodo.com/schools/559253", "id": 559253},
"verified_institution_member": true,
"coppa_verified": false,
"subjects": null,
"avatars": {
"small": "https://api.edmodo.com/users/74721257/avatar?type=small&u=670329ncqnf8fxv7tya24byn5",
"large": "https://api.edmodo.com/users/74721257/avatar?type=large&u=670329ncqnf8fxv7tya24byn5"
},
"email":"[email protected]",
"sync_enabled": false
}
""")
| mit |
jbloom/phyloExpCM | src/data/make_KOSI07_exchangeabilities.py | 1 | 5415 | """Makes ``HYPHY`` include batch file with KOSI07 exchangeabilities.
This script reads in the supplementary information file *ECMunrest.dat* from
Kosiol, Holmes, and Goldman, "An empirical model for protein sequence evolution,"
Mol Biol Evol, 24:1464-1479 (2007)
http://www.ncbi.nlm.nih.gov/pubmed/17400572
It converts the data in that file into a new file *KOSI07_exchangeabilities.ibf*
which is an include batch file for ``HYPHY``.
This file defines a ``HYPHY`` 61x61 matrix called *KOSI07_exchangeabilities* which
contains the exchangeabilities for all non-diagonal entries in the KOSI07 model.
If you include this file in a ``HYPHY`` batch file using::
#include "KOSI07_exchangeabilities.ibf";
and then then create a 61x1 vector called *codonfreqs* that contains the
codon equilibrium frequencies and then use the ``HYPHY`` command::
Model model = (KOSI07_exchangeabilities, codonfreqs, 1);
you will create a ``HYPHY`` substitution model that is reversible.
In *KOSI07_exchangeabilities* (and in the *codonfreqs* variable that you should
create) the codons are ordered in alphabetical order ("AAA", "AAC", ..., "TTT")
with the exception that the three stop codons ("TAA", "TAG", "TGA")
are excluded. This requires re-ording of the codons in the *ECMunrest.dat*
which orders them using another non-alphabetical ordering scheme.
The exchangeabilities all multiply a branch length denoted *t*, a
rate parameter denoted as *rateparaemter*, and also
multiply a parameter called *omega* if the mutation is non-synonymous
and a parameter called *kappa* a number of times equal to the number
of tranversions. This sets up the model denoted as *ECM+F+w+1k(tv)*
in the original Kosiol et al, 2007 paper::
KOSI07_exchangeabilities[58][60] := t * rateparameter * 16.0115;
KOSI07_exchangeabilities[60][58] := t * rateparameter * 16.0115;
KOSI07_exchangeabilities[57][60] := t * rateparameter * omega * kappa * 2.39582;
KOSI07_exchangeabilities[60][57] := t * rateparameter * omega * kappa * 2.39582;
"""
import mapmuts.sequtils
def NTransversions(codon1, codon2):
"""Returns the number of transversions that separate two codons."""
assert len(codon1) == len(codon2) == 3
ntransversions = 0
for i in range(3):
(nt1, nt2) = (codon1[i], codon2[i])
if nt1 == nt2:
pass # no mutation
elif (nt1 == 'A' and nt2 == 'G') or (nt1 == 'G' and nt2 == 'A') or (nt1 == 'C' and nt2 == 'T') or (nt1 == 'T' and nt2 == 'C'):
pass # transition
else:
ntransversions += 1
return ntransversions
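# Illustrative examples (not from the original source): by the rule above,
# NTransversions('AAA', 'AGA') == 0 because A<->G is a transition, while
# NTransversions('AAA', 'ACA') == 1 because A<->C is a transversion.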
def main():
"""Main body of script."""
infile = 'ECMunrest.dat'
outfile = 'KOSI07_exchangeabilities.ibf'
print "Reading exchangeabilities from %s and creating file %s." % (infile, outfile)
ncodons = 61
stopcodons = ['TAA', 'TAG', 'TGA']
nts = ['A', 'C', 'G', 'T'] # codons in alphabetical order
hyphy_codons = [] # list of codons in HYPHY (alphabetical) order
for nt1 in nts:
for nt2 in nts:
for nt3 in nts:
codon = "%s%s%s" % (nt1, nt2, nt3)
if codon not in stopcodons:
hyphy_codons.append(codon)
assert len(hyphy_codons) == ncodons
incodons = 'TTT TTC TTA TTG TCT TCC TCA TCG TAT TAC TGT TGC TGG CTT CTC CTA CTG CCT CCC CCA CCG CAT CAC CAA CAG CGT CGC CGA CGG ATT ATC ATA ATG ACT ACC ACA ACG AAT AAC AAA AAG AGT AGC AGA AGG GTT GTC GTA GTG GCT GCC GCA GCG GAT GAC GAA GAG GGT GGC GGA GGG'.split() # list with codons in the order in infile, as taken from that file
assert len(incodons) == ncodons
indexmapping = {} # maps infile index to outfile (hyphy) index
i_incodon = 0
for incodon in incodons:
indexmapping[i_incodon] = hyphy_codons.index(incodon)
i_incodon += 1
assert len(indexmapping) == ncodons
assert len(dict([(i, codon) for (codon, i) in indexmapping.iteritems()])) == ncodons
lines = open(infile).readlines()[ : ncodons - 1]
exchangeabilities = {} # indexed by (codon_1_index, codon_2_index) hyphy indices
f = open(outfile, 'w')
f.write('KOSI07_exchangeabilities = {%d, %d};\n' % (ncodons, ncodons))
for iline in range(ncodons - 1):
icodon1 = indexmapping[iline + 1]
entries = [float(x) for x in lines[iline].split()]
assert len(entries) == iline + 1
for ientry in range(iline + 1):
icodon2 = indexmapping[ientry]
assert icodon1 != icodon2, "identical codons"
codon1 = hyphy_codons[icodon1]
codon2 = hyphy_codons[icodon2]
aa1 = mapmuts.sequtils.Translate([('head', codon1)])
aa2 = mapmuts.sequtils.Translate([('head', codon2)])
if aa1 != aa2:
omega = "omega * "
else:
omega = ""
kappa = ''.join(["kappa * " for i in range(NTransversions(codon1, codon2))])
x = float(entries[ientry])
if x == 0:
f.write('KOSI07_exchangeabilities[%d][%d] := 0.0;\nKOSI07_exchangeabilities[%d][%d] := 0.0;\n' % (icodon1, icodon2, icodon2, icodon1))
else:
f.write('KOSI07_exchangeabilities[%d][%d] := t * rateparameter * %s%s%g;\nKOSI07_exchangeabilities[%d][%d] := t * rateparameter * %s%s%g;\n' % (icodon1, icodon2, omega, kappa, x, icodon2, icodon1, omega, kappa, x))
f.close()
main() # run the script
| gpl-3.0 |
unomena/tunobase | tunobase/poll/forms.py | 1 | 2978 | """
POLL APP
This module provides an interface into the poll and answer forms.
Classes:
PollAnswerForm
Functions:
n/a
Created on 26 Mar 2013
@author: michael
"""
from django import forms
from django.contrib import messages
from django.conf import settings
class PollAnswerForm(forms.Form):
"""Form for handling Poll answers."""
multiple_answers = forms.BooleanField(
widget=forms.HiddenInput, required=False
)
def __init__(self, *args, **kwargs):
"""Add poll to initialised variables."""
self.poll = kwargs.pop('poll', None)
self.multiple_answers = kwargs.pop('multiple_answers', False)
self.randomize_answers = kwargs.pop('randomize_answers', False)
super(PollAnswerForm, self).__init__(*args, **kwargs)
if self.poll is not None:
queryset = self.poll.answers.permitted()
if self.randomize_answers:
queryset = queryset.order_by('?')
if self.poll.multiple_choice:
self.fields['answers'] = forms.ModelMultipleChoiceField(
queryset=queryset,
widget=forms.CheckboxSelectMultiple
)
else:
self.fields['answers'] = forms.ModelChoiceField(
queryset=queryset,
widget=forms.RadioSelect,
empty_label=None
)
self.fields['answers'].widget.attrs.update({'class': 'required'})
if self.multiple_answers:
self.fields['multiple_answers'].initial = self.multiple_answers
def increment_vote_count(self, answer):
"""Increment a vote on a poll."""
answer.vote_count += 1
answer.save()
def save(self, request, cookie_name, pk):
"""
Handle saving of a vote on a poll. Ensure the user
hasn't previously voted on the same poll.
"""
if request.user.is_authenticated():
user = request.user
else:
user = None
multiple_answers_allowed = getattr(
settings, 'ALLOW_CERTAIN_POLL_MULTIPLE_ANSWERS', False
)
if multiple_answers_allowed and self.cleaned_data['multiple_answers']:
poll_voted = False
elif user is None:
poll_voted = request.COOKIES.get(cookie_name, False)
else:
poll_voted = user.polls_answered.filter(pk=pk).exists()
if poll_voted:
messages.error(request, 'You have already voted in this poll.')
else:
answers = self.cleaned_data['answers']
if isinstance(answers, (list, tuple)):
for answer in answers:
self.increment_vote_count(answer)
else:
self.increment_vote_count(answers)
if user is not None:
self.poll.users_answered.add(user)
messages.success(request, 'You have voted.')
| bsd-3-clause |
jacksondebuhr/dashmm | trace/readtrace/scripts/utilization.py | 1 | 3092 | #!/usr/bin/python
# so this is going to start as a fixed script, and once it works, will
# be improved into a more general use thing.
# Here are the possible parameters that will be made into arguments on the
# command line.
dbFile = 'trace.db'
nBins = 100
outFile = 'test.txt'
import sqlite3
# Create a connection to the db
conn = sqlite3.connect(dbFile)
# Now create a cursor
c = conn.cursor()
# Get the bounds of the event times
c.execute('SELECT min(TimeNs), max(TimeNs) FROM Event;')
row = c.fetchone()
assert row != None
(tMin, tMax) = row
tDelta = tMax - tMin
binDelta = tDelta / nBins
binBoundaries = [tMin + x * tDelta / nBins for x in range(0, nBins + 1)]
# the data we are collecting is a dictionary from segment type into the
# total utilization of that type.
utilization = {}
# We loop over segments, compute the bins each segment overlaps, and then
# make contributions to those bins.
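# Illustrative example (assumed numbers): a segment starting inside bin 3
# and ending inside bin 5 contributes (binBoundaries[4] - StartNs) to bin 3,
# a full bin width to bin 4, and (EndNs - binBoundaries[5]) to bin 5.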
for row in conn.execute('SELECT SegmentId, StartNs, EndNs FROM Segment;'):
# Give the segment type a set of zero bins if this is the first in that
# segment
if not row[0] in utilization:
utilization[row[0]] = [0 for x in range(0, nBins)]
# Compute which bins the segment overlaps
sBin = (row[1] - tMin) / binDelta
eBin = (row[2] - tMin) / binDelta
if eBin == nBins:
eBin = nBins - 1
# if they are the same, all contribution goes to one bin, otherwise, in
# each bin with overlap
if sBin == eBin:
utilization[row[0]][sBin] += row[2] - row[1]
else:
utilization[row[0]][sBin] += binBoundaries[sBin + 1] - row[1]
utilization[row[0]][eBin] += row[2] - binBoundaries[eBin]
for x in range(sBin + 1, eBin):
# should I just use binDelta here?
utilization[row[0]][x] += binBoundaries[x + 1] - binBoundaries[x]
# next get the number of workers
nWorkers = 1
for row in conn.execute('SELECT count(id) FROM Worker;'):
nWorkers = row[0]
# Now scale the ns times into utilization fractions
scaled = {}
for k, v in utilization.iteritems():
scaled[k] = [x / (float(binDelta) * nWorkers) for x in v]
# now we get the column names for the output
headings = {}
for row in conn.execute('SELECT * FROM Segmenttype;'):
if row[0] != 0:
headings[row[0]] = row[1]
# collect segment ids
segments = []
for k, v in headings.iteritems():
if k != 0:
segments.append(k)
# compute the total
total = [0 for x in range(0, nBins)]
for k, v in scaled.iteritems():
if k == 7:
continue # We skip ELCO as that double counts some event classes
for i in range(0, nBins):
total[i] += v[i]
# compute the bin central time in correct units (ms)
binTimes = [(binBoundaries[i] + binBoundaries[i + 1]) / (2.0 * 1.0e6)
for i in range(0, nBins)]
# now we put out the data
ofd = open(outFile, 'w')
ofd.write("Bin T[ms] " + " ".join([v[:-1] for (k, v) in headings.items()])
+ " Total\n")
for bidx in range(0, nBins):
ofd.write(str(bidx))
ofd.write(" " + str(binTimes[bidx]))
for segtype in segments:
ofd.write(" " + str(scaled[segtype][bidx]))
ofd.write(" " + str(total[bidx]) + "\n")
ofd.write("\n")
ofd.close()
| bsd-3-clause |
spirrello/spirrello-pynet-work | applied_python/lib/python2.7/site-packages/pysnmp/entity/rfc3413/ntforg.py | 4 | 16381 | #
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2016, Ilya Etingof <[email protected]>
# License: http://pysnmp.sf.net/license.html
#
import sys
from pyasn1.compat.octets import null
from pysnmp.entity.rfc3413 import config
from pysnmp.proto.proxy import rfc2576
from pysnmp.proto import rfc3411
from pysnmp.proto.api import v2c
from pysnmp.proto import error
from pysnmp.smi import view, rfc1902
from pysnmp import nextid
from pysnmp import debug
getNextHandle = nextid.Integer(0x7fffffff)
class NotificationOriginator:
acmID = 3 # default MIB access control method to use
def __init__(self, snmpContext=None):
self.__pendingReqs = {}
self.__pendingNotifications = {}
self.snmpContext = snmpContext # this is deprecated
def processResponsePdu(self, snmpEngine, messageProcessingModel,
securityModel, securityName, securityLevel,
contextEngineId, contextName, pduVersion,
PDU, statusInformation, sendPduHandle, cbInfo):
sendRequestHandle, cbFun, cbCtx = cbInfo
# 3.3.6d
if sendPduHandle not in self.__pendingReqs:
raise error.ProtocolError('Missing sendPduHandle %s' % sendPduHandle)
(origTransportDomain, origTransportAddress,
origMessageProcessingModel, origSecurityModel,
origSecurityName, origSecurityLevel, origContextEngineId,
origContextName, origPdu, origTimeout,
origRetryCount, origRetries) = self.__pendingReqs.pop(sendPduHandle)
snmpEngine.transportDispatcher.jobFinished(id(self))
if statusInformation:
debug.logger & debug.flagApp and debug.logger('processResponsePdu: sendRequestHandle %s, sendPduHandle %s statusInformation %s' % (sendRequestHandle, sendPduHandle, statusInformation))
if origRetries == origRetryCount:
debug.logger & debug.flagApp and debug.logger('processResponsePdu: sendRequestHandle %s, sendPduHandle %s retry count %d exceeded' % (sendRequestHandle, sendPduHandle, origRetries))
cbFun(snmpEngine, sendRequestHandle,
statusInformation['errorIndication'], None, cbCtx)
return
# Convert timeout in seconds into timeout in timer ticks
timeoutInTicks = float(origTimeout)/100/snmpEngine.transportDispatcher.getTimerResolution()
# User-side API assumes SMIv2
if messageProcessingModel == 0:
reqPDU = rfc2576.v2ToV1(origPdu)
pduVersion = 0
else:
reqPDU = origPdu
pduVersion = 1
# 3.3.6a
try:
sendPduHandle = snmpEngine.msgAndPduDsp.sendPdu(
snmpEngine, origTransportDomain, origTransportAddress,
origMessageProcessingModel, origSecurityModel,
origSecurityName, origSecurityLevel,
origContextEngineId, origContextName, pduVersion,
reqPDU, True, timeoutInTicks, self.processResponsePdu,
(sendRequestHandle, cbFun, cbCtx)
)
except error.StatusInformation:
statusInformation = sys.exc_info()[1]
debug.logger & debug.flagApp and debug.logger('processResponsePdu: sendRequestHandle %s: sendPdu() failed with %r ' % (sendRequestHandle, statusInformation))
cbFun(snmpEngine, sendRequestHandle,
statusInformation['errorIndication'], None, cbCtx)
return
snmpEngine.transportDispatcher.jobStarted(id(self))
debug.logger & debug.flagApp and debug.logger('processResponsePdu: sendRequestHandle %s, sendPduHandle %s, timeout %d, retry %d of %d' % (sendRequestHandle, sendPduHandle, origTimeout, origRetries, origRetryCount))
# 3.3.6b
self.__pendingReqs[sendPduHandle] = (
origTransportDomain, origTransportAddress,
origMessageProcessingModel, origSecurityModel,
origSecurityName, origSecurityLevel,
origContextEngineId, origContextName, origPdu,
origTimeout, origRetryCount, origRetries + 1
)
return
# 3.3.6c
# User-side API assumes SMIv2
if messageProcessingModel == 0:
PDU = rfc2576.v1ToV2(PDU, origPdu)
cbFun(snmpEngine, sendRequestHandle, None, PDU, cbCtx)
def sendPdu(self, snmpEngine, targetName, contextEngineId,
contextName, pdu, cbFun=None, cbCtx=None):
(transportDomain, transportAddress, timeout,
retryCount, params) = config.getTargetAddr(snmpEngine, targetName)
(messageProcessingModel, securityModel, securityName,
securityLevel) = config.getTargetParams(snmpEngine, params)
# User-side API assumes SMIv2
if messageProcessingModel == 0:
reqPDU = rfc2576.v2ToV1(pdu)
pduVersion = 0
else:
reqPDU = pdu
pduVersion = 1
# 3.3.5
if reqPDU.tagSet in rfc3411.confirmedClassPDUs:
# Convert timeout in seconds into timeout in timer ticks
timeoutInTicks = float(timeout)/100/snmpEngine.transportDispatcher.getTimerResolution()
sendRequestHandle = getNextHandle()
# 3.3.6a
sendPduHandle = snmpEngine.msgAndPduDsp.sendPdu(
snmpEngine, transportDomain, transportAddress,
messageProcessingModel, securityModel, securityName,
securityLevel, contextEngineId, contextName,
pduVersion, reqPDU, True, timeoutInTicks,
self.processResponsePdu, (sendRequestHandle, cbFun, cbCtx)
)
debug.logger & debug.flagApp and debug.logger('sendPdu: sendPduHandle %s, timeout %d' % (sendPduHandle, timeout))
# 3.3.6b
self.__pendingReqs[sendPduHandle] = (
transportDomain, transportAddress, messageProcessingModel,
securityModel, securityName, securityLevel, contextEngineId,
contextName, pdu, timeout, retryCount, True
)
snmpEngine.transportDispatcher.jobStarted(id(self))
else:
snmpEngine.msgAndPduDsp.sendPdu(
snmpEngine, transportDomain, transportAddress,
messageProcessingModel, securityModel,
securityName, securityLevel, contextEngineId,
contextName, pduVersion, reqPDU, False
)
sendRequestHandle = None
debug.logger & debug.flagApp and debug.logger('sendPdu: message sent')
return sendRequestHandle
def processResponseVarBinds(self, snmpEngine, sendRequestHandle,
errorIndication, pdu, cbCtx):
notificationHandle, cbFun, cbCtx = cbCtx
self.__pendingNotifications[notificationHandle].remove(sendRequestHandle)
debug.logger & debug.flagApp and debug.logger('processResponseVarBinds: notificationHandle %s, sendRequestHandle %s, errorIndication %s, pending requests %s' % (notificationHandle, sendRequestHandle, errorIndication, self.__pendingNotifications[notificationHandle]))
if not self.__pendingNotifications[notificationHandle]:
debug.logger & debug.flagApp and debug.logger('processResponseVarBinds: notificationHandle %s, sendRequestHandle %s -- completed' % (notificationHandle, sendRequestHandle))
del self.__pendingNotifications[notificationHandle]
cbFun(snmpEngine, sendRequestHandle, errorIndication,
pdu and v2c.apiPDU.getErrorStatus(pdu) or 0,
pdu and v2c.apiPDU.getErrorIndex(pdu, muteErrors=True) or 0,
pdu and v2c.apiPDU.getVarBinds(pdu) or (),
cbCtx)
#
# Higher-level API to Notification Originator. Supports multiple
# targets, automatic var-binding formation and is fully LCD-driven.
#
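# Minimal usage sketch (assumed names, not part of the original source):
# once target/notification entries exist in the LCD (e.g. created with the
# pysnmp.entity.config helpers), a notification could be pushed with
# something like:
#   ntfOrg = NotificationOriginator()
#   ntfOrg.sendVarBinds(snmpEngine, 'my-notification', None, '',
#                       [(v2c.ObjectIdentifier('1.3.6.1.2.1.1.3.0'),
#                         v2c.TimeTicks(12345))])
# The tag 'my-notification' and the var-bind shown are illustrative only.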
def sendVarBinds(self, snmpEngine, notificationTarget, contextEngineId,
contextName, varBinds=(), cbFun=None, cbCtx=None):
debug.logger & debug.flagApp and debug.logger('sendVarBinds: notificationTarget %s, contextEngineId %s, contextName "%s", varBinds %s' % (notificationTarget, contextEngineId or '<default>', contextName, varBinds))
if contextName:
__SnmpAdminString, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('SNMP-FRAMEWORK-MIB', 'SnmpAdminString')
contextName = __SnmpAdminString(contextName)
# 3.3
(notifyTag, notifyType) = config.getNotificationInfo(snmpEngine, notificationTarget)
notificationHandle = getNextHandle()
debug.logger & debug.flagApp and debug.logger('sendVarBinds: notificationHandle %s, notifyTag %s, notifyType %s' % (notificationHandle, notifyTag, notifyType))
varBinds = [(v2c.ObjectIdentifier(x), y) for x, y in varBinds]
# 3.3.2 & 3.3.3
snmpTrapOID, sysUpTime = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpTrapOID', 'sysUpTime')
for idx in range(len(varBinds)):
if idx and varBinds[idx][0] == sysUpTime.getName():
if varBinds[0][0] == sysUpTime.getName():
varBinds[0] = varBinds[idx]
else:
varBinds.insert(0, varBinds[idx])
del varBinds[idx]
if varBinds[0][0] != sysUpTime.getName():
varBinds.insert(0, (v2c.ObjectIdentifier(sysUpTime.getName()),
sysUpTime.getSyntax().clone()))
if len(varBinds) < 2 or varBinds[1][0] != snmpTrapOID.getName():
varBinds.insert(1, (v2c.ObjectIdentifier(snmpTrapOID.getName()),
snmpTrapOID.getSyntax()))
debug.logger & debug.flagApp and debug.logger('sendVarBinds: final varBinds %s' % (varBinds,))
for targetAddrName in config.getTargetNames(snmpEngine, notifyTag):
(transportDomain, transportAddress, timeout,
retryCount, params) = config.getTargetAddr(snmpEngine,
targetAddrName)
(messageProcessingModel, securityModel, securityName,
securityLevel) = config.getTargetParams(snmpEngine, params)
# 3.3.1 XXX
# XXX filtering's yet to be implemented
# filterProfileName = config.getNotifyFilterProfile(params)
# (filterSubtree, filterMask,
# filterType) = config.getNotifyFilter(filterProfileName)
debug.logger & debug.flagApp and debug.logger('sendVarBinds: notificationHandle %s, notifyTag %s yields: transportDomain %s, transportAddress %r, securityModel %s, securityName %s, securityLevel %s' % (notificationHandle, notifyTag, transportDomain, transportAddress, securityModel, securityName, securityLevel))
for varName, varVal in varBinds:
if varName in (sysUpTime.name, snmpTrapOID.name):
continue
try:
snmpEngine.accessControlModel[self.acmID].isAccessAllowed(
snmpEngine, securityModel, securityName,
securityLevel, 'notify', contextName, varName
)
debug.logger & debug.flagApp and debug.logger('sendVarBinds: ACL succeeded for OID %s securityName %s' % (varName, securityName))
except error.StatusInformation:
debug.logger & debug.flagApp and debug.logger('sendVarBinds: ACL denied access for OID %s securityName %s, droppping notification' % (varName, securityName))
return
# 3.3.4
if notifyType == 1:
pdu = v2c.SNMPv2TrapPDU()
elif notifyType == 2:
pdu = v2c.InformRequestPDU()
else:
raise error.ProtocolError('Unknown notify-type %r' % (notifyType,))
v2c.apiPDU.setDefaults(pdu)
v2c.apiPDU.setVarBinds(pdu, varBinds)
# 3.3.5
try:
sendRequestHandle = self.sendPdu(
snmpEngine, targetAddrName, contextEngineId,
contextName, pdu, self.processResponseVarBinds,
(notificationHandle, cbFun, cbCtx)
)
except error.StatusInformation:
statusInformation = sys.exc_info()[1]
debug.logger & debug.flagApp and debug.logger('sendVarBinds: sendRequestHandle %s: sendPdu() failed with %r' % (sendRequestHandle, statusInformation))
if notificationHandle not in self.__pendingNotifications or \
not self.__pendingNotifications[notificationHandle]:
if notificationHandle in self.__pendingNotifications:
del self.__pendingNotifications[notificationHandle]
if cbFun:
cbFun(snmpEngine, notificationHandle,
statusInformation['errorIndication'], 0, 0, (),
cbCtx)
return notificationHandle
debug.logger & debug.flagApp and debug.logger('sendVarBinds: notificationHandle %s, sendRequestHandle %s, timeout %d' % (notificationHandle, sendRequestHandle, timeout))
if notifyType == 2:
if notificationHandle not in self.__pendingNotifications:
self.__pendingNotifications[notificationHandle] = set()
self.__pendingNotifications[notificationHandle].add(sendRequestHandle)
debug.logger & debug.flagApp and debug.logger('sendVarBinds: notificationHandle %s, sendRequestHandle %s, notification(s) sent' % (notificationHandle, sendRequestHandle))
return notificationHandle
#
# Obsolete, compatibility interfaces.
#
def _sendNotificationCbFun(snmpEngine, sendRequestHandle, errorIndication,
errorStatus, errorIndex, varBinds, cbCtx):
cbFun, cbCtx = cbCtx
try:
# we need to pass response PDU information to user for INFORMs
cbFun(sendRequestHandle, errorIndication,
errorStatus, errorIndex, varBinds, cbCtx)
except TypeError:
# a backward compatible way of calling user function
cbFun(sendRequestHandle, errorIndication, cbCtx)
def _sendNotification(self, snmpEngine, notificationTarget, notificationName,
additionalVarBinds=(), cbFun=None, cbCtx=None,
contextName=null, instanceIndex=None):
if self.snmpContext is None:
raise error.ProtocolError('SNMP context not specified')
#
# Here we first expand trap OID into associated OBJECTS
# and then look them up at context-specific MIB
#
mibViewController = snmpEngine.getUserContext('mibViewController')
if not mibViewController:
mibViewController = view.MibViewController(snmpEngine.getMibBuilder())
snmpEngine.setUserContext(mibViewController=mibViewController)
# Support the following syntax:
# '1.2.3.4'
# (1,2,3,4)
# ('MIB', 'symbol')
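# For illustration only (a hedged sketch, not part of the original call flow),
# the three accepted forms would look like:
#   notificationName = '1.3.6.1.6.3.1.1.5.1'             # dotted-string OID
#   notificationName = (1, 3, 6, 1, 6, 3, 1, 1, 5, 1)    # OID given as a tuple
#   notificationName = ('SNMPv2-MIB', 'coldStart')       # ('MIB', 'symbol') pair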
if isinstance(notificationName, (tuple, list)) and \
notificationName and isinstance(notificationName[0], str):
notificationName = rfc1902.ObjectIdentity(*notificationName)
else:
notificationName = rfc1902.ObjectIdentity(notificationName)
varBinds = rfc1902.NotificationType(notificationName,
instanceIndex=instanceIndex)
varBinds.resolveWithMib(mibViewController)
mibInstrumController = self.snmpContext.getMibInstrum(contextName)
varBinds = varBinds[:1] + mibInstrumController.readVars(varBinds[1:])
return self.sendVarBinds(snmpEngine, notificationTarget,
self.snmpContext.contextEngineId,
contextName, varBinds + list(additionalVarBinds),
_sendNotificationCbFun, (cbFun, cbCtx))
# install compatibility wrapper
NotificationOriginator.sendNotification = _sendNotification
# XXX
# move/group/implement config setting/retrieval at a stand-alone module
| gpl-3.0 |
lightcn/odoo | addons/account_bank_statement_extensions/wizard/confirm_statement_line.py | 381 | 1490 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class confirm_statement_line(osv.osv_memory):
_name = 'confirm.statement.line'
_description = 'Confirm selected statement lines'
def confirm_lines(self, cr, uid, ids, context):
line_ids = context['active_ids']
line_obj = self.pool.get('account.bank.statement.line')
line_obj.write(cr, uid, line_ids, {'state': 'confirm'}, context=context)
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lck/SharpTAL | Docs/conf.py | 1 | 6218 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SharpTAL'
copyright = u'2010-2014 by Roman Lacko'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0'
# The full version, including alpha/beta/rc tags.
release = '3.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "SharpTAL %s documentation" % version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'SharpTALdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'SharpTAL.tex', u'SharpTAL Documentation',
u'Roman Lacko', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| apache-2.0 |
numerigraphe/odoo | addons/anonymization/anonymization.py | 77 | 28690 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import os
import base64
try:
import cPickle as pickle
except ImportError:
import pickle
import random
import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
from itertools import groupby
from operator import itemgetter
FIELD_STATES = [('clear', 'Clear'), ('anonymized', 'Anonymized'), ('not_existing', 'Not Existing'), ('new', 'New')]
ANONYMIZATION_STATES = FIELD_STATES + [('unstable', 'Unstable')]
WIZARD_ANONYMIZATION_STATES = [('clear', 'Clear'), ('anonymized', 'Anonymized'), ('unstable', 'Unstable')]
ANONYMIZATION_HISTORY_STATE = [('started', 'Started'), ('done', 'Done'), ('in_exception', 'Exception occurred')]
ANONYMIZATION_DIRECTION = [('clear -> anonymized', 'clear -> anonymized'), ('anonymized -> clear', 'anonymized -> clear')]
def group(lst, cols):
if isinstance(cols, basestring):
cols = [cols]
return dict((k, [v for v in itr]) for k, itr in groupby(sorted(lst, key=itemgetter(*cols)), itemgetter(*cols)))
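# Illustrative sketch (not from the original module): grouping a list of dicts
# by one column, e.g.
#   group([{'m': 'res.partner', 'f': 'name'}, {'m': 'res.partner', 'f': 'email'}], 'm')
#   -> {'res.partner': [{'m': 'res.partner', 'f': 'name'}, {'m': 'res.partner', 'f': 'email'}]}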
class ir_model_fields_anonymization(osv.osv):
_name = 'ir.model.fields.anonymization'
_rec_name = 'field_id'
_columns = {
'model_name': fields.char('Object Name', required=True),
'model_id': fields.many2one('ir.model', 'Object', ondelete='set null'),
'field_name': fields.char('Field Name', required=True),
'field_id': fields.many2one('ir.model.fields', 'Field', ondelete='set null'),
'state': fields.selection(selection=FIELD_STATES, string='Status', required=True, readonly=True),
}
_sql_constraints = [
('model_id_field_id_uniq', 'unique (model_name, field_name)', _("You cannot have two fields with the same name on the same object!")),
]
def _get_global_state(self, cr, uid, context=None):
ids = self.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
fields = self.browse(cr, uid, ids, context=context)
if not len(fields) or len(fields) == len([f for f in fields if f.state == 'clear']):
state = 'clear' # all fields are clear
elif len(fields) == len([f for f in fields if f.state == 'anonymized']):
state = 'anonymized' # all fields are anonymized
else:
state = 'unstable' # fields are mixed: this should be fixed
return state
def _check_write(self, cr, uid, context=None):
"""check that the field is created from the menu and not from an database update
otherwise the database update can crash:"""
if context is None:
context = {}
if context.get('manual'):
global_state = self._get_global_state(cr, uid, context=context)
if global_state == 'anonymized':
raise osv.except_osv('Error!', "The database is currently anonymized, you cannot create, modify or delete fields.")
elif global_state == 'unstable':
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to create, write or delete fields.")
raise osv.except_osv('Error!', msg)
return True
def _get_model_and_field_ids(self, cr, uid, vals, context=None):
model_and_field_ids = (False, False)
if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
ir_model_fields_obj = self.pool.get('ir.model.fields')
ir_model_obj = self.pool.get('ir.model')
model_ids = ir_model_obj.search(cr, uid, [('model', '=', vals['model_name'])], context=context)
if model_ids:
field_ids = ir_model_fields_obj.search(cr, uid, [('name', '=', vals['field_name']), ('model_id', '=', model_ids[0])], context=context)
if field_ids:
field_id = field_ids[0]
model_and_field_ids = (model_ids[0], field_id)
return model_and_field_ids
def create(self, cr, uid, vals, context=None):
# check field state: all should be clear before we can add a new field to anonymize:
self._check_write(cr, uid, context=context)
global_state = self._get_global_state(cr, uid, context=context)
if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
vals['model_id'], vals['field_id'] = self._get_model_and_field_ids(cr, uid, vals, context=context)
# check not existing fields:
if not vals.get('field_id'):
vals['state'] = 'not_existing'
else:
vals['state'] = global_state
res = super(ir_model_fields_anonymization, self).create(cr, uid, vals, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
# check field state: all should be clear before we can modify a field:
if not (len(vals.keys()) == 1 and vals.get('state') == 'clear'):
self._check_write(cr, uid, context=context)
if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
vals['model_id'], vals['field_id'] = self._get_model_and_field_ids(cr, uid, vals, context=context)
# check not existing fields:
if 'field_id' in vals:
if not vals.get('field_id'):
vals['state'] = 'not_existing'
else:
global_state = self._get_global_state(cr, uid, context)
if global_state != 'unstable':
vals['state'] = global_state
res = super(ir_model_fields_anonymization, self).write(cr, uid, ids, vals, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
# check field state: all should be clear before we can unlink a field:
self._check_write(cr, uid, context=context)
res = super(ir_model_fields_anonymization, self).unlink(cr, uid, ids, context=context)
return res
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
res = {'value': {
'field_name': False,
'field_id': False,
'model_name': False,
}}
if model_id:
ir_model_obj = self.pool.get('ir.model')
model_ids = ir_model_obj.search(cr, uid, [('id', '=', model_id)])
model_id = model_ids and model_ids[0] or None
model_name = model_id and ir_model_obj.browse(cr, uid, model_id).model or False
res['value']['model_name'] = model_name
return res
def onchange_model_name(self, cr, uid, ids, model_name, context=None):
res = {'value': {
'field_name': False,
'field_id': False,
'model_id': False,
}}
if model_name:
ir_model_obj = self.pool.get('ir.model')
model_ids = ir_model_obj.search(cr, uid, [('model', '=', model_name)])
model_id = model_ids and model_ids[0] or False
res['value']['model_id'] = model_id
return res
def onchange_field_name(self, cr, uid, ids, field_name, model_name):
res = {'value': {
'field_id': False,
}}
if field_name and model_name:
ir_model_fields_obj = self.pool.get('ir.model.fields')
field_ids = ir_model_fields_obj.search(cr, uid, [('name', '=', field_name), ('model', '=', model_name)])
field_id = field_ids and field_ids[0] or False
res['value']['field_id'] = field_id
return res
def onchange_field_id(self, cr, uid, ids, field_id, model_name):
res = {'value': {
'field_name': False,
}}
if field_id:
ir_model_fields_obj = self.pool.get('ir.model.fields')
field = ir_model_fields_obj.browse(cr, uid, field_id)
res['value']['field_name'] = field.name
return res
_defaults = {
'state': lambda *a: 'clear',
}
class ir_model_fields_anonymization_history(osv.osv):
_name = 'ir.model.fields.anonymization.history'
_order = "date desc"
_columns = {
'date': fields.datetime('Date', required=True, readonly=True),
'field_ids': fields.many2many('ir.model.fields.anonymization', 'anonymized_field_to_history_rel', 'field_id', 'history_id', 'Fields', readonly=True),
'state': fields.selection(selection=ANONYMIZATION_HISTORY_STATE, string='Status', required=True, readonly=True),
'direction': fields.selection(selection=ANONYMIZATION_DIRECTION, string='Direction', size=20, required=True, readonly=True),
'msg': fields.text('Message', readonly=True),
'filepath': fields.char(string='File path', readonly=True),
}
class ir_model_fields_anonymize_wizard(osv.osv_memory):
_name = 'ir.model.fields.anonymize.wizard'
def _get_state(self, cr, uid, ids, name, arg, context=None):
res = {}
state = self._get_state_value(cr, uid, context=None)
for id in ids:
res[id] = state
return res
def _get_summary(self, cr, uid, ids, name, arg, context=None):
res = {}
summary = self._get_summary_value(cr, uid, context)
for id in ids:
res[id] = summary
return res
_columns = {
'name': fields.char(string='File Name'),
'summary': fields.function(_get_summary, type='text', string='Summary'),
'file_export': fields.binary(string='Export'),
'file_import': fields.binary(string='Import', help="This is the file created by the anonymization process. It should have the '.pickle' extension."),
'state': fields.function(_get_state, string='Status', type='selection', selection=WIZARD_ANONYMIZATION_STATES, readonly=False),
'msg': fields.text(string='Message'),
}
def _get_state_value(self, cr, uid, context=None):
state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
return state
def _get_summary_value(self, cr, uid, context=None):
summary = u''
anon_field_obj = self.pool.get('ir.model.fields.anonymization')
ir_model_fields_obj = self.pool.get('ir.model.fields')
anon_field_ids = anon_field_obj.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
anon_fields = anon_field_obj.browse(cr, uid, anon_field_ids, context=context)
field_ids = [anon_field.field_id.id for anon_field in anon_fields if anon_field.field_id]
fields = ir_model_fields_obj.browse(cr, uid, field_ids, context=context)
fields_by_id = dict([(f.id, f) for f in fields])
for anon_field in anon_fields:
field = fields_by_id.get(anon_field.field_id.id)
values = {
'model_name': field.model_id.name,
'model_code': field.model_id.model,
'field_code': field.name,
'field_name': field.field_description,
'state': anon_field.state,
}
summary += u" * %(model_name)s (%(model_code)s) -> %(field_name)s (%(field_code)s): state: (%(state)s)\n" % values
return summary
def default_get(self, cr, uid, fields_list, context=None):
res = {}
res['name'] = '.pickle'
res['summary'] = self._get_summary_value(cr, uid, context)
res['state'] = self._get_state_value(cr, uid, context)
res['msg'] = _("""Before executing the anonymization process, you should make a backup of your database.""")
return res
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, *args, **kwargs):
state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
if context is None:
context = {}
step = context.get('step', 'new_window')
res = super(ir_model_fields_anonymize_wizard, self).fields_view_get(cr, uid, view_id, view_type, context, *args, **kwargs)
eview = etree.fromstring(res['arch'])
placeholder = eview.xpath("group[@name='placeholder1']")
if len(placeholder):
placeholder = placeholder[0]
if step == 'new_window' and state == 'clear':
# clicked in the menu and the fields are not anonymized: warn the admin that backuping the db is very important
placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('label', {'string': 'Warning'}))
eview.remove(placeholder)
elif step == 'new_window' and state == 'anonymized':
# clicked in the menu and the fields are already anonymized
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('field', {'name': 'file_import', 'required': "1"}))
placeholder.addnext(etree.Element('label', {'string': 'Anonymization file'}))
eview.remove(placeholder)
elif step == 'just_anonymized':
# we just ran the anonymization process, we need the file export field
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('field', {'name': 'file_export'}))
# we need to remove the button:
buttons = eview.xpath("button")
for button in buttons:
eview.remove(button)
# and add a message:
placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('label', {'string': 'Result'}))
# remove the placeholer:
eview.remove(placeholder)
elif step == 'just_desanonymized':
# we just reversed the anonymization process, we don't need any field
# we need to remove the button
buttons = eview.xpath("button")
for button in buttons:
eview.remove(button)
# and add a message
# and add a message:
placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('label', {'string': 'Result'}))
# remove the placeholer:
eview.remove(placeholder)
else:
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to do anything else.")
raise osv.except_osv('Error!', msg)
res['arch'] = etree.tostring(eview)
return res
def _raise_after_history_update(self, cr, uid, history_id, error_type, error_msg):
self.pool.get('ir.model.fields.anonymization.history').write(cr, uid, history_id, {
'state': 'in_exception',
'msg': error_msg,
})
raise osv.except_osv(error_type, error_msg)
def anonymize_database(self, cr, uid, ids, context=None):
"""Sets the 'anonymized' state to defined fields"""
# create a new history record:
anonymization_history_model = self.pool.get('ir.model.fields.anonymization.history')
vals = {
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'state': 'started',
'direction': 'clear -> anonymized',
}
history_id = anonymization_history_model.create(cr, uid, vals)
# check that all the defined fields are in the 'clear' state
state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
if state == 'anonymized':
self._raise_after_history_update(cr, uid, history_id, _('Error !'), _("The database is currently anonymized, you cannot anonymize it again."))
elif state == 'unstable':
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
# do the anonymization:
dirpath = os.environ.get('HOME') or os.getcwd()
rel_filepath = 'field_anonymization_%s_%s.pickle' % (cr.dbname, history_id)
abs_filepath = os.path.abspath(os.path.join(dirpath, rel_filepath))
ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
field_ids = ir_model_fields_anonymization_model.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
fields = ir_model_fields_anonymization_model.browse(cr, uid, field_ids, context=context)
if not fields:
msg = "No fields are going to be anonymized."
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
data = []
for field in fields:
model_name = field.model_id.model
field_name = field.field_id.name
field_type = field.field_id.ttype
table_name = self.pool[model_name]._table
# get the current value
sql = "select id, %s from %s" % (field_name, table_name)
cr.execute(sql)
records = cr.dictfetchall()
for record in records:
data.append({"model_id": model_name, "field_id": field_name, "id": record['id'], "value": record[field_name]})
# anonymize the value:
anonymized_value = None
sid = str(record['id'])
if field_type == 'char':
anonymized_value = 'xxx'+sid
elif field_type == 'selection':
anonymized_value = 'xxx'+sid
elif field_type == 'text':
anonymized_value = 'xxx'+sid
elif field_type == 'boolean':
anonymized_value = random.choice([True, False])
elif field_type == 'date':
anonymized_value = '2011-11-11'
elif field_type == 'datetime':
anonymized_value = '2011-11-11 11:11:11'
elif field_type == 'float':
anonymized_value = 0.0
elif field_type == 'integer':
anonymized_value = 0
elif field_type in ['binary', 'many2many', 'many2one', 'one2many', 'reference']: # cannot anonymize these kind of fields
msg = _("Cannot anonymize fields of these types: binary, many2many, many2one, one2many, reference.")
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
if anonymized_value is None:
self._raise_after_history_update(cr, uid, history_id, _('Error !'), _("Anonymized value is None. This cannot happen."))
sql = "update %(table)s set %(field)s = %%(anonymized_value)s where id = %%(id)s" % {
'table': table_name,
'field': field_name,
}
cr.execute(sql, {
'anonymized_value': anonymized_value,
'id': record['id']
})
# save pickle:
fn = open(abs_filepath, 'w')
pickle.dump(data, fn, pickle.HIGHEST_PROTOCOL)
# update the anonymization fields:
values = {
'state': 'anonymized',
}
ir_model_fields_anonymization_model.write(cr, uid, field_ids, values, context=context)
# add a result message in the wizard:
msgs = ["Anonymization successful.",
"",
"Donot forget to save the resulting file to a safe place because you will not be able to revert the anonymization without this file.",
"",
"This file is also stored in the %s directory. The absolute file path is: %s.",
]
msg = '\n'.join(msgs) % (dirpath, abs_filepath)
fn = open(abs_filepath, 'r')
self.write(cr, uid, ids, {
'msg': msg,
'file_export': base64.encodestring(fn.read()),
})
fn.close()
# update the history record:
anonymization_history_model.write(cr, uid, history_id, {
'field_ids': [[6, 0, field_ids]],
'msg': msg,
'filepath': abs_filepath,
'state': 'done',
})
# handle the view:
view_id = self._id_get(cr, uid, 'ir.ui.view', 'view_ir_model_fields_anonymize_wizard_form', 'anonymization')
return {
'res_id': ids[0],
'view_id': [view_id],
'view_type': 'form',
"view_mode": 'form',
'res_model': 'ir.model.fields.anonymize.wizard',
'type': 'ir.actions.act_window',
'context': {'step': 'just_anonymized'},
'target':'new',
}
def reverse_anonymize_database(self, cr, uid, ids, context=None):
"""Set the 'clear' state to defined fields"""
ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
anonymization_history_model = self.pool.get('ir.model.fields.anonymization.history')
# create a new history record:
vals = {
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'state': 'started',
'direction': 'anonymized -> clear',
}
history_id = anonymization_history_model.create(cr, uid, vals)
# check that all the defined fields are in the 'anonymized' state
state = ir_model_fields_anonymization_model._get_global_state(cr, uid, context=context)
if state == 'clear':
raise osv.except_osv('Error!', "The database is not currently anonymized, you cannot reverse the anonymization.")
elif state == 'unstable':
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
raise osv.except_osv('Error!', msg)
wizards = self.browse(cr, uid, ids, context=context)
for wizard in wizards:
if not wizard.file_import:
msg = _("It is not possible to reverse the anonymization process without supplying the anonymization export file.")
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
# reverse the anonymization:
# load the pickle file content into a data structure:
data = pickle.loads(base64.decodestring(wizard.file_import))
migration_fix_obj = self.pool.get('ir.model.fields.anonymization.migration.fix')
fix_ids = migration_fix_obj.search(cr, uid, [('target_version', '=', '8.0')])
fixes = migration_fix_obj.read(cr, uid, fix_ids, ['model_name', 'field_name', 'query', 'query_type', 'sequence'])
fixes = group(fixes, ('model_name', 'field_name'))
for line in data:
queries = []
table_name = self.pool[line['model_id']]._table if line['model_id'] in self.pool else None
# check if custom sql exists:
key = (line['model_id'], line['field_id'])
custom_updates = fixes.get(key)
if custom_updates:
custom_updates.sort(key=itemgetter('sequence'))
queries = [(record['query'], record['query_type']) for record in custom_updates if record['query_type']]
elif table_name:
queries = [("update %(table)s set %(field)s = %%(value)s where id = %%(id)s" % {
'table': table_name,
'field': line['field_id'],
}, 'sql')]
for query in queries:
if query[1] == 'sql':
sql = query[0]
cr.execute(sql, {
'value': line['value'],
'id': line['id']
})
elif query[1] == 'python':
raw_code = query[0]
code = raw_code % line
eval(code)
else:
raise Exception("Unknown query type '%s'. Valid types are: sql, python." % (query['query_type'], ))
# update the anonymization fields:
ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
field_ids = ir_model_fields_anonymization_model.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
values = {
'state': 'clear',
}
ir_model_fields_anonymization_model.write(cr, uid, field_ids, values, context=context)
# add a result message in the wizard:
msg = '\n'.join(["Successfully reversed the anonymization.",
"",
])
self.write(cr, uid, ids, {'msg': msg})
# update the history record:
anonymization_history_model.write(cr, uid, history_id, {
'field_ids': [[6, 0, field_ids]],
'msg': msg,
'filepath': False,
'state': 'done',
})
# handle the view:
view_id = self._id_get(cr, uid, 'ir.ui.view', 'view_ir_model_fields_anonymize_wizard_form', 'anonymization')
return {
'res_id': ids[0],
'view_id': [view_id],
'view_type': 'form',
"view_mode": 'form',
'res_model': 'ir.model.fields.anonymize.wizard',
'type': 'ir.actions.act_window',
'context': {'step': 'just_desanonymized'},
'target':'new',
}
def _id_get(self, cr, uid, model, id_str, mod):
if '.' in id_str:
mod, id_str = id_str.split('.')
try:
idn = self.pool.get('ir.model.data')._get_id(cr, uid, mod, id_str)
res = int(self.pool.get('ir.model.data').read(cr, uid, [idn], ['res_id'])[0]['res_id'])
except:
res = None
return res
class ir_model_fields_anonymization_migration_fix(osv.osv):
_name = 'ir.model.fields.anonymization.migration.fix'
_order = "sequence"
_columns = {
'target_version': fields.char('Target Version'),
'model_name': fields.char('Model'),
'field_name': fields.char('Field'),
'query': fields.text('Query'),
'query_type': fields.selection(string='Query', selection=[('sql', 'sql'), ('python', 'python')]),
'sequence': fields.integer('Sequence'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sv-dev1/odoo | addons/email_template/wizard/email_template_preview.py | 377 | 3851 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
class email_template_preview(osv.osv_memory):
_inherit = "email.template"
_name = "email_template.preview"
_description = "Email Template Preview"
def _get_records(self, cr, uid, context=None):
"""
Return Records of particular Email Template's Model
"""
if context is None:
context = {}
template_id = context.get('template_id', False)
if not template_id:
return []
email_template = self.pool.get('email.template')
template = email_template.browse(cr, uid, int(template_id), context=context)
template_object = template.model_id
model = self.pool[template_object.model]
record_ids = model.search(cr, uid, [], 0, 10, 'id', context=context)
default_id = context.get('default_res_id')
if default_id and default_id not in record_ids:
record_ids.insert(0, default_id)
return model.name_get(cr, uid, record_ids, context)
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
result = super(email_template_preview, self).default_get(cr, uid, fields, context=context)
email_template = self.pool.get('email.template')
template_id = context.get('template_id')
if 'res_id' in fields and not result.get('res_id'):
records = self._get_records(cr, uid, context=context)
result['res_id'] = records and records[0][0] or False # select first record as a Default
if template_id and 'model_id' in fields and not result.get('model_id'):
result['model_id'] = email_template.read(cr, uid, int(template_id), ['model_id'], context).get('model_id', False)
return result
_columns = {
'res_id': fields.selection(_get_records, 'Sample Document'),
'partner_ids': fields.many2many('res.partner', string='Recipients'),
}
def on_change_res_id(self, cr, uid, ids, res_id, context=None):
if context is None:
context = {'value': {}}
if not res_id or not context.get('template_id'):
return {'value': {}}
email_template = self.pool.get('email.template')
template_id = context.get('template_id')
template = email_template.browse(cr, uid, template_id, context=context)
# generate and get template values
mail_values = email_template.generate_email(cr, uid, template_id, res_id, context=context)
vals = dict((field, mail_values.get(field, False)) for field in ('email_from', 'email_to', 'email_cc', 'reply_to', 'subject', 'body_html', 'partner_to', 'partner_ids', 'attachment_ids'))
vals['name'] = template.name
return {'value': vals}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
andrescodas/casadi | docs/api/examples/SX/ssym.py | 3 | 1508 | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
#! SX.sym
#!======================
from casadi import *
#! Construct using a single name
#! =====================================
#! The names of the entries of the SX will be derived from the name provided as argument to SX.sym.
#! Without shape arguments, a 1-by-1 matrix is constructed:
x = SX.sym("x")
print(type(x), x)
#! Create a column matrix
print(SX.sym("x",2,1))
#! Create a row matrix
print(SX.sym("x",1,2))
#! Create a matrix
print(SX.sym("x",2,3))
| lgpl-3.0 |
zhouye/shareit | registration/tests/urls.py | 138 | 4645 | """
URLs used in the unit tests for django-registration.
You should not attempt to use these URLs in any sort of real or
development environment; instead, use
``registration/backends/default/urls.py``. This URLconf includes those
URLs, and also adds several additional URLs which serve no purpose
other than to test that optional keyword arguments are properly
handled.
"""
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from registration.views import activate
from registration.views import register
urlpatterns = patterns('',
# Test the 'activate' view with custom template
# name.
url(r'^activate-with-template-name/(?P<activation_key>\w+)/$',
activate,
{'template_name': 'registration/test_template_name.html',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_activate_template_name'),
# Test the 'activate' view with
# extra_context_argument.
url(r'^activate-extra-context/(?P<activation_key>\w+)/$',
activate,
{'extra_context': {'foo': 'bar', 'callable': lambda: 'called'},
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_activate_extra_context'),
# Test the 'activate' view with success_url argument.
url(r'^activate-with-success-url/(?P<activation_key>\w+)/$',
activate,
{'success_url': 'registration_test_custom_success_url',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_activate_success_url'),
# Test the 'register' view with custom template
# name.
url(r'^register-with-template-name/$',
register,
{'template_name': 'registration/test_template_name.html',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_register_template_name'),
# Test the'register' view with extra_context
# argument.
url(r'^register-extra-context/$',
register,
{'extra_context': {'foo': 'bar', 'callable': lambda: 'called'},
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_register_extra_context'),
# Test the 'register' view with custom URL for
# closed registration.
url(r'^register-with-disallowed-url/$',
register,
{'disallowed_url': 'registration_test_custom_disallowed',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_register_disallowed_url'),
# Set up a pattern which will correspond to the
# custom 'disallowed_url' above.
url(r'^custom-disallowed/$',
direct_to_template,
{'template': 'registration/registration_closed.html'},
name='registration_test_custom_disallowed'),
# Test the 'register' view with custom redirect
# on successful registration.
url(r'^register-with-success_url/$',
register,
{'success_url': 'registration_test_custom_success_url',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_register_success_url'
),
# Pattern for custom redirect set above.
url(r'^custom-success/$',
direct_to_template,
{'template': 'registration/test_template_name.html'},
name='registration_test_custom_success_url'),
(r'', include('registration.backends.default.urls')),
)
| mit |
IRI-Research/django | django/contrib/messages/tests/test_api.py | 114 | 1442 | from django.test import TestCase, RequestFactory
from django.contrib import messages
class DummyStorage(object):
"""
dummy message-store to test the api methods
"""
def __init__(self):
self.store = []
def add(self, level, message, extra_tags=''):
self.store.append(message)
class ApiTest(TestCase):
def setUp(self):
self.rf = RequestFactory()
self.request = self.rf.request()
self.storage = DummyStorage()
def test_ok(self):
msg = 'some message'
self.request._messages = self.storage
messages.add_message(self.request, messages.DEBUG, msg)
self.assertIn(msg, self.storage.store)
def test_request_is_none(self):
msg = 'some message'
self.request._messages = self.storage
with self.assertRaises(TypeError):
messages.add_message(None, messages.DEBUG, msg)
self.assertEqual([], self.storage.store)
def test_middleware_missing(self):
msg = 'some message'
with self.assertRaises(messages.MessageFailure):
messages.add_message(self.request, messages.DEBUG, msg)
self.assertEqual([], self.storage.store)
def test_middleware_missing_silently(self):
msg = 'some message'
messages.add_message(self.request, messages.DEBUG, msg,
fail_silently=True)
self.assertEqual([], self.storage.store)
| bsd-3-clause |
zstackorg/zstack-woodpecker | integrationtest/vm/simulator/test_change_vm_image_in_multihosts_env.py | 2 | 2482 | '''
New Integration Test for changing vm image.
@author: Xiaoshuang
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.host_operations as host_ops
import time
vm = None
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
test_util.test_dsc('Test Change VM Image In Multihosts Env')
global vm
image = test_lib.lib_get_image_by_name("centos")
vm = test_stub.create_vm(image_uuid=image.uuid)
last_l3network_uuid = test_lib.lib_get_l3s_uuid_by_vm(vm.get_vm())
last_primarystorage_uuid = test_lib.lib_get_root_volume(vm.get_vm()).primaryStorageUuid
last_host_uuid = test_lib.lib_get_vm_last_host(vm.get_vm()).uuid
image_uuid = test_lib.lib_get_image_by_name("image_for_sg_test").uuid
vm_uuid = vm.get_vm().uuid
host_ops.change_host_state(host_uuid = last_host_uuid, state = 'disable')
vm_ops.stop_vm(vm_uuid)
ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
# Disable vm's host. If ps is shared storage, the vm will be started on another host that meets the conditions and the operation of changing vm image will succeed.
if ps.type != 'LocalStorage':
vm_ops.change_vm_image(vm_uuid,image_uuid)
vm_ops.start_vm(vm_uuid)
#check whether the network config has changed
l3network_uuid_after = test_lib.lib_get_l3s_uuid_by_vm(vm.get_vm())
if l3network_uuid_after != last_l3network_uuid:
test_util.test_fail('Change VM Image Failed.The Network config has changed.')
#check whether primarystorage has changed
primarystorage_uuid_after = test_lib.lib_get_root_volume(vm.get_vm()).primaryStorageUuid
if primarystorage_uuid_after != last_primarystorage_uuid:
test_util.test_fail('Change VM Image Failed.Primarystorage has changed.')
vm.destroy()
test_util.test_pass('Change Vm Image Test Success In Multihosts Env Success')
# Disable vm's host. If ps is local storage, the operation of changing vm image will fail.
else:
try:
vm_ops.change_vm_image(vm_uuid, image_uuid)
except:
test_util.test_pass('Change Vm Image Test Success In Multihosts Env Success')
test_util.test_fail('Test Change VM Image In Multihosts Env Success Failed')
def error_cleanup():
global vm
if vm:
vm.destroy()
| apache-2.0 |
ZerpaTechnology/AsenZor | static/js/brython/Lib/linecache.py | 15 | 3998 | """Cache lines from files.
This is intended to read lines from modules imported -- hence if a filename
is not found, it will look down the module search path for a file by
that name.
"""
import sys
import os
import tokenize
__all__ = ["getline", "clearcache", "checkcache"]
def getline(filename, lineno, module_globals=None):
lines = getlines(filename, module_globals)
if 1 <= lineno <= len(lines):
return lines[lineno-1]
else:
return ''
# The cache
cache = {} # The cache
def clearcache():
"""Clear the cache entirely."""
global cache
cache = {}
def getlines(filename, module_globals=None):
"""Get the lines for a file from the cache.
Update the cache if it doesn't contain an entry for this file already."""
if filename in cache:
return cache[filename][2]
else:
return updatecache(filename, module_globals)
def checkcache(filename=None):
"""Discard cache entries that are out of date.
(This is not checked upon each call!)"""
if filename is None:
filenames = list(cache.keys())
else:
if filename in cache:
filenames = [filename]
else:
return
for filename in filenames:
size, mtime, lines, fullname = cache[filename]
if mtime is None:
continue # no-op for files loaded via a __loader__
try:
stat = os.stat(fullname)
except os.error:
del cache[filename]
continue
if size != stat.st_size or mtime != stat.st_mtime:
del cache[filename]
def updatecache(filename, module_globals=None):
"""Update a cache entry and return its list of lines.
If something's wrong, print a message, discard the cache entry,
and return an empty list."""
if filename in cache:
del cache[filename]
if not filename or (filename.startswith('<') and filename.endswith('>')):
return []
fullname = filename
try:
stat = os.stat(fullname)
except OSError:
basename = filename
# Try for a __loader__, if available
if module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
try:
data = get_source(name)
except (ImportError, IOError):
pass
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return []
cache[filename] = (
len(data), None,
[line+'\n' for line in data.splitlines()], fullname
)
return cache[filename][2]
# Try looking through the module search path, which is only useful
# when handling a relative filename.
if os.path.isabs(filename):
return []
for dirname in sys.path:
try:
fullname = os.path.join(dirname, basename)
except (TypeError, AttributeError):
# Not sufficiently string-like to do anything useful with.
continue
try:
stat = os.stat(fullname)
break
except os.error:
pass
else:
return []
try:
with tokenize.open(fullname) as fp:
lines = fp.readlines()
except IOError:
return []
if lines and not lines[-1].endswith('\n'):
lines[-1] += '\n'
size, mtime = stat.st_size, stat.st_mtime
cache[filename] = size, mtime, lines, fullname
return lines
| lgpl-3.0 |
fzimmermann89/pyload | module/lib/jinja2/environment.py | 62 | 43946 | # -*- coding: utf-8 -*-
"""
jinja2.environment
~~~~~~~~~~~~~~~~~~
Provides a class that holds runtime and parsing time options.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from jinja2 import nodes
from jinja2.defaults import *
from jinja2.lexer import get_lexer, TokenStream
from jinja2.parser import Parser
from jinja2.optimizer import optimize
from jinja2.compiler import generate
from jinja2.runtime import Undefined, new_context
from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
TemplatesNotFound
from jinja2.utils import import_string, LRUCache, Markup, missing, \
concat, consume, internalcode, _encode_filename
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
# the function to create jinja traceback objects. This is dynamically
# imported on the first exception in the exception handler.
_make_traceback = None
def get_spontaneous_environment(*args):
"""Return a new spontaneous environment. A spontaneous environment is an
unnamed and inaccessible (in theory) environment that is used for
templates generated from a string and not from the file system.
"""
try:
env = _spontaneous_environments.get(args)
except TypeError:
return Environment(*args)
if env is not None:
return env
_spontaneous_environments[args] = env = Environment(*args)
env.shared = True
return env
def create_cache(size):
"""Return the cache class for the given size."""
if size == 0:
return None
if size < 0:
return {}
return LRUCache(size)
def copy_cache(cache):
"""Create an empty copy of the given cache."""
if cache is None:
return None
elif type(cache) is dict:
return {}
return LRUCache(cache.capacity)
def load_extensions(environment, extensions):
"""Load the extensions from the list and bind it to the environment.
Returns a dict of instanciated environments.
"""
result = {}
for extension in extensions:
if isinstance(extension, basestring):
extension = import_string(extension)
result[extension.identifier] = extension(environment)
return result
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
assert issubclass(environment.undefined, Undefined), 'undefined must ' \
'be a subclass of undefined because filters depend on it.'
assert environment.block_start_string != \
environment.variable_start_string != \
environment.comment_start_string, 'block, variable and comment ' \
'start strings must be different'
assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
'newline_sequence set to unknown line ending string.'
return environment
class Environment(object):
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
Here the possible initialization parameters:
`block_start_string`
The string marking the begin of a block. Defaults to ``'{%'``.
`block_end_string`
The string marking the end of a block. Defaults to ``'%}'``.
`variable_start_string`
The string marking the begin of a print statement.
Defaults to ``'{{'``.
`variable_end_string`
The string marking the end of a print statement. Defaults to
``'}}'``.
`comment_start_string`
The string marking the begin of a comment. Defaults to ``'{#'``.
`comment_end_string`
The string marking the end of a comment. Defaults to ``'#}'``.
`line_statement_prefix`
If given and a string, this will be used as prefix for line based
statements. See also :ref:`line-statements`.
`line_comment_prefix`
If given and a string, this will be used as prefix for line based
based comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
should the optimizer be enabled? Default is `True`.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
`None` implicitly into an empty string here.
`autoescape`
If set to true the XML/HTML autoescaping feature is enabled by
default. For more details about auto escaping see
:class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return `True` or `False` depending on autoescape should be
enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
The size of the cache. Per default this is ``50`` which means
that if more than 50 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time, if the cache size is
``-1`` the cache will not be cleaned.
`auto_reload`
Some loaders load templates from locations where the template
sources may change (ie: file system or database). If
`auto_reload` is set to `True` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at jinja2.sandbox
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: these are currently EXPERIMENTAL undocumented features.
exception_handler = None
exception_formatter = None
def __init__(self,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False,
loader=None,
cache_size=50,
auto_reload=True,
bytecode_cache=None):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
# not change the order of arguments because it's used at least
# internally in those cases:
# - spontaneous environments (i18n extension and Template)
# - unittests
# If parameter changes are required only add parameters at the end
# and don't change the arguments (or the defaults!) of the arguments
# existing already.
# lexer / parser information
self.block_start_string = block_start_string
self.block_end_string = block_end_string
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
self.line_statement_prefix = line_statement_prefix
self.line_comment_prefix = line_comment_prefix
self.trim_blocks = trim_blocks
self.newline_sequence = newline_sequence
# runtime information
self.undefined = undefined
self.optimized = optimized
self.finalize = finalize
self.autoescape = autoescape
# defaults
self.filters = DEFAULT_FILTERS.copy()
self.tests = DEFAULT_TESTS.copy()
self.globals = DEFAULT_NAMESPACE.copy()
# set the loader provided
self.loader = loader
self.bytecode_cache = None
self.cache = create_cache(cache_size)
self.bytecode_cache = bytecode_cache
self.auto_reload = auto_reload
# load extensions
self.extensions = load_extensions(self, extensions)
_environment_sanity_check(self)
def add_extension(self, extension):
"""Adds an extension after the environment was created.
.. versionadded:: 2.5
"""
self.extensions.update(load_extensions(self, [extension]))
def extend(self, **attributes):
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
for key, value in attributes.iteritems():
if not hasattr(self, key):
setattr(self, key, value)
def overlay(self, block_start_string=missing, block_end_string=missing,
variable_start_string=missing, variable_end_string=missing,
comment_start_string=missing, comment_end_string=missing,
line_statement_prefix=missing, line_comment_prefix=missing,
trim_blocks=missing, extensions=missing, optimized=missing,
undefined=missing, finalize=missing, autoescape=missing,
loader=missing, cache_size=missing, auto_reload=missing,
bytecode_cache=missing):
"""Create a new overlay environment that shares all the data with the
current environment except for the cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
up completely. Not all attributes are truly linked; some are just
copied over, so modifications on the original environment may not be
reflected in the overlay.
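Illustrative usage, assuming `env` is an already configured
environment (the overridden values are only examples):
>>> overlay_env = env.overlay(autoescape=True, cache_size=100)
>>> overlay_env.linked_to is env
True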
"""
args = dict(locals())
del args['self'], args['cache_size'], args['extensions']
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
for key, value in args.iteritems():
if value is not missing:
setattr(rv, key, value)
if cache_size is not missing:
rv.cache = create_cache(cache_size)
else:
rv.cache = copy_cache(self.cache)
rv.extensions = {}
for key, value in self.extensions.iteritems():
rv.extensions[key] = value.bind(rv)
if extensions is not missing:
rv.extensions.update(load_extensions(rv, extensions))
return _environment_sanity_check(rv)
lexer = property(get_lexer, doc="The lexer for this environment.")
def iter_extensions(self):
"""Iterates over the extensions by priority."""
return iter(sorted(self.extensions.values(),
key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, basestring):
try:
attr = str(argument)
except:
pass
else:
try:
return getattr(obj, attr)
except AttributeError:
pass
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Get an item or attribute of an object but prefer the attribute.
Unlike :meth:`getitem` the attribute *must* be a bytestring.
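Illustrative comparison with :meth:`getitem` on a plain dict:
>>> env = Environment()
>>> env.getitem({'items': 42}, 'items')
42
>>> callable(env.getattr({'items': 42}, 'items'))
True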
"""
try:
return getattr(obj, attribute)
except AttributeError:
pass
try:
return obj[attribute]
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
@internalcode
def parse(self, source, name=None, filename=None):
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source code or bytecode. This is useful for debugging or to
extract information from templates.
If you are :ref:`developing Jinja2 extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
return Parser(self, source, name, _encode_filename(filename)).parse()
def lex(self, source, name=None, filename=None):
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
source = unicode(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
return reduce(lambda s, e: e.preprocess(s, name, filename),
self.iter_extensions(), unicode(source))
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
"""
source = self.preprocess(source, name, filename)
stream = self.lexer.tokenize(source, name, filename, state)
for ext in self.iter_extensions():
stream = ext.filter_stream(stream)
if not isinstance(stream, TokenStream):
stream = TokenStream(stream, name, filename)
return stream
def _generate(self, source, name, filename, defer_init=False):
"""Internal hook that can be overriden to hook a different generate
method in.
.. versionadded:: 2.5
"""
return generate(source, self, name, filename, defer_init=defer_init)
def _compile(self, source, filename):
"""Internal hook that can be overriden to hook a different compile
method in.
.. versionadded:: 2.5
"""
return compile(source, filename, 'exec')
@internalcode
def compile(self, source, name=None, filename=None, raw=False,
defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
The `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
`defer_init` is used internally to aid the module code generator. This
causes the generated code to be importable without the global
environment being set up first.
.. versionadded:: 2.4
`defer_init` parameter added.
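Illustrative usage (the template source is only an example):
>>> code = env.compile('Hello {{ name }}!', name='hello')
>>> generated_source = env.compile('Hello {{ name }}!', raw=True)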
"""
source_hint = None
try:
if isinstance(source, basestring):
source_hint = source
source = self._parse(source, name, filename)
if self.optimized:
source = optimize(source, self)
source = self._generate(source, name, filename,
defer_init=defer_init)
if raw:
return source
if filename is None:
filename = '<template>'
else:
filename = _encode_filename(filename)
return self._compile(source, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
Per default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
"""
parser = Parser(self, source, state='variable')
exc_info = None
try:
expr = parser.parse_expression()
if not parser.stream.eos:
raise TemplateSyntaxError('chunk after expression',
parser.stream.current.lineno,
None, None)
expr.set_environment(self)
except TemplateSyntaxError:
exc_info = sys.exc_info()
if exc_info is not None:
self.handle_exception(exc_info, source_hint=source)
body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
def compile_templates(self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False):
"""Compiles all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be will be stored in a directory.
By default a deflate zip algorithm is used, to switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True` .pyc files will be written to the
target instead of standard .py files.
.. versionadded:: 2.4
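Illustrative usage, assuming the environment has a loader configured
(the target path is only an example):
>>> env.compile_templates('compiled_templates.zip', zip='deflated')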
"""
from jinja2.loaders import ModuleLoader
if log_function is None:
log_function = lambda x: None
if py_compile:
import imp, struct, marshal
py_header = imp.get_magic() + \
u'\xff\xff\xff\xff'.encode('iso-8859-15')
def write_file(filename, data, mode):
if zip:
info = ZipInfo(filename)
info.external_attr = 0755 << 16L
zip_file.writestr(info, data)
else:
f = open(os.path.join(target, filename), mode)
try:
f.write(data)
finally:
f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
stored=ZIP_STORED)[zip])
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
os.makedirs(target)
log_function('Compiling into folder "%s"' % target)
try:
for name in self.list_templates(extensions, filter_func):
source, filename, _ = self.loader.get_source(self, name)
try:
code = self.compile(source, name, filename, True, True)
except TemplateSyntaxError, e:
if not ignore_errors:
raise
log_function('Could not compile "%s": %s' % (name, e))
continue
filename = ModuleLoader.get_module_filename(name)
if py_compile:
c = self._compile(code, _encode_filename(filename))
write_file(filename + 'c', py_header +
marshal.dumps(c), 'wb')
log_function('Byte-compiled "%s" as %s' %
(name, filename + 'c'))
else:
write_file(filename, code, 'w')
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
log_function('Finished compiling templates')
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
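Illustrative usage (the returned names depend entirely on the loader):
>>> html_templates = env.list_templates(extensions=['html'])
>>> mail_templates = env.list_templates(filter_func=lambda n: n.startswith('mails/'))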
"""
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = filter(filter_func, x)
return x
def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
global _make_traceback
if exc_info is None:
exc_info = sys.exc_info()
# the debugging module is imported when it's used for the first time.
# we're doing a lot of stuff there and for applications that do not
# get any exceptions in template rendering there is no need to load
# all of that.
if _make_traceback is None:
from jinja2.debug import make_traceback as _make_traceback
traceback = _make_traceback(exc_info, source_hint)
if rendered and self.exception_formatter is not None:
return self.exception_formatter(traceback)
if self.exception_handler is not None:
self.exception_handler(traceback)
exc_type, exc_value, tb = traceback.standard_exc_info
raise exc_type, exc_value, tb
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
"""
return template
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
raise TypeError('no loader for this environment specified')
if self.cache is not None:
template = self.cache.get(name)
if template is not None and (not self.auto_reload or \
template.is_up_to_date):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
self.cache[name] = template
return template
@internalcode
def get_template(self, name, parent=None, globals=None):
"""Load a template from the loader. If a loader is configured this
method asks the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
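Illustrative usage, assuming the loader can resolve the name:
>>> template = env.get_template('index.html')
>>> html = template.render(title='Home')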
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, self.make_globals(globals))
@internalcode
def select_template(self, names, parent=None, globals=None):
"""Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
.. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
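Illustrative usage (the template names are only examples):
>>> template = env.select_template(['page/special.html', 'page/default.html'])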
"""
if not names:
raise TemplatesNotFound(message=u'Tried to select from an empty list '
u'of templates.')
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
except TemplateNotFound:
pass
raise TemplatesNotFound(names)
@internalcode
def get_or_select_template(self, template_name_or_list,
parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
if isinstance(template_name_or_list, basestring):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals)
def from_string(self, source, globals=None, template_class=None):
"""Load a template from a string. This parses the source given and
returns a :class:`Template` object.
"""
globals = self.make_globals(globals)
cls = template_class or self.template_class
return cls.from_code(self, self.compile(source), globals, None)
def make_globals(self, d):
"""Return a dict for the globals."""
if not d:
return self.globals
return dict(self.globals, **d)
class Template(object):
"""The central template object. This class represents a compiled template
and is used to evaluate it.
Normally the template object is generated from an :class:`Environment` but
it also has a constructor that makes it possible to create a template
instance directly. It takes the same arguments as
the environment constructor but it's not possible to specify a loader.
Every template object has a few methods and members that are guaranteed
to exist. However, a template object should be
considered immutable. Modifications on the object are not supported.
Template objects created from the constructor rather than an environment
do have an `environment` attribute that points to a temporary environment
that is probably shared with other templates created with the constructor
and compatible settings.
>>> template = Template('Hello {{ name }}!')
>>> template.render(name='John Doe')
u'Hello John Doe!'
>>> stream = template.stream(name='John Doe')
>>> stream.next()
u'Hello John Doe!'
>>> stream.next()
Traceback (most recent call last):
...
StopIteration
"""
def __new__(cls, source,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False):
env = get_spontaneous_environment(
block_start_string, block_end_string, variable_start_string,
variable_end_string, comment_start_string, comment_end_string,
line_statement_prefix, line_comment_prefix, trim_blocks,
newline_sequence, frozenset(extensions), optimized, undefined,
finalize, autoescape, None, 0, False, None)
return env.from_string(source, template_class=cls)
@classmethod
def from_code(cls, environment, code, globals, uptodate=None):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
namespace = {
'environment': environment,
'__file__': code.co_filename
}
exec code in namespace
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
return rv
@classmethod
def from_module_dict(cls, environment, module_dict, globals):
"""Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
"""
return cls._from_namespace(environment, module_dict, globals)
@classmethod
def _from_namespace(cls, environment, namespace, globals):
t = object.__new__(cls)
t.environment = environment
t.globals = globals
t.name = namespace['name']
t.filename = namespace['__file__']
t.blocks = namespace['blocks']
# render function and module
t.root_render_func = namespace['root']
t._module = None
# debug and loader helpers
t._debug_info = namespace['debug_info']
t._uptodate = None
# store the reference
namespace['environment'] = environment
namespace['__jinja_template__'] = t
return t
def render(self, *args, **kwargs):
"""This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
This will return the rendered template as a unicode string.
"""
vars = dict(*args, **kwargs)
try:
return concat(self.root_render_func(self.new_context(vars)))
except:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
"""
return TemplateStream(self.generate(*args, **kwargs))
def generate(self, *args, **kwargs):
"""For very large templates it can be useful to not render the whole
template at once but evaluate each statement one after another and yield
it piece by piece. This method basically does exactly that and returns
a generator that yields one item after another as unicode strings.
It accepts the same arguments as :meth:`render`.
"""
vars = dict(*args, **kwargs)
try:
for event in self.root_render_func(self.new_context(vars)):
yield event
except:
exc_info = sys.exc_info()
else:
return
yield self.environment.handle_exception(exc_info, True)
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
are added to the context. If shared is set to `True` the data
is passed as is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
return new_context(self.environment, self.name, self.blocks,
vars, shared, self.globals, locals)
def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
"""
return TemplateModule(self, self.new_context(vars, shared, locals))
@property
def module(self):
"""The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
>>> t = Template('{% macro foo() %}42{% endmacro %}23')
>>> unicode(t.module)
u'23'
>>> t.module.foo()
u'42'
"""
if self._module is not None:
return self._module
self._module = rv = self.make_module()
return rv
def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1
@property
def is_up_to_date(self):
"""If this variable is `False` there is a newer version available."""
if self._uptodate is None:
return True
return self._uptodate()
@property
def debug_info(self):
"""The debug info mapping."""
return [tuple(map(int, x.split('='))) for x in
self._debug_info.split('&')]
def __repr__(self):
if self.name is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.name)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateModule(object):
"""Represents an imported template. All the exported names of the
template are available as attributes on this object. Additionally,
converting it into a unicode or byte string renders the contents.
"""
def __init__(self, template, context):
self._body_stream = list(template.root_render_func(context))
self.__dict__.update(context.get_exported())
self.__name__ = template.name
def __html__(self):
return Markup(concat(self._body_stream))
def __str__(self):
return unicode(self).encode('utf-8')
# unicode goes after __str__ because we configured 2to3 to rename
# __unicode__ to __str__. because the 2to3 tree is not designed to
# remove nodes from it, we leave the above __str__ around and let
# it override at runtime.
def __unicode__(self):
return concat(self._body_stream)
def __repr__(self):
if self.__name__ is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.__name__)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateExpression(object):
"""The :meth:`jinja2.Environment.compile_expression` method returns an
instance of this object. It encapsulates expression-like access
to the template it wraps.
"""
def __init__(self, template, undefined_to_none):
self._template = template
self._undefined_to_none = undefined_to_none
def __call__(self, *args, **kwargs):
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
rv = context.vars['result']
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
class TemplateStream(object):
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
Per default the output is unbuffered which means that for every unbuffered
instruction in the template one unicode string is yielded.
If buffering is enabled with a buffer size of 5, five items are combined
into a new unicode string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
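Illustrative usage (buffer size and output path chosen arbitrarily):
>>> stream = Template('Hello {{ name }}!').stream(name='World')
>>> stream.enable_buffering(size=5)
>>> stream.dump('hello.html')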
"""
def __init__(self, gen):
self._gen = gen
self.disable_buffering()
def dump(self, fp, encoding=None, errors='strict'):
"""Dump the complete stream into a file or file-like object.
Per default unicode strings are written; if you want to encode
before writing, specify an `encoding`.
Example usage::
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
if isinstance(fp, basestring):
fp = file(fp, 'w')
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self)
else:
iterable = self
if hasattr(fp, 'writelines'):
fp.writelines(iterable)
else:
for item in iterable:
fp.write(item)
finally:
if close:
fp.close()
def disable_buffering(self):
"""Disable the output buffering."""
self._next = self._gen.next
self.buffered = False
def enable_buffering(self, size=5):
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError('buffer size too small')
def generator(next):
buf = []
c_size = 0
push = buf.append
while 1:
try:
while c_size < size:
c = next()
push(c)
if c:
c_size += 1
except StopIteration:
if not c_size:
return
yield concat(buf)
del buf[:]
c_size = 0
self.buffered = True
self._next = generator(self._gen.next).next
def __iter__(self):
return self
def next(self):
return self._next()
# hook in default template class. if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
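# Illustrative sketch of the public API defined above (not part of the
# original module); DictLoader is assumed to be importable from
# jinja2.loaders. Guarded so that importing this module is unaffected.
if __name__ == '__main__':
    from jinja2.loaders import DictLoader
    example_env = Environment(loader=DictLoader({'hello.html': u'Hello {{ name }}!'}))
    print example_env.get_template('hello.html').render(name='World')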
| gpl-3.0 |
blorenz/indie-film-rentals | indiefilmrentals/api/serial_api.py | 1 | 14381 | from indiefilmrentals.base.models import *
from indiefilmrentals.products.models import *
from indiefilmrentals.api.serial_views import HybridDetailView, HybridListView
from session_csrf import anonymous_csrf_exempt
from django.utils.decorators import method_decorator
import json
from haystack.query import SearchQuerySet
XOR_KEY = 0x93B2E392A502C32D
AES_KEY = 'funtimes'
class BaseAPIListView(HybridListView):
serialized_output = True
# Default to JSON
response_format = 'json'
@method_decorator(anonymous_csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(BaseAPIListView, self).dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
class BaseAPIDetailView(HybridDetailView):
serialized_output = True
# Default to JSON
response_format = 'json'
def throw_error(error=500):
context = {}
context.setdefault('status', 'error')
return context
class SubmoduleListView(BaseAPIListView):
context_object_name = 'submodules'
queryset = Submodule.objects.select_related()
def get_queryset(self):
st = {}
kwargs = {}
try:
st = json.loads(self.request.raw_post_data)
# module/slides/list
the_id = st.get('module', None)
kwargs['module__id'] = the_id
qs = self.queryset.filter(**kwargs)
except:
qs = self.queryset
return qs
def get_context_data(self, **kwargs):
old_context = super(SubmoduleListView, self).get_context_data(**kwargs)
context = {}
context['submodules'] = []
for x in list(old_context['submodules']):
if x.title == "Base":
pass
else:
context['submodules'].append({"title": x.title,
"id": x.id,
})
return context
class SlidesListView(BaseAPIListView):
context_object_name = 'slides'
queryset = Slide.objects.select_related()
def get_queryset(self):
st = {}
kwargs = {}
try:
st = json.loads(self.request.raw_post_data)
# module/slides/list
the_id = st.get('id', None)
kwargs['submodule__module__id'] = the_id
qs = self.queryset.filter(**kwargs)
except:
qs = self.queryset
return qs
def get_context_data(self, **kwargs):
old_context = super(SlidesListView, self).get_context_data(**kwargs)
context = {}
context['slides'] = create_slides_list(old_context['slides'])
return context
class PresentationListView(BaseAPIListView):
context_object_name = 'presentations'
queryset = Presentation.objects.select_related()
def get_queryset(self):
try:
qs = self.queryset.filter(user=PresentationUser.objects.get(user=self.request.user))
qs = qs.filter(active=True)
except:
qs = Presentation.objects.none()
return qs
def get_context_data(self, **kwargs):
old_context = super(PresentationListView, self).get_context_data(**kwargs)
context = {}
context['status'] = 'success'
context['presentations'] = create_presentations_list(old_context['presentations'])
return context
class TestSlidesListView(BaseAPIListView):
serialized_output = False
template = 'base/test_module.html'
context_object_name = 'slides'
queryset = Slide.objects.select_related()
def get_queryset(self):
kwargs = {}
the_id = 4
try:
# st = json.loads(self.request.raw_post_data)
# module/slides/list
kwargs['submodule__module__id'] = the_id
qs = self.queryset.filter(**kwargs)
except:
qs = self.queryset
return qs
def get_context_data(self, **kwargs):
old_context = super(TestSlidesListView, self).get_context_data(**kwargs)
context = {}
context['slides'] = create_slides_list(old_context['slides'])
return context
class FeedbackEmailView(BaseAPIListView):
context_object_name = 'feedback'
queryset = Slide.objects.select_related()
def get_queryset(self):
st = json.loads(self.request.raw_post_data)
message = st.get('message', 'No message')
try:
user = PresentationUser.objects.get(user=self.request.user)
except:
user = PresentationUser.objects.get(user__username='anon')
qs = self.queryset
return qs
def get_context_data(self, **kwargs):
old_context = super(FeedbackEmailView, self).get_context_data(**kwargs)
context = {}
context['success'] = "true"
return context
class PresentationSaveView(BaseAPIListView):
context_object_name = 'presentation'
queryset = Slide.objects.select_related()
errors = {}
def get_queryset(self):
st = json.loads(self.request.raw_post_data)
print st
name = st.get('name', 'unnamed')
welcome_slide = st.get('welcome_title',7)
presenter_name = st.get('presenter_name','')
presenter_audience = st.get('presenter_audience','')
title_override = st.get('title_override',False)
try:
user = PresentationUser.objects.get(user=self.request.user)
except:
user = PresentationUser.objects.get(user__username='anon')
title = PresentationTitle.objects.get(pk=int(welcome_slide))
if 'id' in st:
j = int(AESdecrypt(AES_KEY,str(st['id'])))
pres = Presentation.objects.get(pk=j)
pres.user = user
pres.title = title
pres.presenter_name = presenter_name
pres.presenter_audience = presenter_audience
pres.title_override = title_override
pres.save()
else:
if not len(Presentation.objects.filter(user=user, name__iexact=name)):
pres = Presentation.objects.create(user=user, title=title, name=name, presenter_name=presenter_name, presenter_audience=presenter_audience, title_override=title_override)
else:
self.errors = {'error': 'duplicate_name'}
return
pres.slide_set.clear()
for k, i in enumerate(st['slides']):
PresentationSlide.objects.create(presentation=pres, slide=Slide.objects.get(pk=i['id']), slide_order=k)
qs = Presentation.objects.filter(pk=pres.id)
return qs
def get_context_data(self, **kwargs):
old_context = super(PresentationSaveView, self).get_context_data(**kwargs)
context = {}
if not len(self.errors):
context['success'] = "true"
context['id'] = AESencrypt(AES_KEY,str(list(old_context['presentation'])[0].id))
else:
context['success'] = "false"
context['message'] = 'A presentation already exists with that name.'
context['errors'] = self.errors
return context
class PresentationShareListView(BaseAPIListView):
context_object_name = 'slides'
queryset = PresentationUser.objects.select_related()
users = []
def get_queryset(self):
st = {}
kwargs = {}
#try:
st = json.loads(self.request.raw_post_data)
the_id = st.get('id', None)
the_id = int(AESdecrypt(AES_KEY,the_id))
kwargs['presentation__id'] = the_id
try:
obj = Presentation.objects.get(pk=the_id)
self.users = get_share_presenters(obj)
#TESTING
self.users = [ { 'SpeakerId': x.speaker_id,
'FirstName': x.user.first_name,
'LastName': x.user.last_name,
} for x in PresentationUser.objects.filter(id__lte=11).filter(id__gte=4).order_by('user__last_name') ]
except:
pass
qs = Slide.objects.none()
return qs
def get_context_data(self, **kwargs):
old_context = super(PresentationShareListView, self).get_context_data(**kwargs)
context = {}
new_slides = []
context['users'] = [{
'id': AESencrypt(AES_KEY,str(x['SpeakerId'])),
'name': x['FirstName'] + ' ' + x['LastName'],
} for x in self.users ]
return context
class PresentationShareView(BaseAPIListView):
context_object_name = 'slides'
queryset = PresentationUser.objects.select_related()
users = []
def get_queryset(self):
st = {}
kwargs = {}
#try:
st = json.loads(self.request.raw_post_data)
the_id = st.get('id', None)
the_user = st.get('user', None)
the_id = int(AESdecrypt(AES_KEY,the_id))
the_user = int(AESdecrypt(AES_KEY,the_user))
kwargs['presentation__id'] = the_id
try:
obj = Presentation.objects.get(pk=the_id)
share_presentation(obj,PresentationUser.objects.get(speaker_id=the_user))
except:
pass
qs = Slide.objects.none()
return qs
def get_context_data(self, **kwargs):
old_context = super(PresentationShareView, self).get_context_data(**kwargs)
context = {}
new_slides = []
context['success'] = "true"
return context
class PresentationOpenView(BaseAPIListView):
context_object_name = 'slides'
queryset = Slide.objects.select_related()
def get_queryset(self):
st = {}
kwargs = {}
#try:
st = json.loads(self.request.raw_post_data)
self.title = None
the_id = st.get('id', None)
the_id = int(AESdecrypt(AES_KEY,the_id))
kwargs['presentation__id'] = the_id
self.title = None
self.title_override = None
self.presenter_name = None
self.presenter_audience = None
try:
qs = self.queryset.filter(**kwargs).order_by('presentation')
obj = Presentation.objects.get(pk=the_id)
self.title = obj.title.id
self.title_override = obj.title_override
self.presenter_name = obj.presenter_name
self.presenter_audience = obj.presenter_audience
except:
qs = Slide.objects.none()
return qs
def get_context_data(self, **kwargs):
old_context = super(PresentationOpenView, self).get_context_data(**kwargs)
context = {}
new_slides = []
context['welcome_slide'] = self.title
context['title_override'] = self.title_override
context['presenter_name'] = self.presenter_name
context['presenter_audience'] = self.presenter_audience
context['slides'] = create_slides_list(old_context['slides'])
return context
class PresentationPrintView(BaseAPIListView):
context_object_name = 'slides'
serialized_output = False
template_name = 'base/print.html'
queryset = Slide.objects.select_related()
def get_queryset(self, **kwargs):
st = {}
kwargs = {}
try:
st = self.kwargs
except:
pass
the_id = st.get('id', None)
the_id = int(AESdecrypt(AES_KEY,the_id))
self.the_type = PRINTING[int(st.get('type', 1))]
kwargs['presentation__id'] = the_id
try:
qs = self.queryset.filter(**kwargs).order_by('presentation')
except:
qs = Slide.objects.none()
return qs
def get_context_data(self, **kwargs):
old_context = super(PresentationPrintView, self).get_context_data(**kwargs)
context = {}
context['print_type'] = self.the_type + '.html'
context['slides'] = create_slides_list(old_context['slides'])
return context
class PresentationDownloadView(BaseAPIListView):
context_object_name = 'slides'
serialized_output = False
template_name = 'base/download.html'
queryset = Slide.objects.select_related()
def get_queryset(self):
st = {}
kwargs = {}
try:
st = json.loads(self.request.raw_post_data)
except:
pass
#the_id = st.get('id', None)
the_id = st.get('id', 'c75a0a2758486d7a3f300da3824bce326744d598835666e3a8e1cf1cf7a5efe4904a56ede2b37b68b0d83c9aeae4c1bd4c49bfb75bceb54b21ccb7fff21b71c2')
the_id = int(AESdecrypt(AES_KEY,the_id))
kwargs['presentation__id'] = the_id
try:
qs = self.queryset.filter(**kwargs).order_by('presentation')
except:
qs = Slide.objects.none()
return qs
def get_context_data(self, **kwargs):
old_context = super(PresentationDownloadView, self).get_context_data(**kwargs)
context = {}
context['print_type'] = 'slides-with-notes.html'
context['slides'] = create_slides_list(old_context['slides'])
return context
class SearchListView(BaseAPIListView):
context_object_name = 'results'
def get_queryset(self):
try:
st = json.loads(self.request.raw_post_data)
except:
st = {}  # avoid a NameError below when the POST body cannot be parsed
text = st.get('query', None)
qs = SearchQuerySet().filter(content=text)
return qs
def get_context_data(self, **kwargs):
old_context = super(SearchListView, self).get_context_data(**kwargs)
context = {}
context['slides'] = create_slides_list([x.object for x in old_context['results']])
return context
class TitlesListView(BaseAPIListView):
context_object_name = 'titles'
queryset = PresentationTitle.objects.select_related()
def get_queryset(self):
qs = self.queryset
return qs
def get_context_data(self, **kwargs):
old_context = super(TitlesListView, self).get_context_data(**kwargs)
context = {}
new_slides = []
for k in list(old_context['titles']):
new_slides.append({
'id': k.id,
'title': k.title,
'module': k.module_set.all()[0].id if k.module_set.all() else -1,
})
context['titles'] = new_slides
return context
| bsd-3-clause |
NoahFlowa/glowing-spoon | venv/lib/python2.7/site-packages/sqlalchemy/dialects/firebird/kinterbasdb.py | 33 | 6300 | # firebird/kinterbasdb.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: firebird+kinterbasdb
:name: kinterbasdb
:dbapi: kinterbasdb
:connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db\
[?key=value&key=value...]
:url: http://firebirdsql.org/index.php?op=devel&sub=python
Arguments
----------
The Kinterbasdb backend accepts the ``enable_rowcount`` and ``retaining``
arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect.
In addition, it also accepts the following:
* ``type_conv`` - select the kind of mapping done on the types: by default
SQLAlchemy uses 200 with Unicode, datetime and decimal support. See
the linked documents below for further information.
* ``concurrency_level`` - set the backend policy with regards to threading
issues: by default SQLAlchemy uses policy 1. See the linked documents
below for further information.
.. seealso::
http://sourceforge.net/projects/kinterbasdb
http://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation
http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency
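Illustrative connection example (credentials, host, port and database
path are placeholders only; arguments such as ``type_conv`` can be
appended as query parameters)::

    from sqlalchemy import create_engine
    engine = create_engine(
        'firebird+kinterbasdb://user:password@localhost:3050/path/to/db.fdb')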
"""
from .base import FBDialect, FBExecutionContext
from ... import util, types as sqltypes
from re import match
import decimal
class _kinterbasdb_numeric(object):
def bind_processor(self, dialect):
def process(value):
if isinstance(value, decimal.Decimal):
return str(value)
else:
return value
return process
class _FBNumeric_kinterbasdb(_kinterbasdb_numeric, sqltypes.Numeric):
pass
class _FBFloat_kinterbasdb(_kinterbasdb_numeric, sqltypes.Float):
pass
class FBExecutionContext_kinterbasdb(FBExecutionContext):
@property
def rowcount(self):
if self.execution_options.get('enable_rowcount',
self.dialect.enable_rowcount):
return self.cursor.rowcount
else:
return -1
class FBDialect_kinterbasdb(FBDialect):
driver = 'kinterbasdb'
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
execution_ctx_cls = FBExecutionContext_kinterbasdb
supports_native_decimal = True
colspecs = util.update_copy(
FBDialect.colspecs,
{
sqltypes.Numeric: _FBNumeric_kinterbasdb,
sqltypes.Float: _FBFloat_kinterbasdb,
}
)
def __init__(self, type_conv=200, concurrency_level=1,
enable_rowcount=True,
retaining=False, **kwargs):
super(FBDialect_kinterbasdb, self).__init__(**kwargs)
self.enable_rowcount = enable_rowcount
self.type_conv = type_conv
self.concurrency_level = concurrency_level
self.retaining = retaining
if enable_rowcount:
self.supports_sane_rowcount = True
@classmethod
def dbapi(cls):
return __import__('kinterbasdb')
def do_execute(self, cursor, statement, parameters, context=None):
# kinterbasdb does not accept None, but wants an empty list
# when there are no arguments.
cursor.execute(statement, parameters or [])
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback(self.retaining)
def do_commit(self, dbapi_connection):
dbapi_connection.commit(self.retaining)
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if opts.get('port'):
opts['host'] = "%s/%s" % (opts['host'], opts['port'])
del opts['port']
opts.update(url.query)
util.coerce_kw_type(opts, 'type_conv', int)
type_conv = opts.pop('type_conv', self.type_conv)
concurrency_level = opts.pop('concurrency_level',
self.concurrency_level)
if self.dbapi is not None:
initialized = getattr(self.dbapi, 'initialized', None)
if initialized is None:
# CVS rev 1.96 changed the name of the attribute:
# http://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/
# Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96
initialized = getattr(self.dbapi, '_initialized', False)
if not initialized:
self.dbapi.init(type_conv=type_conv,
concurrency_level=concurrency_level)
return ([], opts)
def _get_server_version_info(self, connection):
"""Get the version of the Firebird server used by a connection.
Returns a tuple of (`major`, `minor`, `build`), three integers
representing the version of the attached server.
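For example (illustrative), a server version string such as
``'LI-V6.3.3.12981 Firebird 2.0'`` is parsed by :meth:`_parse_version_info`
into ``(2, 0, 12981, 'firebird')``.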
"""
# This is the simpler approach (the other uses the services api),
# that for backward compatibility reasons returns a string like
# LI-V6.3.3.12981 Firebird 2.0
# where the first version is a fake one resembling the old
# Interbase signature.
fbconn = connection.connection
version = fbconn.server_version
return self._parse_version_info(version)
def _parse_version_info(self, version):
m = match(
r'\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?', version)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % version)
if m.group(5) is not None:
return tuple([int(x) for x in m.group(6, 7, 4)] + ['firebird'])
else:
return tuple([int(x) for x in m.group(1, 2, 3)] + ['interbase'])
def is_disconnect(self, e, connection, cursor):
if isinstance(e, (self.dbapi.OperationalError,
self.dbapi.ProgrammingError)):
msg = str(e)
return ('Unable to complete network request to host' in msg or
'Invalid connection state' in msg or
'Invalid cursor state' in msg or
'connection shutdown' in msg)
else:
return False
dialect = FBDialect_kinterbasdb
| apache-2.0 |
CatsAndDogsbvba/odoo | addons/mail/mail_mail.py | 1 | 18837 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import logging
from email.utils import formataddr
from urlparse import urljoin
import psycopg2
from openerp import api, tools
from openerp import SUPERUSER_ID
from openerp.addons.base.ir.ir_mail_server import MailDeliveryException
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class mail_mail(osv.Model):
""" Model holding RFC2822 email messages to send. This model also provides
facilities to queue and send new email messages. """
_name = 'mail.mail'
_description = 'Outgoing Mails'
_inherits = {'mail.message': 'mail_message_id'}
_order = 'id desc'
_rec_name = 'subject'
_columns = {
'mail_message_id': fields.many2one('mail.message', 'Message', required=True, ondelete='cascade', auto_join=True),
'state': fields.selection([
('outgoing', 'Outgoing'),
('sent', 'Sent'),
('received', 'Received'),
('exception', 'Delivery Failed'),
('cancel', 'Cancelled'),
], 'Status', readonly=True, copy=False),
'auto_delete': fields.boolean('Auto Delete',
help="Permanently delete this email after sending it, to save space"),
'references': fields.text('References', help='Message references, such as identifiers of previous messages', readonly=1),
'email_to': fields.text('To', help='Message recipients (emails)'),
'recipient_ids': fields.many2many('res.partner', string='To (Partners)'),
'email_cc': fields.char('Cc', help='Carbon copy message recipients'),
'body_html': fields.text('Rich-text Contents', help="Rich-text/HTML message"),
'headers': fields.text('Headers', copy=False),
# Auto-detected based on create() - if 'mail_message_id' was passed then this mail is a notification
# and during unlink() we will not cascade delete the parent and its attachments
'notification': fields.boolean('Is Notification',
help='Mail has been created to notify people of an existing mail.message'),
}
_defaults = {
'state': 'outgoing',
}
def default_get(self, cr, uid, fields, context=None):
# protection for `default_type` values leaking from menu action context (e.g. for invoices)
# To remove when automatic context propagation is removed in web client
if context and context.get('default_type') and context.get('default_type') not in self._all_columns['type'].column.selection:
context = dict(context, default_type=None)
return super(mail_mail, self).default_get(cr, uid, fields, context=context)
def create(self, cr, uid, values, context=None):
# notification field: if not set, set if mail comes from an existing mail.message
if 'notification' not in values and values.get('mail_message_id'):
values['notification'] = True
return super(mail_mail, self).create(cr, uid, values, context=context)
def unlink(self, cr, uid, ids, context=None):
# cascade-delete the parent message for all mails that are not created for a notification
ids_to_cascade = self.search(cr, uid, [('notification', '=', False), ('id', 'in', ids)])
parent_msg_ids = [m.mail_message_id.id for m in self.browse(cr, uid, ids_to_cascade, context=context)]
res = super(mail_mail, self).unlink(cr, uid, ids, context=context)
self.pool.get('mail.message').unlink(cr, uid, parent_msg_ids, context=context)
return res
def mark_outgoing(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'outgoing'}, context=context)
def cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
@api.cr_uid
def process_email_queue(self, cr, uid, ids=None, context=None):
"""Send immediately queued messages, committing after each
message is sent - this is not transactional and should
not be called during another transaction!
:param list ids: optional list of emails ids to send. If passed
no search is performed, and these ids are used
instead.
:param dict context: if a 'filters' key is present in context,
this value will be used as an additional
filter to further restrict the outgoing
messages to send (by default all 'outgoing'
messages are sent).
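Illustrative call (the extra domain filter is only an example)::

    self.pool['mail.mail'].process_email_queue(cr, uid, context={
        'filters': [('create_date', '>', '2015-01-01')],
    })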
"""
if context is None:
context = {}
if not ids:
filters = [('state', '=', 'outgoing')]
if 'filters' in context:
filters.extend(context['filters'])
ids = self.search(cr, uid, filters, context=context)
res = None
try:
# Force auto-commit - this is meant to be called by
# the scheduler, and we can't allow rolling back the status
# of previously sent emails!
res = self.send(cr, uid, ids, auto_commit=True, context=context)
except Exception:
_logger.exception("Failed processing mail queue")
return res
def _postprocess_sent_message(self, cr, uid, mail, context=None, mail_sent=True):
"""Perform any post-processing necessary after sending ``mail``
successfully, including deleting it completely along with its
attachment if the ``auto_delete`` flag of the mail was set.
Overridden by subclasses for extra post-processing behaviors.
:param browse_record mail: the mail that was just sent
:return: True
"""
if mail_sent and mail.auto_delete:
# done with SUPERUSER_ID to avoid giving large unlink access rights
self.unlink(cr, SUPERUSER_ID, [mail.id], context=context)
return True
#------------------------------------------------------
# mail_mail formatting, tools and send mechanism
#------------------------------------------------------
def _get_partner_access_link(self, cr, uid, mail, partner=None, context=None):
"""Generate URLs for links in mails: partner has access (is user):
link to action_mail_redirect action that will redirect to doc or Inbox """
if context is None:
context = {}
# if partner and partner.user_ids:
# base_url = self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'web.base.url')
# mail_model = mail.model or 'mail.thread'
# url = urljoin(base_url, self.pool[mail_model]._get_access_link(cr, uid, mail, partner, context=context))
# return "<span class='oe_mail_footer_access'><small>%(access_msg)s <a style='color:inherit' href='%(portal_link)s'>%(portal_msg)s</a></small></span>" % {
# 'access_msg': _('about') if mail.record_name else _('access'),
# 'portal_link': url,
# 'portal_msg': '%s %s' % (context.get('model_name', ''), mail.record_name) if mail.record_name else _('your messages'),
# }
# else:
# Do not add this footer
return None
def send_get_mail_subject(self, cr, uid, mail, force=False, partner=None, context=None):
"""If subject is void, set the subject as 'Re: <Resource>' or
'Re: <mail.parent_id.subject>'
:param boolean force: force the subject replacement
"""
if (force or not mail.subject) and mail.record_name:
return 'Re: %s' % (mail.record_name)
elif (force or not mail.subject) and mail.parent_id and mail.parent_id.subject:
return 'Re: %s' % (mail.parent_id.subject)
return mail.subject
def send_get_mail_body(self, cr, uid, mail, partner=None, context=None):
"""Return a specific ir_email body. The main purpose of this method
is to be inherited to add custom content depending on some module."""
body = mail.body_html
# generate access links for notifications or emails linked to a specific document with auto threading
link = None
if mail.notification or (mail.model and mail.res_id and not mail.no_auto_thread):
link = self._get_partner_access_link(cr, uid, mail, partner, context=context)
if link:
body = tools.append_content_to_html(body, link, plaintext=False, container_tag='div')
return body
def send_get_mail_to(self, cr, uid, mail, partner=None, context=None):
"""Forge the email_to with the following heuristic:
- if 'partner', recipient specific (Partner Name <email>)
- else fallback on mail.email_to splitting """
if partner:
email_to = [formataddr((partner.name, partner.email))]
else:
email_to = tools.email_split_and_format(mail.email_to)
return email_to
def send_get_email_dict(self, cr, uid, mail, partner=None, context=None):
"""Return a dictionary for specific email values, depending on a
partner, or generic to the whole recipients given by mail.email_to.
:param browse_record mail: mail.mail browse_record
:param browse_record partner: specific recipient partner
"""
body = self.send_get_mail_body(cr, uid, mail, partner=partner, context=context)
body_alternative = tools.html2plaintext(body)
res = {
'body': body,
'body_alternative': body_alternative,
'subject': self.send_get_mail_subject(cr, uid, mail, partner=partner, context=context),
'email_to': self.send_get_mail_to(cr, uid, mail, partner=partner, context=context),
}
return res
def send(self, cr, uid, ids, auto_commit=False, raise_exception=False, context=None):
""" Sends the selected emails immediately, ignoring their current
state (mails that have already been sent should not be passed
unless they should actually be re-sent).
Emails successfully delivered are marked as 'sent', and those
that fail to be delivered are marked as 'exception', and the
corresponding error mail is output in the server logs.
:param bool auto_commit: whether to force a commit of the mail status
after sending each mail (meant only for scheduler processing);
should never be True during normal transactions (default: False)
:param bool raise_exception: whether to raise an exception if the
email sending process has failed
:return: True
"""
context = dict(context or {})
ir_mail_server = self.pool.get('ir.mail_server')
ir_attachment = self.pool['ir.attachment']
for mail in self.browse(cr, SUPERUSER_ID, ids, context=context):
try:
# TDE note: remove me when model_id field is present on mail.message - done here to avoid doing it multiple times in the sub method
if mail.model:
model_id = self.pool['ir.model'].search(cr, SUPERUSER_ID, [('model', '=', mail.model)], context=context)[0]
model = self.pool['ir.model'].browse(cr, SUPERUSER_ID, model_id, context=context)
else:
model = None
if model:
context['model_name'] = model.name
# load attachment binary data with a separate read(), as prefetching all
# `datas` (binary field) could bloat the browse cache, triggering
# soft/hard mem limits with temporary data.
attachment_ids = [a.id for a in mail.attachment_ids]
attachments = [(a['datas_fname'], base64.b64decode(a['datas']))
for a in ir_attachment.read(cr, SUPERUSER_ID, attachment_ids,
['datas_fname', 'datas'])]
# specific behavior to customize the send email for notified partners
email_list = []
if mail.email_to:
email_list.append(self.send_get_email_dict(cr, uid, mail, context=context))
for partner in mail.recipient_ids:
email_list.append(self.send_get_email_dict(cr, uid, mail, partner=partner, context=context))
# headers
headers = {}
bounce_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.bounce.alias", context=context)
catchall_domain = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.catchall.domain", context=context)
if bounce_alias and catchall_domain:
if mail.model and mail.res_id:
headers['Return-Path'] = '%s-%d-%s-%d@%s' % (bounce_alias, mail.id, mail.model, mail.res_id, catchall_domain)
else:
headers['Return-Path'] = '%s-%d@%s' % (bounce_alias, mail.id, catchall_domain)
if mail.headers:
try:
headers.update(eval(mail.headers))
except Exception:
pass
# Writing on the mail object may fail (e.g. lock on user) which
# would trigger a rollback *after* actually sending the email.
# To avoid sending the same email twice, provoke the failure earlier
mail.write({'state': 'exception'})
mail_sent = False
# build an RFC2822 email.message.Message object and send it without queuing
res = None
for email in email_list:
msg = ir_mail_server.build_email(
email_from=mail.email_from,
email_to=email.get('email_to'),
subject=email.get('subject'),
body=email.get('body'),
body_alternative=email.get('body_alternative'),
email_cc=tools.email_split(mail.email_cc),
reply_to=mail.reply_to,
attachments=attachments,
message_id=mail.message_id,
references=mail.references,
object_id=mail.res_id and ('%s-%s' % (mail.res_id, mail.model)),
subtype='html',
subtype_alternative='plain',
headers=headers)
try:
res = ir_mail_server.send_email(cr, uid, msg,
mail_server_id=mail.mail_server_id.id,
context=context)
except AssertionError as error:
if error.message == ir_mail_server.NO_VALID_RECIPIENT:
# No valid recipient found for this particular
# mail item -> ignore error to avoid blocking
# delivery to next recipients, if any. If this is
# the only recipient, the mail will show as failed.
_logger.warning("Ignoring invalid recipients for mail.mail %s: %s",
mail.message_id, email.get('email_to'))
else:
raise
if res:
mail.write({'state': 'sent', 'message_id': res})
mail_sent = True
# /!\ can't use mail.state here, as mail.refresh() will cause an error
# see revid:[email protected] in 6.1
if mail_sent:
_logger.info('Mail with ID %r and Message-Id %r successfully sent', mail.id, mail.message_id)
self._postprocess_sent_message(cr, uid, mail, context=context, mail_sent=mail_sent)
except MemoryError:
# prevent catching transient MemoryErrors, bubble up to notify user or abort cron job
# instead of marking the mail as failed
_logger.exception('MemoryError while processing mail with ID %r and Msg-Id %r. '\
'Consider raising the --limit-memory-hard startup option',
mail.id, mail.message_id)
raise
except psycopg2.Error:
# If an error with the database occurs, chances are that the cursor is unusable.
# This will lead to an `psycopg2.InternalError` being raised when trying to write
# `state`, shadowing the original exception and forbid a retry on concurrent
# update. Let's bubble it.
raise
except Exception as e:
_logger.exception('failed sending mail.mail %s', mail.id)
mail.write({'state': 'exception'})
self._postprocess_sent_message(cr, uid, mail, context=context, mail_sent=False)
if raise_exception:
if isinstance(e, AssertionError):
# get the args of the original error, wrap into a value and throw a MailDeliveryException
# that is an except_orm, with name and value as arguments
value = '. '.join(e.args)
raise MailDeliveryException(_("Mail Delivery Failed"), value)
raise
if auto_commit is True:
cr.commit()
return True
| agpl-3.0 |
dsajkl/reqiop | common/lib/xmodule/xmodule/modulestore/draft_and_published.py | 116 | 4797 | """
This module provides an abstraction for Module Stores that support Draft and Published branches.
"""
import threading
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from . import ModuleStoreEnum
# Things w/ these categories should never be marked as version=DRAFT
DIRECT_ONLY_CATEGORIES = ['course', 'chapter', 'sequential', 'about', 'static_tab', 'course_info']
class BranchSettingMixin(object):
"""
A mixin to manage a module store's branch setting.
The order of override is (from higher precedence to lower):
1. thread-specific setting temporarily set using the branch_setting contextmanager
2. the return value of the branch_setting_func passed into this mixin's init method
3. the default branch setting being ModuleStoreEnum.Branch.published_only
"""
def __init__(self, *args, **kwargs):
"""
:param branch_setting_func: a function that returns the default branch setting for this object.
If not specified, ModuleStoreEnum.Branch.published_only is used as the default setting.
"""
self.default_branch_setting_func = kwargs.pop(
'branch_setting_func',
lambda: ModuleStoreEnum.Branch.published_only
)
super(BranchSettingMixin, self).__init__(*args, **kwargs)
# cache the branch setting on a local thread to support a multi-threaded environment
self.thread_cache = threading.local()
@contextmanager
def branch_setting(self, branch_setting, course_id=None): # pylint: disable=unused-argument
"""
A context manager for temporarily setting a store's branch value on the current thread.
"""
previous_thread_branch_setting = getattr(self.thread_cache, 'branch_setting', None)
try:
self.thread_cache.branch_setting = branch_setting
yield
finally:
self.thread_cache.branch_setting = previous_thread_branch_setting
def get_branch_setting(self, course_id=None): # pylint: disable=unused-argument
"""
Returns the current branch_setting on the store.
Returns the thread-local setting, if set.
Otherwise, returns the default value of the setting function set during the store's initialization.
"""
# first check the thread-local cache
thread_local_branch_setting = getattr(self.thread_cache, 'branch_setting', None)
if thread_local_branch_setting:
return thread_local_branch_setting
else:
# return the default value
return self.default_branch_setting_func()
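# Hedged usage sketch (illustration only, not part of the original module). The helper
# below exercises BranchSettingMixin directly to show the precedence documented in its
# docstring: the thread-local value set by the branch_setting() context manager wins over
# the init-time branch_setting_func default. The function name is hypothetical and it
# assumes ModuleStoreEnum.Branch.draft_preferred exists alongside published_only.
def _example_branch_setting_precedence():
    store = BranchSettingMixin(branch_setting_func=lambda: ModuleStoreEnum.Branch.draft_preferred)
    assert store.get_branch_setting() == ModuleStoreEnum.Branch.draft_preferred  # default from init
    with store.branch_setting(ModuleStoreEnum.Branch.published_only):
        assert store.get_branch_setting() == ModuleStoreEnum.Branch.published_only  # thread-local override
    assert store.get_branch_setting() == ModuleStoreEnum.Branch.draft_preferred  # previous setting restored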
class ModuleStoreDraftAndPublished(BranchSettingMixin):
"""
A mixin for a read-write database backend that supports two branches, Draft and Published, with
options to prefer Draft and fallback to Published.
"""
__metaclass__ = ABCMeta
@abstractmethod
def delete_item(self, location, user_id, revision=None, **kwargs):
raise NotImplementedError
@abstractmethod
def get_parent_location(self, location, revision=None, **kwargs):
raise NotImplementedError
@abstractmethod
def has_changes(self, xblock):
raise NotImplementedError
@abstractmethod
def publish(self, location, user_id):
raise NotImplementedError
@abstractmethod
def unpublish(self, location, user_id):
raise NotImplementedError
@abstractmethod
def revert_to_published(self, location, user_id):
raise NotImplementedError
@abstractmethod
def has_published_version(self, xblock):
raise NotImplementedError
@abstractmethod
def convert_to_draft(self, location, user_id):
raise NotImplementedError
@abstractmethod
def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
"""
Import the given xblock into the current branch setting: import completely overwrites any
existing block of the same id.
In ModuleStoreDraftAndPublished, importing a published block ensures that access from the draft
will get a block (either the one imported or a preexisting one). See xml_importer
"""
raise NotImplementedError
class UnsupportedRevisionError(ValueError):
"""
This error is raised if a method is called with an unsupported revision parameter.
"""
def __init__(self, allowed_revisions=None):
if not allowed_revisions:
allowed_revisions = [
None,
ModuleStoreEnum.RevisionOption.published_only,
ModuleStoreEnum.RevisionOption.draft_only
]
super(UnsupportedRevisionError, self).__init__('revision not one of {}'.format(allowed_revisions))
| agpl-3.0 |
Teagan42/home-assistant | homeassistant/components/zha/config_flow.py | 3 | 2521 | """Config flow for ZHA."""
import asyncio
from collections import OrderedDict
import os
import voluptuous as vol
from homeassistant import config_entries
from .core.const import (
CONF_RADIO_TYPE,
CONF_USB_PATH,
CONTROLLER,
DEFAULT_BAUDRATE,
DEFAULT_DATABASE_NAME,
DOMAIN,
ZHA_GW_RADIO,
RadioType,
)
from .core.registries import RADIO_TYPES
@config_entries.HANDLERS.register(DOMAIN)
class ZhaFlowHandler(config_entries.ConfigFlow):
"""Handle a config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_user(self, user_input=None):
"""Handle a zha config flow start."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
errors = {}
fields = OrderedDict()
fields[vol.Required(CONF_USB_PATH)] = str
fields[vol.Optional(CONF_RADIO_TYPE, default="ezsp")] = vol.In(RadioType.list())
if user_input is not None:
database = os.path.join(self.hass.config.config_dir, DEFAULT_DATABASE_NAME)
test = await check_zigpy_connection(
user_input[CONF_USB_PATH], user_input[CONF_RADIO_TYPE], database
)
if test:
return self.async_create_entry(
title=user_input[CONF_USB_PATH], data=user_input
)
errors["base"] = "cannot_connect"
return self.async_show_form(
step_id="user", data_schema=vol.Schema(fields), errors=errors
)
async def async_step_import(self, import_info):
"""Handle a zha config import."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
return self.async_create_entry(
title=import_info[CONF_USB_PATH], data=import_info
)
async def check_zigpy_connection(usb_path, radio_type, database_path):
"""Test zigpy radio connection."""
try:
radio = RADIO_TYPES[radio_type][ZHA_GW_RADIO]()
controller_application = RADIO_TYPES[radio_type][CONTROLLER]
except KeyError:
return False
try:
await radio.connect(usb_path, DEFAULT_BAUDRATE)
controller = controller_application(radio, database_path)
await asyncio.wait_for(controller.startup(auto_form=True), timeout=30)
await controller.shutdown()
except Exception: # pylint: disable=broad-except
return False
return True
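# Hedged usage sketch (illustration only, not part of the original component): probing a
# radio with the helper above outside the config flow. The serial path, radio type and
# config directory are hypothetical values; "ezsp" mirrors the default used in the flow.
async def _example_probe_radio(config_dir, usb_path="/dev/ttyUSB0", radio_type="ezsp"):
    database = os.path.join(config_dir, DEFAULT_DATABASE_NAME)
    return await check_zigpy_connection(usb_path, radio_type, database)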
| apache-2.0 |
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_fleet_runtime.py | 2 | 2562 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import os
class TestFleetRuntime(unittest.TestCase):
def test_fleet_runtime_base(self):
import paddle.distributed.fleet.runtime
base = paddle.distributed.fleet.runtime.runtime_base.RuntimeBase()
base._run_worker()
base._init_server()
base._run_server()
base._stop_worker()
base._save_inference_model()
base._save_persistables()
def test_fleet_collective_runtime(self):
import paddle.distributed.fleet.runtime
collective_runtime = paddle.distributed.fleet.runtime.CollectiveRuntime(
)
collective_runtime._init_worker()
collective_runtime._run_worker()
        collective_runtime._init_server()
collective_runtime._run_server()
collective_runtime._stop_worker()
collective_runtime._save_inference_model()
collective_runtime._save_persistables()
def test_fleet_ps_runtime(self):
ps_runtime = paddle.distributed.fleet.runtime.ParameterServerRuntime()
self.assertRaises(Exception, ps_runtime._get_optimizer_status,
"test_op", None)
reshaped_names, origin_names = ps_runtime._get_optimizer_status("adam",
"param")
self.assertTrue(
len(reshaped_names) == 2 and
reshaped_names[0] == 'param_moment1_0' and
reshaped_names[1] == 'param_moment2_0')
self.assertTrue(
len(origin_names) == 2 and
origin_names[0] == 'param_beta1_pow_acc_0' and
origin_names[1] == 'param_beta2_pow_acc_0')
reshaped_names, origin_names = ps_runtime._get_optimizer_status("sgd",
"param")
self.assertTrue(len(reshaped_names) == 0 and len(origin_names) == 0)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
CubicERP/geraldo | site/newsite/site-geraldo/django/contrib/localflavor/fr/fr_department.py | 39 | 3391 | # -*- coding: utf-8 -*-
DEPARTMENT_ASCII_CHOICES = (
('01', '01 - Ain'),
('02', '02 - Aisne'),
('03', '03 - Allier'),
('04', '04 - Alpes-de-Haute-Provence'),
('05', '05 - Hautes-Alpes'),
('06', '06 - Alpes-Maritimes'),
('07', '07 - Ardeche'),
('08', '08 - Ardennes'),
('09', '09 - Ariege'),
('10', '10 - Aube'),
('11', '11 - Aude'),
('12', '12 - Aveyron'),
('13', '13 - Bouches-du-Rhone'),
('14', '14 - Calvados'),
('15', '15 - Cantal'),
('16', '16 - Charente'),
('17', '17 - Charente-Maritime'),
('18', '18 - Cher'),
('19', '19 - Correze'),
('21', '21 - Cote-d\'Or'),
('22', '22 - Cotes-d\'Armor'),
('23', '23 - Creuse'),
('24', '24 - Dordogne'),
('25', '25 - Doubs'),
('26', '26 - Drome'),
('27', '27 - Eure'),
('28', '28 - Eure-et-Loire'),
('29', '29 - Finistere'),
('2A', '2A - Corse-du-Sud'),
('2B', '2B - Haute-Corse'),
('30', '30 - Gard'),
('31', '31 - Haute-Garonne'),
('32', '32 - Gers'),
('33', '33 - Gironde'),
('34', '34 - Herault'),
('35', '35 - Ille-et-Vilaine'),
('36', '36 - Indre'),
('37', '37 - Indre-et-Loire'),
('38', '38 - Isere'),
('39', '39 - Jura'),
('40', '40 - Landes'),
('41', '41 - Loir-et-Cher'),
('42', '42 - Loire'),
('43', '43 - Haute-Loire'),
('44', '44 - Loire-Atlantique'),
('45', '45 - Loiret'),
('46', '46 - Lot'),
('47', '47 - Lot-et-Garonne'),
('48', '48 - Lozere'),
('49', '49 - Maine-et-Loire'),
('50', '50 - Manche'),
('51', '51 - Marne'),
('52', '52 - Haute-Marne'),
('53', '53 - Mayenne'),
('54', '54 - Meurthe-et-Moselle'),
('55', '55 - Meuse'),
('56', '56 - Morbihan'),
('57', '57 - Moselle'),
('58', '58 - Nievre'),
('59', '59 - Nord'),
('60', '60 - Oise'),
('61', '61 - Orne'),
('62', '62 - Pas-de-Calais'),
('63', '63 - Puy-de-Dome'),
('64', '64 - Pyrenees-Atlantiques'),
('65', '65 - Hautes-Pyrenees'),
('66', '66 - Pyrenees-Orientales'),
('67', '67 - Bas-Rhin'),
('68', '68 - Haut-Rhin'),
('69', '69 - Rhone'),
('70', '70 - Haute-Saone'),
('71', '71 - Saone-et-Loire'),
('72', '72 - Sarthe'),
('73', '73 - Savoie'),
('74', '74 - Haute-Savoie'),
('75', '75 - Paris'),
('76', '76 - Seine-Maritime'),
('77', '77 - Seine-et-Marne'),
('78', '78 - Yvelines'),
('79', '79 - Deux-Sevres'),
('80', '80 - Somme'),
('81', '81 - Tarn'),
('82', '82 - Tarn-et-Garonne'),
('83', '83 - Var'),
('84', '84 - Vaucluse'),
('85', '85 - Vendee'),
('86', '86 - Vienne'),
('87', '87 - Haute-Vienne'),
('88', '88 - Vosges'),
('89', '89 - Yonne'),
('90', '90 - Territoire de Belfort'),
('91', '91 - Essonne'),
('92', '92 - Hauts-de-Seine'),
('93', '93 - Seine-Saint-Denis'),
('94', '94 - Val-de-Marne'),
('95', '95 - Val-d\'Oise'),
('2A', '2A - Corse du sud'),
('2B', '2B - Haute Corse'),
('971', '971 - Guadeloupe'),
('972', '972 - Martinique'),
('973', '973 - Guyane'),
('974', '974 - La Reunion'),
('975', '975 - Saint-Pierre-et-Miquelon'),
('976', '976 - Mayotte'),
('984', '984 - Terres Australes et Antarctiques'),
('986', '986 - Wallis et Futuna'),
('987', '987 - Polynesie Francaise'),
('988', '988 - Nouvelle-Caledonie'),
)
| lgpl-3.0 |
mixxorz/wagtail | wagtail/tests/testapp/migrations/0018_multiselect_form_field.py | 24 | 1833 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-28 22:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tests', '0017_alwaysshowinmenuspage'),
]
operations = [
migrations.AlterField(
model_name='formfield',
name='field_type',
field=models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('multiselect', 'Multiple select'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time')], max_length=16, verbose_name='field type'),
),
migrations.AlterField(
model_name='jadeformfield',
name='field_type',
field=models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('multiselect', 'Multiple select'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time')], max_length=16, verbose_name='field type'),
),
migrations.AlterField(
model_name='formfieldwithcustomsubmission',
name='field_type',
field=models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('multiselect', 'Multiple select'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time')], max_length=16, verbose_name='field type'),
),
]
| bsd-3-clause |
breunigs/beets | beetsplug/embedart.py | 3 | 6398 | # This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows beets to embed album art into file metadata."""
import logging
import imghdr
from beets.plugins import BeetsPlugin
from beets import mediafile
from beets import ui
from beets.ui import decargs
from beets.util import syspath, normpath, displayable_path
from beets.util.artresizer import ArtResizer
from beets import config
log = logging.getLogger('beets')
def _embed(path, items, maxwidth=0):
"""Embed an image file, located at `path`, into each item.
"""
if maxwidth:
path = ArtResizer.shared.resize(maxwidth, syspath(path))
data = open(syspath(path), 'rb').read()
kindstr = imghdr.what(None, data)
if kindstr not in ('jpeg', 'png'):
log.error('A file of type %s is not allowed as coverart.' % kindstr)
return
# Add art to each file.
log.debug('Embedding album art.')
for item in items:
try:
f = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
log.warn('Could not embed art in {0}: {1}'.format(
displayable_path(item.path), exc
))
continue
f.art = data
f.save()
class EmbedCoverArtPlugin(BeetsPlugin):
"""Allows albumart to be embedded into the actual files.
"""
def __init__(self):
super(EmbedCoverArtPlugin, self).__init__()
self.config.add({
'maxwidth': 0,
'auto': True,
})
if self.config['maxwidth'].get(int) and \
not ArtResizer.shared.local:
self.config['maxwidth'] = 0
log.warn("embedart: ImageMagick or PIL not found; "
"'maxwidth' option ignored")
def commands(self):
# Embed command.
embed_cmd = ui.Subcommand('embedart',
help='embed image files into file metadata')
embed_cmd.parser.add_option('-f', '--file', metavar='PATH',
help='the image file to embed')
def embed_func(lib, opts, args):
if opts.file:
imagepath = normpath(opts.file)
embed(lib, imagepath, decargs(args))
else:
embed_current(lib, decargs(args))
embed_cmd.func = embed_func
# Extract command.
extract_cmd = ui.Subcommand('extractart',
help='extract an image from file metadata')
extract_cmd.parser.add_option('-o', dest='outpath',
help='image output file')
def extract_func(lib, opts, args):
outpath = normpath(opts.outpath or 'cover')
extract(lib, outpath, decargs(args))
extract_cmd.func = extract_func
# Clear command.
clear_cmd = ui.Subcommand('clearart',
help='remove images from file metadata')
def clear_func(lib, opts, args):
clear(lib, decargs(args))
clear_cmd.func = clear_func
return [embed_cmd, extract_cmd, clear_cmd]
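# Hedged command-line illustration (not part of the original plugin): the subcommands
# registered above are invoked through beets, e.g.
#     beet embedart -f /path/to/cover.jpg beatles
#     beet extractart -o cover beatles
#     beet clearart beatles
# where "beatles" stands for any library query and the file paths are hypothetical.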
# "embedart" command with --file argument.
def embed(lib, imagepath, query):
albums = lib.albums(query)
for i_album in albums:
album = i_album
break
else:
log.error('No album matches query.')
return
log.info('Embedding album art into %s - %s.' % \
(album.albumartist, album.album))
_embed(imagepath, album.items(),
config['embedart']['maxwidth'].get(int))
# "embedart" command without explicit file.
def embed_current(lib, query):
albums = lib.albums(query)
for album in albums:
if not album.artpath:
log.info(u'No album art present: {0} - {1}'.
format(album.albumartist, album.album))
continue
log.info(u'Embedding album art into {0} - {1}'.
format(album.albumartist, album.album))
_embed(album.artpath, album.items(),
config['embedart']['maxwidth'].get(int))
# "extractart" command.
def extract(lib, outpath, query):
items = lib.items(query)
for i_item in items:
item = i_item
break
else:
log.error('No item matches query.')
return
# Extract the art.
try:
mf = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
log.error(u'Could not extract art from {0}: {1}'.format(
displayable_path(item.path), exc
))
return
art = mf.art
if not art:
log.error('No album art present in %s - %s.' %
(item.artist, item.title))
return
# Add an extension to the filename.
ext = imghdr.what(None, h=art)
if not ext:
log.error('Unknown image type.')
return
outpath += '.' + ext
log.info('Extracting album art from: %s - %s\n'
'To: %s' % \
(item.artist, item.title, outpath))
with open(syspath(outpath), 'wb') as f:
f.write(art)
# "clearart" command.
def clear(lib, query):
log.info('Clearing album art from items:')
for item in lib.items(query):
log.info(u'%s - %s' % (item.artist, item.title))
try:
mf = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
log.error(u'Could not clear art from {0}: {1}'.format(
displayable_path(item.path), exc
))
continue
mf.art = None
mf.save()
# Automatically embed art into imported albums.
@EmbedCoverArtPlugin.listen('album_imported')
def album_imported(lib, album):
if album.artpath and config['embedart']['auto']:
_embed(album.artpath, album.items(),
config['embedart']['maxwidth'].get(int))
| mit |
Endika/OpenUpgrade | addons/account_budget/account_budget.py | 194 | 9368 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date, datetime
from openerp.osv import fields, osv
from openerp.tools import ustr, DEFAULT_SERVER_DATE_FORMAT
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
# ---------------------------------------------------------
# Utils
# ---------------------------------------------------------
def strToDate(dt):
return date(int(dt[0:4]), int(dt[5:7]), int(dt[8:10]))
def strToDatetime(strdate):
return datetime.strptime(strdate, DEFAULT_SERVER_DATE_FORMAT)
# ---------------------------------------------------------
# Budgets
# ---------------------------------------------------------
class account_budget_post(osv.osv):
_name = "account.budget.post"
_description = "Budgetary Position"
_columns = {
'code': fields.char('Code', size=64, required=True),
'name': fields.char('Name', required=True),
'account_ids': fields.many2many('account.account', 'account_budget_rel', 'budget_id', 'account_id', 'Accounts'),
'crossovered_budget_line': fields.one2many('crossovered.budget.lines', 'general_budget_id', 'Budget Lines'),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.budget.post', context=c)
}
_order = "name"
class crossovered_budget(osv.osv):
_name = "crossovered.budget"
_description = "Budget"
_columns = {
'name': fields.char('Name', required=True, states={'done':[('readonly',True)]}),
'code': fields.char('Code', size=16, required=True, states={'done':[('readonly',True)]}),
'creating_user_id': fields.many2one('res.users', 'Responsible User'),
'validating_user_id': fields.many2one('res.users', 'Validate User', readonly=True),
'date_from': fields.date('Start Date', required=True, states={'done':[('readonly',True)]}),
'date_to': fields.date('End Date', required=True, states={'done':[('readonly',True)]}),
'state' : fields.selection([('draft','Draft'),('cancel', 'Cancelled'),('confirm','Confirmed'),('validate','Validated'),('done','Done')], 'Status', select=True, required=True, readonly=True, copy=False),
'crossovered_budget_line': fields.one2many('crossovered.budget.lines', 'crossovered_budget_id', 'Budget Lines', states={'done':[('readonly',True)]}, copy=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'state': 'draft',
'creating_user_id': lambda self, cr, uid, context: uid,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.budget.post', context=c)
}
def budget_confirm(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'confirm'
})
return True
def budget_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'draft'
})
return True
def budget_validate(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'validate',
'validating_user_id': uid,
})
return True
def budget_cancel(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'cancel'
})
return True
def budget_done(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'done'
})
return True
class crossovered_budget_lines(osv.osv):
def _prac_amt(self, cr, uid, ids, context=None):
res = {}
result = 0.0
if context is None:
context = {}
account_obj = self.pool.get('account.account')
for line in self.browse(cr, uid, ids, context=context):
acc_ids = [x.id for x in line.general_budget_id.account_ids]
if not acc_ids:
raise osv.except_osv(_('Error!'),_("The Budget '%s' has no accounts!") % ustr(line.general_budget_id.name))
acc_ids = account_obj._get_children_and_consol(cr, uid, acc_ids, context=context)
date_to = line.date_to
date_from = line.date_from
if line.analytic_account_id.id:
cr.execute("SELECT SUM(amount) FROM account_analytic_line WHERE account_id=%s AND (date "
"between to_date(%s,'yyyy-mm-dd') AND to_date(%s,'yyyy-mm-dd')) AND "
"general_account_id=ANY(%s)", (line.analytic_account_id.id, date_from, date_to,acc_ids,))
result = cr.fetchone()[0]
if result is None:
result = 0.00
res[line.id] = result
return res
def _prac(self, cr, uid, ids, name, args, context=None):
res={}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = self._prac_amt(cr, uid, [line.id], context=context)[line.id]
return res
def _theo_amt(self, cr, uid, ids, context=None):
if context is None:
context = {}
res = {}
for line in self.browse(cr, uid, ids, context=context):
today = datetime.now()
if line.paid_date:
if strToDate(line.date_to) <= strToDate(line.paid_date):
theo_amt = 0.00
else:
theo_amt = line.planned_amount
else:
line_timedelta = strToDatetime(line.date_to) - strToDatetime(line.date_from)
elapsed_timedelta = today - (strToDatetime(line.date_from))
if elapsed_timedelta.days < 0:
# If the budget line has not started yet, theoretical amount should be zero
theo_amt = 0.00
elif line_timedelta.days > 0 and today < strToDatetime(line.date_to):
# If today is between the budget line date_from and date_to
theo_amt = (elapsed_timedelta.total_seconds() / line_timedelta.total_seconds()) * line.planned_amount
else:
theo_amt = line.planned_amount
res[line.id] = theo_amt
return res
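    # Hedged worked example for _theo_amt with hypothetical figures: for a line with
    # date_from 2014-01-01, date_to 2014-01-31 (a 30-day span), planned_amount 3000.0
    # and no paid_date, evaluated on 2014-01-11 (10 days elapsed), the proportional
    # branch gives (10 / 30) * 3000.0 = 1000.0; before date_from the theoretical amount
    # is 0.0, and once date_to has passed it is the full planned_amount.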
def _theo(self, cr, uid, ids, name, args, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = self._theo_amt(cr, uid, [line.id], context=context)[line.id]
return res
def _perc(self, cr, uid, ids, name, args, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
            if line.theoritical_amount != 0.00:
res[line.id] = float((line.practical_amount or 0.0) / line.theoritical_amount) * 100
else:
res[line.id] = 0.00
return res
_name = "crossovered.budget.lines"
_description = "Budget Line"
_columns = {
'crossovered_budget_id': fields.many2one('crossovered.budget', 'Budget', ondelete='cascade', select=True, required=True),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'general_budget_id': fields.many2one('account.budget.post', 'Budgetary Position',required=True),
'date_from': fields.date('Start Date', required=True),
'date_to': fields.date('End Date', required=True),
'paid_date': fields.date('Paid Date'),
'planned_amount':fields.float('Planned Amount', required=True, digits_compute=dp.get_precision('Account')),
'practical_amount':fields.function(_prac, string='Practical Amount', type='float', digits_compute=dp.get_precision('Account')),
'theoritical_amount':fields.function(_theo, string='Theoretical Amount', type='float', digits_compute=dp.get_precision('Account')),
'percentage':fields.function(_perc, string='Percentage', type='float'),
'company_id': fields.related('crossovered_budget_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
class account_analytic_account(osv.osv):
_inherit = "account.analytic.account"
_columns = {
'crossovered_budget_line': fields.one2many('crossovered.budget.lines', 'analytic_account_id', 'Budget Lines'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
angr/angr | angr/exploration_techniques/oppologist.py | 1 | 3597 | from collections import defaultdict
import claripy
import functools
import logging
l = logging.getLogger(name=__name__)
from ..errors import AngrError, SimError, SimUnsupportedError, SimCCallError
from .. import sim_options
from ..engines.successors import SimSuccessors
exc_list = (AngrError, SimError, claripy.ClaripyError, TypeError, ValueError, ArithmeticError, MemoryError)
from . import ExplorationTechnique
class Oppologist(ExplorationTechnique):
"""
The Oppologist is an exploration technique that forces uncooperative code through qemu.
"""
def __init__(self):
ExplorationTechnique.__init__(self)
@staticmethod
def _restore_state(old, new):
new.release_plugin('unicorn')
new.register_plugin('unicorn', old.unicorn.copy())
new.options = old.options.copy()
def _oppologize(self, simgr, state, pn, **kwargs):
l.debug("... pn: %s", pn)
pn.options.add(sim_options.UNICORN)
pn.options.add(sim_options.UNICORN_AGGRESSIVE_CONCRETIZATION)
pn.unicorn.max_steps = 1
pn.unicorn.countdown_symbolic_stop = 0
pn.unicorn.countdown_unsupported_stop = 0
pn.unicorn.countdown_nonunicorn_blocks = 0
pn.unicorn.countdown_stop_point = 0
ss = simgr.successors(pn, throw=True, **kwargs)
fixup = functools.partial(self._restore_state, state)
l.debug("... successors: %s", ss)
for s in ss.flat_successors + ss.unconstrained_successors + ss.unsat_successors + ss.successors:
fixup(s)
return ss
@staticmethod
def _combine_results(*results):
all_results = defaultdict(list)
final = SimSuccessors(results[0].addr, results[0].initial_state)
final.description = 'Oppology'
final.sort = 'Oppologist'
for med in results:
final.processed = True
final.successors.extend(med.successors)
final.all_successors.extend(med.all_successors)
final.flat_successors.extend(med.flat_successors)
final.unsat_successors.extend(med.unsat_successors)
            final.unconstrained_successors.extend(med.unconstrained_successors)
return final
def _delayed_oppology(self, simgr, state, e, **kwargs):
ss = simgr.successors(state, num_inst=e.executed_instruction_count, **kwargs)
need_oppologizing = [ s for s in ss.flat_successors if s.addr == e.ins_addr ]
ss.flat_successors = [ s for s in ss.flat_successors if s.addr != e.ins_addr ]
results = [ss]
results.extend(map(functools.partial(self._oppologize, simgr, state, **kwargs), need_oppologizing))
return self._combine_results(*results)
def successors(self, simgr, state, **kwargs):
try:
kwargs.pop('throw', None)
return simgr.successors(state, **kwargs)
except (SimUnsupportedError, SimCCallError) as e:
l.debug("Errored on path %s after %d instructions", state, e.executed_instruction_count)
try:
if e.executed_instruction_count:
return self._delayed_oppology(simgr, state, e, **kwargs)
else:
return self._oppologize(simgr, state, state.copy(), **kwargs)
except exc_list: #pylint:disable=broad-except
l.error("Oppologizer hit an error while trying to perform repairs", exc_info=True)
raise e
except Exception: #pylint:disable=broad-except
l.error("Original block hit an unsupported error", exc_info=True)
raise
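# Hedged usage sketch (illustration only, not part of the original module): attaching the
# Oppologist to a simulation manager so states that hit unsupported instructions are
# retried through unicorn one instruction at a time. The binary path is hypothetical and
# the import is kept local to avoid a circular import at module load time.
def _example_oppologist_usage(binary_path="/bin/true"):
    import angr
    project = angr.Project(binary_path, auto_load_libs=False)
    simgr = project.factory.simulation_manager()
    simgr.use_technique(Oppologist())
    simgr.run()
    return simgr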
| bsd-2-clause |
fwpz/WeiPython | mysite/wsgi.py | 27 | 1134 | """
WSGI config for mysite project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit |
xbmc/atv2 | xbmc/lib/libPython/Python/Mac/Tools/IDE/Wkeys.py | 4 | 1324 | spacekey = ' '
returnkey = '\r'
tabkey = '\t'
enterkey = '\003'
backspacekey = '\010'
deletekey = '\177'
clearkey = '\033'
helpkey = '\005'
leftarrowkey = '\034'
rightarrowkey = '\035'
uparrowkey = '\036'
downarrowkey = '\037'
arrowkeys = [leftarrowkey, rightarrowkey, uparrowkey, downarrowkey]
topkey = '\001'
bottomkey = '\004'
pageupkey = '\013'
pagedownkey = '\014'
scrollkeys = [topkey, bottomkey, pageupkey, pagedownkey]
navigationkeys = arrowkeys + scrollkeys
keycodes = {
"space" : ' ',
"return" : '\r',
"tab" : '\t',
"enter" : '\003',
"backspace" : '\010',
"delete" : '\177',
"help" : '\005',
"leftarrow" : '\034',
"rightarrow" : '\035',
"uparrow" : '\036',
"downarrow" : '\037',
"top" : '\001',
"bottom" : '\004',
"pageup" : '\013',
"pagedown" : '\014'
}
keynames = {}
for k, v in keycodes.items():
keynames[v] = k
del k, v
| gpl-2.0 |
espenfjo/android_kernel_samsung_n8000 | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
scottferg/web-console | django/contrib/admin/models.py | 10 | 2189 | from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.contrib.admin.util import quote
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
from django.utils.safestring import mark_safe
ADDITION = 1
CHANGE = 2
DELETION = 3
class LogEntryManager(models.Manager):
def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):
e = self.model(None, None, user_id, content_type_id, smart_unicode(object_id), object_repr[:200], action_flag, change_message)
e.save()
class LogEntry(models.Model):
action_time = models.DateTimeField(_('action time'), auto_now=True)
user = models.ForeignKey(User)
content_type = models.ForeignKey(ContentType, blank=True, null=True)
object_id = models.TextField(_('object id'), blank=True, null=True)
object_repr = models.CharField(_('object repr'), max_length=200)
action_flag = models.PositiveSmallIntegerField(_('action flag'))
change_message = models.TextField(_('change message'), blank=True)
objects = LogEntryManager()
class Meta:
verbose_name = _('log entry')
verbose_name_plural = _('log entries')
db_table = 'django_admin_log'
ordering = ('-action_time',)
def __repr__(self):
return smart_unicode(self.action_time)
def is_addition(self):
return self.action_flag == ADDITION
def is_change(self):
return self.action_flag == CHANGE
def is_deletion(self):
return self.action_flag == DELETION
def get_edited_object(self):
"Returns the edited object represented by this log entry"
return self.content_type.get_object_for_this_type(pk=self.object_id)
def get_admin_url(self):
"""
Returns the admin URL to edit the object represented by this log entry.
This is relative to the Django admin index page.
"""
return mark_safe(u"%s/%s/%s/" % (self.content_type.app_label, self.content_type.model, quote(self.object_id)))
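# Hedged usage sketch (illustration only, not part of the original module): recording a
# change through the manager defined above; `request` and `obj` are hypothetical objects
# supplied by the caller.
def _example_log_change(request, obj, message=''):
    LogEntry.objects.log_action(
        user_id=request.user.pk,
        content_type_id=ContentType.objects.get_for_model(obj).pk,
        object_id=obj.pk,
        object_repr=smart_unicode(obj),
        action_flag=CHANGE,
        change_message=message,
    )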
| bsd-3-clause |
wagnerand/amo-validator | tests/test_content_overlays.py | 2 | 4049 | from helper import MockXPI
from validator.chromemanifest import ChromeManifest
import validator.testcases.content as content
from validator.errorbundler import ErrorBundle
def test_marking_overlays():
"""
Mark an overlay, then test that it marks the scripts within the overlay.
"""
err = ErrorBundle()
err.supported_versions = {}
c = ChromeManifest("""
content ns1 foo/
overlay chrome://foo chrome://ns1/content/main.xul
""", 'chrome.manifest')
err.save_resource('chrome.manifest', c)
err.save_resource('chrome.manifest_nopush', c)
xpi = MockXPI({'foo/main.xul': 'tests/resources/content/script_list.xul'})
content.test_packed_packages(err, xpi)
assert not err.failed()
marked_scripts = err.get_resource('marked_scripts')
assert marked_scripts == set(['chrome://ns1/foo.js',
'chrome://ns1/bar.js',
'chrome://asdf/foo.js'])
def test_marking_overlays_root_package():
"""
Tests that '/' resolves correctly as a chrome content package.
"""
err = ErrorBundle()
err.supported_versions = {}
manifest = ChromeManifest("""
content ns1 /
overlay chrome://foo chrome://ns1/content/main.xul
""", 'chrome.manifest')
err.save_resource('chrome.manifest', manifest)
err.save_resource('chrome.manifest_nopush', manifest)
xpi = MockXPI({'main.xul': 'tests/resources/content/script_list.xul'})
content.test_packed_packages(err, xpi)
assert not err.failed()
marked_scripts = err.get_resource('marked_scripts')
assert marked_scripts == set(['chrome://ns1/foo.js',
'chrome://ns1/bar.js',
'chrome://asdf/foo.js'])
def test_marking_overlays_no_overlay():
"""
Test that unmarked overlays don't mark scripts as being potentially
pollutable.
"""
err = ErrorBundle()
err.supported_versions = {}
c = ChromeManifest("""
content ns1 foo/
#overlay chrome://foo chrome://ns1/main.xul
""", 'chrome.manifest')
err.save_resource('chrome.manifest', c)
err.save_resource('chrome.manifest_nopush', c)
xpi = MockXPI({'foo/main.xul': 'tests/resources/content/script_list.xul'})
content.test_packed_packages(err, xpi)
assert not err.failed()
marked_scripts = err.get_resource('marked_scripts')
print marked_scripts
assert not marked_scripts
def test_marking_overlays_subdir():
"""
Mark an overlay in a subdirectory, then test that it marks the scripts
within the overlay. Make sure it properly figures out relative URLs.
"""
err = ErrorBundle()
err.supported_versions = {}
c = ChromeManifest("""
content ns1 foo/
overlay chrome://foo chrome://ns1/content/subdir/main.xul
""", 'chrome.manifest')
err.save_resource('chrome.manifest', c)
err.save_resource('chrome.manifest_nopush', c)
xpi = MockXPI({'foo/subdir/main.xul':
'tests/resources/content/script_list.xul'})
content.test_packed_packages(err, xpi)
assert not err.failed()
marked_scripts = err.get_resource('marked_scripts')
print marked_scripts
assert marked_scripts
assert marked_scripts == set(['chrome://ns1/subdir/foo.js',
'chrome://ns1/bar.js',
'chrome://asdf/foo.js'])
def test_script_scraping():
"""Test that scripts are gathered up during the validation process."""
err = ErrorBundle()
err.supported_versions = {}
xpi = MockXPI({'foo.js': 'tests/resources/junk.xpi',
'dir/bar.jsm': 'tests/resources/junk.xpi'})
content.test_packed_packages(err, xpi)
assert not err.failed()
scripts = err.get_resource('scripts')
print scripts
assert scripts
for bundle in scripts:
assert 'foo.js' in bundle['scripts']
assert 'dir/bar.jsm' in bundle['scripts']
assert bundle['package'] == xpi
assert bundle['state'] == []
| bsd-3-clause |
jimyx17/jimh | headphones/getXldProfile.py | 2 | 8286 | import os.path
import plistlib
import sys
import xml.parsers.expat as expat
import commands
from headphones import logger
def getXldProfile(xldProfile):
xldProfileNotFound = xldProfile
expandedPath = os.path.expanduser('~/Library/Preferences/jp.tmkk.XLD.plist')
try:
preferences = plistlib.Plist.fromFile(expandedPath)
except (expat.ExpatError):
os.system("/usr/bin/plutil -convert xml1 %s" % expandedPath )
try:
preferences = plistlib.Plist.fromFile(expandedPath)
except (ImportError):
os.system("/usr/bin/plutil -convert binary1 %s" % expandedPath )
logger.info('The plist at "%s" has a date in it, and therefore is not useable.' % expandedPath)
return(xldProfileNotFound, None, None)
except (ImportError):
logger.info('The plist at "%s" has a date in it, and therefore is not useable.' % expandedPath)
except:
logger.info('Unexpected error:', sys.exc_info()[0])
return(xldProfileNotFound, None, None)
xldProfile = xldProfile.lower()
profiles = preferences.get('Profiles')
for profile in profiles:
profilename = profile.get('XLDProfileManager_ProfileName')
xldProfileForCmd = profilename
profilename = profilename.lower()
xldFormat = None
xldBitrate = None
if profilename == xldProfile:
OutputFormatName = profile.get('OutputFormatName')
ShortDesc = profile.get('ShortDesc')
# Determine format and bitrate
if OutputFormatName == 'WAV':
xldFormat = 'wav'
elif OutputFormatName == 'AIFF':
xldFormat = 'aiff'
elif 'PCM' in OutputFormatName:
xldFormat = 'pcm'
elif OutputFormatName == 'Wave64':
xldFormat = 'w64'
elif OutputFormatName == 'MPEG-4 AAC':
xldFormat = 'm4a'
if 'CBR' in ShortDesc or 'ABR' in ShortDesc or 'CVBR' in ShortDesc:
xldBitrate = int(profile.get('XLDAacOutput2_Bitrate'))
elif 'TVBR' in ShortDesc:
XLDAacOutput2_VBRQuality = int(profile.get('XLDAacOutput2_VBRQuality'))
if XLDAacOutput2_VBRQuality > 122:
xldBitrate = 320
elif XLDAacOutput2_VBRQuality > 113 and XLDAacOutput2_VBRQuality <= 122:
xldBitrate = 285
elif XLDAacOutput2_VBRQuality > 104 and XLDAacOutput2_VBRQuality <= 113:
xldBitrate = 255
elif XLDAacOutput2_VBRQuality > 95 and XLDAacOutput2_VBRQuality <= 104:
xldBitrate = 225
elif XLDAacOutput2_VBRQuality > 86 and XLDAacOutput2_VBRQuality <= 95:
xldBitrate = 195
elif XLDAacOutput2_VBRQuality > 77 and XLDAacOutput2_VBRQuality <= 86:
xldBitrate = 165
elif XLDAacOutput2_VBRQuality > 68 and XLDAacOutput2_VBRQuality <= 77:
xldBitrate = 150
elif XLDAacOutput2_VBRQuality > 58 and XLDAacOutput2_VBRQuality <= 68:
xldBitrate = 135
elif XLDAacOutput2_VBRQuality > 49 and XLDAacOutput2_VBRQuality <= 58:
xldBitrate = 115
elif XLDAacOutput2_VBRQuality > 40 and XLDAacOutput2_VBRQuality <= 49:
xldBitrate = 105
elif XLDAacOutput2_VBRQuality > 31 and XLDAacOutput2_VBRQuality <= 40:
xldBitrate = 95
elif XLDAacOutput2_VBRQuality > 22 and XLDAacOutput2_VBRQuality <= 31:
xldBitrate = 80
elif XLDAacOutput2_VBRQuality > 13 and XLDAacOutput2_VBRQuality <= 22:
xldBitrate = 75
elif XLDAacOutput2_VBRQuality > 4 and XLDAacOutput2_VBRQuality <= 13:
xldBitrate = 45
elif XLDAacOutput2_VBRQuality >= 0 and XLDAacOutput2_VBRQuality <= 4:
xldBitrate = 40
elif OutputFormatName == 'Apple Lossless':
xldFormat = 'm4a'
elif OutputFormatName == 'FLAC':
if 'ogg' in ShortDesc:
xldFormat = 'oga'
else:
xldFormat = 'flac'
elif OutputFormatName == 'MPEG-4 HE-AAC':
xldFormat = 'm4a'
xldBitrate = int(profile.get('Bitrate'))
elif OutputFormatName == 'LAME MP3':
xldFormat = 'mp3'
if 'VBR' in ShortDesc:
VbrQuality = float(profile.get('VbrQuality'))
if VbrQuality < 1:
xldBitrate = 260
elif VbrQuality >= 1 and VbrQuality < 2:
xldBitrate = 250
elif VbrQuality >= 2 and VbrQuality < 3:
xldBitrate = 210
elif VbrQuality >= 3 and VbrQuality < 4:
xldBitrate = 195
elif VbrQuality >= 4 and VbrQuality < 5:
xldBitrate = 185
elif VbrQuality >= 5 and VbrQuality < 6:
xldBitrate = 150
elif VbrQuality >= 6 and VbrQuality < 7:
xldBitrate = 130
elif VbrQuality >= 7 and VbrQuality < 8:
xldBitrate = 120
elif VbrQuality >= 8 and VbrQuality < 9:
xldBitrate = 105
elif VbrQuality >= 9:
xldBitrate = 85
elif 'CBR' in ShortDesc:
xldBitrate = int(profile.get('Bitrate'))
elif 'ABR' in ShortDesc:
xldBitrate = int(profile.get('AbrBitrate'))
elif OutputFormatName == 'Opus':
xldFormat = 'opus'
xldBitrate = int(profile.get('XLDOpusOutput_Bitrate'))
elif OutputFormatName == 'Ogg Vorbis':
xldFormat = 'ogg'
XLDVorbisOutput_Quality = float(profile.get('XLDVorbisOutput_Quality'))
if XLDVorbisOutput_Quality <= -2:
xldBitrate = 32
elif XLDVorbisOutput_Quality > -2 and XLDVorbisOutput_Quality <= -1:
xldBitrate = 48
elif XLDVorbisOutput_Quality > -1 and XLDVorbisOutput_Quality <= 0:
xldBitrate = 64
elif XLDVorbisOutput_Quality > 0 and XLDVorbisOutput_Quality <= 1:
xldBitrate = 80
elif XLDVorbisOutput_Quality > 1 and XLDVorbisOutput_Quality <= 2:
xldBitrate = 96
elif XLDVorbisOutput_Quality > 2 and XLDVorbisOutput_Quality <= 3:
xldBitrate = 112
elif XLDVorbisOutput_Quality > 3 and XLDVorbisOutput_Quality <= 4:
xldBitrate = 128
elif XLDVorbisOutput_Quality > 4 and XLDVorbisOutput_Quality <= 5:
xldBitrate = 160
elif XLDVorbisOutput_Quality > 5 and XLDVorbisOutput_Quality <= 6:
xldBitrate = 192
elif XLDVorbisOutput_Quality > 6 and XLDVorbisOutput_Quality <= 7:
xldBitrate = 224
elif XLDVorbisOutput_Quality > 7 and XLDVorbisOutput_Quality <= 8:
xldBitrate = 256
elif XLDVorbisOutput_Quality > 8 and XLDVorbisOutput_Quality <= 9:
xldBitrate = 320
elif XLDVorbisOutput_Quality > 9:
xldBitrate = 500
elif OutputFormatName == 'WavPack':
xldFormat = 'wv'
if ShortDesc != 'normal':
xldBitrate = int(profile.get('XLDWavpackOutput_BitRate'))
# Lossless
if xldFormat and not xldBitrate:
xldBitrate = 500
return(xldProfileForCmd, xldFormat, xldBitrate)
return(xldProfileNotFound, None, None) | gpl-3.0 |